/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * - Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION "\n";

struct mlx5_ib_event_work {
	struct work_struct	work;
	struct mlx5_core_dev	*dev;
	void			*context;
	enum mlx5_dev_event	event;
	unsigned long		param;
};

enum {
	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static struct workqueue_struct *mlx5_ib_event_wq;
static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
static LIST_HEAD(mlx5_ib_dev_list);
/*
 * This mutex should be held when accessing either of the above lists
 */
static DEFINE_MUTEX(mlx5_ib_multiport_mutex);

/* We can't use an array for xlt_emergency_page because dma_map_single
 * doesn't work on kernel modules memory
 */
static unsigned
long xlt_emergency_page; 107 static struct mutex xlt_emergency_page_mutex; 108 109 struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi) 110 { 111 struct mlx5_ib_dev *dev; 112 113 mutex_lock(&mlx5_ib_multiport_mutex); 114 dev = mpi->ibdev; 115 mutex_unlock(&mlx5_ib_multiport_mutex); 116 return dev; 117 } 118 119 static enum rdma_link_layer 120 mlx5_port_type_cap_to_rdma_ll(int port_type_cap) 121 { 122 switch (port_type_cap) { 123 case MLX5_CAP_PORT_TYPE_IB: 124 return IB_LINK_LAYER_INFINIBAND; 125 case MLX5_CAP_PORT_TYPE_ETH: 126 return IB_LINK_LAYER_ETHERNET; 127 default: 128 return IB_LINK_LAYER_UNSPECIFIED; 129 } 130 } 131 132 static enum rdma_link_layer 133 mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num) 134 { 135 struct mlx5_ib_dev *dev = to_mdev(device); 136 int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type); 137 138 return mlx5_port_type_cap_to_rdma_ll(port_type_cap); 139 } 140 141 static int get_port_state(struct ib_device *ibdev, 142 u8 port_num, 143 enum ib_port_state *state) 144 { 145 struct ib_port_attr attr; 146 int ret; 147 148 memset(&attr, 0, sizeof(attr)); 149 ret = ibdev->query_port(ibdev, port_num, &attr); 150 if (!ret) 151 *state = attr.state; 152 return ret; 153 } 154 155 static int mlx5_netdev_event(struct notifier_block *this, 156 unsigned long event, void *ptr) 157 { 158 struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb); 159 struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 160 u8 port_num = roce->native_port_num; 161 struct mlx5_core_dev *mdev; 162 struct mlx5_ib_dev *ibdev; 163 164 ibdev = roce->dev; 165 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL); 166 if (!mdev) 167 return NOTIFY_DONE; 168 169 switch (event) { 170 case NETDEV_REGISTER: 171 case NETDEV_UNREGISTER: 172 write_lock(&roce->netdev_lock); 173 if (ibdev->rep) { 174 struct mlx5_eswitch *esw = ibdev->mdev->priv.eswitch; 175 struct net_device *rep_ndev; 176 177 rep_ndev = mlx5_ib_get_rep_netdev(esw, 178 ibdev->rep->vport); 179 if (rep_ndev == ndev) 180 roce->netdev = (event == NETDEV_UNREGISTER) ? 181 NULL : ndev; 182 } else if (ndev->dev.parent == &mdev->pdev->dev) { 183 roce->netdev = (event == NETDEV_UNREGISTER) ? 
184 NULL : ndev; 185 } 186 write_unlock(&roce->netdev_lock); 187 break; 188 189 case NETDEV_CHANGE: 190 case NETDEV_UP: 191 case NETDEV_DOWN: { 192 struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev); 193 struct net_device *upper = NULL; 194 195 if (lag_ndev) { 196 upper = netdev_master_upper_dev_get(lag_ndev); 197 dev_put(lag_ndev); 198 } 199 200 if ((upper == ndev || (!upper && ndev == roce->netdev)) 201 && ibdev->ib_active) { 202 struct ib_event ibev = { }; 203 enum ib_port_state port_state; 204 205 if (get_port_state(&ibdev->ib_dev, port_num, 206 &port_state)) 207 goto done; 208 209 if (roce->last_port_state == port_state) 210 goto done; 211 212 roce->last_port_state = port_state; 213 ibev.device = &ibdev->ib_dev; 214 if (port_state == IB_PORT_DOWN) 215 ibev.event = IB_EVENT_PORT_ERR; 216 else if (port_state == IB_PORT_ACTIVE) 217 ibev.event = IB_EVENT_PORT_ACTIVE; 218 else 219 goto done; 220 221 ibev.element.port_num = port_num; 222 ib_dispatch_event(&ibev); 223 } 224 break; 225 } 226 227 default: 228 break; 229 } 230 done: 231 mlx5_ib_put_native_port_mdev(ibdev, port_num); 232 return NOTIFY_DONE; 233 } 234 235 static struct net_device *mlx5_ib_get_netdev(struct ib_device *device, 236 u8 port_num) 237 { 238 struct mlx5_ib_dev *ibdev = to_mdev(device); 239 struct net_device *ndev; 240 struct mlx5_core_dev *mdev; 241 242 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL); 243 if (!mdev) 244 return NULL; 245 246 ndev = mlx5_lag_get_roce_netdev(mdev); 247 if (ndev) 248 goto out; 249 250 /* Ensure ndev does not disappear before we invoke dev_hold() 251 */ 252 read_lock(&ibdev->roce[port_num - 1].netdev_lock); 253 ndev = ibdev->roce[port_num - 1].netdev; 254 if (ndev) 255 dev_hold(ndev); 256 read_unlock(&ibdev->roce[port_num - 1].netdev_lock); 257 258 out: 259 mlx5_ib_put_native_port_mdev(ibdev, port_num); 260 return ndev; 261 } 262 263 struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev, 264 u8 ib_port_num, 265 u8 *native_port_num) 266 { 267 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev, 268 ib_port_num); 269 struct mlx5_core_dev *mdev = NULL; 270 struct mlx5_ib_multiport_info *mpi; 271 struct mlx5_ib_port *port; 272 273 if (!mlx5_core_mp_enabled(ibdev->mdev) || 274 ll != IB_LINK_LAYER_ETHERNET) { 275 if (native_port_num) 276 *native_port_num = ib_port_num; 277 return ibdev->mdev; 278 } 279 280 if (native_port_num) 281 *native_port_num = 1; 282 283 port = &ibdev->port[ib_port_num - 1]; 284 if (!port) 285 return NULL; 286 287 spin_lock(&port->mp.mpi_lock); 288 mpi = ibdev->port[ib_port_num - 1].mp.mpi; 289 if (mpi && !mpi->unaffiliate) { 290 mdev = mpi->mdev; 291 /* If it's the master no need to refcount, it'll exist 292 * as long as the ib_dev exists. 
293 */ 294 if (!mpi->is_master) 295 mpi->mdev_refcnt++; 296 } 297 spin_unlock(&port->mp.mpi_lock); 298 299 return mdev; 300 } 301 302 void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num) 303 { 304 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev, 305 port_num); 306 struct mlx5_ib_multiport_info *mpi; 307 struct mlx5_ib_port *port; 308 309 if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET) 310 return; 311 312 port = &ibdev->port[port_num - 1]; 313 314 spin_lock(&port->mp.mpi_lock); 315 mpi = ibdev->port[port_num - 1].mp.mpi; 316 if (mpi->is_master) 317 goto out; 318 319 mpi->mdev_refcnt--; 320 if (mpi->unaffiliate) 321 complete(&mpi->unref_comp); 322 out: 323 spin_unlock(&port->mp.mpi_lock); 324 } 325 326 static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed, 327 u8 *active_width) 328 { 329 switch (eth_proto_oper) { 330 case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII): 331 case MLX5E_PROT_MASK(MLX5E_1000BASE_KX): 332 case MLX5E_PROT_MASK(MLX5E_100BASE_TX): 333 case MLX5E_PROT_MASK(MLX5E_1000BASE_T): 334 *active_width = IB_WIDTH_1X; 335 *active_speed = IB_SPEED_SDR; 336 break; 337 case MLX5E_PROT_MASK(MLX5E_10GBASE_T): 338 case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4): 339 case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4): 340 case MLX5E_PROT_MASK(MLX5E_10GBASE_KR): 341 case MLX5E_PROT_MASK(MLX5E_10GBASE_CR): 342 case MLX5E_PROT_MASK(MLX5E_10GBASE_SR): 343 case MLX5E_PROT_MASK(MLX5E_10GBASE_ER): 344 *active_width = IB_WIDTH_1X; 345 *active_speed = IB_SPEED_QDR; 346 break; 347 case MLX5E_PROT_MASK(MLX5E_25GBASE_CR): 348 case MLX5E_PROT_MASK(MLX5E_25GBASE_KR): 349 case MLX5E_PROT_MASK(MLX5E_25GBASE_SR): 350 *active_width = IB_WIDTH_1X; 351 *active_speed = IB_SPEED_EDR; 352 break; 353 case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4): 354 case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4): 355 case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4): 356 case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4): 357 *active_width = IB_WIDTH_4X; 358 *active_speed = IB_SPEED_QDR; 359 break; 360 case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2): 361 case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2): 362 case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2): 363 *active_width = IB_WIDTH_1X; 364 *active_speed = IB_SPEED_HDR; 365 break; 366 case MLX5E_PROT_MASK(MLX5E_56GBASE_R4): 367 *active_width = IB_WIDTH_4X; 368 *active_speed = IB_SPEED_FDR; 369 break; 370 case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4): 371 case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4): 372 case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4): 373 case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4): 374 *active_width = IB_WIDTH_4X; 375 *active_speed = IB_SPEED_EDR; 376 break; 377 default: 378 return -EINVAL; 379 } 380 381 return 0; 382 } 383 384 static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, 385 struct ib_port_attr *props) 386 { 387 struct mlx5_ib_dev *dev = to_mdev(device); 388 struct mlx5_core_dev *mdev; 389 struct net_device *ndev, *upper; 390 enum ib_mtu ndev_ib_mtu; 391 bool put_mdev = true; 392 u16 qkey_viol_cntr; 393 u32 eth_prot_oper; 394 u8 mdev_port_num; 395 int err; 396 397 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num); 398 if (!mdev) { 399 /* This means the port isn't affiliated yet. Get the 400 * info for the master port instead. 401 */ 402 put_mdev = false; 403 mdev = dev->mdev; 404 mdev_port_num = 1; 405 port_num = 1; 406 } 407 408 /* Possible bad flows are checked before filling out props so in case 409 * of an error it will still be zeroed out. 
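	 * (For example, if the mlx5_query_port_eth_proto_oper() call below
	 * fails we jump straight to 'out', so props keeps the zeroed values
	 * provided by the caller.)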
	 */
	err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper,
					     mdev_port_num);
	if (err)
		goto out;

	props->active_width = IB_WIDTH_4X;
	props->active_speed = IB_SPEED_QDR;

	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
				 &props->active_width);

	props->port_cap_flags |= IB_PORT_CM_SUP;
	props->ip_gids = true;

	props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
					   roce_address_table_size);
	props->max_mtu = IB_MTU_4096;
	props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
	props->pkey_tbl_len = 1;
	props->state = IB_PORT_DOWN;
	props->phys_state = 3;

	mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
	props->qkey_viol_cntr = qkey_viol_cntr;

	/* If this is a stub query for an unaffiliated port stop here */
	if (!put_mdev)
		goto out;

	ndev = mlx5_ib_get_netdev(device, port_num);
	if (!ndev)
		goto out;

	if (mlx5_lag_is_active(dev->mdev)) {
		rcu_read_lock();
		upper = netdev_master_upper_dev_get_rcu(ndev);
		if (upper) {
			dev_put(ndev);
			ndev = upper;
			dev_hold(ndev);
		}
		rcu_read_unlock();
	}

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);

	dev_put(ndev);

	props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
out:
	if (put_mdev)
		mlx5_ib_put_native_port_mdev(dev, port_num);
	return err;
}

static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
			 unsigned int index, const union ib_gid *gid,
			 const struct ib_gid_attr *attr)
{
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	u8 roce_version = 0;
	u8 roce_l3_type = 0;
	bool vlan = false;
	u8 mac[ETH_ALEN];
	u16 vlan_id = 0;

	if (gid) {
		gid_type = attr->gid_type;
		ether_addr_copy(mac, attr->ndev->dev_addr);

		if (is_vlan_dev(attr->ndev)) {
			vlan = true;
			vlan_id = vlan_dev_vlan_id(attr->ndev);
		}
	}

	switch (gid_type) {
	case IB_GID_TYPE_IB:
		roce_version = MLX5_ROCE_VERSION_1;
		break;
	case IB_GID_TYPE_ROCE_UDP_ENCAP:
		roce_version = MLX5_ROCE_VERSION_2;
		if (ipv6_addr_v4mapped((void *)gid))
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
		else
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
		break;

	default:
		mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
	}

	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
				      roce_l3_type, gid->raw, mac, vlan,
				      vlan_id, port_num);
}

static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(to_mdev(attr->device), attr->port_num,
			     attr->index, &attr->gid, attr);
}

static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(to_mdev(attr->device), attr->port_num,
			     attr->index, NULL, NULL);
}

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
			       const struct ib_gid_attr *attr)
{
	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return 0;

	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
	return 0;
}

enum {
	MLX5_VPORT_ACCESS_METHOD_MAD,
MLX5_VPORT_ACCESS_METHOD_HCA, 546 MLX5_VPORT_ACCESS_METHOD_NIC, 547 }; 548 549 static int mlx5_get_vport_access_method(struct ib_device *ibdev) 550 { 551 if (mlx5_use_mad_ifc(to_mdev(ibdev))) 552 return MLX5_VPORT_ACCESS_METHOD_MAD; 553 554 if (mlx5_ib_port_link_layer(ibdev, 1) == 555 IB_LINK_LAYER_ETHERNET) 556 return MLX5_VPORT_ACCESS_METHOD_NIC; 557 558 return MLX5_VPORT_ACCESS_METHOD_HCA; 559 } 560 561 static void get_atomic_caps(struct mlx5_ib_dev *dev, 562 u8 atomic_size_qp, 563 struct ib_device_attr *props) 564 { 565 u8 tmp; 566 u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations); 567 u8 atomic_req_8B_endianness_mode = 568 MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode); 569 570 /* Check if HW supports 8 bytes standard atomic operations and capable 571 * of host endianness respond 572 */ 573 tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD; 574 if (((atomic_operations & tmp) == tmp) && 575 (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) && 576 (atomic_req_8B_endianness_mode)) { 577 props->atomic_cap = IB_ATOMIC_HCA; 578 } else { 579 props->atomic_cap = IB_ATOMIC_NONE; 580 } 581 } 582 583 static void get_atomic_caps_qp(struct mlx5_ib_dev *dev, 584 struct ib_device_attr *props) 585 { 586 u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp); 587 588 get_atomic_caps(dev, atomic_size_qp, props); 589 } 590 591 static void get_atomic_caps_dc(struct mlx5_ib_dev *dev, 592 struct ib_device_attr *props) 593 { 594 u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc); 595 596 get_atomic_caps(dev, atomic_size_qp, props); 597 } 598 599 bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev) 600 { 601 struct ib_device_attr props = {}; 602 603 get_atomic_caps_dc(dev, &props); 604 return (props.atomic_cap == IB_ATOMIC_HCA) ? 
true : false; 605 } 606 static int mlx5_query_system_image_guid(struct ib_device *ibdev, 607 __be64 *sys_image_guid) 608 { 609 struct mlx5_ib_dev *dev = to_mdev(ibdev); 610 struct mlx5_core_dev *mdev = dev->mdev; 611 u64 tmp; 612 int err; 613 614 switch (mlx5_get_vport_access_method(ibdev)) { 615 case MLX5_VPORT_ACCESS_METHOD_MAD: 616 return mlx5_query_mad_ifc_system_image_guid(ibdev, 617 sys_image_guid); 618 619 case MLX5_VPORT_ACCESS_METHOD_HCA: 620 err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp); 621 break; 622 623 case MLX5_VPORT_ACCESS_METHOD_NIC: 624 err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp); 625 break; 626 627 default: 628 return -EINVAL; 629 } 630 631 if (!err) 632 *sys_image_guid = cpu_to_be64(tmp); 633 634 return err; 635 636 } 637 638 static int mlx5_query_max_pkeys(struct ib_device *ibdev, 639 u16 *max_pkeys) 640 { 641 struct mlx5_ib_dev *dev = to_mdev(ibdev); 642 struct mlx5_core_dev *mdev = dev->mdev; 643 644 switch (mlx5_get_vport_access_method(ibdev)) { 645 case MLX5_VPORT_ACCESS_METHOD_MAD: 646 return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys); 647 648 case MLX5_VPORT_ACCESS_METHOD_HCA: 649 case MLX5_VPORT_ACCESS_METHOD_NIC: 650 *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, 651 pkey_table_size)); 652 return 0; 653 654 default: 655 return -EINVAL; 656 } 657 } 658 659 static int mlx5_query_vendor_id(struct ib_device *ibdev, 660 u32 *vendor_id) 661 { 662 struct mlx5_ib_dev *dev = to_mdev(ibdev); 663 664 switch (mlx5_get_vport_access_method(ibdev)) { 665 case MLX5_VPORT_ACCESS_METHOD_MAD: 666 return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id); 667 668 case MLX5_VPORT_ACCESS_METHOD_HCA: 669 case MLX5_VPORT_ACCESS_METHOD_NIC: 670 return mlx5_core_query_vendor_id(dev->mdev, vendor_id); 671 672 default: 673 return -EINVAL; 674 } 675 } 676 677 static int mlx5_query_node_guid(struct mlx5_ib_dev *dev, 678 __be64 *node_guid) 679 { 680 u64 tmp; 681 int err; 682 683 switch (mlx5_get_vport_access_method(&dev->ib_dev)) { 684 case MLX5_VPORT_ACCESS_METHOD_MAD: 685 return mlx5_query_mad_ifc_node_guid(dev, node_guid); 686 687 case MLX5_VPORT_ACCESS_METHOD_HCA: 688 err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp); 689 break; 690 691 case MLX5_VPORT_ACCESS_METHOD_NIC: 692 err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp); 693 break; 694 695 default: 696 return -EINVAL; 697 } 698 699 if (!err) 700 *node_guid = cpu_to_be64(tmp); 701 702 return err; 703 } 704 705 struct mlx5_reg_node_desc { 706 u8 desc[IB_DEVICE_NODE_DESC_MAX]; 707 }; 708 709 static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc) 710 { 711 struct mlx5_reg_node_desc in; 712 713 if (mlx5_use_mad_ifc(dev)) 714 return mlx5_query_mad_ifc_node_desc(dev, node_desc); 715 716 memset(&in, 0, sizeof(in)); 717 718 return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc, 719 sizeof(struct mlx5_reg_node_desc), 720 MLX5_REG_NODE_DESC, 0, 0); 721 } 722 723 static int mlx5_ib_query_device(struct ib_device *ibdev, 724 struct ib_device_attr *props, 725 struct ib_udata *uhw) 726 { 727 struct mlx5_ib_dev *dev = to_mdev(ibdev); 728 struct mlx5_core_dev *mdev = dev->mdev; 729 int err = -ENOMEM; 730 int max_sq_desc; 731 int max_rq_sg; 732 int max_sq_sg; 733 u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz); 734 bool raw_support = !mlx5_core_mp_enabled(mdev); 735 struct mlx5_ib_query_device_resp resp = {}; 736 size_t resp_len; 737 u64 max_tso; 738 739 resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length); 740 if (uhw->outlen && uhw->outlen < resp_len) 741 return 
-EINVAL; 742 else 743 resp.response_length = resp_len; 744 745 if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen)) 746 return -EINVAL; 747 748 memset(props, 0, sizeof(*props)); 749 err = mlx5_query_system_image_guid(ibdev, 750 &props->sys_image_guid); 751 if (err) 752 return err; 753 754 err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys); 755 if (err) 756 return err; 757 758 err = mlx5_query_vendor_id(ibdev, &props->vendor_id); 759 if (err) 760 return err; 761 762 props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) | 763 (fw_rev_min(dev->mdev) << 16) | 764 fw_rev_sub(dev->mdev); 765 props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | 766 IB_DEVICE_PORT_ACTIVE_EVENT | 767 IB_DEVICE_SYS_IMAGE_GUID | 768 IB_DEVICE_RC_RNR_NAK_GEN; 769 770 if (MLX5_CAP_GEN(mdev, pkv)) 771 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; 772 if (MLX5_CAP_GEN(mdev, qkv)) 773 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; 774 if (MLX5_CAP_GEN(mdev, apm)) 775 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; 776 if (MLX5_CAP_GEN(mdev, xrc)) 777 props->device_cap_flags |= IB_DEVICE_XRC; 778 if (MLX5_CAP_GEN(mdev, imaicl)) { 779 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW | 780 IB_DEVICE_MEM_WINDOW_TYPE_2B; 781 props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); 782 /* We support 'Gappy' memory registration too */ 783 props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG; 784 } 785 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; 786 if (MLX5_CAP_GEN(mdev, sho)) { 787 props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER; 788 /* At this stage no support for signature handover */ 789 props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 | 790 IB_PROT_T10DIF_TYPE_2 | 791 IB_PROT_T10DIF_TYPE_3; 792 props->sig_guard_cap = IB_GUARD_T10DIF_CRC | 793 IB_GUARD_T10DIF_CSUM; 794 } 795 if (MLX5_CAP_GEN(mdev, block_lb_mc)) 796 props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; 797 798 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) { 799 if (MLX5_CAP_ETH(mdev, csum_cap)) { 800 /* Legacy bit to support old userspace libraries */ 801 props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM; 802 props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM; 803 } 804 805 if (MLX5_CAP_ETH(dev->mdev, vlan_cap)) 806 props->raw_packet_caps |= 807 IB_RAW_PACKET_CAP_CVLAN_STRIPPING; 808 809 if (field_avail(typeof(resp), tso_caps, uhw->outlen)) { 810 max_tso = MLX5_CAP_ETH(mdev, max_lso_cap); 811 if (max_tso) { 812 resp.tso_caps.max_tso = 1 << max_tso; 813 resp.tso_caps.supported_qpts |= 814 1 << IB_QPT_RAW_PACKET; 815 resp.response_length += sizeof(resp.tso_caps); 816 } 817 } 818 819 if (field_avail(typeof(resp), rss_caps, uhw->outlen)) { 820 resp.rss_caps.rx_hash_function = 821 MLX5_RX_HASH_FUNC_TOEPLITZ; 822 resp.rss_caps.rx_hash_fields_mask = 823 MLX5_RX_HASH_SRC_IPV4 | 824 MLX5_RX_HASH_DST_IPV4 | 825 MLX5_RX_HASH_SRC_IPV6 | 826 MLX5_RX_HASH_DST_IPV6 | 827 MLX5_RX_HASH_SRC_PORT_TCP | 828 MLX5_RX_HASH_DST_PORT_TCP | 829 MLX5_RX_HASH_SRC_PORT_UDP | 830 MLX5_RX_HASH_DST_PORT_UDP | 831 MLX5_RX_HASH_INNER; 832 if (mlx5_accel_ipsec_device_caps(dev->mdev) & 833 MLX5_ACCEL_IPSEC_CAP_DEVICE) 834 resp.rss_caps.rx_hash_fields_mask |= 835 MLX5_RX_HASH_IPSEC_SPI; 836 resp.response_length += sizeof(resp.rss_caps); 837 } 838 } else { 839 if (field_avail(typeof(resp), tso_caps, uhw->outlen)) 840 resp.response_length += sizeof(resp.tso_caps); 841 if (field_avail(typeof(resp), rss_caps, uhw->outlen)) 842 resp.response_length += sizeof(resp.rss_caps); 843 } 844 845 if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) { 846 
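		/* basic IPoIB offloads: advertise UD checksum offload and UD TSO */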
props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; 847 props->device_cap_flags |= IB_DEVICE_UD_TSO; 848 } 849 850 if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) && 851 MLX5_CAP_GEN(dev->mdev, general_notification_event) && 852 raw_support) 853 props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP; 854 855 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) && 856 MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap)) 857 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; 858 859 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && 860 MLX5_CAP_ETH(dev->mdev, scatter_fcs) && 861 raw_support) { 862 /* Legacy bit to support old userspace libraries */ 863 props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS; 864 props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS; 865 } 866 867 if (MLX5_CAP_DEV_MEM(mdev, memic)) { 868 props->max_dm_size = 869 MLX5_CAP_DEV_MEM(mdev, max_memic_size); 870 } 871 872 if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) 873 props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; 874 875 if (MLX5_CAP_GEN(mdev, end_pad)) 876 props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING; 877 878 props->vendor_part_id = mdev->pdev->device; 879 props->hw_ver = mdev->pdev->revision; 880 881 props->max_mr_size = ~0ull; 882 props->page_size_cap = ~(min_page_size - 1); 883 props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp); 884 props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); 885 max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) / 886 sizeof(struct mlx5_wqe_data_seg); 887 max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512); 888 max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) - 889 sizeof(struct mlx5_wqe_raddr_seg)) / 890 sizeof(struct mlx5_wqe_data_seg); 891 props->max_send_sge = max_sq_sg; 892 props->max_recv_sge = max_rq_sg; 893 props->max_sge_rd = MLX5_MAX_SGE_RD; 894 props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq); 895 props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1; 896 props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); 897 props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd); 898 props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp); 899 props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp); 900 props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq); 901 props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1; 902 props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay); 903 props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; 904 props->max_srq_sge = max_rq_sg - 1; 905 props->max_fast_reg_page_list_len = 906 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size); 907 get_atomic_caps_qp(dev, props); 908 props->masked_atomic_cap = IB_ATOMIC_NONE; 909 props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg); 910 props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg); 911 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * 912 props->max_mcast_grp; 913 props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */ 914 props->max_ah = INT_MAX; 915 props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz); 916 props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL; 917 918 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 919 if (MLX5_CAP_GEN(mdev, pg)) 920 props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING; 921 props->odp_caps = dev->odp_caps; 922 #endif 923 924 if (MLX5_CAP_GEN(mdev, cd)) 925 props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL; 926 927 if (!mlx5_core_is_pf(mdev)) 928 props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION; 929 930 if 
(mlx5_ib_port_link_layer(ibdev, 1) == 931 IB_LINK_LAYER_ETHERNET && raw_support) { 932 props->rss_caps.max_rwq_indirection_tables = 933 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt); 934 props->rss_caps.max_rwq_indirection_table_size = 935 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size); 936 props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET; 937 props->max_wq_type_rq = 938 1 << MLX5_CAP_GEN(dev->mdev, log_max_rq); 939 } 940 941 if (MLX5_CAP_GEN(mdev, tag_matching)) { 942 props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE; 943 props->tm_caps.max_num_tags = 944 (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1; 945 props->tm_caps.flags = IB_TM_CAP_RC; 946 props->tm_caps.max_ops = 947 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); 948 props->tm_caps.max_sge = MLX5_TM_MAX_SGE; 949 } 950 951 if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) { 952 props->cq_caps.max_cq_moderation_count = 953 MLX5_MAX_CQ_COUNT; 954 props->cq_caps.max_cq_moderation_period = 955 MLX5_MAX_CQ_PERIOD; 956 } 957 958 if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) { 959 resp.response_length += sizeof(resp.cqe_comp_caps); 960 961 if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) { 962 resp.cqe_comp_caps.max_num = 963 MLX5_CAP_GEN(dev->mdev, 964 cqe_compression_max_num); 965 966 resp.cqe_comp_caps.supported_format = 967 MLX5_IB_CQE_RES_FORMAT_HASH | 968 MLX5_IB_CQE_RES_FORMAT_CSUM; 969 970 if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index)) 971 resp.cqe_comp_caps.supported_format |= 972 MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX; 973 } 974 } 975 976 if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) && 977 raw_support) { 978 if (MLX5_CAP_QOS(mdev, packet_pacing) && 979 MLX5_CAP_GEN(mdev, qos)) { 980 resp.packet_pacing_caps.qp_rate_limit_max = 981 MLX5_CAP_QOS(mdev, packet_pacing_max_rate); 982 resp.packet_pacing_caps.qp_rate_limit_min = 983 MLX5_CAP_QOS(mdev, packet_pacing_min_rate); 984 resp.packet_pacing_caps.supported_qpts |= 985 1 << IB_QPT_RAW_PACKET; 986 if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) && 987 MLX5_CAP_QOS(mdev, packet_pacing_typical_size)) 988 resp.packet_pacing_caps.cap_flags |= 989 MLX5_IB_PP_SUPPORT_BURST; 990 } 991 resp.response_length += sizeof(resp.packet_pacing_caps); 992 } 993 994 if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes, 995 uhw->outlen)) { 996 if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe)) 997 resp.mlx5_ib_support_multi_pkt_send_wqes = 998 MLX5_IB_ALLOW_MPW; 999 1000 if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe)) 1001 resp.mlx5_ib_support_multi_pkt_send_wqes |= 1002 MLX5_IB_SUPPORT_EMPW; 1003 1004 resp.response_length += 1005 sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes); 1006 } 1007 1008 if (field_avail(typeof(resp), flags, uhw->outlen)) { 1009 resp.response_length += sizeof(resp.flags); 1010 1011 if (MLX5_CAP_GEN(mdev, cqe_compression_128)) 1012 resp.flags |= 1013 MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP; 1014 1015 if (MLX5_CAP_GEN(mdev, cqe_128_always)) 1016 resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD; 1017 } 1018 1019 if (field_avail(typeof(resp), sw_parsing_caps, 1020 uhw->outlen)) { 1021 resp.response_length += sizeof(resp.sw_parsing_caps); 1022 if (MLX5_CAP_ETH(mdev, swp)) { 1023 resp.sw_parsing_caps.sw_parsing_offloads |= 1024 MLX5_IB_SW_PARSING; 1025 1026 if (MLX5_CAP_ETH(mdev, swp_csum)) 1027 resp.sw_parsing_caps.sw_parsing_offloads |= 1028 MLX5_IB_SW_PARSING_CSUM; 1029 1030 if (MLX5_CAP_ETH(mdev, swp_lso)) 1031 resp.sw_parsing_caps.sw_parsing_offloads |= 1032 MLX5_IB_SW_PARSING_LSO; 1033 1034 if 
(resp.sw_parsing_caps.sw_parsing_offloads) 1035 resp.sw_parsing_caps.supported_qpts = 1036 BIT(IB_QPT_RAW_PACKET); 1037 } 1038 } 1039 1040 if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) && 1041 raw_support) { 1042 resp.response_length += sizeof(resp.striding_rq_caps); 1043 if (MLX5_CAP_GEN(mdev, striding_rq)) { 1044 resp.striding_rq_caps.min_single_stride_log_num_of_bytes = 1045 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES; 1046 resp.striding_rq_caps.max_single_stride_log_num_of_bytes = 1047 MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES; 1048 resp.striding_rq_caps.min_single_wqe_log_num_of_strides = 1049 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES; 1050 resp.striding_rq_caps.max_single_wqe_log_num_of_strides = 1051 MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES; 1052 resp.striding_rq_caps.supported_qpts = 1053 BIT(IB_QPT_RAW_PACKET); 1054 } 1055 } 1056 1057 if (field_avail(typeof(resp), tunnel_offloads_caps, 1058 uhw->outlen)) { 1059 resp.response_length += sizeof(resp.tunnel_offloads_caps); 1060 if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan)) 1061 resp.tunnel_offloads_caps |= 1062 MLX5_IB_TUNNELED_OFFLOADS_VXLAN; 1063 if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx)) 1064 resp.tunnel_offloads_caps |= 1065 MLX5_IB_TUNNELED_OFFLOADS_GENEVE; 1066 if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) 1067 resp.tunnel_offloads_caps |= 1068 MLX5_IB_TUNNELED_OFFLOADS_GRE; 1069 if (MLX5_CAP_GEN(mdev, flex_parser_protocols) & 1070 MLX5_FLEX_PROTO_CW_MPLS_GRE) 1071 resp.tunnel_offloads_caps |= 1072 MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE; 1073 if (MLX5_CAP_GEN(mdev, flex_parser_protocols) & 1074 MLX5_FLEX_PROTO_CW_MPLS_UDP) 1075 resp.tunnel_offloads_caps |= 1076 MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP; 1077 } 1078 1079 if (uhw->outlen) { 1080 err = ib_copy_to_udata(uhw, &resp, resp.response_length); 1081 1082 if (err) 1083 return err; 1084 } 1085 1086 return 0; 1087 } 1088 1089 enum mlx5_ib_width { 1090 MLX5_IB_WIDTH_1X = 1 << 0, 1091 MLX5_IB_WIDTH_2X = 1 << 1, 1092 MLX5_IB_WIDTH_4X = 1 << 2, 1093 MLX5_IB_WIDTH_8X = 1 << 3, 1094 MLX5_IB_WIDTH_12X = 1 << 4 1095 }; 1096 1097 static int translate_active_width(struct ib_device *ibdev, u8 active_width, 1098 u8 *ib_width) 1099 { 1100 struct mlx5_ib_dev *dev = to_mdev(ibdev); 1101 int err = 0; 1102 1103 if (active_width & MLX5_IB_WIDTH_1X) { 1104 *ib_width = IB_WIDTH_1X; 1105 } else if (active_width & MLX5_IB_WIDTH_2X) { 1106 mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n", 1107 (int)active_width); 1108 err = -EINVAL; 1109 } else if (active_width & MLX5_IB_WIDTH_4X) { 1110 *ib_width = IB_WIDTH_4X; 1111 } else if (active_width & MLX5_IB_WIDTH_8X) { 1112 *ib_width = IB_WIDTH_8X; 1113 } else if (active_width & MLX5_IB_WIDTH_12X) { 1114 *ib_width = IB_WIDTH_12X; 1115 } else { 1116 mlx5_ib_dbg(dev, "Invalid active_width %d\n", 1117 (int)active_width); 1118 err = -EINVAL; 1119 } 1120 1121 return err; 1122 } 1123 1124 static int mlx5_mtu_to_ib_mtu(int mtu) 1125 { 1126 switch (mtu) { 1127 case 256: return 1; 1128 case 512: return 2; 1129 case 1024: return 3; 1130 case 2048: return 4; 1131 case 4096: return 5; 1132 default: 1133 pr_warn("invalid mtu\n"); 1134 return -1; 1135 } 1136 } 1137 1138 enum ib_max_vl_num { 1139 __IB_MAX_VL_0 = 1, 1140 __IB_MAX_VL_0_1 = 2, 1141 __IB_MAX_VL_0_3 = 3, 1142 __IB_MAX_VL_0_7 = 4, 1143 __IB_MAX_VL_0_14 = 5, 1144 }; 1145 1146 enum mlx5_vl_hw_cap { 1147 MLX5_VL_HW_0 = 1, 1148 MLX5_VL_HW_0_1 = 2, 1149 MLX5_VL_HW_0_2 = 3, 1150 MLX5_VL_HW_0_3 = 4, 1151 MLX5_VL_HW_0_4 = 5, 1152 MLX5_VL_HW_0_5 = 6, 1153 MLX5_VL_HW_0_6 = 7, 1154 MLX5_VL_HW_0_7 = 8, 1155 
MLX5_VL_HW_0_14 = 15 1156 }; 1157 1158 static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap, 1159 u8 *max_vl_num) 1160 { 1161 switch (vl_hw_cap) { 1162 case MLX5_VL_HW_0: 1163 *max_vl_num = __IB_MAX_VL_0; 1164 break; 1165 case MLX5_VL_HW_0_1: 1166 *max_vl_num = __IB_MAX_VL_0_1; 1167 break; 1168 case MLX5_VL_HW_0_3: 1169 *max_vl_num = __IB_MAX_VL_0_3; 1170 break; 1171 case MLX5_VL_HW_0_7: 1172 *max_vl_num = __IB_MAX_VL_0_7; 1173 break; 1174 case MLX5_VL_HW_0_14: 1175 *max_vl_num = __IB_MAX_VL_0_14; 1176 break; 1177 1178 default: 1179 return -EINVAL; 1180 } 1181 1182 return 0; 1183 } 1184 1185 static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port, 1186 struct ib_port_attr *props) 1187 { 1188 struct mlx5_ib_dev *dev = to_mdev(ibdev); 1189 struct mlx5_core_dev *mdev = dev->mdev; 1190 struct mlx5_hca_vport_context *rep; 1191 u16 max_mtu; 1192 u16 oper_mtu; 1193 int err; 1194 u8 ib_link_width_oper; 1195 u8 vl_hw_cap; 1196 1197 rep = kzalloc(sizeof(*rep), GFP_KERNEL); 1198 if (!rep) { 1199 err = -ENOMEM; 1200 goto out; 1201 } 1202 1203 /* props being zeroed by the caller, avoid zeroing it here */ 1204 1205 err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep); 1206 if (err) 1207 goto out; 1208 1209 props->lid = rep->lid; 1210 props->lmc = rep->lmc; 1211 props->sm_lid = rep->sm_lid; 1212 props->sm_sl = rep->sm_sl; 1213 props->state = rep->vport_state; 1214 props->phys_state = rep->port_physical_state; 1215 props->port_cap_flags = rep->cap_mask1; 1216 props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size)); 1217 props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg); 1218 props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size)); 1219 props->bad_pkey_cntr = rep->pkey_violation_counter; 1220 props->qkey_viol_cntr = rep->qkey_violation_counter; 1221 props->subnet_timeout = rep->subnet_timeout; 1222 props->init_type_reply = rep->init_type_reply; 1223 1224 err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port); 1225 if (err) 1226 goto out; 1227 1228 err = translate_active_width(ibdev, ib_link_width_oper, 1229 &props->active_width); 1230 if (err) 1231 goto out; 1232 err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port); 1233 if (err) 1234 goto out; 1235 1236 mlx5_query_port_max_mtu(mdev, &max_mtu, port); 1237 1238 props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu); 1239 1240 mlx5_query_port_oper_mtu(mdev, &oper_mtu, port); 1241 1242 props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu); 1243 1244 err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port); 1245 if (err) 1246 goto out; 1247 1248 err = translate_max_vl_num(ibdev, vl_hw_cap, 1249 &props->max_vl_num); 1250 out: 1251 kfree(rep); 1252 return err; 1253 } 1254 1255 int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, 1256 struct ib_port_attr *props) 1257 { 1258 unsigned int count; 1259 int ret; 1260 1261 switch (mlx5_get_vport_access_method(ibdev)) { 1262 case MLX5_VPORT_ACCESS_METHOD_MAD: 1263 ret = mlx5_query_mad_ifc_port(ibdev, port, props); 1264 break; 1265 1266 case MLX5_VPORT_ACCESS_METHOD_HCA: 1267 ret = mlx5_query_hca_port(ibdev, port, props); 1268 break; 1269 1270 case MLX5_VPORT_ACCESS_METHOD_NIC: 1271 ret = mlx5_query_port_roce(ibdev, port, props); 1272 break; 1273 1274 default: 1275 ret = -EINVAL; 1276 } 1277 1278 if (!ret && props) { 1279 struct mlx5_ib_dev *dev = to_mdev(ibdev); 1280 struct mlx5_core_dev *mdev; 1281 bool put_mdev = true; 1282 1283 mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL); 1284 if (!mdev) { 1285 /* If the 
port isn't affiliated yet query the master. 1286 * The master and slave will have the same values. 1287 */ 1288 mdev = dev->mdev; 1289 port = 1; 1290 put_mdev = false; 1291 } 1292 count = mlx5_core_reserved_gids_count(mdev); 1293 if (put_mdev) 1294 mlx5_ib_put_native_port_mdev(dev, port); 1295 props->gid_tbl_len -= count; 1296 } 1297 return ret; 1298 } 1299 1300 static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port, 1301 struct ib_port_attr *props) 1302 { 1303 int ret; 1304 1305 /* Only link layer == ethernet is valid for representors */ 1306 ret = mlx5_query_port_roce(ibdev, port, props); 1307 if (ret || !props) 1308 return ret; 1309 1310 /* We don't support GIDS */ 1311 props->gid_tbl_len = 0; 1312 1313 return ret; 1314 } 1315 1316 static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index, 1317 union ib_gid *gid) 1318 { 1319 struct mlx5_ib_dev *dev = to_mdev(ibdev); 1320 struct mlx5_core_dev *mdev = dev->mdev; 1321 1322 switch (mlx5_get_vport_access_method(ibdev)) { 1323 case MLX5_VPORT_ACCESS_METHOD_MAD: 1324 return mlx5_query_mad_ifc_gids(ibdev, port, index, gid); 1325 1326 case MLX5_VPORT_ACCESS_METHOD_HCA: 1327 return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid); 1328 1329 default: 1330 return -EINVAL; 1331 } 1332 1333 } 1334 1335 static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port, 1336 u16 index, u16 *pkey) 1337 { 1338 struct mlx5_ib_dev *dev = to_mdev(ibdev); 1339 struct mlx5_core_dev *mdev; 1340 bool put_mdev = true; 1341 u8 mdev_port_num; 1342 int err; 1343 1344 mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num); 1345 if (!mdev) { 1346 /* The port isn't affiliated yet, get the PKey from the master 1347 * port. For RoCE the PKey tables will be the same. 1348 */ 1349 put_mdev = false; 1350 mdev = dev->mdev; 1351 mdev_port_num = 1; 1352 } 1353 1354 err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0, 1355 index, pkey); 1356 if (put_mdev) 1357 mlx5_ib_put_native_port_mdev(dev, port); 1358 1359 return err; 1360 } 1361 1362 static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, 1363 u16 *pkey) 1364 { 1365 switch (mlx5_get_vport_access_method(ibdev)) { 1366 case MLX5_VPORT_ACCESS_METHOD_MAD: 1367 return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey); 1368 1369 case MLX5_VPORT_ACCESS_METHOD_HCA: 1370 case MLX5_VPORT_ACCESS_METHOD_NIC: 1371 return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey); 1372 default: 1373 return -EINVAL; 1374 } 1375 } 1376 1377 static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask, 1378 struct ib_device_modify *props) 1379 { 1380 struct mlx5_ib_dev *dev = to_mdev(ibdev); 1381 struct mlx5_reg_node_desc in; 1382 struct mlx5_reg_node_desc out; 1383 int err; 1384 1385 if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) 1386 return -EOPNOTSUPP; 1387 1388 if (!(mask & IB_DEVICE_MODIFY_NODE_DESC)) 1389 return 0; 1390 1391 /* 1392 * If possible, pass node desc to FW, so it can generate 1393 * a 144 trap. If cmd fails, just ignore. 
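	 * (Trap 144 is the IB management trap that notifies the SM of local
	 * changes such as a NodeDescription update.)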
1394 */ 1395 memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX); 1396 err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out, 1397 sizeof(out), MLX5_REG_NODE_DESC, 0, 1); 1398 if (err) 1399 return err; 1400 1401 memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX); 1402 1403 return err; 1404 } 1405 1406 static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask, 1407 u32 value) 1408 { 1409 struct mlx5_hca_vport_context ctx = {}; 1410 struct mlx5_core_dev *mdev; 1411 u8 mdev_port_num; 1412 int err; 1413 1414 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num); 1415 if (!mdev) 1416 return -ENODEV; 1417 1418 err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx); 1419 if (err) 1420 goto out; 1421 1422 if (~ctx.cap_mask1_perm & mask) { 1423 mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n", 1424 mask, ctx.cap_mask1_perm); 1425 err = -EINVAL; 1426 goto out; 1427 } 1428 1429 ctx.cap_mask1 = value; 1430 ctx.cap_mask1_perm = mask; 1431 err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num, 1432 0, &ctx); 1433 1434 out: 1435 mlx5_ib_put_native_port_mdev(dev, port_num); 1436 1437 return err; 1438 } 1439 1440 static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, 1441 struct ib_port_modify *props) 1442 { 1443 struct mlx5_ib_dev *dev = to_mdev(ibdev); 1444 struct ib_port_attr attr; 1445 u32 tmp; 1446 int err; 1447 u32 change_mask; 1448 u32 value; 1449 bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) == 1450 IB_LINK_LAYER_INFINIBAND); 1451 1452 /* CM layer calls ib_modify_port() regardless of the link layer. For 1453 * Ethernet ports, qkey violation and Port capabilities are meaningless. 1454 */ 1455 if (!is_ib) 1456 return 0; 1457 1458 if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) { 1459 change_mask = props->clr_port_cap_mask | props->set_port_cap_mask; 1460 value = ~props->clr_port_cap_mask | props->set_port_cap_mask; 1461 return set_port_caps_atomic(dev, port, change_mask, value); 1462 } 1463 1464 mutex_lock(&dev->cap_mask_mutex); 1465 1466 err = ib_query_port(ibdev, port, &attr); 1467 if (err) 1468 goto out; 1469 1470 tmp = (attr.port_cap_flags | props->set_port_cap_mask) & 1471 ~props->clr_port_cap_mask; 1472 1473 err = mlx5_set_port_caps(dev->mdev, port, tmp); 1474 1475 out: 1476 mutex_unlock(&dev->cap_mask_mutex); 1477 return err; 1478 } 1479 1480 static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps) 1481 { 1482 mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n", 1483 caps & MLX5_LIB_CAP_4K_UAR ? 
"y" : "n"); 1484 } 1485 1486 static u16 calc_dynamic_bfregs(int uars_per_sys_page) 1487 { 1488 /* Large page with non 4k uar support might limit the dynamic size */ 1489 if (uars_per_sys_page == 1 && PAGE_SIZE > 4096) 1490 return MLX5_MIN_DYN_BFREGS; 1491 1492 return MLX5_MAX_DYN_BFREGS; 1493 } 1494 1495 static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k, 1496 struct mlx5_ib_alloc_ucontext_req_v2 *req, 1497 struct mlx5_bfreg_info *bfregi) 1498 { 1499 int uars_per_sys_page; 1500 int bfregs_per_sys_page; 1501 int ref_bfregs = req->total_num_bfregs; 1502 1503 if (req->total_num_bfregs == 0) 1504 return -EINVAL; 1505 1506 BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE); 1507 BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE); 1508 1509 if (req->total_num_bfregs > MLX5_MAX_BFREGS) 1510 return -ENOMEM; 1511 1512 uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k); 1513 bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR; 1514 /* This holds the required static allocation asked by the user */ 1515 req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page); 1516 if (req->num_low_latency_bfregs > req->total_num_bfregs - 1) 1517 return -EINVAL; 1518 1519 bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page; 1520 bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page); 1521 bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs; 1522 bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page; 1523 1524 mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n", 1525 MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no", 1526 lib_uar_4k ? 
"yes" : "no", ref_bfregs, 1527 req->total_num_bfregs, bfregi->total_num_bfregs, 1528 bfregi->num_sys_pages); 1529 1530 return 0; 1531 } 1532 1533 static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context) 1534 { 1535 struct mlx5_bfreg_info *bfregi; 1536 int err; 1537 int i; 1538 1539 bfregi = &context->bfregi; 1540 for (i = 0; i < bfregi->num_static_sys_pages; i++) { 1541 err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]); 1542 if (err) 1543 goto error; 1544 1545 mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]); 1546 } 1547 1548 for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++) 1549 bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX; 1550 1551 return 0; 1552 1553 error: 1554 for (--i; i >= 0; i--) 1555 if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i])) 1556 mlx5_ib_warn(dev, "failed to free uar %d\n", i); 1557 1558 return err; 1559 } 1560 1561 static void deallocate_uars(struct mlx5_ib_dev *dev, 1562 struct mlx5_ib_ucontext *context) 1563 { 1564 struct mlx5_bfreg_info *bfregi; 1565 int i; 1566 1567 bfregi = &context->bfregi; 1568 for (i = 0; i < bfregi->num_sys_pages; i++) 1569 if (i < bfregi->num_static_sys_pages || 1570 bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX) 1571 mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]); 1572 } 1573 1574 static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn) 1575 { 1576 int err; 1577 1578 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) 1579 return 0; 1580 1581 err = mlx5_core_alloc_transport_domain(dev->mdev, tdn); 1582 if (err) 1583 return err; 1584 1585 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || 1586 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) && 1587 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) 1588 return err; 1589 1590 mutex_lock(&dev->lb_mutex); 1591 dev->user_td++; 1592 1593 if (dev->user_td == 2) 1594 err = mlx5_nic_vport_update_local_lb(dev->mdev, true); 1595 1596 mutex_unlock(&dev->lb_mutex); 1597 return err; 1598 } 1599 1600 static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn) 1601 { 1602 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) 1603 return; 1604 1605 mlx5_core_dealloc_transport_domain(dev->mdev, tdn); 1606 1607 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || 1608 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) && 1609 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) 1610 return; 1611 1612 mutex_lock(&dev->lb_mutex); 1613 dev->user_td--; 1614 1615 if (dev->user_td < 2) 1616 mlx5_nic_vport_update_local_lb(dev->mdev, false); 1617 1618 mutex_unlock(&dev->lb_mutex); 1619 } 1620 1621 static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, 1622 struct ib_udata *udata) 1623 { 1624 struct mlx5_ib_dev *dev = to_mdev(ibdev); 1625 struct mlx5_ib_alloc_ucontext_req_v2 req = {}; 1626 struct mlx5_ib_alloc_ucontext_resp resp = {}; 1627 struct mlx5_core_dev *mdev = dev->mdev; 1628 struct mlx5_ib_ucontext *context; 1629 struct mlx5_bfreg_info *bfregi; 1630 int ver; 1631 int err; 1632 size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2, 1633 max_cqe_version); 1634 u32 dump_fill_mkey; 1635 bool lib_uar_4k; 1636 1637 if (!dev->ib_active) 1638 return ERR_PTR(-EAGAIN); 1639 1640 if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req)) 1641 ver = 0; 1642 else if (udata->inlen >= min_req_v2) 1643 ver = 2; 1644 else 1645 return ERR_PTR(-EINVAL); 1646 1647 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req))); 1648 if 
(err) 1649 return ERR_PTR(err); 1650 1651 if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX) 1652 return ERR_PTR(-EOPNOTSUPP); 1653 1654 if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2) 1655 return ERR_PTR(-EOPNOTSUPP); 1656 1657 req.total_num_bfregs = ALIGN(req.total_num_bfregs, 1658 MLX5_NON_FP_BFREGS_PER_UAR); 1659 if (req.num_low_latency_bfregs > req.total_num_bfregs - 1) 1660 return ERR_PTR(-EINVAL); 1661 1662 resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); 1663 if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf)) 1664 resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); 1665 resp.cache_line_size = cache_line_size(); 1666 resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); 1667 resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); 1668 resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); 1669 resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); 1670 resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); 1671 resp.cqe_version = min_t(__u8, 1672 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version), 1673 req.max_cqe_version); 1674 resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ? 1675 MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT; 1676 resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? 1677 MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1; 1678 resp.response_length = min(offsetof(typeof(resp), response_length) + 1679 sizeof(resp.response_length), udata->outlen); 1680 1681 if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) { 1682 if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_EGRESS)) 1683 resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM; 1684 if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA) 1685 resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA; 1686 if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi)) 1687 resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING; 1688 if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN) 1689 resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN; 1690 /* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */ 1691 } 1692 1693 context = kzalloc(sizeof(*context), GFP_KERNEL); 1694 if (!context) 1695 return ERR_PTR(-ENOMEM); 1696 1697 lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR; 1698 bfregi = &context->bfregi; 1699 1700 /* updates req->total_num_bfregs */ 1701 err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi); 1702 if (err) 1703 goto out_ctx; 1704 1705 mutex_init(&bfregi->lock); 1706 bfregi->lib_uar_4k = lib_uar_4k; 1707 bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count), 1708 GFP_KERNEL); 1709 if (!bfregi->count) { 1710 err = -ENOMEM; 1711 goto out_ctx; 1712 } 1713 1714 bfregi->sys_pages = kcalloc(bfregi->num_sys_pages, 1715 sizeof(*bfregi->sys_pages), 1716 GFP_KERNEL); 1717 if (!bfregi->sys_pages) { 1718 err = -ENOMEM; 1719 goto out_count; 1720 } 1721 1722 err = allocate_uars(dev, context); 1723 if (err) 1724 goto out_sys_pages; 1725 1726 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1727 context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range; 1728 #endif 1729 1730 err = mlx5_ib_alloc_transport_domain(dev, &context->tdn); 1731 if (err) 1732 goto out_uars; 1733 1734 if (req.flags & 
MLX5_IB_ALLOC_UCTX_DEVX) { 1735 /* Block DEVX on Infiniband as of SELinux */ 1736 if (mlx5_ib_port_link_layer(ibdev, 1) != IB_LINK_LAYER_ETHERNET) { 1737 err = -EPERM; 1738 goto out_td; 1739 } 1740 1741 err = mlx5_ib_devx_create(dev, context); 1742 if (err) 1743 goto out_td; 1744 } 1745 1746 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) { 1747 err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey); 1748 if (err) 1749 goto out_mdev; 1750 } 1751 1752 INIT_LIST_HEAD(&context->vma_private_list); 1753 mutex_init(&context->vma_private_list_mutex); 1754 INIT_LIST_HEAD(&context->db_page_list); 1755 mutex_init(&context->db_page_mutex); 1756 1757 resp.tot_bfregs = req.total_num_bfregs; 1758 resp.num_ports = dev->num_ports; 1759 1760 if (field_avail(typeof(resp), cqe_version, udata->outlen)) 1761 resp.response_length += sizeof(resp.cqe_version); 1762 1763 if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) { 1764 resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE | 1765 MLX5_USER_CMDS_SUPP_UHW_CREATE_AH; 1766 resp.response_length += sizeof(resp.cmds_supp_uhw); 1767 } 1768 1769 if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) { 1770 if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) { 1771 mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline); 1772 resp.eth_min_inline++; 1773 } 1774 resp.response_length += sizeof(resp.eth_min_inline); 1775 } 1776 1777 if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) { 1778 if (mdev->clock_info) 1779 resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1); 1780 resp.response_length += sizeof(resp.clock_info_versions); 1781 } 1782 1783 /* 1784 * We don't want to expose information from the PCI bar that is located 1785 * after 4096 bytes, so if the arch only supports larger pages, let's 1786 * pretend we don't support reading the HCA's core clock. This is also 1787 * forced by mmap function. 
1788 */ 1789 if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { 1790 if (PAGE_SIZE <= 4096) { 1791 resp.comp_mask |= 1792 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; 1793 resp.hca_core_clock_offset = 1794 offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE; 1795 } 1796 resp.response_length += sizeof(resp.hca_core_clock_offset); 1797 } 1798 1799 if (field_avail(typeof(resp), log_uar_size, udata->outlen)) 1800 resp.response_length += sizeof(resp.log_uar_size); 1801 1802 if (field_avail(typeof(resp), num_uars_per_page, udata->outlen)) 1803 resp.response_length += sizeof(resp.num_uars_per_page); 1804 1805 if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) { 1806 resp.num_dyn_bfregs = bfregi->num_dyn_bfregs; 1807 resp.response_length += sizeof(resp.num_dyn_bfregs); 1808 } 1809 1810 if (field_avail(typeof(resp), dump_fill_mkey, udata->outlen)) { 1811 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) { 1812 resp.dump_fill_mkey = dump_fill_mkey; 1813 resp.comp_mask |= 1814 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY; 1815 } 1816 resp.response_length += sizeof(resp.dump_fill_mkey); 1817 } 1818 1819 err = ib_copy_to_udata(udata, &resp, resp.response_length); 1820 if (err) 1821 goto out_mdev; 1822 1823 bfregi->ver = ver; 1824 bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs; 1825 context->cqe_version = resp.cqe_version; 1826 context->lib_caps = req.lib_caps; 1827 print_lib_caps(dev, context->lib_caps); 1828 1829 return &context->ibucontext; 1830 1831 out_mdev: 1832 if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) 1833 mlx5_ib_devx_destroy(dev, context); 1834 out_td: 1835 mlx5_ib_dealloc_transport_domain(dev, context->tdn); 1836 1837 out_uars: 1838 deallocate_uars(dev, context); 1839 1840 out_sys_pages: 1841 kfree(bfregi->sys_pages); 1842 1843 out_count: 1844 kfree(bfregi->count); 1845 1846 out_ctx: 1847 kfree(context); 1848 1849 return ERR_PTR(err); 1850 } 1851 1852 static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) 1853 { 1854 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); 1855 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); 1856 struct mlx5_bfreg_info *bfregi; 1857 1858 if (context->devx_uid) 1859 mlx5_ib_devx_destroy(dev, context); 1860 1861 bfregi = &context->bfregi; 1862 mlx5_ib_dealloc_transport_domain(dev, context->tdn); 1863 1864 deallocate_uars(dev, context); 1865 kfree(bfregi->sys_pages); 1866 kfree(bfregi->count); 1867 kfree(context); 1868 1869 return 0; 1870 } 1871 1872 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, 1873 int uar_idx) 1874 { 1875 int fw_uars_per_page; 1876 1877 fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1; 1878 1879 return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + uar_idx / fw_uars_per_page; 1880 } 1881 1882 static int get_command(unsigned long offset) 1883 { 1884 return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK; 1885 } 1886 1887 static int get_arg(unsigned long offset) 1888 { 1889 return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1); 1890 } 1891 1892 static int get_index(unsigned long offset) 1893 { 1894 return get_arg(offset); 1895 } 1896 1897 /* Index resides in an extra byte to enable larger values than 255 */ 1898 static int get_extended_index(unsigned long offset) 1899 { 1900 return get_arg(offset) | ((offset >> 16) & 0xff) << 8; 1901 } 1902 1903 static void mlx5_ib_vma_open(struct vm_area_struct *area) 1904 { 1905 /* vma_open is called when a new VMA is created on top of our VMA. 
This 1906 * is done through either mremap flow or split_vma (usually due to 1907 * mlock, madvise, munmap, etc.) We do not support a clone of the VMA, 1908 * as this VMA is strongly hardware related. Therefore we set the 1909 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from 1910 * calling us again and trying to do incorrect actions. We assume that 1911 * the original VMA size is exactly a single page, and therefore all 1912 * "splitting" operation will not happen to it. 1913 */ 1914 area->vm_ops = NULL; 1915 } 1916 1917 static void mlx5_ib_vma_close(struct vm_area_struct *area) 1918 { 1919 struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data; 1920 1921 /* It's guaranteed that all VMAs opened on a FD are closed before the 1922 * file itself is closed, therefore no sync is needed with the regular 1923 * closing flow. (e.g. mlx5 ib_dealloc_ucontext) 1924 * However need a sync with accessing the vma as part of 1925 * mlx5_ib_disassociate_ucontext. 1926 * The close operation is usually called under mm->mmap_sem except when 1927 * process is exiting. 1928 * The exiting case is handled explicitly as part of 1929 * mlx5_ib_disassociate_ucontext. 1930 */ 1931 mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data; 1932 1933 /* setting the vma context pointer to null in the mlx5_ib driver's 1934 * private data, to protect a race condition in 1935 * mlx5_ib_disassociate_ucontext(). 1936 */ 1937 mlx5_ib_vma_priv_data->vma = NULL; 1938 mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex); 1939 list_del(&mlx5_ib_vma_priv_data->list); 1940 mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex); 1941 kfree(mlx5_ib_vma_priv_data); 1942 } 1943 1944 static const struct vm_operations_struct mlx5_ib_vm_ops = { 1945 .open = mlx5_ib_vma_open, 1946 .close = mlx5_ib_vma_close 1947 }; 1948 1949 static int mlx5_ib_set_vma_data(struct vm_area_struct *vma, 1950 struct mlx5_ib_ucontext *ctx) 1951 { 1952 struct mlx5_ib_vma_private_data *vma_prv; 1953 struct list_head *vma_head = &ctx->vma_private_list; 1954 1955 vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL); 1956 if (!vma_prv) 1957 return -ENOMEM; 1958 1959 vma_prv->vma = vma; 1960 vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex; 1961 vma->vm_private_data = vma_prv; 1962 vma->vm_ops = &mlx5_ib_vm_ops; 1963 1964 mutex_lock(&ctx->vma_private_list_mutex); 1965 list_add(&vma_prv->list, vma_head); 1966 mutex_unlock(&ctx->vma_private_list_mutex); 1967 1968 return 0; 1969 } 1970 1971 static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) 1972 { 1973 struct vm_area_struct *vma; 1974 struct mlx5_ib_vma_private_data *vma_private, *n; 1975 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); 1976 1977 mutex_lock(&context->vma_private_list_mutex); 1978 list_for_each_entry_safe(vma_private, n, &context->vma_private_list, 1979 list) { 1980 vma = vma_private->vma; 1981 zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE); 1982 /* context going to be destroyed, should 1983 * not access ops any more. 
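 * vm_ops is cleared below as well, so neither mlx5_ib_vma_open() nor
 * mlx5_ib_vma_close() can be invoked for these VMAs after this point.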
1984 */ 1985 vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE); 1986 vma->vm_ops = NULL; 1987 list_del(&vma_private->list); 1988 kfree(vma_private); 1989 } 1990 mutex_unlock(&context->vma_private_list_mutex); 1991 } 1992 1993 static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd) 1994 { 1995 switch (cmd) { 1996 case MLX5_IB_MMAP_WC_PAGE: 1997 return "WC"; 1998 case MLX5_IB_MMAP_REGULAR_PAGE: 1999 return "best effort WC"; 2000 case MLX5_IB_MMAP_NC_PAGE: 2001 return "NC"; 2002 case MLX5_IB_MMAP_DEVICE_MEM: 2003 return "Device Memory"; 2004 default: 2005 return NULL; 2006 } 2007 } 2008 2009 static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev, 2010 struct vm_area_struct *vma, 2011 struct mlx5_ib_ucontext *context) 2012 { 2013 phys_addr_t pfn; 2014 int err; 2015 2016 if (vma->vm_end - vma->vm_start != PAGE_SIZE) 2017 return -EINVAL; 2018 2019 if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1) 2020 return -EOPNOTSUPP; 2021 2022 if (vma->vm_flags & VM_WRITE) 2023 return -EPERM; 2024 2025 if (!dev->mdev->clock_info_page) 2026 return -EOPNOTSUPP; 2027 2028 pfn = page_to_pfn(dev->mdev->clock_info_page); 2029 err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, 2030 vma->vm_page_prot); 2031 if (err) 2032 return err; 2033 2034 return mlx5_ib_set_vma_data(vma, context); 2035 } 2036 2037 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, 2038 struct vm_area_struct *vma, 2039 struct mlx5_ib_ucontext *context) 2040 { 2041 struct mlx5_bfreg_info *bfregi = &context->bfregi; 2042 int err; 2043 unsigned long idx; 2044 phys_addr_t pfn; 2045 pgprot_t prot; 2046 u32 bfreg_dyn_idx = 0; 2047 u32 uar_index; 2048 int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC); 2049 int max_valid_idx = dyn_uar ? bfregi->num_sys_pages : 2050 bfregi->num_static_sys_pages; 2051 2052 if (vma->vm_end - vma->vm_start != PAGE_SIZE) 2053 return -EINVAL; 2054 2055 if (dyn_uar) 2056 idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages; 2057 else 2058 idx = get_index(vma->vm_pgoff); 2059 2060 if (idx >= max_valid_idx) { 2061 mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n", 2062 idx, max_valid_idx); 2063 return -EINVAL; 2064 } 2065 2066 switch (cmd) { 2067 case MLX5_IB_MMAP_WC_PAGE: 2068 case MLX5_IB_MMAP_ALLOC_WC: 2069 /* Some architectures don't support WC memory */ 2070 #if defined(CONFIG_X86) 2071 if (!pat_enabled()) 2072 return -EPERM; 2073 #elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU))) 2074 return -EPERM; 2075 #endif 2076 /* fall through */ 2077 case MLX5_IB_MMAP_REGULAR_PAGE: 2078 /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */ 2079 prot = pgprot_writecombine(vma->vm_page_prot); 2080 break; 2081 case MLX5_IB_MMAP_NC_PAGE: 2082 prot = pgprot_noncached(vma->vm_page_prot); 2083 break; 2084 default: 2085 return -EINVAL; 2086 } 2087 2088 if (dyn_uar) { 2089 int uars_per_page; 2090 2091 uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k); 2092 bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR); 2093 if (bfreg_dyn_idx >= bfregi->total_num_bfregs) { 2094 mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n", 2095 bfreg_dyn_idx, bfregi->total_num_bfregs); 2096 return -EINVAL; 2097 } 2098 2099 mutex_lock(&bfregi->lock); 2100 /* Fail if uar already allocated, first bfreg index of each 2101 * page holds its count. 
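 * bfreg_dyn_idx computed above always points at the first bfreg of the
 * requested UAR page, so its count entry doubles as the page's
 * "already allocated" marker.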
2102 */ 2103 if (bfregi->count[bfreg_dyn_idx]) { 2104 mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx); 2105 mutex_unlock(&bfregi->lock); 2106 return -EINVAL; 2107 } 2108 2109 bfregi->count[bfreg_dyn_idx]++; 2110 mutex_unlock(&bfregi->lock); 2111 2112 err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index); 2113 if (err) { 2114 mlx5_ib_warn(dev, "UAR alloc failed\n"); 2115 goto free_bfreg; 2116 } 2117 } else { 2118 uar_index = bfregi->sys_pages[idx]; 2119 } 2120 2121 pfn = uar_index2pfn(dev, uar_index); 2122 mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn); 2123 2124 vma->vm_page_prot = prot; 2125 err = io_remap_pfn_range(vma, vma->vm_start, pfn, 2126 PAGE_SIZE, vma->vm_page_prot); 2127 if (err) { 2128 mlx5_ib_err(dev, 2129 "io_remap_pfn_range failed with error=%d, mmap_cmd=%s\n", 2130 err, mmap_cmd2str(cmd)); 2131 err = -EAGAIN; 2132 goto err; 2133 } 2134 2135 err = mlx5_ib_set_vma_data(vma, context); 2136 if (err) 2137 goto err; 2138 2139 if (dyn_uar) 2140 bfregi->sys_pages[idx] = uar_index; 2141 return 0; 2142 2143 err: 2144 if (!dyn_uar) 2145 return err; 2146 2147 mlx5_cmd_free_uar(dev->mdev, idx); 2148 2149 free_bfreg: 2150 mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx); 2151 2152 return err; 2153 } 2154 2155 static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) 2156 { 2157 struct mlx5_ib_ucontext *mctx = to_mucontext(context); 2158 struct mlx5_ib_dev *dev = to_mdev(context->device); 2159 u16 page_idx = get_extended_index(vma->vm_pgoff); 2160 size_t map_size = vma->vm_end - vma->vm_start; 2161 u32 npages = map_size >> PAGE_SHIFT; 2162 phys_addr_t pfn; 2163 pgprot_t prot; 2164 2165 if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) != 2166 page_idx + npages) 2167 return -EINVAL; 2168 2169 pfn = ((pci_resource_start(dev->mdev->pdev, 0) + 2170 MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >> 2171 PAGE_SHIFT) + 2172 page_idx; 2173 prot = pgprot_writecombine(vma->vm_page_prot); 2174 vma->vm_page_prot = prot; 2175 2176 if (io_remap_pfn_range(vma, vma->vm_start, pfn, map_size, 2177 vma->vm_page_prot)) 2178 return -EAGAIN; 2179 2180 return mlx5_ib_set_vma_data(vma, mctx); 2181 } 2182 2183 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) 2184 { 2185 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); 2186 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); 2187 unsigned long command; 2188 phys_addr_t pfn; 2189 2190 command = get_command(vma->vm_pgoff); 2191 switch (command) { 2192 case MLX5_IB_MMAP_WC_PAGE: 2193 case MLX5_IB_MMAP_NC_PAGE: 2194 case MLX5_IB_MMAP_REGULAR_PAGE: 2195 case MLX5_IB_MMAP_ALLOC_WC: 2196 return uar_mmap(dev, command, vma, context); 2197 2198 case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES: 2199 return -ENOSYS; 2200 2201 case MLX5_IB_MMAP_CORE_CLOCK: 2202 if (vma->vm_end - vma->vm_start != PAGE_SIZE) 2203 return -EINVAL; 2204 2205 if (vma->vm_flags & VM_WRITE) 2206 return -EPERM; 2207 2208 /* Don't expose to user-space information it shouldn't have */ 2209 if (PAGE_SIZE > 4096) 2210 return -EOPNOTSUPP; 2211 2212 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 2213 pfn = (dev->mdev->iseg_base + 2214 offsetof(struct mlx5_init_seg, internal_timer_h)) >> 2215 PAGE_SHIFT; 2216 if (io_remap_pfn_range(vma, vma->vm_start, pfn, 2217 PAGE_SIZE, vma->vm_page_prot)) 2218 return -EAGAIN; 2219 break; 2220 case MLX5_IB_MMAP_CLOCK_INFO: 2221 return mlx5_ib_mmap_clock_info_page(dev, vma, context); 2222 2223 case MLX5_IB_MMAP_DEVICE_MEM: 2224 return 
dm_mmap(ibcontext, vma); 2225 2226 default: 2227 return -EINVAL; 2228 } 2229 2230 return 0; 2231 } 2232 2233 struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev, 2234 struct ib_ucontext *context, 2235 struct ib_dm_alloc_attr *attr, 2236 struct uverbs_attr_bundle *attrs) 2237 { 2238 u64 act_size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE); 2239 struct mlx5_memic *memic = &to_mdev(ibdev)->memic; 2240 phys_addr_t memic_addr; 2241 struct mlx5_ib_dm *dm; 2242 u64 start_offset; 2243 u32 page_idx; 2244 int err; 2245 2246 dm = kzalloc(sizeof(*dm), GFP_KERNEL); 2247 if (!dm) 2248 return ERR_PTR(-ENOMEM); 2249 2250 mlx5_ib_dbg(to_mdev(ibdev), "alloc_memic req: user_length=0x%llx act_length=0x%llx log_alignment=%d\n", 2251 attr->length, act_size, attr->alignment); 2252 2253 err = mlx5_cmd_alloc_memic(memic, &memic_addr, 2254 act_size, attr->alignment); 2255 if (err) 2256 goto err_free; 2257 2258 start_offset = memic_addr & ~PAGE_MASK; 2259 page_idx = (memic_addr - pci_resource_start(memic->dev->pdev, 0) - 2260 MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >> 2261 PAGE_SHIFT; 2262 2263 err = uverbs_copy_to(attrs, 2264 MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET, 2265 &start_offset, sizeof(start_offset)); 2266 if (err) 2267 goto err_dealloc; 2268 2269 err = uverbs_copy_to(attrs, 2270 MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX, 2271 &page_idx, sizeof(page_idx)); 2272 if (err) 2273 goto err_dealloc; 2274 2275 bitmap_set(to_mucontext(context)->dm_pages, page_idx, 2276 DIV_ROUND_UP(act_size, PAGE_SIZE)); 2277 2278 dm->dev_addr = memic_addr; 2279 2280 return &dm->ibdm; 2281 2282 err_dealloc: 2283 mlx5_cmd_dealloc_memic(memic, memic_addr, 2284 act_size); 2285 err_free: 2286 kfree(dm); 2287 return ERR_PTR(err); 2288 } 2289 2290 int mlx5_ib_dealloc_dm(struct ib_dm *ibdm) 2291 { 2292 struct mlx5_memic *memic = &to_mdev(ibdm->device)->memic; 2293 struct mlx5_ib_dm *dm = to_mdm(ibdm); 2294 u64 act_size = roundup(dm->ibdm.length, MLX5_MEMIC_BASE_SIZE); 2295 u32 page_idx; 2296 int ret; 2297 2298 ret = mlx5_cmd_dealloc_memic(memic, dm->dev_addr, act_size); 2299 if (ret) 2300 return ret; 2301 2302 page_idx = (dm->dev_addr - pci_resource_start(memic->dev->pdev, 0) - 2303 MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >> 2304 PAGE_SHIFT; 2305 bitmap_clear(to_mucontext(ibdm->uobject->context)->dm_pages, 2306 page_idx, 2307 DIV_ROUND_UP(act_size, PAGE_SIZE)); 2308 2309 kfree(dm); 2310 2311 return 0; 2312 } 2313 2314 static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, 2315 struct ib_ucontext *context, 2316 struct ib_udata *udata) 2317 { 2318 struct mlx5_ib_alloc_pd_resp resp; 2319 struct mlx5_ib_pd *pd; 2320 int err; 2321 2322 pd = kmalloc(sizeof(*pd), GFP_KERNEL); 2323 if (!pd) 2324 return ERR_PTR(-ENOMEM); 2325 2326 err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn); 2327 if (err) { 2328 kfree(pd); 2329 return ERR_PTR(err); 2330 } 2331 2332 if (context) { 2333 resp.pdn = pd->pdn; 2334 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { 2335 mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn); 2336 kfree(pd); 2337 return ERR_PTR(-EFAULT); 2338 } 2339 } 2340 2341 return &pd->ibpd; 2342 } 2343 2344 static int mlx5_ib_dealloc_pd(struct ib_pd *pd) 2345 { 2346 struct mlx5_ib_dev *mdev = to_mdev(pd->device); 2347 struct mlx5_ib_pd *mpd = to_mpd(pd); 2348 2349 mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); 2350 kfree(mpd); 2351 2352 return 0; 2353 } 2354 2355 enum { 2356 MATCH_CRITERIA_ENABLE_OUTER_BIT, 2357 MATCH_CRITERIA_ENABLE_MISC_BIT, 2358 MATCH_CRITERIA_ENABLE_INNER_BIT, 2359 
MATCH_CRITERIA_ENABLE_MISC2_BIT 2360 }; 2361 2362 #define HEADER_IS_ZERO(match_criteria, headers) \ 2363 !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \ 2364 0, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \ 2365 2366 static u8 get_match_criteria_enable(u32 *match_criteria) 2367 { 2368 u8 match_criteria_enable; 2369 2370 match_criteria_enable = 2371 (!HEADER_IS_ZERO(match_criteria, outer_headers)) << 2372 MATCH_CRITERIA_ENABLE_OUTER_BIT; 2373 match_criteria_enable |= 2374 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) << 2375 MATCH_CRITERIA_ENABLE_MISC_BIT; 2376 match_criteria_enable |= 2377 (!HEADER_IS_ZERO(match_criteria, inner_headers)) << 2378 MATCH_CRITERIA_ENABLE_INNER_BIT; 2379 match_criteria_enable |= 2380 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) << 2381 MATCH_CRITERIA_ENABLE_MISC2_BIT; 2382 2383 return match_criteria_enable; 2384 } 2385 2386 static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val) 2387 { 2388 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask); 2389 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val); 2390 } 2391 2392 static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val, 2393 bool inner) 2394 { 2395 if (inner) { 2396 MLX5_SET(fte_match_set_misc, 2397 misc_c, inner_ipv6_flow_label, mask); 2398 MLX5_SET(fte_match_set_misc, 2399 misc_v, inner_ipv6_flow_label, val); 2400 } else { 2401 MLX5_SET(fte_match_set_misc, 2402 misc_c, outer_ipv6_flow_label, mask); 2403 MLX5_SET(fte_match_set_misc, 2404 misc_v, outer_ipv6_flow_label, val); 2405 } 2406 } 2407 2408 static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val) 2409 { 2410 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask); 2411 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val); 2412 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2); 2413 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2); 2414 } 2415 2416 static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask) 2417 { 2418 if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) && 2419 !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL)) 2420 return -EOPNOTSUPP; 2421 2422 if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) && 2423 !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP)) 2424 return -EOPNOTSUPP; 2425 2426 if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) && 2427 !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS)) 2428 return -EOPNOTSUPP; 2429 2430 if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) && 2431 !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL)) 2432 return -EOPNOTSUPP; 2433 2434 return 0; 2435 } 2436 2437 #define LAST_ETH_FIELD vlan_tag 2438 #define LAST_IB_FIELD sl 2439 #define LAST_IPV4_FIELD tos 2440 #define LAST_IPV6_FIELD traffic_class 2441 #define LAST_TCP_UDP_FIELD src_port 2442 #define LAST_TUNNEL_FIELD tunnel_id 2443 #define LAST_FLOW_TAG_FIELD tag_id 2444 #define LAST_DROP_FIELD size 2445 #define LAST_COUNTERS_FIELD counters 2446 2447 /* Field is the last supported field */ 2448 #define FIELDS_NOT_SUPPORTED(filter, field)\ 2449 memchr_inv((void *)&filter.field +\ 2450 sizeof(filter.field), 0,\ 2451 sizeof(filter) -\ 2452 offsetof(typeof(filter), field) -\ 2453 sizeof(filter.field)) 2454 2455 static int parse_flow_flow_action(const union ib_flow_spec *ib_spec, 2456 const struct ib_flow_attr *flow_attr, 2457 struct mlx5_flow_act *action) 2458 { 2459 struct mlx5_ib_flow_action *maction = to_mflow_act(ib_spec->action.act); 2460 2461 switch (maction->ib_action.type) { 2462 case IB_FLOW_ACTION_ESP: 2463 /* Currently only 
AES_GCM keymat is supported by the driver */ 2464 action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx; 2465 action->action |= flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS ? 2466 MLX5_FLOW_CONTEXT_ACTION_ENCRYPT : 2467 MLX5_FLOW_CONTEXT_ACTION_DECRYPT; 2468 return 0; 2469 default: 2470 return -EOPNOTSUPP; 2471 } 2472 } 2473 2474 static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c, 2475 u32 *match_v, const union ib_flow_spec *ib_spec, 2476 const struct ib_flow_attr *flow_attr, 2477 struct mlx5_flow_act *action, u32 prev_type) 2478 { 2479 void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, 2480 misc_parameters); 2481 void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v, 2482 misc_parameters); 2483 void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c, 2484 misc_parameters_2); 2485 void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v, 2486 misc_parameters_2); 2487 void *headers_c; 2488 void *headers_v; 2489 int match_ipv; 2490 int ret; 2491 2492 if (ib_spec->type & IB_FLOW_SPEC_INNER) { 2493 headers_c = MLX5_ADDR_OF(fte_match_param, match_c, 2494 inner_headers); 2495 headers_v = MLX5_ADDR_OF(fte_match_param, match_v, 2496 inner_headers); 2497 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2498 ft_field_support.inner_ip_version); 2499 } else { 2500 headers_c = MLX5_ADDR_OF(fte_match_param, match_c, 2501 outer_headers); 2502 headers_v = MLX5_ADDR_OF(fte_match_param, match_v, 2503 outer_headers); 2504 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2505 ft_field_support.outer_ip_version); 2506 } 2507 2508 switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) { 2509 case IB_FLOW_SPEC_ETH: 2510 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD)) 2511 return -EOPNOTSUPP; 2512 2513 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2514 dmac_47_16), 2515 ib_spec->eth.mask.dst_mac); 2516 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2517 dmac_47_16), 2518 ib_spec->eth.val.dst_mac); 2519 2520 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2521 smac_47_16), 2522 ib_spec->eth.mask.src_mac); 2523 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2524 smac_47_16), 2525 ib_spec->eth.val.src_mac); 2526 2527 if (ib_spec->eth.mask.vlan_tag) { 2528 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2529 cvlan_tag, 1); 2530 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2531 cvlan_tag, 1); 2532 2533 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2534 first_vid, ntohs(ib_spec->eth.mask.vlan_tag)); 2535 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2536 first_vid, ntohs(ib_spec->eth.val.vlan_tag)); 2537 2538 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2539 first_cfi, 2540 ntohs(ib_spec->eth.mask.vlan_tag) >> 12); 2541 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2542 first_cfi, 2543 ntohs(ib_spec->eth.val.vlan_tag) >> 12); 2544 2545 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2546 first_prio, 2547 ntohs(ib_spec->eth.mask.vlan_tag) >> 13); 2548 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2549 first_prio, 2550 ntohs(ib_spec->eth.val.vlan_tag) >> 13); 2551 } 2552 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2553 ethertype, ntohs(ib_spec->eth.mask.ether_type)); 2554 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2555 ethertype, ntohs(ib_spec->eth.val.ether_type)); 2556 break; 2557 case IB_FLOW_SPEC_IPV4: 2558 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD)) 2559 return -EOPNOTSUPP; 2560 2561 if (match_ipv) { 2562 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2563 ip_version, 0xf); 2564 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2565 
ip_version, MLX5_FS_IPV4_VERSION); 2566 } else { 2567 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2568 ethertype, 0xffff); 2569 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2570 ethertype, ETH_P_IP); 2571 } 2572 2573 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2574 src_ipv4_src_ipv6.ipv4_layout.ipv4), 2575 &ib_spec->ipv4.mask.src_ip, 2576 sizeof(ib_spec->ipv4.mask.src_ip)); 2577 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2578 src_ipv4_src_ipv6.ipv4_layout.ipv4), 2579 &ib_spec->ipv4.val.src_ip, 2580 sizeof(ib_spec->ipv4.val.src_ip)); 2581 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2582 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 2583 &ib_spec->ipv4.mask.dst_ip, 2584 sizeof(ib_spec->ipv4.mask.dst_ip)); 2585 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2586 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 2587 &ib_spec->ipv4.val.dst_ip, 2588 sizeof(ib_spec->ipv4.val.dst_ip)); 2589 2590 set_tos(headers_c, headers_v, 2591 ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos); 2592 2593 set_proto(headers_c, headers_v, 2594 ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto); 2595 break; 2596 case IB_FLOW_SPEC_IPV6: 2597 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD)) 2598 return -EOPNOTSUPP; 2599 2600 if (match_ipv) { 2601 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2602 ip_version, 0xf); 2603 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2604 ip_version, MLX5_FS_IPV6_VERSION); 2605 } else { 2606 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2607 ethertype, 0xffff); 2608 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2609 ethertype, ETH_P_IPV6); 2610 } 2611 2612 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2613 src_ipv4_src_ipv6.ipv6_layout.ipv6), 2614 &ib_spec->ipv6.mask.src_ip, 2615 sizeof(ib_spec->ipv6.mask.src_ip)); 2616 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2617 src_ipv4_src_ipv6.ipv6_layout.ipv6), 2618 &ib_spec->ipv6.val.src_ip, 2619 sizeof(ib_spec->ipv6.val.src_ip)); 2620 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2621 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 2622 &ib_spec->ipv6.mask.dst_ip, 2623 sizeof(ib_spec->ipv6.mask.dst_ip)); 2624 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2625 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 2626 &ib_spec->ipv6.val.dst_ip, 2627 sizeof(ib_spec->ipv6.val.dst_ip)); 2628 2629 set_tos(headers_c, headers_v, 2630 ib_spec->ipv6.mask.traffic_class, 2631 ib_spec->ipv6.val.traffic_class); 2632 2633 set_proto(headers_c, headers_v, 2634 ib_spec->ipv6.mask.next_hdr, 2635 ib_spec->ipv6.val.next_hdr); 2636 2637 set_flow_label(misc_params_c, misc_params_v, 2638 ntohl(ib_spec->ipv6.mask.flow_label), 2639 ntohl(ib_spec->ipv6.val.flow_label), 2640 ib_spec->type & IB_FLOW_SPEC_INNER); 2641 break; 2642 case IB_FLOW_SPEC_ESP: 2643 if (ib_spec->esp.mask.seq) 2644 return -EOPNOTSUPP; 2645 2646 MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 2647 ntohl(ib_spec->esp.mask.spi)); 2648 MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 2649 ntohl(ib_spec->esp.val.spi)); 2650 break; 2651 case IB_FLOW_SPEC_TCP: 2652 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, 2653 LAST_TCP_UDP_FIELD)) 2654 return -EOPNOTSUPP; 2655 2656 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, 2657 0xff); 2658 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 2659 IPPROTO_TCP); 2660 2661 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport, 2662 ntohs(ib_spec->tcp_udp.mask.src_port)); 2663 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport, 2664 ntohs(ib_spec->tcp_udp.val.src_port)); 2665 2666 
MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport, 2667 ntohs(ib_spec->tcp_udp.mask.dst_port)); 2668 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport, 2669 ntohs(ib_spec->tcp_udp.val.dst_port)); 2670 break; 2671 case IB_FLOW_SPEC_UDP: 2672 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, 2673 LAST_TCP_UDP_FIELD)) 2674 return -EOPNOTSUPP; 2675 2676 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, 2677 0xff); 2678 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 2679 IPPROTO_UDP); 2680 2681 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, 2682 ntohs(ib_spec->tcp_udp.mask.src_port)); 2683 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, 2684 ntohs(ib_spec->tcp_udp.val.src_port)); 2685 2686 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, 2687 ntohs(ib_spec->tcp_udp.mask.dst_port)); 2688 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, 2689 ntohs(ib_spec->tcp_udp.val.dst_port)); 2690 break; 2691 case IB_FLOW_SPEC_GRE: 2692 if (ib_spec->gre.mask.c_ks_res0_ver) 2693 return -EOPNOTSUPP; 2694 2695 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, 2696 0xff); 2697 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 2698 IPPROTO_GRE); 2699 2700 MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol, 2701 ntohs(ib_spec->gre.mask.protocol)); 2702 MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol, 2703 ntohs(ib_spec->gre.val.protocol)); 2704 2705 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c, 2706 gre_key_h), 2707 &ib_spec->gre.mask.key, 2708 sizeof(ib_spec->gre.mask.key)); 2709 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v, 2710 gre_key_h), 2711 &ib_spec->gre.val.key, 2712 sizeof(ib_spec->gre.val.key)); 2713 break; 2714 case IB_FLOW_SPEC_MPLS: 2715 switch (prev_type) { 2716 case IB_FLOW_SPEC_UDP: 2717 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2718 ft_field_support.outer_first_mpls_over_udp), 2719 &ib_spec->mpls.mask.tag)) 2720 return -EOPNOTSUPP; 2721 2722 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 2723 outer_first_mpls_over_udp), 2724 &ib_spec->mpls.val.tag, 2725 sizeof(ib_spec->mpls.val.tag)); 2726 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 2727 outer_first_mpls_over_udp), 2728 &ib_spec->mpls.mask.tag, 2729 sizeof(ib_spec->mpls.mask.tag)); 2730 break; 2731 case IB_FLOW_SPEC_GRE: 2732 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2733 ft_field_support.outer_first_mpls_over_gre), 2734 &ib_spec->mpls.mask.tag)) 2735 return -EOPNOTSUPP; 2736 2737 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 2738 outer_first_mpls_over_gre), 2739 &ib_spec->mpls.val.tag, 2740 sizeof(ib_spec->mpls.val.tag)); 2741 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 2742 outer_first_mpls_over_gre), 2743 &ib_spec->mpls.mask.tag, 2744 sizeof(ib_spec->mpls.mask.tag)); 2745 break; 2746 default: 2747 if (ib_spec->type & IB_FLOW_SPEC_INNER) { 2748 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2749 ft_field_support.inner_first_mpls), 2750 &ib_spec->mpls.mask.tag)) 2751 return -EOPNOTSUPP; 2752 2753 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 2754 inner_first_mpls), 2755 &ib_spec->mpls.val.tag, 2756 sizeof(ib_spec->mpls.val.tag)); 2757 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 2758 inner_first_mpls), 2759 &ib_spec->mpls.mask.tag, 2760 sizeof(ib_spec->mpls.mask.tag)); 2761 } else { 2762 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2763 ft_field_support.outer_first_mpls), 2764 &ib_spec->mpls.mask.tag)) 2765 return -EOPNOTSUPP; 2766 
2767 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 2768 outer_first_mpls), 2769 &ib_spec->mpls.val.tag, 2770 sizeof(ib_spec->mpls.val.tag)); 2771 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 2772 outer_first_mpls), 2773 &ib_spec->mpls.mask.tag, 2774 sizeof(ib_spec->mpls.mask.tag)); 2775 } 2776 } 2777 break; 2778 case IB_FLOW_SPEC_VXLAN_TUNNEL: 2779 if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask, 2780 LAST_TUNNEL_FIELD)) 2781 return -EOPNOTSUPP; 2782 2783 MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni, 2784 ntohl(ib_spec->tunnel.mask.tunnel_id)); 2785 MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni, 2786 ntohl(ib_spec->tunnel.val.tunnel_id)); 2787 break; 2788 case IB_FLOW_SPEC_ACTION_TAG: 2789 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag, 2790 LAST_FLOW_TAG_FIELD)) 2791 return -EOPNOTSUPP; 2792 if (ib_spec->flow_tag.tag_id >= BIT(24)) 2793 return -EINVAL; 2794 2795 action->flow_tag = ib_spec->flow_tag.tag_id; 2796 action->flags |= FLOW_ACT_HAS_TAG; 2797 break; 2798 case IB_FLOW_SPEC_ACTION_DROP: 2799 if (FIELDS_NOT_SUPPORTED(ib_spec->drop, 2800 LAST_DROP_FIELD)) 2801 return -EOPNOTSUPP; 2802 action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP; 2803 break; 2804 case IB_FLOW_SPEC_ACTION_HANDLE: 2805 ret = parse_flow_flow_action(ib_spec, flow_attr, action); 2806 if (ret) 2807 return ret; 2808 break; 2809 case IB_FLOW_SPEC_ACTION_COUNT: 2810 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count, 2811 LAST_COUNTERS_FIELD)) 2812 return -EOPNOTSUPP; 2813 2814 /* for now support only one counters spec per flow */ 2815 if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) 2816 return -EINVAL; 2817 2818 action->counters = ib_spec->flow_count.counters; 2819 action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; 2820 break; 2821 default: 2822 return -EINVAL; 2823 } 2824 2825 return 0; 2826 } 2827 2828 /* If a flow could catch both multicast and unicast packets, 2829 * it won't fall into the multicast flow steering table and this rule 2830 * could steal other multicast packets. 
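 * flow_is_multicast_only() below inspects the first spec and reports true
 * when the IPv4 destination value is multicast, or when the destination MAC
 * is multicast in both mask and value.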
2831 */ 2832 static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr) 2833 { 2834 union ib_flow_spec *flow_spec; 2835 2836 if (ib_attr->type != IB_FLOW_ATTR_NORMAL || 2837 ib_attr->num_of_specs < 1) 2838 return false; 2839 2840 flow_spec = (union ib_flow_spec *)(ib_attr + 1); 2841 if (flow_spec->type == IB_FLOW_SPEC_IPV4) { 2842 struct ib_flow_spec_ipv4 *ipv4_spec; 2843 2844 ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec; 2845 if (ipv4_is_multicast(ipv4_spec->val.dst_ip)) 2846 return true; 2847 2848 return false; 2849 } 2850 2851 if (flow_spec->type == IB_FLOW_SPEC_ETH) { 2852 struct ib_flow_spec_eth *eth_spec; 2853 2854 eth_spec = (struct ib_flow_spec_eth *)flow_spec; 2855 return is_multicast_ether_addr(eth_spec->mask.dst_mac) && 2856 is_multicast_ether_addr(eth_spec->val.dst_mac); 2857 } 2858 2859 return false; 2860 } 2861 2862 enum valid_spec { 2863 VALID_SPEC_INVALID, 2864 VALID_SPEC_VALID, 2865 VALID_SPEC_NA, 2866 }; 2867 2868 static enum valid_spec 2869 is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev, 2870 const struct mlx5_flow_spec *spec, 2871 const struct mlx5_flow_act *flow_act, 2872 bool egress) 2873 { 2874 const u32 *match_c = spec->match_criteria; 2875 bool is_crypto = 2876 (flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | 2877 MLX5_FLOW_CONTEXT_ACTION_DECRYPT)); 2878 bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c); 2879 bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP; 2880 2881 /* 2882 * Currently only crypto is supported in egress, when regular egress 2883 * rules would be supported, always return VALID_SPEC_NA. 2884 */ 2885 if (!is_crypto) 2886 return egress ? VALID_SPEC_INVALID : VALID_SPEC_NA; 2887 2888 return is_crypto && is_ipsec && 2889 (!egress || (!is_drop && !(flow_act->flags & FLOW_ACT_HAS_TAG))) ? 2890 VALID_SPEC_VALID : VALID_SPEC_INVALID; 2891 } 2892 2893 static bool is_valid_spec(struct mlx5_core_dev *mdev, 2894 const struct mlx5_flow_spec *spec, 2895 const struct mlx5_flow_act *flow_act, 2896 bool egress) 2897 { 2898 /* We curretly only support ipsec egress flow */ 2899 return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID; 2900 } 2901 2902 static bool is_valid_ethertype(struct mlx5_core_dev *mdev, 2903 const struct ib_flow_attr *flow_attr, 2904 bool check_inner) 2905 { 2906 union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1); 2907 int match_ipv = check_inner ? 2908 MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2909 ft_field_support.inner_ip_version) : 2910 MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2911 ft_field_support.outer_ip_version); 2912 int inner_bit = check_inner ? 
IB_FLOW_SPEC_INNER : 0; 2913 bool ipv4_spec_valid, ipv6_spec_valid; 2914 unsigned int ip_spec_type = 0; 2915 bool has_ethertype = false; 2916 unsigned int spec_index; 2917 bool mask_valid = true; 2918 u16 eth_type = 0; 2919 bool type_valid; 2920 2921 /* Validate that ethertype is correct */ 2922 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { 2923 if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) && 2924 ib_spec->eth.mask.ether_type) { 2925 mask_valid = (ib_spec->eth.mask.ether_type == 2926 htons(0xffff)); 2927 has_ethertype = true; 2928 eth_type = ntohs(ib_spec->eth.val.ether_type); 2929 } else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) || 2930 (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) { 2931 ip_spec_type = ib_spec->type; 2932 } 2933 ib_spec = (void *)ib_spec + ib_spec->size; 2934 } 2935 2936 type_valid = (!has_ethertype) || (!ip_spec_type); 2937 if (!type_valid && mask_valid) { 2938 ipv4_spec_valid = (eth_type == ETH_P_IP) && 2939 (ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit)); 2940 ipv6_spec_valid = (eth_type == ETH_P_IPV6) && 2941 (ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit)); 2942 2943 type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) || 2944 (((eth_type == ETH_P_MPLS_UC) || 2945 (eth_type == ETH_P_MPLS_MC)) && match_ipv); 2946 } 2947 2948 return type_valid; 2949 } 2950 2951 static bool is_valid_attr(struct mlx5_core_dev *mdev, 2952 const struct ib_flow_attr *flow_attr) 2953 { 2954 return is_valid_ethertype(mdev, flow_attr, false) && 2955 is_valid_ethertype(mdev, flow_attr, true); 2956 } 2957 2958 static void put_flow_table(struct mlx5_ib_dev *dev, 2959 struct mlx5_ib_flow_prio *prio, bool ft_added) 2960 { 2961 prio->refcount -= !!ft_added; 2962 if (!prio->refcount) { 2963 mlx5_destroy_flow_table(prio->flow_table); 2964 prio->flow_table = NULL; 2965 } 2966 } 2967 2968 static void counters_clear_description(struct ib_counters *counters) 2969 { 2970 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters); 2971 2972 mutex_lock(&mcounters->mcntrs_mutex); 2973 kfree(mcounters->counters_data); 2974 mcounters->counters_data = NULL; 2975 mcounters->cntrs_max_index = 0; 2976 mutex_unlock(&mcounters->mcntrs_mutex); 2977 } 2978 2979 static int mlx5_ib_destroy_flow(struct ib_flow *flow_id) 2980 { 2981 struct mlx5_ib_flow_handler *handler = container_of(flow_id, 2982 struct mlx5_ib_flow_handler, 2983 ibflow); 2984 struct mlx5_ib_flow_handler *iter, *tmp; 2985 struct mlx5_ib_dev *dev = handler->dev; 2986 2987 mutex_lock(&dev->flow_db->lock); 2988 2989 list_for_each_entry_safe(iter, tmp, &handler->list, list) { 2990 mlx5_del_flow_rules(iter->rule); 2991 put_flow_table(dev, iter->prio, true); 2992 list_del(&iter->list); 2993 kfree(iter); 2994 } 2995 2996 mlx5_del_flow_rules(handler->rule); 2997 put_flow_table(dev, handler->prio, true); 2998 if (handler->ibcounters && 2999 atomic_read(&handler->ibcounters->usecnt) == 1) 3000 counters_clear_description(handler->ibcounters); 3001 3002 mutex_unlock(&dev->flow_db->lock); 3003 if (handler->flow_matcher) 3004 atomic_dec(&handler->flow_matcher->usecnt); 3005 kfree(handler); 3006 3007 return 0; 3008 } 3009 3010 static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap) 3011 { 3012 priority *= 2; 3013 if (!dont_trap) 3014 priority++; 3015 return priority; 3016 } 3017 3018 enum flow_table_type { 3019 MLX5_IB_FT_RX, 3020 MLX5_IB_FT_TX 3021 }; 3022 3023 #define MLX5_FS_MAX_TYPES 6 3024 #define MLX5_FS_MAX_ENTRIES BIT(16) 3025 3026 static struct mlx5_ib_flow_prio *_get_prio(struct 
mlx5_flow_namespace *ns, 3027 struct mlx5_ib_flow_prio *prio, 3028 int priority, 3029 int num_entries, int num_groups) 3030 { 3031 struct mlx5_flow_table *ft; 3032 3033 ft = mlx5_create_auto_grouped_flow_table(ns, priority, 3034 num_entries, 3035 num_groups, 3036 0, 0); 3037 if (IS_ERR(ft)) 3038 return ERR_CAST(ft); 3039 3040 prio->flow_table = ft; 3041 prio->refcount = 0; 3042 return prio; 3043 } 3044 3045 static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev, 3046 struct ib_flow_attr *flow_attr, 3047 enum flow_table_type ft_type) 3048 { 3049 bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP; 3050 struct mlx5_flow_namespace *ns = NULL; 3051 struct mlx5_ib_flow_prio *prio; 3052 struct mlx5_flow_table *ft; 3053 int max_table_size; 3054 int num_entries; 3055 int num_groups; 3056 int priority; 3057 3058 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 3059 log_max_ft_size)); 3060 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { 3061 if (ft_type == MLX5_IB_FT_TX) 3062 priority = 0; 3063 else if (flow_is_multicast_only(flow_attr) && 3064 !dont_trap) 3065 priority = MLX5_IB_FLOW_MCAST_PRIO; 3066 else 3067 priority = ib_prio_to_core_prio(flow_attr->priority, 3068 dont_trap); 3069 ns = mlx5_get_flow_namespace(dev->mdev, 3070 ft_type == MLX5_IB_FT_TX ? 3071 MLX5_FLOW_NAMESPACE_EGRESS : 3072 MLX5_FLOW_NAMESPACE_BYPASS); 3073 num_entries = MLX5_FS_MAX_ENTRIES; 3074 num_groups = MLX5_FS_MAX_TYPES; 3075 prio = &dev->flow_db->prios[priority]; 3076 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 3077 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { 3078 ns = mlx5_get_flow_namespace(dev->mdev, 3079 MLX5_FLOW_NAMESPACE_LEFTOVERS); 3080 build_leftovers_ft_param(&priority, 3081 &num_entries, 3082 &num_groups); 3083 prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO]; 3084 } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 3085 if (!MLX5_CAP_FLOWTABLE(dev->mdev, 3086 allow_sniffer_and_nic_rx_shared_tir)) 3087 return ERR_PTR(-ENOTSUPP); 3088 3089 ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ? 
3090 MLX5_FLOW_NAMESPACE_SNIFFER_RX : 3091 MLX5_FLOW_NAMESPACE_SNIFFER_TX); 3092 3093 prio = &dev->flow_db->sniffer[ft_type]; 3094 priority = 0; 3095 num_entries = 1; 3096 num_groups = 1; 3097 } 3098 3099 if (!ns) 3100 return ERR_PTR(-ENOTSUPP); 3101 3102 if (num_entries > max_table_size) 3103 return ERR_PTR(-ENOMEM); 3104 3105 ft = prio->flow_table; 3106 if (!ft) 3107 return _get_prio(ns, prio, priority, num_entries, num_groups); 3108 3109 return prio; 3110 } 3111 3112 static void set_underlay_qp(struct mlx5_ib_dev *dev, 3113 struct mlx5_flow_spec *spec, 3114 u32 underlay_qpn) 3115 { 3116 void *misc_params_c = MLX5_ADDR_OF(fte_match_param, 3117 spec->match_criteria, 3118 misc_parameters); 3119 void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 3120 misc_parameters); 3121 3122 if (underlay_qpn && 3123 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 3124 ft_field_support.bth_dst_qp)) { 3125 MLX5_SET(fte_match_set_misc, 3126 misc_params_v, bth_dst_qp, underlay_qpn); 3127 MLX5_SET(fte_match_set_misc, 3128 misc_params_c, bth_dst_qp, 0xffffff); 3129 } 3130 } 3131 3132 static int read_flow_counters(struct ib_device *ibdev, 3133 struct mlx5_read_counters_attr *read_attr) 3134 { 3135 struct mlx5_fc *fc = read_attr->hw_cntrs_hndl; 3136 struct mlx5_ib_dev *dev = to_mdev(ibdev); 3137 3138 return mlx5_fc_query(dev->mdev, fc, 3139 &read_attr->out[IB_COUNTER_PACKETS], 3140 &read_attr->out[IB_COUNTER_BYTES]); 3141 } 3142 3143 /* flow counters currently expose two counters packets and bytes */ 3144 #define FLOW_COUNTERS_NUM 2 3145 static int counters_set_description(struct ib_counters *counters, 3146 enum mlx5_ib_counters_type counters_type, 3147 struct mlx5_ib_flow_counters_desc *desc_data, 3148 u32 ncounters) 3149 { 3150 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters); 3151 u32 cntrs_max_index = 0; 3152 int i; 3153 3154 if (counters_type != MLX5_IB_COUNTERS_FLOW) 3155 return -EINVAL; 3156 3157 /* init the fields for the object */ 3158 mcounters->type = counters_type; 3159 mcounters->read_counters = read_flow_counters; 3160 mcounters->counters_num = FLOW_COUNTERS_NUM; 3161 mcounters->ncounters = ncounters; 3162 /* each counter entry have both description and index pair */ 3163 for (i = 0; i < ncounters; i++) { 3164 if (desc_data[i].description > IB_COUNTER_BYTES) 3165 return -EINVAL; 3166 3167 if (cntrs_max_index <= desc_data[i].index) 3168 cntrs_max_index = desc_data[i].index + 1; 3169 } 3170 3171 mutex_lock(&mcounters->mcntrs_mutex); 3172 mcounters->counters_data = desc_data; 3173 mcounters->cntrs_max_index = cntrs_max_index; 3174 mutex_unlock(&mcounters->mcntrs_mutex); 3175 3176 return 0; 3177 } 3178 3179 #define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2)) 3180 static int flow_counters_set_data(struct ib_counters *ibcounters, 3181 struct mlx5_ib_create_flow *ucmd) 3182 { 3183 struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters); 3184 struct mlx5_ib_flow_counters_data *cntrs_data = NULL; 3185 struct mlx5_ib_flow_counters_desc *desc_data = NULL; 3186 bool hw_hndl = false; 3187 int ret = 0; 3188 3189 if (ucmd && ucmd->ncounters_data != 0) { 3190 cntrs_data = ucmd->data; 3191 if (cntrs_data->ncounters > MAX_COUNTERS_NUM) 3192 return -EINVAL; 3193 3194 desc_data = kcalloc(cntrs_data->ncounters, 3195 sizeof(*desc_data), 3196 GFP_KERNEL); 3197 if (!desc_data) 3198 return -ENOMEM; 3199 3200 if (copy_from_user(desc_data, 3201 u64_to_user_ptr(cntrs_data->counters_data), 3202 sizeof(*desc_data) * cntrs_data->ncounters)) { 3203 ret = -EFAULT; 3204 goto free; 3205 } 3206 } 3207 
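/* The HW flow counter behind this counters object is created lazily, on the first flow that binds to it. */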
3208 if (!mcounters->hw_cntrs_hndl) { 3209 mcounters->hw_cntrs_hndl = mlx5_fc_create( 3210 to_mdev(ibcounters->device)->mdev, false); 3211 if (IS_ERR(mcounters->hw_cntrs_hndl)) { 3212 ret = PTR_ERR(mcounters->hw_cntrs_hndl); 3213 goto free; 3214 } 3215 hw_hndl = true; 3216 } 3217 3218 if (desc_data) { 3219 /* counters already bound to at least one flow */ 3220 if (mcounters->cntrs_max_index) { 3221 ret = -EINVAL; 3222 goto free_hndl; 3223 } 3224 3225 ret = counters_set_description(ibcounters, 3226 MLX5_IB_COUNTERS_FLOW, 3227 desc_data, 3228 cntrs_data->ncounters); 3229 if (ret) 3230 goto free_hndl; 3231 3232 } else if (!mcounters->cntrs_max_index) { 3233 /* counters not bound yet, must have udata passed */ 3234 ret = -EINVAL; 3235 goto free_hndl; 3236 } 3237 3238 return 0; 3239 3240 free_hndl: 3241 if (hw_hndl) { 3242 mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev, 3243 mcounters->hw_cntrs_hndl); 3244 mcounters->hw_cntrs_hndl = NULL; 3245 } 3246 free: 3247 kfree(desc_data); 3248 return ret; 3249 } 3250 3251 static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, 3252 struct mlx5_ib_flow_prio *ft_prio, 3253 const struct ib_flow_attr *flow_attr, 3254 struct mlx5_flow_destination *dst, 3255 u32 underlay_qpn, 3256 struct mlx5_ib_create_flow *ucmd) 3257 { 3258 struct mlx5_flow_table *ft = ft_prio->flow_table; 3259 struct mlx5_ib_flow_handler *handler; 3260 struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG}; 3261 struct mlx5_flow_spec *spec; 3262 struct mlx5_flow_destination dest_arr[2] = {}; 3263 struct mlx5_flow_destination *rule_dst = dest_arr; 3264 const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr); 3265 unsigned int spec_index; 3266 u32 prev_type = 0; 3267 int err = 0; 3268 int dest_num = 0; 3269 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS; 3270 3271 if (!is_valid_attr(dev->mdev, flow_attr)) 3272 return ERR_PTR(-EINVAL); 3273 3274 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 3275 handler = kzalloc(sizeof(*handler), GFP_KERNEL); 3276 if (!handler || !spec) { 3277 err = -ENOMEM; 3278 goto free; 3279 } 3280 3281 INIT_LIST_HEAD(&handler->list); 3282 if (dst) { 3283 memcpy(&dest_arr[0], dst, sizeof(*dst)); 3284 dest_num++; 3285 } 3286 3287 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { 3288 err = parse_flow_attr(dev->mdev, spec->match_criteria, 3289 spec->match_value, 3290 ib_flow, flow_attr, &flow_act, 3291 prev_type); 3292 if (err < 0) 3293 goto free; 3294 3295 prev_type = ((union ib_flow_spec *)ib_flow)->type; 3296 ib_flow += ((union ib_flow_spec *)ib_flow)->size; 3297 } 3298 3299 if (!flow_is_multicast_only(flow_attr)) 3300 set_underlay_qp(dev, spec, underlay_qpn); 3301 3302 if (dev->rep) { 3303 void *misc; 3304 3305 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 3306 misc_parameters); 3307 MLX5_SET(fte_match_set_misc, misc, source_port, 3308 dev->rep->vport); 3309 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 3310 misc_parameters); 3311 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 3312 } 3313 3314 spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria); 3315 3316 if (is_egress && 3317 !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) { 3318 err = -EINVAL; 3319 goto free; 3320 } 3321 3322 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 3323 struct mlx5_ib_mcounters *mcounters; 3324 3325 err = flow_counters_set_data(flow_act.counters, ucmd); 3326 if (err) 3327 goto free; 3328 3329 mcounters = 
to_mcounters(flow_act.counters); 3330 handler->ibcounters = flow_act.counters; 3331 dest_arr[dest_num].type = 3332 MLX5_FLOW_DESTINATION_TYPE_COUNTER; 3333 dest_arr[dest_num].counter_id = 3334 mlx5_fc_id(mcounters->hw_cntrs_hndl); 3335 dest_num++; 3336 } 3337 3338 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) { 3339 if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) { 3340 rule_dst = NULL; 3341 dest_num = 0; 3342 } 3343 } else { 3344 if (is_egress) 3345 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; 3346 else 3347 flow_act.action |= 3348 dest_num ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST : 3349 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; 3350 } 3351 3352 if ((flow_act.flags & FLOW_ACT_HAS_TAG) && 3353 (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 3354 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) { 3355 mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n", 3356 flow_act.flow_tag, flow_attr->type); 3357 err = -EINVAL; 3358 goto free; 3359 } 3360 handler->rule = mlx5_add_flow_rules(ft, spec, 3361 &flow_act, 3362 rule_dst, dest_num); 3363 3364 if (IS_ERR(handler->rule)) { 3365 err = PTR_ERR(handler->rule); 3366 goto free; 3367 } 3368 3369 ft_prio->refcount++; 3370 handler->prio = ft_prio; 3371 handler->dev = dev; 3372 3373 ft_prio->flow_table = ft; 3374 free: 3375 if (err && handler) { 3376 if (handler->ibcounters && 3377 atomic_read(&handler->ibcounters->usecnt) == 1) 3378 counters_clear_description(handler->ibcounters); 3379 kfree(handler); 3380 } 3381 kvfree(spec); 3382 return err ? ERR_PTR(err) : handler; 3383 } 3384 3385 static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, 3386 struct mlx5_ib_flow_prio *ft_prio, 3387 const struct ib_flow_attr *flow_attr, 3388 struct mlx5_flow_destination *dst) 3389 { 3390 return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL); 3391 } 3392 3393 static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev, 3394 struct mlx5_ib_flow_prio *ft_prio, 3395 struct ib_flow_attr *flow_attr, 3396 struct mlx5_flow_destination *dst) 3397 { 3398 struct mlx5_ib_flow_handler *handler_dst = NULL; 3399 struct mlx5_ib_flow_handler *handler = NULL; 3400 3401 handler = create_flow_rule(dev, ft_prio, flow_attr, NULL); 3402 if (!IS_ERR(handler)) { 3403 handler_dst = create_flow_rule(dev, ft_prio, 3404 flow_attr, dst); 3405 if (IS_ERR(handler_dst)) { 3406 mlx5_del_flow_rules(handler->rule); 3407 ft_prio->refcount--; 3408 kfree(handler); 3409 handler = handler_dst; 3410 } else { 3411 list_add(&handler_dst->list, &handler->list); 3412 } 3413 } 3414 3415 return handler; 3416 } 3417 enum { 3418 LEFTOVERS_MC, 3419 LEFTOVERS_UC, 3420 }; 3421 3422 static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev, 3423 struct mlx5_ib_flow_prio *ft_prio, 3424 struct ib_flow_attr *flow_attr, 3425 struct mlx5_flow_destination *dst) 3426 { 3427 struct mlx5_ib_flow_handler *handler_ucast = NULL; 3428 struct mlx5_ib_flow_handler *handler = NULL; 3429 3430 static struct { 3431 struct ib_flow_attr flow_attr; 3432 struct ib_flow_spec_eth eth_flow; 3433 } leftovers_specs[] = { 3434 [LEFTOVERS_MC] = { 3435 .flow_attr = { 3436 .num_of_specs = 1, 3437 .size = sizeof(leftovers_specs[0]) 3438 }, 3439 .eth_flow = { 3440 .type = IB_FLOW_SPEC_ETH, 3441 .size = sizeof(struct ib_flow_spec_eth), 3442 .mask = {.dst_mac = {0x1} }, 3443 .val = {.dst_mac = {0x1} } 3444 } 3445 }, 3446 [LEFTOVERS_UC] = { 3447 .flow_attr = { 3448 .num_of_specs = 1, 3449 .size = sizeof(leftovers_specs[0]) 3450 }, 3451 
.eth_flow = { 3452 .type = IB_FLOW_SPEC_ETH, 3453 .size = sizeof(struct ib_flow_spec_eth), 3454 .mask = {.dst_mac = {0x1} }, 3455 .val = {.dst_mac = {} } 3456 } 3457 } 3458 }; 3459 3460 handler = create_flow_rule(dev, ft_prio, 3461 &leftovers_specs[LEFTOVERS_MC].flow_attr, 3462 dst); 3463 if (!IS_ERR(handler) && 3464 flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) { 3465 handler_ucast = create_flow_rule(dev, ft_prio, 3466 &leftovers_specs[LEFTOVERS_UC].flow_attr, 3467 dst); 3468 if (IS_ERR(handler_ucast)) { 3469 mlx5_del_flow_rules(handler->rule); 3470 ft_prio->refcount--; 3471 kfree(handler); 3472 handler = handler_ucast; 3473 } else { 3474 list_add(&handler_ucast->list, &handler->list); 3475 } 3476 } 3477 3478 return handler; 3479 } 3480 3481 static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev, 3482 struct mlx5_ib_flow_prio *ft_rx, 3483 struct mlx5_ib_flow_prio *ft_tx, 3484 struct mlx5_flow_destination *dst) 3485 { 3486 struct mlx5_ib_flow_handler *handler_rx; 3487 struct mlx5_ib_flow_handler *handler_tx; 3488 int err; 3489 static const struct ib_flow_attr flow_attr = { 3490 .num_of_specs = 0, 3491 .size = sizeof(flow_attr) 3492 }; 3493 3494 handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst); 3495 if (IS_ERR(handler_rx)) { 3496 err = PTR_ERR(handler_rx); 3497 goto err; 3498 } 3499 3500 handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst); 3501 if (IS_ERR(handler_tx)) { 3502 err = PTR_ERR(handler_tx); 3503 goto err_tx; 3504 } 3505 3506 list_add(&handler_tx->list, &handler_rx->list); 3507 3508 return handler_rx; 3509 3510 err_tx: 3511 mlx5_del_flow_rules(handler_rx->rule); 3512 ft_rx->refcount--; 3513 kfree(handler_rx); 3514 err: 3515 return ERR_PTR(err); 3516 } 3517 3518 static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, 3519 struct ib_flow_attr *flow_attr, 3520 int domain, 3521 struct ib_udata *udata) 3522 { 3523 struct mlx5_ib_dev *dev = to_mdev(qp->device); 3524 struct mlx5_ib_qp *mqp = to_mqp(qp); 3525 struct mlx5_ib_flow_handler *handler = NULL; 3526 struct mlx5_flow_destination *dst = NULL; 3527 struct mlx5_ib_flow_prio *ft_prio_tx = NULL; 3528 struct mlx5_ib_flow_prio *ft_prio; 3529 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS; 3530 struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr; 3531 size_t min_ucmd_sz, required_ucmd_sz; 3532 int err; 3533 int underlay_qpn; 3534 3535 if (udata && udata->inlen) { 3536 min_ucmd_sz = offsetof(typeof(ucmd_hdr), reserved) + 3537 sizeof(ucmd_hdr.reserved); 3538 if (udata->inlen < min_ucmd_sz) 3539 return ERR_PTR(-EOPNOTSUPP); 3540 3541 err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz); 3542 if (err) 3543 return ERR_PTR(err); 3544 3545 /* currently supports only one counters data */ 3546 if (ucmd_hdr.ncounters_data > 1) 3547 return ERR_PTR(-EINVAL); 3548 3549 required_ucmd_sz = min_ucmd_sz + 3550 sizeof(struct mlx5_ib_flow_counters_data) * 3551 ucmd_hdr.ncounters_data; 3552 if (udata->inlen > required_ucmd_sz && 3553 !ib_is_udata_cleared(udata, required_ucmd_sz, 3554 udata->inlen - required_ucmd_sz)) 3555 return ERR_PTR(-EOPNOTSUPP); 3556 3557 ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL); 3558 if (!ucmd) 3559 return ERR_PTR(-ENOMEM); 3560 3561 err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz); 3562 if (err) 3563 goto free_ucmd; 3564 } 3565 3566 if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) { 3567 err = -ENOMEM; 3568 goto free_ucmd; 3569 } 3570 3571 if (domain != IB_FLOW_DOMAIN_USER || 3572 flow_attr->port > dev->num_ports || 3573 (flow_attr->flags & 
~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | 3574 IB_FLOW_ATTR_FLAGS_EGRESS))) { 3575 err = -EINVAL; 3576 goto free_ucmd; 3577 } 3578 3579 if (is_egress && 3580 (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 3581 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) { 3582 err = -EINVAL; 3583 goto free_ucmd; 3584 } 3585 3586 dst = kzalloc(sizeof(*dst), GFP_KERNEL); 3587 if (!dst) { 3588 err = -ENOMEM; 3589 goto free_ucmd; 3590 } 3591 3592 mutex_lock(&dev->flow_db->lock); 3593 3594 ft_prio = get_flow_table(dev, flow_attr, 3595 is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX); 3596 if (IS_ERR(ft_prio)) { 3597 err = PTR_ERR(ft_prio); 3598 goto unlock; 3599 } 3600 if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 3601 ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX); 3602 if (IS_ERR(ft_prio_tx)) { 3603 err = PTR_ERR(ft_prio_tx); 3604 ft_prio_tx = NULL; 3605 goto destroy_ft; 3606 } 3607 } 3608 3609 if (is_egress) { 3610 dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT; 3611 } else { 3612 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR; 3613 if (mqp->flags & MLX5_IB_QP_RSS) 3614 dst->tir_num = mqp->rss_qp.tirn; 3615 else 3616 dst->tir_num = mqp->raw_packet_qp.rq.tirn; 3617 } 3618 3619 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { 3620 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) { 3621 handler = create_dont_trap_rule(dev, ft_prio, 3622 flow_attr, dst); 3623 } else { 3624 underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ? 3625 mqp->underlay_qpn : 0; 3626 handler = _create_flow_rule(dev, ft_prio, flow_attr, 3627 dst, underlay_qpn, ucmd); 3628 } 3629 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 3630 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { 3631 handler = create_leftovers_rule(dev, ft_prio, flow_attr, 3632 dst); 3633 } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 3634 handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst); 3635 } else { 3636 err = -EINVAL; 3637 goto destroy_ft; 3638 } 3639 3640 if (IS_ERR(handler)) { 3641 err = PTR_ERR(handler); 3642 handler = NULL; 3643 goto destroy_ft; 3644 } 3645 3646 mutex_unlock(&dev->flow_db->lock); 3647 kfree(dst); 3648 kfree(ucmd); 3649 3650 return &handler->ibflow; 3651 3652 destroy_ft: 3653 put_flow_table(dev, ft_prio, false); 3654 if (ft_prio_tx) 3655 put_flow_table(dev, ft_prio_tx, false); 3656 unlock: 3657 mutex_unlock(&dev->flow_db->lock); 3658 kfree(dst); 3659 free_ucmd: 3660 kfree(ucmd); 3661 return ERR_PTR(err); 3662 } 3663 3664 static struct mlx5_ib_flow_prio *_get_flow_table(struct mlx5_ib_dev *dev, 3665 int priority, bool mcast) 3666 { 3667 int max_table_size; 3668 struct mlx5_flow_namespace *ns = NULL; 3669 struct mlx5_ib_flow_prio *prio; 3670 3671 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 3672 log_max_ft_size)); 3673 if (max_table_size < MLX5_FS_MAX_ENTRIES) 3674 return ERR_PTR(-ENOMEM); 3675 3676 if (mcast) 3677 priority = MLX5_IB_FLOW_MCAST_PRIO; 3678 else 3679 priority = ib_prio_to_core_prio(priority, false); 3680 3681 ns = mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS); 3682 if (!ns) 3683 return ERR_PTR(-ENOTSUPP); 3684 3685 prio = &dev->flow_db->prios[priority]; 3686 3687 if (prio->flow_table) 3688 return prio; 3689 3690 return _get_prio(ns, prio, priority, MLX5_FS_MAX_ENTRIES, 3691 MLX5_FS_MAX_TYPES); 3692 } 3693 3694 static struct mlx5_ib_flow_handler * 3695 _create_raw_flow_rule(struct mlx5_ib_dev *dev, 3696 struct mlx5_ib_flow_prio *ft_prio, 3697 struct mlx5_flow_destination *dst, 3698 struct mlx5_ib_flow_matcher *fs_matcher, 3699 void *cmd_in, int inlen) 3700 { 3701 struct 
mlx5_ib_flow_handler *handler; 3702 struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG}; 3703 struct mlx5_flow_spec *spec; 3704 struct mlx5_flow_table *ft = ft_prio->flow_table; 3705 int err = 0; 3706 3707 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 3708 handler = kzalloc(sizeof(*handler), GFP_KERNEL); 3709 if (!handler || !spec) { 3710 err = -ENOMEM; 3711 goto free; 3712 } 3713 3714 INIT_LIST_HEAD(&handler->list); 3715 3716 memcpy(spec->match_value, cmd_in, inlen); 3717 memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params, 3718 fs_matcher->mask_len); 3719 spec->match_criteria_enable = fs_matcher->match_criteria_enable; 3720 3721 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 3722 handler->rule = mlx5_add_flow_rules(ft, spec, 3723 &flow_act, dst, 1); 3724 3725 if (IS_ERR(handler->rule)) { 3726 err = PTR_ERR(handler->rule); 3727 goto free; 3728 } 3729 3730 ft_prio->refcount++; 3731 handler->prio = ft_prio; 3732 handler->dev = dev; 3733 ft_prio->flow_table = ft; 3734 3735 free: 3736 if (err) 3737 kfree(handler); 3738 kvfree(spec); 3739 return err ? ERR_PTR(err) : handler; 3740 } 3741 3742 static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher, 3743 void *match_v) 3744 { 3745 void *match_c; 3746 void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4; 3747 void *dmac, *dmac_mask; 3748 void *ipv4, *ipv4_mask; 3749 3750 if (!(fs_matcher->match_criteria_enable & 3751 (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT))) 3752 return false; 3753 3754 match_c = fs_matcher->matcher_mask.match_params; 3755 match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v, 3756 outer_headers); 3757 match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c, 3758 outer_headers); 3759 3760 dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4, 3761 dmac_47_16); 3762 dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4, 3763 dmac_47_16); 3764 3765 if (is_multicast_ether_addr(dmac) && 3766 is_multicast_ether_addr(dmac_mask)) 3767 return true; 3768 3769 ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4, 3770 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 3771 3772 ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4, 3773 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 3774 3775 if (ipv4_is_multicast(*(__be32 *)(ipv4)) && 3776 ipv4_is_multicast(*(__be32 *)(ipv4_mask))) 3777 return true; 3778 3779 return false; 3780 } 3781 3782 struct mlx5_ib_flow_handler * 3783 mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev, 3784 struct mlx5_ib_flow_matcher *fs_matcher, 3785 void *cmd_in, int inlen, int dest_id, 3786 int dest_type) 3787 { 3788 struct mlx5_flow_destination *dst; 3789 struct mlx5_ib_flow_prio *ft_prio; 3790 int priority = fs_matcher->priority; 3791 struct mlx5_ib_flow_handler *handler; 3792 bool mcast; 3793 int err; 3794 3795 if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL) 3796 return ERR_PTR(-EOPNOTSUPP); 3797 3798 if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO) 3799 return ERR_PTR(-ENOMEM); 3800 3801 dst = kzalloc(sizeof(*dst), GFP_KERNEL); 3802 if (!dst) 3803 return ERR_PTR(-ENOMEM); 3804 3805 mcast = raw_fs_is_multicast(fs_matcher, cmd_in); 3806 mutex_lock(&dev->flow_db->lock); 3807 3808 ft_prio = _get_flow_table(dev, priority, mcast); 3809 if (IS_ERR(ft_prio)) { 3810 err = PTR_ERR(ft_prio); 3811 goto unlock; 3812 } 3813 3814 if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) { 3815 dst->type = dest_type; 3816 dst->tir_num = dest_id; 3817 } else { 3818 dst->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM; 3819 dst->ft_num = dest_id; 
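/* Destinations other than a TIR are addressed by flow table number. */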
3820 } 3821 3822 handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, cmd_in, 3823 inlen); 3824 3825 if (IS_ERR(handler)) { 3826 err = PTR_ERR(handler); 3827 goto destroy_ft; 3828 } 3829 3830 mutex_unlock(&dev->flow_db->lock); 3831 atomic_inc(&fs_matcher->usecnt); 3832 handler->flow_matcher = fs_matcher; 3833 3834 kfree(dst); 3835 3836 return handler; 3837 3838 destroy_ft: 3839 put_flow_table(dev, ft_prio, false); 3840 unlock: 3841 mutex_unlock(&dev->flow_db->lock); 3842 kfree(dst); 3843 3844 return ERR_PTR(err); 3845 } 3846 3847 static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags) 3848 { 3849 u32 flags = 0; 3850 3851 if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA) 3852 flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA; 3853 3854 return flags; 3855 } 3856 3857 #define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA 3858 static struct ib_flow_action * 3859 mlx5_ib_create_flow_action_esp(struct ib_device *device, 3860 const struct ib_flow_action_attrs_esp *attr, 3861 struct uverbs_attr_bundle *attrs) 3862 { 3863 struct mlx5_ib_dev *mdev = to_mdev(device); 3864 struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm; 3865 struct mlx5_accel_esp_xfrm_attrs accel_attrs = {}; 3866 struct mlx5_ib_flow_action *action; 3867 u64 action_flags; 3868 u64 flags; 3869 int err = 0; 3870 3871 err = uverbs_get_flags64( 3872 &action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS, 3873 ((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1)); 3874 if (err) 3875 return ERR_PTR(err); 3876 3877 flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags); 3878 3879 /* We currently only support a subset of the standard features. Only a 3880 * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and esn 3881 * (with overlap). Full offload mode isn't supported.
3882 */ 3883 if (!attr->keymat || attr->replay || attr->encap || 3884 attr->spi || attr->seq || attr->tfc_pad || 3885 attr->hard_limit_pkts || 3886 (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 3887 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT))) 3888 return ERR_PTR(-EOPNOTSUPP); 3889 3890 if (attr->keymat->protocol != 3891 IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM) 3892 return ERR_PTR(-EOPNOTSUPP); 3893 3894 aes_gcm = &attr->keymat->keymat.aes_gcm; 3895 3896 if (aes_gcm->icv_len != 16 || 3897 aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ) 3898 return ERR_PTR(-EOPNOTSUPP); 3899 3900 action = kmalloc(sizeof(*action), GFP_KERNEL); 3901 if (!action) 3902 return ERR_PTR(-ENOMEM); 3903 3904 action->esp_aes_gcm.ib_flags = attr->flags; 3905 memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key, 3906 sizeof(accel_attrs.keymat.aes_gcm.aes_key)); 3907 accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8; 3908 memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt, 3909 sizeof(accel_attrs.keymat.aes_gcm.salt)); 3910 memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv, 3911 sizeof(accel_attrs.keymat.aes_gcm.seq_iv)); 3912 accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8; 3913 accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ; 3914 accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM; 3915 3916 accel_attrs.esn = attr->esn; 3917 if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) 3918 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED; 3919 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW) 3920 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; 3921 3922 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT) 3923 accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT; 3924 3925 action->esp_aes_gcm.ctx = 3926 mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags); 3927 if (IS_ERR(action->esp_aes_gcm.ctx)) { 3928 err = PTR_ERR(action->esp_aes_gcm.ctx); 3929 goto err_parse; 3930 } 3931 3932 action->esp_aes_gcm.ib_flags = attr->flags; 3933 3934 return &action->ib_action; 3935 3936 err_parse: 3937 kfree(action); 3938 return ERR_PTR(err); 3939 } 3940 3941 static int 3942 mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action, 3943 const struct ib_flow_action_attrs_esp *attr, 3944 struct uverbs_attr_bundle *attrs) 3945 { 3946 struct mlx5_ib_flow_action *maction = to_mflow_act(action); 3947 struct mlx5_accel_esp_xfrm_attrs accel_attrs; 3948 int err = 0; 3949 3950 if (attr->keymat || attr->replay || attr->encap || 3951 attr->spi || attr->seq || attr->tfc_pad || 3952 attr->hard_limit_pkts || 3953 (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 3954 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS | 3955 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))) 3956 return -EOPNOTSUPP; 3957 3958 /* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can 3959 * be modified. 
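 * All other xfrm attributes are copied unchanged from the existing context
 * before mlx5_accel_esp_modify_xfrm() is called.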
3960 */ 3961 if (!(maction->esp_aes_gcm.ib_flags & 3962 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) && 3963 attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 3964 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)) 3965 return -EINVAL; 3966 3967 memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs, 3968 sizeof(accel_attrs)); 3969 3970 accel_attrs.esn = attr->esn; 3971 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW) 3972 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; 3973 else 3974 accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; 3975 3976 err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx, 3977 &accel_attrs); 3978 if (err) 3979 return err; 3980 3981 maction->esp_aes_gcm.ib_flags &= 3982 ~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW; 3983 maction->esp_aes_gcm.ib_flags |= 3984 attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW; 3985 3986 return 0; 3987 } 3988 3989 static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action) 3990 { 3991 struct mlx5_ib_flow_action *maction = to_mflow_act(action); 3992 3993 switch (action->type) { 3994 case IB_FLOW_ACTION_ESP: 3995 /* 3996 * We only support aes_gcm by now, so we implicitly know this is 3997 * the underline crypto. 3998 */ 3999 mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx); 4000 break; 4001 default: 4002 WARN_ON(true); 4003 break; 4004 } 4005 4006 kfree(maction); 4007 return 0; 4008 } 4009 4010 static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 4011 { 4012 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 4013 struct mlx5_ib_qp *mqp = to_mqp(ibqp); 4014 int err; 4015 4016 if (mqp->flags & MLX5_IB_QP_UNDERLAY) { 4017 mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n"); 4018 return -EOPNOTSUPP; 4019 } 4020 4021 err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num); 4022 if (err) 4023 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n", 4024 ibqp->qp_num, gid->raw); 4025 4026 return err; 4027 } 4028 4029 static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 4030 { 4031 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 4032 int err; 4033 4034 err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num); 4035 if (err) 4036 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n", 4037 ibqp->qp_num, gid->raw); 4038 4039 return err; 4040 } 4041 4042 static int init_node_data(struct mlx5_ib_dev *dev) 4043 { 4044 int err; 4045 4046 err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc); 4047 if (err) 4048 return err; 4049 4050 dev->mdev->rev_id = dev->mdev->pdev->revision; 4051 4052 return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid); 4053 } 4054 4055 static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr, 4056 char *buf) 4057 { 4058 struct mlx5_ib_dev *dev = 4059 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 4060 4061 return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages); 4062 } 4063 4064 static ssize_t show_reg_pages(struct device *device, 4065 struct device_attribute *attr, char *buf) 4066 { 4067 struct mlx5_ib_dev *dev = 4068 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 4069 4070 return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages)); 4071 } 4072 4073 static ssize_t show_hca(struct device *device, struct device_attribute *attr, 4074 char *buf) 4075 { 4076 struct mlx5_ib_dev *dev = 4077 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 4078 return sprintf(buf, "MT%d\n", dev->mdev->pdev->device); 4079 } 4080 4081 static 
ssize_t show_rev(struct device *device, struct device_attribute *attr, 4082 char *buf) 4083 { 4084 struct mlx5_ib_dev *dev = 4085 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 4086 return sprintf(buf, "%x\n", dev->mdev->rev_id); 4087 } 4088 4089 static ssize_t show_board(struct device *device, struct device_attribute *attr, 4090 char *buf) 4091 { 4092 struct mlx5_ib_dev *dev = 4093 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 4094 return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN, 4095 dev->mdev->board_id); 4096 } 4097 4098 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 4099 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); 4100 static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); 4101 static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL); 4102 static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL); 4103 4104 static struct device_attribute *mlx5_class_attributes[] = { 4105 &dev_attr_hw_rev, 4106 &dev_attr_hca_type, 4107 &dev_attr_board_id, 4108 &dev_attr_fw_pages, 4109 &dev_attr_reg_pages, 4110 }; 4111 4112 static void pkey_change_handler(struct work_struct *work) 4113 { 4114 struct mlx5_ib_port_resources *ports = 4115 container_of(work, struct mlx5_ib_port_resources, 4116 pkey_change_work); 4117 4118 mutex_lock(&ports->devr->mutex); 4119 mlx5_ib_gsi_pkey_change(ports->gsi); 4120 mutex_unlock(&ports->devr->mutex); 4121 } 4122 4123 static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev) 4124 { 4125 struct mlx5_ib_qp *mqp; 4126 struct mlx5_ib_cq *send_mcq, *recv_mcq; 4127 struct mlx5_core_cq *mcq; 4128 struct list_head cq_armed_list; 4129 unsigned long flags_qp; 4130 unsigned long flags_cq; 4131 unsigned long flags; 4132 4133 INIT_LIST_HEAD(&cq_armed_list); 4134 4135 /* Go over qp list reside on that ibdev, sync with create/destroy qp.*/ 4136 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags); 4137 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) { 4138 spin_lock_irqsave(&mqp->sq.lock, flags_qp); 4139 if (mqp->sq.tail != mqp->sq.head) { 4140 send_mcq = to_mcq(mqp->ibqp.send_cq); 4141 spin_lock_irqsave(&send_mcq->lock, flags_cq); 4142 if (send_mcq->mcq.comp && 4143 mqp->ibqp.send_cq->comp_handler) { 4144 if (!send_mcq->mcq.reset_notify_added) { 4145 send_mcq->mcq.reset_notify_added = 1; 4146 list_add_tail(&send_mcq->mcq.reset_notify, 4147 &cq_armed_list); 4148 } 4149 } 4150 spin_unlock_irqrestore(&send_mcq->lock, flags_cq); 4151 } 4152 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp); 4153 spin_lock_irqsave(&mqp->rq.lock, flags_qp); 4154 /* no handling is needed for SRQ */ 4155 if (!mqp->ibqp.srq) { 4156 if (mqp->rq.tail != mqp->rq.head) { 4157 recv_mcq = to_mcq(mqp->ibqp.recv_cq); 4158 spin_lock_irqsave(&recv_mcq->lock, flags_cq); 4159 if (recv_mcq->mcq.comp && 4160 mqp->ibqp.recv_cq->comp_handler) { 4161 if (!recv_mcq->mcq.reset_notify_added) { 4162 recv_mcq->mcq.reset_notify_added = 1; 4163 list_add_tail(&recv_mcq->mcq.reset_notify, 4164 &cq_armed_list); 4165 } 4166 } 4167 spin_unlock_irqrestore(&recv_mcq->lock, 4168 flags_cq); 4169 } 4170 } 4171 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp); 4172 } 4173 /*At that point all inflight post send were put to be executed as of we 4174 * lock/unlock above locks Now need to arm all involved CQs. 
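 * (Taking and releasing each QP's SQ/RQ lock above guarantees that any
 * in-flight posts have finished, so it is now safe to invoke the completion
 * handler of every CQ collected on cq_armed_list.)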
4175 */ 4176 list_for_each_entry(mcq, &cq_armed_list, reset_notify) { 4177 mcq->comp(mcq); 4178 } 4179 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags); 4180 } 4181 4182 static void delay_drop_handler(struct work_struct *work) 4183 { 4184 int err; 4185 struct mlx5_ib_delay_drop *delay_drop = 4186 container_of(work, struct mlx5_ib_delay_drop, 4187 delay_drop_work); 4188 4189 atomic_inc(&delay_drop->events_cnt); 4190 4191 mutex_lock(&delay_drop->lock); 4192 err = mlx5_core_set_delay_drop(delay_drop->dev->mdev, 4193 delay_drop->timeout); 4194 if (err) { 4195 mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n", 4196 delay_drop->timeout); 4197 delay_drop->activate = false; 4198 } 4199 mutex_unlock(&delay_drop->lock); 4200 } 4201 4202 static void mlx5_ib_handle_event(struct work_struct *_work) 4203 { 4204 struct mlx5_ib_event_work *work = 4205 container_of(_work, struct mlx5_ib_event_work, work); 4206 struct mlx5_ib_dev *ibdev; 4207 struct ib_event ibev; 4208 bool fatal = false; 4209 u8 port = (u8)work->param; 4210 4211 if (mlx5_core_is_mp_slave(work->dev)) { 4212 ibdev = mlx5_ib_get_ibdev_from_mpi(work->context); 4213 if (!ibdev) 4214 goto out; 4215 } else { 4216 ibdev = work->context; 4217 } 4218 4219 switch (work->event) { 4220 case MLX5_DEV_EVENT_SYS_ERROR: 4221 ibev.event = IB_EVENT_DEVICE_FATAL; 4222 mlx5_ib_handle_internal_error(ibdev); 4223 fatal = true; 4224 break; 4225 4226 case MLX5_DEV_EVENT_PORT_UP: 4227 case MLX5_DEV_EVENT_PORT_DOWN: 4228 case MLX5_DEV_EVENT_PORT_INITIALIZED: 4229 /* In RoCE, port up/down events are handled in 4230 * mlx5_netdev_event(). 4231 */ 4232 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) == 4233 IB_LINK_LAYER_ETHERNET) 4234 goto out; 4235 4236 ibev.event = (work->event == MLX5_DEV_EVENT_PORT_UP) ? 
4237 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; 4238 break; 4239 4240 case MLX5_DEV_EVENT_LID_CHANGE: 4241 ibev.event = IB_EVENT_LID_CHANGE; 4242 break; 4243 4244 case MLX5_DEV_EVENT_PKEY_CHANGE: 4245 ibev.event = IB_EVENT_PKEY_CHANGE; 4246 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); 4247 break; 4248 4249 case MLX5_DEV_EVENT_GUID_CHANGE: 4250 ibev.event = IB_EVENT_GID_CHANGE; 4251 break; 4252 4253 case MLX5_DEV_EVENT_CLIENT_REREG: 4254 ibev.event = IB_EVENT_CLIENT_REREGISTER; 4255 break; 4256 case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT: 4257 schedule_work(&ibdev->delay_drop.delay_drop_work); 4258 goto out; 4259 default: 4260 goto out; 4261 } 4262 4263 ibev.device = &ibdev->ib_dev; 4264 ibev.element.port_num = port; 4265 4266 if (!rdma_is_port_valid(&ibdev->ib_dev, port)) { 4267 mlx5_ib_warn(ibdev, "warning: event on port %d\n", port); 4268 goto out; 4269 } 4270 4271 if (ibdev->ib_active) 4272 ib_dispatch_event(&ibev); 4273 4274 if (fatal) 4275 ibdev->ib_active = false; 4276 out: 4277 kfree(work); 4278 } 4279 4280 static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, 4281 enum mlx5_dev_event event, unsigned long param) 4282 { 4283 struct mlx5_ib_event_work *work; 4284 4285 work = kmalloc(sizeof(*work), GFP_ATOMIC); 4286 if (!work) 4287 return; 4288 4289 INIT_WORK(&work->work, mlx5_ib_handle_event); 4290 work->dev = dev; 4291 work->param = param; 4292 work->context = context; 4293 work->event = event; 4294 4295 queue_work(mlx5_ib_event_wq, &work->work); 4296 } 4297 4298 static int set_has_smi_cap(struct mlx5_ib_dev *dev) 4299 { 4300 struct mlx5_hca_vport_context vport_ctx; 4301 int err; 4302 int port; 4303 4304 for (port = 1; port <= dev->num_ports; port++) { 4305 dev->mdev->port_caps[port - 1].has_smi = false; 4306 if (MLX5_CAP_GEN(dev->mdev, port_type) == 4307 MLX5_CAP_PORT_TYPE_IB) { 4308 if (MLX5_CAP_GEN(dev->mdev, ib_virt)) { 4309 err = mlx5_query_hca_vport_context(dev->mdev, 0, 4310 port, 0, 4311 &vport_ctx); 4312 if (err) { 4313 mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n", 4314 port, err); 4315 return err; 4316 } 4317 dev->mdev->port_caps[port - 1].has_smi = 4318 vport_ctx.has_smi; 4319 } else { 4320 dev->mdev->port_caps[port - 1].has_smi = true; 4321 } 4322 } 4323 } 4324 return 0; 4325 } 4326 4327 static void get_ext_port_caps(struct mlx5_ib_dev *dev) 4328 { 4329 int port; 4330 4331 for (port = 1; port <= dev->num_ports; port++) 4332 mlx5_query_ext_port_caps(dev, port); 4333 } 4334 4335 static int get_port_caps(struct mlx5_ib_dev *dev, u8 port) 4336 { 4337 struct ib_device_attr *dprops = NULL; 4338 struct ib_port_attr *pprops = NULL; 4339 int err = -ENOMEM; 4340 struct ib_udata uhw = {.inlen = 0, .outlen = 0}; 4341 4342 pprops = kmalloc(sizeof(*pprops), GFP_KERNEL); 4343 if (!pprops) 4344 goto out; 4345 4346 dprops = kmalloc(sizeof(*dprops), GFP_KERNEL); 4347 if (!dprops) 4348 goto out; 4349 4350 err = set_has_smi_cap(dev); 4351 if (err) 4352 goto out; 4353 4354 err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw); 4355 if (err) { 4356 mlx5_ib_warn(dev, "query_device failed %d\n", err); 4357 goto out; 4358 } 4359 4360 memset(pprops, 0, sizeof(*pprops)); 4361 err = mlx5_ib_query_port(&dev->ib_dev, port, pprops); 4362 if (err) { 4363 mlx5_ib_warn(dev, "query_port %d failed %d\n", 4364 port, err); 4365 goto out; 4366 } 4367 4368 dev->mdev->port_caps[port - 1].pkey_table_len = 4369 dprops->max_pkeys; 4370 dev->mdev->port_caps[port - 1].gid_table_len = 4371 pprops->gid_tbl_len; 4372 mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, 
gid_table_len %d\n", 4373 port, dprops->max_pkeys, pprops->gid_tbl_len); 4374 4375 out: 4376 kfree(pprops); 4377 kfree(dprops); 4378 4379 return err; 4380 } 4381 4382 static void destroy_umrc_res(struct mlx5_ib_dev *dev) 4383 { 4384 int err; 4385 4386 err = mlx5_mr_cache_cleanup(dev); 4387 if (err) 4388 mlx5_ib_warn(dev, "mr cache cleanup failed\n"); 4389 4390 if (dev->umrc.qp) 4391 mlx5_ib_destroy_qp(dev->umrc.qp); 4392 if (dev->umrc.cq) 4393 ib_free_cq(dev->umrc.cq); 4394 if (dev->umrc.pd) 4395 ib_dealloc_pd(dev->umrc.pd); 4396 } 4397 4398 enum { 4399 MAX_UMR_WR = 128, 4400 }; 4401 4402 static int create_umr_res(struct mlx5_ib_dev *dev) 4403 { 4404 struct ib_qp_init_attr *init_attr = NULL; 4405 struct ib_qp_attr *attr = NULL; 4406 struct ib_pd *pd; 4407 struct ib_cq *cq; 4408 struct ib_qp *qp; 4409 int ret; 4410 4411 attr = kzalloc(sizeof(*attr), GFP_KERNEL); 4412 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); 4413 if (!attr || !init_attr) { 4414 ret = -ENOMEM; 4415 goto error_0; 4416 } 4417 4418 pd = ib_alloc_pd(&dev->ib_dev, 0); 4419 if (IS_ERR(pd)) { 4420 mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n"); 4421 ret = PTR_ERR(pd); 4422 goto error_0; 4423 } 4424 4425 cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ); 4426 if (IS_ERR(cq)) { 4427 mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n"); 4428 ret = PTR_ERR(cq); 4429 goto error_2; 4430 } 4431 4432 init_attr->send_cq = cq; 4433 init_attr->recv_cq = cq; 4434 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; 4435 init_attr->cap.max_send_wr = MAX_UMR_WR; 4436 init_attr->cap.max_send_sge = 1; 4437 init_attr->qp_type = MLX5_IB_QPT_REG_UMR; 4438 init_attr->port_num = 1; 4439 qp = mlx5_ib_create_qp(pd, init_attr, NULL); 4440 if (IS_ERR(qp)) { 4441 mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n"); 4442 ret = PTR_ERR(qp); 4443 goto error_3; 4444 } 4445 qp->device = &dev->ib_dev; 4446 qp->real_qp = qp; 4447 qp->uobject = NULL; 4448 qp->qp_type = MLX5_IB_QPT_REG_UMR; 4449 qp->send_cq = init_attr->send_cq; 4450 qp->recv_cq = init_attr->recv_cq; 4451 4452 attr->qp_state = IB_QPS_INIT; 4453 attr->port_num = 1; 4454 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | 4455 IB_QP_PORT, NULL); 4456 if (ret) { 4457 mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n"); 4458 goto error_4; 4459 } 4460 4461 memset(attr, 0, sizeof(*attr)); 4462 attr->qp_state = IB_QPS_RTR; 4463 attr->path_mtu = IB_MTU_256; 4464 4465 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); 4466 if (ret) { 4467 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n"); 4468 goto error_4; 4469 } 4470 4471 memset(attr, 0, sizeof(*attr)); 4472 attr->qp_state = IB_QPS_RTS; 4473 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); 4474 if (ret) { 4475 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n"); 4476 goto error_4; 4477 } 4478 4479 dev->umrc.qp = qp; 4480 dev->umrc.cq = cq; 4481 dev->umrc.pd = pd; 4482 4483 sema_init(&dev->umrc.sem, MAX_UMR_WR); 4484 ret = mlx5_mr_cache_init(dev); 4485 if (ret) { 4486 mlx5_ib_warn(dev, "mr cache init failed %d\n", ret); 4487 goto error_4; 4488 } 4489 4490 kfree(attr); 4491 kfree(init_attr); 4492 4493 return 0; 4494 4495 error_4: 4496 mlx5_ib_destroy_qp(qp); 4497 dev->umrc.qp = NULL; 4498 4499 error_3: 4500 ib_free_cq(cq); 4501 dev->umrc.cq = NULL; 4502 4503 error_2: 4504 ib_dealloc_pd(pd); 4505 dev->umrc.pd = NULL; 4506 4507 error_0: 4508 kfree(attr); 4509 kfree(init_attr); 4510 return ret; 4511 } 4512 4513 static u8 mlx5_get_umr_fence(u8 umr_fence_cap) 4514 { 4515 switch (umr_fence_cap) { 4516 case 
MLX5_CAP_UMR_FENCE_NONE: 4517 return MLX5_FENCE_MODE_NONE; 4518 case MLX5_CAP_UMR_FENCE_SMALL: 4519 return MLX5_FENCE_MODE_INITIATOR_SMALL; 4520 default: 4521 return MLX5_FENCE_MODE_STRONG_ORDERING; 4522 } 4523 } 4524 4525 static int create_dev_resources(struct mlx5_ib_resources *devr) 4526 { 4527 struct ib_srq_init_attr attr; 4528 struct mlx5_ib_dev *dev; 4529 struct ib_cq_init_attr cq_attr = {.cqe = 1}; 4530 int port; 4531 int ret = 0; 4532 4533 dev = container_of(devr, struct mlx5_ib_dev, devr); 4534 4535 mutex_init(&devr->mutex); 4536 4537 devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); 4538 if (IS_ERR(devr->p0)) { 4539 ret = PTR_ERR(devr->p0); 4540 goto error0; 4541 } 4542 devr->p0->device = &dev->ib_dev; 4543 devr->p0->uobject = NULL; 4544 atomic_set(&devr->p0->usecnt, 0); 4545 4546 devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL); 4547 if (IS_ERR(devr->c0)) { 4548 ret = PTR_ERR(devr->c0); 4549 goto error1; 4550 } 4551 devr->c0->device = &dev->ib_dev; 4552 devr->c0->uobject = NULL; 4553 devr->c0->comp_handler = NULL; 4554 devr->c0->event_handler = NULL; 4555 devr->c0->cq_context = NULL; 4556 atomic_set(&devr->c0->usecnt, 0); 4557 4558 devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL); 4559 if (IS_ERR(devr->x0)) { 4560 ret = PTR_ERR(devr->x0); 4561 goto error2; 4562 } 4563 devr->x0->device = &dev->ib_dev; 4564 devr->x0->inode = NULL; 4565 atomic_set(&devr->x0->usecnt, 0); 4566 mutex_init(&devr->x0->tgt_qp_mutex); 4567 INIT_LIST_HEAD(&devr->x0->tgt_qp_list); 4568 4569 devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL); 4570 if (IS_ERR(devr->x1)) { 4571 ret = PTR_ERR(devr->x1); 4572 goto error3; 4573 } 4574 devr->x1->device = &dev->ib_dev; 4575 devr->x1->inode = NULL; 4576 atomic_set(&devr->x1->usecnt, 0); 4577 mutex_init(&devr->x1->tgt_qp_mutex); 4578 INIT_LIST_HEAD(&devr->x1->tgt_qp_list); 4579 4580 memset(&attr, 0, sizeof(attr)); 4581 attr.attr.max_sge = 1; 4582 attr.attr.max_wr = 1; 4583 attr.srq_type = IB_SRQT_XRC; 4584 attr.ext.cq = devr->c0; 4585 attr.ext.xrc.xrcd = devr->x0; 4586 4587 devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL); 4588 if (IS_ERR(devr->s0)) { 4589 ret = PTR_ERR(devr->s0); 4590 goto error4; 4591 } 4592 devr->s0->device = &dev->ib_dev; 4593 devr->s0->pd = devr->p0; 4594 devr->s0->uobject = NULL; 4595 devr->s0->event_handler = NULL; 4596 devr->s0->srq_context = NULL; 4597 devr->s0->srq_type = IB_SRQT_XRC; 4598 devr->s0->ext.xrc.xrcd = devr->x0; 4599 devr->s0->ext.cq = devr->c0; 4600 atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt); 4601 atomic_inc(&devr->s0->ext.cq->usecnt); 4602 atomic_inc(&devr->p0->usecnt); 4603 atomic_set(&devr->s0->usecnt, 0); 4604 4605 memset(&attr, 0, sizeof(attr)); 4606 attr.attr.max_sge = 1; 4607 attr.attr.max_wr = 1; 4608 attr.srq_type = IB_SRQT_BASIC; 4609 devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL); 4610 if (IS_ERR(devr->s1)) { 4611 ret = PTR_ERR(devr->s1); 4612 goto error5; 4613 } 4614 devr->s1->device = &dev->ib_dev; 4615 devr->s1->pd = devr->p0; 4616 devr->s1->uobject = NULL; 4617 devr->s1->event_handler = NULL; 4618 devr->s1->srq_context = NULL; 4619 devr->s1->srq_type = IB_SRQT_BASIC; 4620 devr->s1->ext.cq = devr->c0; 4621 atomic_inc(&devr->p0->usecnt); 4622 atomic_set(&devr->s1->usecnt, 0); 4623 4624 for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) { 4625 INIT_WORK(&devr->ports[port].pkey_change_work, 4626 pkey_change_handler); 4627 devr->ports[port].devr = devr; 4628 } 4629 4630 return 0; 4631 4632 error5: 4633 mlx5_ib_destroy_srq(devr->s0); 4634 error4: 4635 
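/* The error labels unwind in reverse order of creation; each one releases whatever was successfully set up before the failing step. */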
mlx5_ib_dealloc_xrcd(devr->x1); 4636 error3: 4637 mlx5_ib_dealloc_xrcd(devr->x0); 4638 error2: 4639 mlx5_ib_destroy_cq(devr->c0); 4640 error1: 4641 mlx5_ib_dealloc_pd(devr->p0); 4642 error0: 4643 return ret; 4644 } 4645 4646 static void destroy_dev_resources(struct mlx5_ib_resources *devr) 4647 { 4648 struct mlx5_ib_dev *dev = 4649 container_of(devr, struct mlx5_ib_dev, devr); 4650 int port; 4651 4652 mlx5_ib_destroy_srq(devr->s1); 4653 mlx5_ib_destroy_srq(devr->s0); 4654 mlx5_ib_dealloc_xrcd(devr->x0); 4655 mlx5_ib_dealloc_xrcd(devr->x1); 4656 mlx5_ib_destroy_cq(devr->c0); 4657 mlx5_ib_dealloc_pd(devr->p0); 4658 4659 /* Make sure no change P_Key work items are still executing */ 4660 for (port = 0; port < dev->num_ports; ++port) 4661 cancel_work_sync(&devr->ports[port].pkey_change_work); 4662 } 4663 4664 static u32 get_core_cap_flags(struct ib_device *ibdev, 4665 struct mlx5_hca_vport_context *rep) 4666 { 4667 struct mlx5_ib_dev *dev = to_mdev(ibdev); 4668 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1); 4669 u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type); 4670 u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version); 4671 bool raw_support = !mlx5_core_mp_enabled(dev->mdev); 4672 u32 ret = 0; 4673 4674 if (rep->grh_required) 4675 ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED; 4676 4677 if (ll == IB_LINK_LAYER_INFINIBAND) 4678 return ret | RDMA_CORE_PORT_IBA_IB; 4679 4680 if (raw_support) 4681 ret |= RDMA_CORE_PORT_RAW_PACKET; 4682 4683 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP)) 4684 return ret; 4685 4686 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP)) 4687 return ret; 4688 4689 if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP) 4690 ret |= RDMA_CORE_PORT_IBA_ROCE; 4691 4692 if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP) 4693 ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; 4694 4695 return ret; 4696 } 4697 4698 static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num, 4699 struct ib_port_immutable *immutable) 4700 { 4701 struct ib_port_attr attr; 4702 struct mlx5_ib_dev *dev = to_mdev(ibdev); 4703 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num); 4704 struct mlx5_hca_vport_context rep = {0}; 4705 int err; 4706 4707 err = ib_query_port(ibdev, port_num, &attr); 4708 if (err) 4709 return err; 4710 4711 if (ll == IB_LINK_LAYER_INFINIBAND) { 4712 err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0, 4713 &rep); 4714 if (err) 4715 return err; 4716 } 4717 4718 immutable->pkey_tbl_len = attr.pkey_tbl_len; 4719 immutable->gid_tbl_len = attr.gid_tbl_len; 4720 immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep); 4721 if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce)) 4722 immutable->max_mad_size = IB_MGMT_MAD_SIZE; 4723 4724 return 0; 4725 } 4726 4727 static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num, 4728 struct ib_port_immutable *immutable) 4729 { 4730 struct ib_port_attr attr; 4731 int err; 4732 4733 immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET; 4734 4735 err = ib_query_port(ibdev, port_num, &attr); 4736 if (err) 4737 return err; 4738 4739 immutable->pkey_tbl_len = attr.pkey_tbl_len; 4740 immutable->gid_tbl_len = attr.gid_tbl_len; 4741 immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET; 4742 4743 return 0; 4744 } 4745 4746 static void get_dev_fw_str(struct ib_device *ibdev, char *str) 4747 { 4748 struct mlx5_ib_dev *dev = 4749 container_of(ibdev, struct mlx5_ib_dev, ib_dev); 4750 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d", 4751 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev), 4752 
fw_rev_sub(dev->mdev)); 4753 } 4754 4755 static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev) 4756 { 4757 struct mlx5_core_dev *mdev = dev->mdev; 4758 struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev, 4759 MLX5_FLOW_NAMESPACE_LAG); 4760 struct mlx5_flow_table *ft; 4761 int err; 4762 4763 if (!ns || !mlx5_lag_is_active(mdev)) 4764 return 0; 4765 4766 err = mlx5_cmd_create_vport_lag(mdev); 4767 if (err) 4768 return err; 4769 4770 ft = mlx5_create_lag_demux_flow_table(ns, 0, 0); 4771 if (IS_ERR(ft)) { 4772 err = PTR_ERR(ft); 4773 goto err_destroy_vport_lag; 4774 } 4775 4776 dev->flow_db->lag_demux_ft = ft; 4777 return 0; 4778 4779 err_destroy_vport_lag: 4780 mlx5_cmd_destroy_vport_lag(mdev); 4781 return err; 4782 } 4783 4784 static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev) 4785 { 4786 struct mlx5_core_dev *mdev = dev->mdev; 4787 4788 if (dev->flow_db->lag_demux_ft) { 4789 mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft); 4790 dev->flow_db->lag_demux_ft = NULL; 4791 4792 mlx5_cmd_destroy_vport_lag(mdev); 4793 } 4794 } 4795 4796 static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num) 4797 { 4798 int err; 4799 4800 dev->roce[port_num].nb.notifier_call = mlx5_netdev_event; 4801 err = register_netdevice_notifier(&dev->roce[port_num].nb); 4802 if (err) { 4803 dev->roce[port_num].nb.notifier_call = NULL; 4804 return err; 4805 } 4806 4807 return 0; 4808 } 4809 4810 static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num) 4811 { 4812 if (dev->roce[port_num].nb.notifier_call) { 4813 unregister_netdevice_notifier(&dev->roce[port_num].nb); 4814 dev->roce[port_num].nb.notifier_call = NULL; 4815 } 4816 } 4817 4818 static int mlx5_enable_eth(struct mlx5_ib_dev *dev) 4819 { 4820 int err; 4821 4822 if (MLX5_CAP_GEN(dev->mdev, roce)) { 4823 err = mlx5_nic_vport_enable_roce(dev->mdev); 4824 if (err) 4825 return err; 4826 } 4827 4828 err = mlx5_eth_lag_init(dev); 4829 if (err) 4830 goto err_disable_roce; 4831 4832 return 0; 4833 4834 err_disable_roce: 4835 if (MLX5_CAP_GEN(dev->mdev, roce)) 4836 mlx5_nic_vport_disable_roce(dev->mdev); 4837 4838 return err; 4839 } 4840 4841 static void mlx5_disable_eth(struct mlx5_ib_dev *dev) 4842 { 4843 mlx5_eth_lag_cleanup(dev); 4844 if (MLX5_CAP_GEN(dev->mdev, roce)) 4845 mlx5_nic_vport_disable_roce(dev->mdev); 4846 } 4847 4848 struct mlx5_ib_counter { 4849 const char *name; 4850 size_t offset; 4851 }; 4852 4853 #define INIT_Q_COUNTER(_name) \ 4854 { .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)} 4855 4856 static const struct mlx5_ib_counter basic_q_cnts[] = { 4857 INIT_Q_COUNTER(rx_write_requests), 4858 INIT_Q_COUNTER(rx_read_requests), 4859 INIT_Q_COUNTER(rx_atomic_requests), 4860 INIT_Q_COUNTER(out_of_buffer), 4861 }; 4862 4863 static const struct mlx5_ib_counter out_of_seq_q_cnts[] = { 4864 INIT_Q_COUNTER(out_of_sequence), 4865 }; 4866 4867 static const struct mlx5_ib_counter retrans_q_cnts[] = { 4868 INIT_Q_COUNTER(duplicate_request), 4869 INIT_Q_COUNTER(rnr_nak_retry_err), 4870 INIT_Q_COUNTER(packet_seq_err), 4871 INIT_Q_COUNTER(implied_nak_seq_err), 4872 INIT_Q_COUNTER(local_ack_timeout_err), 4873 }; 4874 4875 #define INIT_CONG_COUNTER(_name) \ 4876 { .name = #_name, .offset = \ 4877 MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)} 4878 4879 static const struct mlx5_ib_counter cong_cnts[] = { 4880 INIT_CONG_COUNTER(rp_cnp_ignored), 4881 INIT_CONG_COUNTER(rp_cnp_handled), 4882 INIT_CONG_COUNTER(np_ecn_marked_roce_packets), 4883 INIT_CONG_COUNTER(np_cnp_sent), 4884 }; 4885 
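/* For reference, INIT_Q_COUNTER(out_of_sequence) above expands to
 * { .name = "out_of_sequence",
 *   .offset = MLX5_BYTE_OFF(query_q_counter_out, out_of_sequence) },
 * pairing each counter name with its byte offset in the query_q_counter_out
 * mailbox layout. The extended error counters below are exposed only when the
 * enhanced_error_q_counters HCA capability is reported (see
 * __mlx5_ib_alloc_counters()).
 */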
4886 static const struct mlx5_ib_counter extended_err_cnts[] = { 4887 INIT_Q_COUNTER(resp_local_length_error), 4888 INIT_Q_COUNTER(resp_cqe_error), 4889 INIT_Q_COUNTER(req_cqe_error), 4890 INIT_Q_COUNTER(req_remote_invalid_request), 4891 INIT_Q_COUNTER(req_remote_access_errors), 4892 INIT_Q_COUNTER(resp_remote_access_errors), 4893 INIT_Q_COUNTER(resp_cqe_flush_error), 4894 INIT_Q_COUNTER(req_cqe_flush_error), 4895 }; 4896 4897 #define INIT_EXT_PPCNT_COUNTER(_name) \ 4898 { .name = #_name, .offset = \ 4899 MLX5_BYTE_OFF(ppcnt_reg, \ 4900 counter_set.eth_extended_cntrs_grp_data_layout._name##_high)} 4901 4902 static const struct mlx5_ib_counter ext_ppcnt_cnts[] = { 4903 INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated), 4904 }; 4905 4906 static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev) 4907 { 4908 int i; 4909 4910 for (i = 0; i < dev->num_ports; i++) { 4911 if (dev->port[i].cnts.set_id_valid) 4912 mlx5_core_dealloc_q_counter(dev->mdev, 4913 dev->port[i].cnts.set_id); 4914 kfree(dev->port[i].cnts.names); 4915 kfree(dev->port[i].cnts.offsets); 4916 } 4917 } 4918 4919 static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev, 4920 struct mlx5_ib_counters *cnts) 4921 { 4922 u32 num_counters; 4923 4924 num_counters = ARRAY_SIZE(basic_q_cnts); 4925 4926 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) 4927 num_counters += ARRAY_SIZE(out_of_seq_q_cnts); 4928 4929 if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) 4930 num_counters += ARRAY_SIZE(retrans_q_cnts); 4931 4932 if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) 4933 num_counters += ARRAY_SIZE(extended_err_cnts); 4934 4935 cnts->num_q_counters = num_counters; 4936 4937 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { 4938 cnts->num_cong_counters = ARRAY_SIZE(cong_cnts); 4939 num_counters += ARRAY_SIZE(cong_cnts); 4940 } 4941 if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { 4942 cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts); 4943 num_counters += ARRAY_SIZE(ext_ppcnt_cnts); 4944 } 4945 cnts->names = kcalloc(num_counters, sizeof(cnts->names), GFP_KERNEL); 4946 if (!cnts->names) 4947 return -ENOMEM; 4948 4949 cnts->offsets = kcalloc(num_counters, 4950 sizeof(cnts->offsets), GFP_KERNEL); 4951 if (!cnts->offsets) 4952 goto err_names; 4953 4954 return 0; 4955 4956 err_names: 4957 kfree(cnts->names); 4958 cnts->names = NULL; 4959 return -ENOMEM; 4960 } 4961 4962 static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev, 4963 const char **names, 4964 size_t *offsets) 4965 { 4966 int i; 4967 int j = 0; 4968 4969 for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) { 4970 names[j] = basic_q_cnts[i].name; 4971 offsets[j] = basic_q_cnts[i].offset; 4972 } 4973 4974 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) { 4975 for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) { 4976 names[j] = out_of_seq_q_cnts[i].name; 4977 offsets[j] = out_of_seq_q_cnts[i].offset; 4978 } 4979 } 4980 4981 if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) { 4982 for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) { 4983 names[j] = retrans_q_cnts[i].name; 4984 offsets[j] = retrans_q_cnts[i].offset; 4985 } 4986 } 4987 4988 if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) { 4989 for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) { 4990 names[j] = extended_err_cnts[i].name; 4991 offsets[j] = extended_err_cnts[i].offset; 4992 } 4993 } 4994 4995 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { 4996 for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) { 4997 names[j] = cong_cnts[i].name; 4998 offsets[j] = 
cong_cnts[i].offset; 4999 } 5000 } 5001 5002 if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { 5003 for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) { 5004 names[j] = ext_ppcnt_cnts[i].name; 5005 offsets[j] = ext_ppcnt_cnts[i].offset; 5006 } 5007 } 5008 } 5009 5010 static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev) 5011 { 5012 int err = 0; 5013 int i; 5014 5015 for (i = 0; i < dev->num_ports; i++) { 5016 err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts); 5017 if (err) 5018 goto err_alloc; 5019 5020 mlx5_ib_fill_counters(dev, dev->port[i].cnts.names, 5021 dev->port[i].cnts.offsets); 5022 5023 err = mlx5_core_alloc_q_counter(dev->mdev, 5024 &dev->port[i].cnts.set_id); 5025 if (err) { 5026 mlx5_ib_warn(dev, 5027 "couldn't allocate queue counter for port %d, err %d\n", 5028 i + 1, err); 5029 goto err_alloc; 5030 } 5031 dev->port[i].cnts.set_id_valid = true; 5032 } 5033 5034 return 0; 5035 5036 err_alloc: 5037 mlx5_ib_dealloc_counters(dev); 5038 return err; 5039 } 5040 5041 static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev, 5042 u8 port_num) 5043 { 5044 struct mlx5_ib_dev *dev = to_mdev(ibdev); 5045 struct mlx5_ib_port *port = &dev->port[port_num - 1]; 5046 5047 /* We support only per port stats */ 5048 if (port_num == 0) 5049 return NULL; 5050 5051 return rdma_alloc_hw_stats_struct(port->cnts.names, 5052 port->cnts.num_q_counters + 5053 port->cnts.num_cong_counters + 5054 port->cnts.num_ext_ppcnt_counters, 5055 RDMA_HW_STATS_DEFAULT_LIFESPAN); 5056 } 5057 5058 static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev, 5059 struct mlx5_ib_port *port, 5060 struct rdma_hw_stats *stats) 5061 { 5062 int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out); 5063 void *out; 5064 __be32 val; 5065 int ret, i; 5066 5067 out = kvzalloc(outlen, GFP_KERNEL); 5068 if (!out) 5069 return -ENOMEM; 5070 5071 ret = mlx5_core_query_q_counter(mdev, 5072 port->cnts.set_id, 0, 5073 out, outlen); 5074 if (ret) 5075 goto free; 5076 5077 for (i = 0; i < port->cnts.num_q_counters; i++) { 5078 val = *(__be32 *)(out + port->cnts.offsets[i]); 5079 stats->value[i] = (u64)be32_to_cpu(val); 5080 } 5081 5082 free: 5083 kvfree(out); 5084 return ret; 5085 } 5086 5087 static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev, 5088 struct mlx5_ib_port *port, 5089 struct rdma_hw_stats *stats) 5090 { 5091 int offset = port->cnts.num_q_counters + port->cnts.num_cong_counters; 5092 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); 5093 int ret, i; 5094 void *out; 5095 5096 out = kvzalloc(sz, GFP_KERNEL); 5097 if (!out) 5098 return -ENOMEM; 5099 5100 ret = mlx5_cmd_query_ext_ppcnt_counters(dev->mdev, out); 5101 if (ret) 5102 goto free; 5103 5104 for (i = 0; i < port->cnts.num_ext_ppcnt_counters; i++) { 5105 stats->value[i + offset] = 5106 be64_to_cpup((__be64 *)(out + 5107 port->cnts.offsets[i + offset])); 5108 } 5109 5110 free: 5111 kvfree(out); 5112 return ret; 5113 } 5114 5115 static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, 5116 struct rdma_hw_stats *stats, 5117 u8 port_num, int index) 5118 { 5119 struct mlx5_ib_dev *dev = to_mdev(ibdev); 5120 struct mlx5_ib_port *port = &dev->port[port_num - 1]; 5121 struct mlx5_core_dev *mdev; 5122 int ret, num_counters; 5123 u8 mdev_port_num; 5124 5125 if (!stats) 5126 return -EINVAL; 5127 5128 num_counters = port->cnts.num_q_counters + 5129 port->cnts.num_cong_counters + 5130 port->cnts.num_ext_ppcnt_counters; 5131 5132 /* q_counters are per IB device, query the master mdev */ 5133 ret = mlx5_ib_query_q_counters(dev->mdev, 
port, stats); 5134 if (ret) 5135 return ret; 5136 5137 if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { 5138 ret = mlx5_ib_query_ext_ppcnt_counters(dev, port, stats); 5139 if (ret) 5140 return ret; 5141 } 5142 5143 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { 5144 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, 5145 &mdev_port_num); 5146 if (!mdev) { 5147 /* If port is not affiliated yet, its in down state 5148 * which doesn't have any counters yet, so it would be 5149 * zero. So no need to read from the HCA. 5150 */ 5151 goto done; 5152 } 5153 ret = mlx5_lag_query_cong_counters(dev->mdev, 5154 stats->value + 5155 port->cnts.num_q_counters, 5156 port->cnts.num_cong_counters, 5157 port->cnts.offsets + 5158 port->cnts.num_q_counters); 5159 5160 mlx5_ib_put_native_port_mdev(dev, port_num); 5161 if (ret) 5162 return ret; 5163 } 5164 5165 done: 5166 return num_counters; 5167 } 5168 5169 static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num, 5170 enum rdma_netdev_t type, 5171 struct rdma_netdev_alloc_params *params) 5172 { 5173 if (type != RDMA_NETDEV_IPOIB) 5174 return -EOPNOTSUPP; 5175 5176 return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params); 5177 } 5178 5179 static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev) 5180 { 5181 if (!dev->delay_drop.dbg) 5182 return; 5183 debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs); 5184 kfree(dev->delay_drop.dbg); 5185 dev->delay_drop.dbg = NULL; 5186 } 5187 5188 static void cancel_delay_drop(struct mlx5_ib_dev *dev) 5189 { 5190 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP)) 5191 return; 5192 5193 cancel_work_sync(&dev->delay_drop.delay_drop_work); 5194 delay_drop_debugfs_cleanup(dev); 5195 } 5196 5197 static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf, 5198 size_t count, loff_t *pos) 5199 { 5200 struct mlx5_ib_delay_drop *delay_drop = filp->private_data; 5201 char lbuf[20]; 5202 int len; 5203 5204 len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout); 5205 return simple_read_from_buffer(buf, count, pos, lbuf, len); 5206 } 5207 5208 static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf, 5209 size_t count, loff_t *pos) 5210 { 5211 struct mlx5_ib_delay_drop *delay_drop = filp->private_data; 5212 u32 timeout; 5213 u32 var; 5214 5215 if (kstrtouint_from_user(buf, count, 0, &var)) 5216 return -EFAULT; 5217 5218 timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 5219 1000); 5220 if (timeout != var) 5221 mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n", 5222 timeout); 5223 5224 delay_drop->timeout = timeout; 5225 5226 return count; 5227 } 5228 5229 static const struct file_operations fops_delay_drop_timeout = { 5230 .owner = THIS_MODULE, 5231 .open = simple_open, 5232 .write = delay_drop_timeout_write, 5233 .read = delay_drop_timeout_read, 5234 }; 5235 5236 static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev) 5237 { 5238 struct mlx5_ib_dbg_delay_drop *dbg; 5239 5240 if (!mlx5_debugfs_root) 5241 return 0; 5242 5243 dbg = kzalloc(sizeof(*dbg), GFP_KERNEL); 5244 if (!dbg) 5245 return -ENOMEM; 5246 5247 dev->delay_drop.dbg = dbg; 5248 5249 dbg->dir_debugfs = 5250 debugfs_create_dir("delay_drop", 5251 dev->mdev->priv.dbg_root); 5252 if (!dbg->dir_debugfs) 5253 goto out_debugfs; 5254 5255 dbg->events_cnt_debugfs = 5256 debugfs_create_atomic_t("num_timeout_events", 0400, 5257 dbg->dir_debugfs, 5258 &dev->delay_drop.events_cnt); 5259 if 
(!dbg->events_cnt_debugfs) 5260 goto out_debugfs; 5261 5262 dbg->rqs_cnt_debugfs = 5263 debugfs_create_atomic_t("num_rqs", 0400, 5264 dbg->dir_debugfs, 5265 &dev->delay_drop.rqs_cnt); 5266 if (!dbg->rqs_cnt_debugfs) 5267 goto out_debugfs; 5268 5269 dbg->timeout_debugfs = 5270 debugfs_create_file("timeout", 0600, 5271 dbg->dir_debugfs, 5272 &dev->delay_drop, 5273 &fops_delay_drop_timeout); 5274 if (!dbg->timeout_debugfs) 5275 goto out_debugfs; 5276 5277 return 0; 5278 5279 out_debugfs: 5280 delay_drop_debugfs_cleanup(dev); 5281 return -ENOMEM; 5282 } 5283 5284 static void init_delay_drop(struct mlx5_ib_dev *dev) 5285 { 5286 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP)) 5287 return; 5288 5289 mutex_init(&dev->delay_drop.lock); 5290 dev->delay_drop.dev = dev; 5291 dev->delay_drop.activate = false; 5292 dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000; 5293 INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler); 5294 atomic_set(&dev->delay_drop.rqs_cnt, 0); 5295 atomic_set(&dev->delay_drop.events_cnt, 0); 5296 5297 if (delay_drop_debugfs_init(dev)) 5298 mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n"); 5299 } 5300 5301 static const struct cpumask * 5302 mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector) 5303 { 5304 struct mlx5_ib_dev *dev = to_mdev(ibdev); 5305 5306 return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector); 5307 } 5308 5309 /* The mlx5_ib_multiport_mutex should be held when calling this function */ 5310 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, 5311 struct mlx5_ib_multiport_info *mpi) 5312 { 5313 u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; 5314 struct mlx5_ib_port *port = &ibdev->port[port_num]; 5315 int comps; 5316 int err; 5317 int i; 5318 5319 mlx5_ib_cleanup_cong_debugfs(ibdev, port_num); 5320 5321 spin_lock(&port->mp.mpi_lock); 5322 if (!mpi->ibdev) { 5323 spin_unlock(&port->mp.mpi_lock); 5324 return; 5325 } 5326 mpi->ibdev = NULL; 5327 5328 spin_unlock(&port->mp.mpi_lock); 5329 mlx5_remove_netdev_notifier(ibdev, port_num); 5330 spin_lock(&port->mp.mpi_lock); 5331 5332 comps = mpi->mdev_refcnt; 5333 if (comps) { 5334 mpi->unaffiliate = true; 5335 init_completion(&mpi->unref_comp); 5336 spin_unlock(&port->mp.mpi_lock); 5337 5338 for (i = 0; i < comps; i++) 5339 wait_for_completion(&mpi->unref_comp); 5340 5341 spin_lock(&port->mp.mpi_lock); 5342 mpi->unaffiliate = false; 5343 } 5344 5345 port->mp.mpi = NULL; 5346 5347 list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list); 5348 5349 spin_unlock(&port->mp.mpi_lock); 5350 5351 err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev); 5352 5353 mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1); 5354 /* Log an error, still needed to cleanup the pointers and add 5355 * it back to the list. 
5356 */ 5357 if (err) 5358 mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n", 5359 port_num + 1); 5360 5361 ibdev->roce[port_num].last_port_state = IB_PORT_DOWN; 5362 } 5363 5364 /* The mlx5_ib_multiport_mutex should be held when calling this function */ 5365 static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, 5366 struct mlx5_ib_multiport_info *mpi) 5367 { 5368 u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; 5369 int err; 5370 5371 spin_lock(&ibdev->port[port_num].mp.mpi_lock); 5372 if (ibdev->port[port_num].mp.mpi) { 5373 mlx5_ib_dbg(ibdev, "port %d already affiliated.\n", 5374 port_num + 1); 5375 spin_unlock(&ibdev->port[port_num].mp.mpi_lock); 5376 return false; 5377 } 5378 5379 ibdev->port[port_num].mp.mpi = mpi; 5380 mpi->ibdev = ibdev; 5381 spin_unlock(&ibdev->port[port_num].mp.mpi_lock); 5382 5383 err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev); 5384 if (err) 5385 goto unbind; 5386 5387 err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev)); 5388 if (err) 5389 goto unbind; 5390 5391 err = mlx5_add_netdev_notifier(ibdev, port_num); 5392 if (err) { 5393 mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n", 5394 port_num + 1); 5395 goto unbind; 5396 } 5397 5398 err = mlx5_ib_init_cong_debugfs(ibdev, port_num); 5399 if (err) 5400 goto unbind; 5401 5402 return true; 5403 5404 unbind: 5405 mlx5_ib_unbind_slave_port(ibdev, mpi); 5406 return false; 5407 } 5408 5409 static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev) 5410 { 5411 int port_num = mlx5_core_native_port_num(dev->mdev) - 1; 5412 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 5413 port_num + 1); 5414 struct mlx5_ib_multiport_info *mpi; 5415 int err; 5416 int i; 5417 5418 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET) 5419 return 0; 5420 5421 err = mlx5_query_nic_vport_system_image_guid(dev->mdev, 5422 &dev->sys_image_guid); 5423 if (err) 5424 return err; 5425 5426 err = mlx5_nic_vport_enable_roce(dev->mdev); 5427 if (err) 5428 return err; 5429 5430 mutex_lock(&mlx5_ib_multiport_mutex); 5431 for (i = 0; i < dev->num_ports; i++) { 5432 bool bound = false; 5433 5434 /* build a stub multiport info struct for the native port. 
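 * The native (master) port never sits on the unaffiliated list, so it gets
 * its own mlx5_ib_multiport_info with is_master set here.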
*/ 5435 if (i == port_num) { 5436 mpi = kzalloc(sizeof(*mpi), GFP_KERNEL); 5437 if (!mpi) { 5438 mutex_unlock(&mlx5_ib_multiport_mutex); 5439 mlx5_nic_vport_disable_roce(dev->mdev); 5440 return -ENOMEM; 5441 } 5442 5443 mpi->is_master = true; 5444 mpi->mdev = dev->mdev; 5445 mpi->sys_image_guid = dev->sys_image_guid; 5446 dev->port[i].mp.mpi = mpi; 5447 mpi->ibdev = dev; 5448 mpi = NULL; 5449 continue; 5450 } 5451 5452 list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list, 5453 list) { 5454 if (dev->sys_image_guid == mpi->sys_image_guid && 5455 (mlx5_core_native_port_num(mpi->mdev) - 1) == i) { 5456 bound = mlx5_ib_bind_slave_port(dev, mpi); 5457 } 5458 5459 if (bound) { 5460 dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n"); 5461 mlx5_ib_dbg(dev, "port %d bound\n", i + 1); 5462 list_del(&mpi->list); 5463 break; 5464 } 5465 } 5466 if (!bound) { 5467 get_port_caps(dev, i + 1); 5468 mlx5_ib_dbg(dev, "no free port found for port %d\n", 5469 i + 1); 5470 } 5471 } 5472 5473 list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list); 5474 mutex_unlock(&mlx5_ib_multiport_mutex); 5475 return err; 5476 } 5477 5478 static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev) 5479 { 5480 int port_num = mlx5_core_native_port_num(dev->mdev) - 1; 5481 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 5482 port_num + 1); 5483 int i; 5484 5485 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET) 5486 return; 5487 5488 mutex_lock(&mlx5_ib_multiport_mutex); 5489 for (i = 0; i < dev->num_ports; i++) { 5490 if (dev->port[i].mp.mpi) { 5491 /* Destroy the native port stub */ 5492 if (i == port_num) { 5493 kfree(dev->port[i].mp.mpi); 5494 dev->port[i].mp.mpi = NULL; 5495 } else { 5496 mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1); 5497 mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi); 5498 } 5499 } 5500 } 5501 5502 mlx5_ib_dbg(dev, "removing from devlist\n"); 5503 list_del(&dev->ib_dev_list); 5504 mutex_unlock(&mlx5_ib_multiport_mutex); 5505 5506 mlx5_nic_vport_disable_roce(dev->mdev); 5507 } 5508 5509 ADD_UVERBS_ATTRIBUTES_SIMPLE( 5510 mlx5_ib_dm, 5511 UVERBS_OBJECT_DM, 5512 UVERBS_METHOD_DM_ALLOC, 5513 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET, 5514 UVERBS_ATTR_TYPE(u64), 5515 UA_MANDATORY), 5516 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX, 5517 UVERBS_ATTR_TYPE(u16), 5518 UA_MANDATORY)); 5519 5520 ADD_UVERBS_ATTRIBUTES_SIMPLE( 5521 mlx5_ib_flow_action, 5522 UVERBS_OBJECT_FLOW_ACTION, 5523 UVERBS_METHOD_FLOW_ACTION_ESP_CREATE, 5524 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS, 5525 enum mlx5_ib_uapi_flow_action_flags)); 5526 5527 static int populate_specs_root(struct mlx5_ib_dev *dev) 5528 { 5529 const struct uverbs_object_tree_def **trees = dev->driver_trees; 5530 size_t num_trees = 0; 5531 5532 if (mlx5_accel_ipsec_device_caps(dev->mdev) & 5533 MLX5_ACCEL_IPSEC_CAP_DEVICE) 5534 trees[num_trees++] = &mlx5_ib_flow_action; 5535 5536 if (MLX5_CAP_DEV_MEM(dev->mdev, memic)) 5537 trees[num_trees++] = &mlx5_ib_dm; 5538 5539 if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) & 5540 MLX5_GENERAL_OBJ_TYPES_CAP_UCTX) 5541 trees[num_trees++] = mlx5_ib_get_devx_tree(); 5542 5543 num_trees += mlx5_ib_get_flow_trees(trees + num_trees); 5544 5545 WARN_ON(num_trees >= ARRAY_SIZE(dev->driver_trees)); 5546 trees[num_trees] = NULL; 5547 dev->ib_dev.driver_specs = trees; 5548 5549 return 0; 5550 } 5551 5552 static int mlx5_ib_read_counters(struct ib_counters *counters, 5553 struct ib_counters_read_attr *read_attr, 
5554 struct uverbs_attr_bundle *attrs) 5555 { 5556 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters); 5557 struct mlx5_read_counters_attr mread_attr = {}; 5558 struct mlx5_ib_flow_counters_desc *desc; 5559 int ret, i; 5560 5561 mutex_lock(&mcounters->mcntrs_mutex); 5562 if (mcounters->cntrs_max_index > read_attr->ncounters) { 5563 ret = -EINVAL; 5564 goto err_bound; 5565 } 5566 5567 mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64), 5568 GFP_KERNEL); 5569 if (!mread_attr.out) { 5570 ret = -ENOMEM; 5571 goto err_bound; 5572 } 5573 5574 mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl; 5575 mread_attr.flags = read_attr->flags; 5576 ret = mcounters->read_counters(counters->device, &mread_attr); 5577 if (ret) 5578 goto err_read; 5579 5580 /* do the pass over the counters data array to assign according to the 5581 * descriptions and indexing pairs 5582 */ 5583 desc = mcounters->counters_data; 5584 for (i = 0; i < mcounters->ncounters; i++) 5585 read_attr->counters_buff[desc[i].index] += mread_attr.out[desc[i].description]; 5586 5587 err_read: 5588 kfree(mread_attr.out); 5589 err_bound: 5590 mutex_unlock(&mcounters->mcntrs_mutex); 5591 return ret; 5592 } 5593 5594 static int mlx5_ib_destroy_counters(struct ib_counters *counters) 5595 { 5596 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters); 5597 5598 counters_clear_description(counters); 5599 if (mcounters->hw_cntrs_hndl) 5600 mlx5_fc_destroy(to_mdev(counters->device)->mdev, 5601 mcounters->hw_cntrs_hndl); 5602 5603 kfree(mcounters); 5604 5605 return 0; 5606 } 5607 5608 static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device, 5609 struct uverbs_attr_bundle *attrs) 5610 { 5611 struct mlx5_ib_mcounters *mcounters; 5612 5613 mcounters = kzalloc(sizeof(*mcounters), GFP_KERNEL); 5614 if (!mcounters) 5615 return ERR_PTR(-ENOMEM); 5616 5617 mutex_init(&mcounters->mcntrs_mutex); 5618 5619 return &mcounters->ibcntrs; 5620 } 5621 5622 void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev) 5623 { 5624 mlx5_ib_cleanup_multiport_master(dev); 5625 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 5626 cleanup_srcu_struct(&dev->mr_srcu); 5627 #endif 5628 kfree(dev->port); 5629 } 5630 5631 int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) 5632 { 5633 struct mlx5_core_dev *mdev = dev->mdev; 5634 const char *name; 5635 int err; 5636 int i; 5637 5638 dev->port = kcalloc(dev->num_ports, sizeof(*dev->port), 5639 GFP_KERNEL); 5640 if (!dev->port) 5641 return -ENOMEM; 5642 5643 for (i = 0; i < dev->num_ports; i++) { 5644 spin_lock_init(&dev->port[i].mp.mpi_lock); 5645 rwlock_init(&dev->roce[i].netdev_lock); 5646 } 5647 5648 err = mlx5_ib_init_multiport_master(dev); 5649 if (err) 5650 goto err_free_port; 5651 5652 if (!mlx5_core_mp_enabled(mdev)) { 5653 for (i = 1; i <= dev->num_ports; i++) { 5654 err = get_port_caps(dev, i); 5655 if (err) 5656 break; 5657 } 5658 } else { 5659 err = get_port_caps(dev, mlx5_core_native_port_num(mdev)); 5660 } 5661 if (err) 5662 goto err_mp; 5663 5664 if (mlx5_use_mad_ifc(dev)) 5665 get_ext_port_caps(dev); 5666 5667 if (!mlx5_lag_is_active(mdev)) 5668 name = "mlx5_%d"; 5669 else 5670 name = "mlx5_bond_%d"; 5671 5672 strlcpy(dev->ib_dev.name, name, IB_DEVICE_NAME_MAX); 5673 dev->ib_dev.owner = THIS_MODULE; 5674 dev->ib_dev.node_type = RDMA_NODE_IB_CA; 5675 dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; 5676 dev->ib_dev.phys_port_cnt = dev->num_ports; 5677 dev->ib_dev.num_comp_vectors = 5678 dev->mdev->priv.eq_table.num_comp_vectors; 5679 dev->ib_dev.dev.parent = 
&mdev->pdev->dev; 5680 5681 mutex_init(&dev->cap_mask_mutex); 5682 INIT_LIST_HEAD(&dev->qp_list); 5683 spin_lock_init(&dev->reset_flow_resource_lock); 5684 5685 spin_lock_init(&dev->memic.memic_lock); 5686 dev->memic.dev = mdev; 5687 5688 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 5689 err = init_srcu_struct(&dev->mr_srcu); 5690 if (err) 5691 goto err_free_port; 5692 #endif 5693 5694 return 0; 5695 err_mp: 5696 mlx5_ib_cleanup_multiport_master(dev); 5697 5698 err_free_port: 5699 kfree(dev->port); 5700 5701 return -ENOMEM; 5702 } 5703 5704 static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev) 5705 { 5706 dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL); 5707 5708 if (!dev->flow_db) 5709 return -ENOMEM; 5710 5711 mutex_init(&dev->flow_db->lock); 5712 5713 return 0; 5714 } 5715 5716 int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev) 5717 { 5718 struct mlx5_ib_dev *nic_dev; 5719 5720 nic_dev = mlx5_ib_get_uplink_ibdev(dev->mdev->priv.eswitch); 5721 5722 if (!nic_dev) 5723 return -EINVAL; 5724 5725 dev->flow_db = nic_dev->flow_db; 5726 5727 return 0; 5728 } 5729 5730 static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev) 5731 { 5732 kfree(dev->flow_db); 5733 } 5734 5735 int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev) 5736 { 5737 struct mlx5_core_dev *mdev = dev->mdev; 5738 int err; 5739 5740 dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION; 5741 dev->ib_dev.uverbs_cmd_mask = 5742 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | 5743 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | 5744 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | 5745 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | 5746 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | 5747 (1ull << IB_USER_VERBS_CMD_CREATE_AH) | 5748 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | 5749 (1ull << IB_USER_VERBS_CMD_REG_MR) | 5750 (1ull << IB_USER_VERBS_CMD_REREG_MR) | 5751 (1ull << IB_USER_VERBS_CMD_DEREG_MR) | 5752 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | 5753 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | 5754 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | 5755 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | 5756 (1ull << IB_USER_VERBS_CMD_CREATE_QP) | 5757 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | 5758 (1ull << IB_USER_VERBS_CMD_QUERY_QP) | 5759 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | 5760 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | 5761 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | 5762 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | 5763 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | 5764 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | 5765 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | 5766 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | 5767 (1ull << IB_USER_VERBS_CMD_OPEN_QP); 5768 dev->ib_dev.uverbs_ex_cmd_mask = 5769 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) | 5770 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) | 5771 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) | 5772 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP) | 5773 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ); 5774 5775 dev->ib_dev.query_device = mlx5_ib_query_device; 5776 dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer; 5777 dev->ib_dev.query_gid = mlx5_ib_query_gid; 5778 dev->ib_dev.add_gid = mlx5_ib_add_gid; 5779 dev->ib_dev.del_gid = mlx5_ib_del_gid; 5780 dev->ib_dev.query_pkey = mlx5_ib_query_pkey; 5781 dev->ib_dev.modify_device = mlx5_ib_modify_device; 5782 dev->ib_dev.modify_port = mlx5_ib_modify_port; 5783 dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext; 5784 dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext; 5785 dev->ib_dev.mmap = mlx5_ib_mmap; 5786 dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd; 5787 
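/* Optional callbacks further down (memory windows, XRC, device memory) are wired up only when the matching HCA capability (imaicl, xrc, memic) is reported. */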
dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd; 5788 dev->ib_dev.create_ah = mlx5_ib_create_ah; 5789 dev->ib_dev.query_ah = mlx5_ib_query_ah; 5790 dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah; 5791 dev->ib_dev.create_srq = mlx5_ib_create_srq; 5792 dev->ib_dev.modify_srq = mlx5_ib_modify_srq; 5793 dev->ib_dev.query_srq = mlx5_ib_query_srq; 5794 dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq; 5795 dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv; 5796 dev->ib_dev.create_qp = mlx5_ib_create_qp; 5797 dev->ib_dev.modify_qp = mlx5_ib_modify_qp; 5798 dev->ib_dev.query_qp = mlx5_ib_query_qp; 5799 dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp; 5800 dev->ib_dev.drain_sq = mlx5_ib_drain_sq; 5801 dev->ib_dev.drain_rq = mlx5_ib_drain_rq; 5802 dev->ib_dev.post_send = mlx5_ib_post_send; 5803 dev->ib_dev.post_recv = mlx5_ib_post_recv; 5804 dev->ib_dev.create_cq = mlx5_ib_create_cq; 5805 dev->ib_dev.modify_cq = mlx5_ib_modify_cq; 5806 dev->ib_dev.resize_cq = mlx5_ib_resize_cq; 5807 dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq; 5808 dev->ib_dev.poll_cq = mlx5_ib_poll_cq; 5809 dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq; 5810 dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr; 5811 dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr; 5812 dev->ib_dev.rereg_user_mr = mlx5_ib_rereg_user_mr; 5813 dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr; 5814 dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach; 5815 dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach; 5816 dev->ib_dev.process_mad = mlx5_ib_process_mad; 5817 dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr; 5818 dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg; 5819 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; 5820 dev->ib_dev.get_dev_fw_str = get_dev_fw_str; 5821 dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity; 5822 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) && 5823 IS_ENABLED(CONFIG_MLX5_CORE_IPOIB)) 5824 dev->ib_dev.rdma_netdev_get_params = mlx5_ib_rn_get_params; 5825 5826 if (mlx5_core_is_pf(mdev)) { 5827 dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; 5828 dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; 5829 dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats; 5830 dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid; 5831 } 5832 5833 dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext; 5834 5835 dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence)); 5836 5837 if (MLX5_CAP_GEN(mdev, imaicl)) { 5838 dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw; 5839 dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw; 5840 dev->ib_dev.uverbs_cmd_mask |= 5841 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) | 5842 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW); 5843 } 5844 5845 if (MLX5_CAP_GEN(mdev, xrc)) { 5846 dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd; 5847 dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd; 5848 dev->ib_dev.uverbs_cmd_mask |= 5849 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) | 5850 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD); 5851 } 5852 5853 if (MLX5_CAP_DEV_MEM(mdev, memic)) { 5854 dev->ib_dev.alloc_dm = mlx5_ib_alloc_dm; 5855 dev->ib_dev.dealloc_dm = mlx5_ib_dealloc_dm; 5856 dev->ib_dev.reg_dm_mr = mlx5_ib_reg_dm_mr; 5857 } 5858 5859 dev->ib_dev.create_flow = mlx5_ib_create_flow; 5860 dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow; 5861 dev->ib_dev.uverbs_ex_cmd_mask |= 5862 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) | 5863 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW); 5864 dev->ib_dev.create_flow_action_esp = mlx5_ib_create_flow_action_esp; 5865 dev->ib_dev.destroy_flow_action = mlx5_ib_destroy_flow_action; 5866 dev->ib_dev.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp; 5867 
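/*
 * Editorial note, illustrative only: the optional features above (memory
 * windows, XRC, device memory) all follow the same pattern - a callback and,
 * where one exists, its uverbs command bit are installed only when the
 * matching HCA capability reads non-zero. A hypothetical new optional verb
 * would be gated the same way; "some_new_cap", "mlx5_ib_some_new_verb" and
 * the command constant below are made-up names used purely to show the shape:
 *
 *	if (MLX5_CAP_GEN(mdev, some_new_cap)) {
 *		dev->ib_dev.some_new_verb = mlx5_ib_some_new_verb;
 *		dev->ib_dev.uverbs_cmd_mask |=
 *			1ull << IB_USER_VERBS_CMD_SOME_NEW_VERB;
 *	}
 */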
dev->ib_dev.driver_id = RDMA_DRIVER_MLX5; 5868 dev->ib_dev.create_counters = mlx5_ib_create_counters; 5869 dev->ib_dev.destroy_counters = mlx5_ib_destroy_counters; 5870 dev->ib_dev.read_counters = mlx5_ib_read_counters; 5871 5872 err = init_node_data(dev); 5873 if (err) 5874 return err; 5875 5876 if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && 5877 (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) || 5878 MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) 5879 mutex_init(&dev->lb_mutex); 5880 5881 return 0; 5882 } 5883 5884 static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev) 5885 { 5886 dev->ib_dev.get_port_immutable = mlx5_port_immutable; 5887 dev->ib_dev.query_port = mlx5_ib_query_port; 5888 5889 return 0; 5890 } 5891 5892 int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev) 5893 { 5894 dev->ib_dev.get_port_immutable = mlx5_port_rep_immutable; 5895 dev->ib_dev.query_port = mlx5_ib_rep_query_port; 5896 5897 return 0; 5898 } 5899 5900 static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev) 5901 { 5902 u8 port_num; 5903 int i; 5904 5905 for (i = 0; i < dev->num_ports; i++) { 5906 dev->roce[i].dev = dev; 5907 dev->roce[i].native_port_num = i + 1; 5908 dev->roce[i].last_port_state = IB_PORT_DOWN; 5909 } 5910 5911 dev->ib_dev.get_netdev = mlx5_ib_get_netdev; 5912 dev->ib_dev.create_wq = mlx5_ib_create_wq; 5913 dev->ib_dev.modify_wq = mlx5_ib_modify_wq; 5914 dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq; 5915 dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table; 5916 dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table; 5917 5918 dev->ib_dev.uverbs_ex_cmd_mask |= 5919 (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) | 5920 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) | 5921 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) | 5922 (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) | 5923 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL); 5924 5925 port_num = mlx5_core_native_port_num(dev->mdev) - 1; 5926 5927 return mlx5_add_netdev_notifier(dev, port_num); 5928 } 5929 5930 static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev) 5931 { 5932 u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1; 5933 5934 mlx5_remove_netdev_notifier(dev, port_num); 5935 } 5936 5937 int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev) 5938 { 5939 struct mlx5_core_dev *mdev = dev->mdev; 5940 enum rdma_link_layer ll; 5941 int port_type_cap; 5942 int err = 0; 5943 5944 port_type_cap = MLX5_CAP_GEN(mdev, port_type); 5945 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); 5946 5947 if (ll == IB_LINK_LAYER_ETHERNET) 5948 err = mlx5_ib_stage_common_roce_init(dev); 5949 5950 return err; 5951 } 5952 5953 void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev) 5954 { 5955 mlx5_ib_stage_common_roce_cleanup(dev); 5956 } 5957 5958 static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev) 5959 { 5960 struct mlx5_core_dev *mdev = dev->mdev; 5961 enum rdma_link_layer ll; 5962 int port_type_cap; 5963 int err; 5964 5965 port_type_cap = MLX5_CAP_GEN(mdev, port_type); 5966 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); 5967 5968 if (ll == IB_LINK_LAYER_ETHERNET) { 5969 err = mlx5_ib_stage_common_roce_init(dev); 5970 if (err) 5971 return err; 5972 5973 err = mlx5_enable_eth(dev); 5974 if (err) 5975 goto cleanup; 5976 } 5977 5978 return 0; 5979 cleanup: 5980 mlx5_ib_stage_common_roce_cleanup(dev); 5981 5982 return err; 5983 } 5984 5985 static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev) 5986 { 5987 struct mlx5_core_dev *mdev = dev->mdev; 5988 
enum rdma_link_layer ll; 5989 int port_type_cap; 5990 5991 port_type_cap = MLX5_CAP_GEN(mdev, port_type); 5992 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); 5993 5994 if (ll == IB_LINK_LAYER_ETHERNET) { 5995 mlx5_disable_eth(dev); 5996 mlx5_ib_stage_common_roce_cleanup(dev); 5997 } 5998 } 5999 6000 int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev) 6001 { 6002 return create_dev_resources(&dev->devr); 6003 } 6004 6005 void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev) 6006 { 6007 destroy_dev_resources(&dev->devr); 6008 } 6009 6010 static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev) 6011 { 6012 mlx5_ib_internal_fill_odp_caps(dev); 6013 6014 return mlx5_ib_odp_init_one(dev); 6015 } 6016 6017 int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev) 6018 { 6019 if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) { 6020 dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats; 6021 dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats; 6022 6023 return mlx5_ib_alloc_counters(dev); 6024 } 6025 6026 return 0; 6027 } 6028 6029 void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev) 6030 { 6031 if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) 6032 mlx5_ib_dealloc_counters(dev); 6033 } 6034 6035 static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev) 6036 { 6037 return mlx5_ib_init_cong_debugfs(dev, 6038 mlx5_core_native_port_num(dev->mdev) - 1); 6039 } 6040 6041 static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev) 6042 { 6043 mlx5_ib_cleanup_cong_debugfs(dev, 6044 mlx5_core_native_port_num(dev->mdev) - 1); 6045 } 6046 6047 static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev) 6048 { 6049 dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev); 6050 return PTR_ERR_OR_ZERO(dev->mdev->priv.uar); 6051 } 6052 6053 static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev) 6054 { 6055 mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar); 6056 } 6057 6058 int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev) 6059 { 6060 int err; 6061 6062 err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false); 6063 if (err) 6064 return err; 6065 6066 err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true); 6067 if (err) 6068 mlx5_free_bfreg(dev->mdev, &dev->bfreg); 6069 6070 return err; 6071 } 6072 6073 void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev) 6074 { 6075 mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg); 6076 mlx5_free_bfreg(dev->mdev, &dev->bfreg); 6077 } 6078 6079 static int mlx5_ib_stage_populate_specs(struct mlx5_ib_dev *dev) 6080 { 6081 return populate_specs_root(dev); 6082 } 6083 6084 int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev) 6085 { 6086 return ib_register_device(&dev->ib_dev, NULL); 6087 } 6088 6089 void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev) 6090 { 6091 destroy_umrc_res(dev); 6092 } 6093 6094 void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev) 6095 { 6096 ib_unregister_device(&dev->ib_dev); 6097 } 6098 6099 int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev) 6100 { 6101 return create_umr_res(dev); 6102 } 6103 6104 static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev) 6105 { 6106 init_delay_drop(dev); 6107 6108 return 0; 6109 } 6110 6111 static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev) 6112 { 6113 cancel_delay_drop(dev); 6114 } 6115 6116 int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev) 6117 { 6118 int err; 6119 int i; 6120 6121 for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) { 6122 err = device_create_file(&dev->ib_dev.dev, 6123
mlx5_class_attributes[i]); 6124 if (err) 6125 return err; 6126 } 6127 6128 return 0; 6129 } 6130 6131 static int mlx5_ib_stage_rep_reg_init(struct mlx5_ib_dev *dev) 6132 { 6133 mlx5_ib_register_vport_reps(dev); 6134 6135 return 0; 6136 } 6137 6138 static void mlx5_ib_stage_rep_reg_cleanup(struct mlx5_ib_dev *dev) 6139 { 6140 mlx5_ib_unregister_vport_reps(dev); 6141 } 6142 6143 void __mlx5_ib_remove(struct mlx5_ib_dev *dev, 6144 const struct mlx5_ib_profile *profile, 6145 int stage) 6146 { 6147 /* Number of stages to cleanup */ 6148 while (stage) { 6149 stage--; 6150 if (profile->stage[stage].cleanup) 6151 profile->stage[stage].cleanup(dev); 6152 } 6153 6154 ib_dealloc_device((struct ib_device *)dev); 6155 } 6156 6157 void *__mlx5_ib_add(struct mlx5_ib_dev *dev, 6158 const struct mlx5_ib_profile *profile) 6159 { 6160 int err; 6161 int i; 6162 6163 printk_once(KERN_INFO "%s", mlx5_version); 6164 6165 for (i = 0; i < MLX5_IB_STAGE_MAX; i++) { 6166 if (profile->stage[i].init) { 6167 err = profile->stage[i].init(dev); 6168 if (err) 6169 goto err_out; 6170 } 6171 } 6172 6173 dev->profile = profile; 6174 dev->ib_active = true; 6175 6176 return dev; 6177 6178 err_out: 6179 __mlx5_ib_remove(dev, profile, i); 6180 6181 return NULL; 6182 } 6183 6184 static const struct mlx5_ib_profile pf_profile = { 6185 STAGE_CREATE(MLX5_IB_STAGE_INIT, 6186 mlx5_ib_stage_init_init, 6187 mlx5_ib_stage_init_cleanup), 6188 STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB, 6189 mlx5_ib_stage_flow_db_init, 6190 mlx5_ib_stage_flow_db_cleanup), 6191 STAGE_CREATE(MLX5_IB_STAGE_CAPS, 6192 mlx5_ib_stage_caps_init, 6193 NULL), 6194 STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB, 6195 mlx5_ib_stage_non_default_cb, 6196 NULL), 6197 STAGE_CREATE(MLX5_IB_STAGE_ROCE, 6198 mlx5_ib_stage_roce_init, 6199 mlx5_ib_stage_roce_cleanup), 6200 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES, 6201 mlx5_ib_stage_dev_res_init, 6202 mlx5_ib_stage_dev_res_cleanup), 6203 STAGE_CREATE(MLX5_IB_STAGE_ODP, 6204 mlx5_ib_stage_odp_init, 6205 NULL), 6206 STAGE_CREATE(MLX5_IB_STAGE_COUNTERS, 6207 mlx5_ib_stage_counters_init, 6208 mlx5_ib_stage_counters_cleanup), 6209 STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS, 6210 mlx5_ib_stage_cong_debugfs_init, 6211 mlx5_ib_stage_cong_debugfs_cleanup), 6212 STAGE_CREATE(MLX5_IB_STAGE_UAR, 6213 mlx5_ib_stage_uar_init, 6214 mlx5_ib_stage_uar_cleanup), 6215 STAGE_CREATE(MLX5_IB_STAGE_BFREG, 6216 mlx5_ib_stage_bfrag_init, 6217 mlx5_ib_stage_bfrag_cleanup), 6218 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR, 6219 NULL, 6220 mlx5_ib_stage_pre_ib_reg_umr_cleanup), 6221 STAGE_CREATE(MLX5_IB_STAGE_SPECS, 6222 mlx5_ib_stage_populate_specs, 6223 NULL), 6224 STAGE_CREATE(MLX5_IB_STAGE_IB_REG, 6225 mlx5_ib_stage_ib_reg_init, 6226 mlx5_ib_stage_ib_reg_cleanup), 6227 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, 6228 mlx5_ib_stage_post_ib_reg_umr_init, 6229 NULL), 6230 STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP, 6231 mlx5_ib_stage_delay_drop_init, 6232 mlx5_ib_stage_delay_drop_cleanup), 6233 STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR, 6234 mlx5_ib_stage_class_attr_init, 6235 NULL), 6236 }; 6237 6238 static const struct mlx5_ib_profile nic_rep_profile = { 6239 STAGE_CREATE(MLX5_IB_STAGE_INIT, 6240 mlx5_ib_stage_init_init, 6241 mlx5_ib_stage_init_cleanup), 6242 STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB, 6243 mlx5_ib_stage_flow_db_init, 6244 mlx5_ib_stage_flow_db_cleanup), 6245 STAGE_CREATE(MLX5_IB_STAGE_CAPS, 6246 mlx5_ib_stage_caps_init, 6247 NULL), 6248 STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB, 6249 mlx5_ib_stage_rep_non_default_cb, 6250 NULL), 6251 
STAGE_CREATE(MLX5_IB_STAGE_ROCE, 6252 mlx5_ib_stage_rep_roce_init, 6253 mlx5_ib_stage_rep_roce_cleanup), 6254 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES, 6255 mlx5_ib_stage_dev_res_init, 6256 mlx5_ib_stage_dev_res_cleanup), 6257 STAGE_CREATE(MLX5_IB_STAGE_COUNTERS, 6258 mlx5_ib_stage_counters_init, 6259 mlx5_ib_stage_counters_cleanup), 6260 STAGE_CREATE(MLX5_IB_STAGE_UAR, 6261 mlx5_ib_stage_uar_init, 6262 mlx5_ib_stage_uar_cleanup), 6263 STAGE_CREATE(MLX5_IB_STAGE_BFREG, 6264 mlx5_ib_stage_bfrag_init, 6265 mlx5_ib_stage_bfrag_cleanup), 6266 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR, 6267 NULL, 6268 mlx5_ib_stage_pre_ib_reg_umr_cleanup), 6269 STAGE_CREATE(MLX5_IB_STAGE_SPECS, 6270 mlx5_ib_stage_populate_specs, 6271 NULL), 6272 STAGE_CREATE(MLX5_IB_STAGE_IB_REG, 6273 mlx5_ib_stage_ib_reg_init, 6274 mlx5_ib_stage_ib_reg_cleanup), 6275 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, 6276 mlx5_ib_stage_post_ib_reg_umr_init, 6277 NULL), 6278 STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR, 6279 mlx5_ib_stage_class_attr_init, 6280 NULL), 6281 STAGE_CREATE(MLX5_IB_STAGE_REP_REG, 6282 mlx5_ib_stage_rep_reg_init, 6283 mlx5_ib_stage_rep_reg_cleanup), 6284 }; 6285 6286 static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev) 6287 { 6288 struct mlx5_ib_multiport_info *mpi; 6289 struct mlx5_ib_dev *dev; 6290 bool bound = false; 6291 int err; 6292 6293 mpi = kzalloc(sizeof(*mpi), GFP_KERNEL); 6294 if (!mpi) 6295 return NULL; 6296 6297 mpi->mdev = mdev; 6298 6299 err = mlx5_query_nic_vport_system_image_guid(mdev, 6300 &mpi->sys_image_guid); 6301 if (err) { 6302 kfree(mpi); 6303 return NULL; 6304 } 6305 6306 mutex_lock(&mlx5_ib_multiport_mutex); 6307 list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) { 6308 if (dev->sys_image_guid == mpi->sys_image_guid) 6309 bound = mlx5_ib_bind_slave_port(dev, mpi); 6310 6311 if (bound) { 6312 rdma_roce_rescan_device(&dev->ib_dev); 6313 break; 6314 } 6315 } 6316 6317 if (!bound) { 6318 list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list); 6319 dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n"); 6320 } 6321 mutex_unlock(&mlx5_ib_multiport_mutex); 6322 6323 return mpi; 6324 } 6325 6326 static void *mlx5_ib_add(struct mlx5_core_dev *mdev) 6327 { 6328 enum rdma_link_layer ll; 6329 struct mlx5_ib_dev *dev; 6330 int port_type_cap; 6331 6332 printk_once(KERN_INFO "%s", mlx5_version); 6333 6334 port_type_cap = MLX5_CAP_GEN(mdev, port_type); 6335 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); 6336 6337 if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET) 6338 return mlx5_ib_add_slave_port(mdev); 6339 6340 dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev)); 6341 if (!dev) 6342 return NULL; 6343 6344 dev->mdev = mdev; 6345 dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports), 6346 MLX5_CAP_GEN(mdev, num_vhca_ports)); 6347 6348 if (MLX5_ESWITCH_MANAGER(mdev) && 6349 mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) { 6350 dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0); 6351 6352 return __mlx5_ib_add(dev, &nic_rep_profile); 6353 } 6354 6355 return __mlx5_ib_add(dev, &pf_profile); 6356 } 6357 6358 static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) 6359 { 6360 struct mlx5_ib_multiport_info *mpi; 6361 struct mlx5_ib_dev *dev; 6362 6363 if (mlx5_core_is_mp_slave(mdev)) { 6364 mpi = context; 6365 mutex_lock(&mlx5_ib_multiport_mutex); 6366 if (mpi->ibdev) 6367 mlx5_ib_unbind_slave_port(mpi->ibdev, mpi); 6368 list_del(&mpi->list); 6369 mutex_unlock(&mlx5_ib_multiport_mutex); 6370 
return; 6371 } 6372 6373 dev = context; 6374 __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX); 6375 } 6376 6377 static struct mlx5_interface mlx5_ib_interface = { 6378 .add = mlx5_ib_add, 6379 .remove = mlx5_ib_remove, 6380 .event = mlx5_ib_event, 6381 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 6382 .pfault = mlx5_ib_pfault, 6383 #endif 6384 .protocol = MLX5_INTERFACE_PROTOCOL_IB, 6385 }; 6386 6387 unsigned long mlx5_ib_get_xlt_emergency_page(void) 6388 { 6389 mutex_lock(&xlt_emergency_page_mutex); 6390 return xlt_emergency_page; 6391 } 6392 6393 void mlx5_ib_put_xlt_emergency_page(void) 6394 { 6395 mutex_unlock(&xlt_emergency_page_mutex); 6396 } 6397 6398 static int __init mlx5_ib_init(void) 6399 { 6400 int err; 6401 6402 xlt_emergency_page = __get_free_page(GFP_KERNEL); 6403 if (!xlt_emergency_page) 6404 return -ENOMEM; 6405 6406 mutex_init(&xlt_emergency_page_mutex); 6407 6408 mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0); 6409 if (!mlx5_ib_event_wq) { 6410 free_page(xlt_emergency_page); 6411 return -ENOMEM; 6412 } 6413 6414 mlx5_ib_odp_init(); 6415 6416 err = mlx5_register_interface(&mlx5_ib_interface); 6417 6418 return err; 6419 } 6420 6421 static void __exit mlx5_ib_cleanup(void) 6422 { 6423 mlx5_unregister_interface(&mlx5_ib_interface); 6424 destroy_workqueue(mlx5_ib_event_wq); 6425 mutex_destroy(&xlt_emergency_page_mutex); 6426 free_page(xlt_emergency_page); 6427 } 6428 6429 module_init(mlx5_ib_init); 6430 module_exit(mlx5_ib_cleanup); 6431
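/*
 * Editorial note, illustrative only: the pf_profile and nic_rep_profile
 * tables above drive the staged bring-up in __mlx5_ib_add(): init callbacks
 * run in array order, and if one fails __mlx5_ib_remove() is invoked with the
 * index of the failing stage, so only the stages that already completed are
 * cleaned up, in reverse order. A stripped-down profile built from the same
 * STAGE_CREATE() entries might look like the sketch below; it reuses stage
 * names from this file purely as an example and is not a profile the driver
 * actually registers:
 *
 *	static const struct mlx5_ib_profile example_profile = {
 *		STAGE_CREATE(MLX5_IB_STAGE_INIT,
 *			     mlx5_ib_stage_init_init,
 *			     mlx5_ib_stage_init_cleanup),
 *		STAGE_CREATE(MLX5_IB_STAGE_CAPS,
 *			     mlx5_ib_stage_caps_init,
 *			     NULL),
 *		STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
 *			     mlx5_ib_stage_ib_reg_init,
 *			     mlx5_ib_stage_ib_reg_cleanup),
 *	};
 *
 * __mlx5_ib_add(dev, &example_profile) would then walk these stages exactly
 * as it walks pf_profile.
 */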