/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cache.h>

#include <linux/random.h>
#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>

#include <linux/mlx4/driver.h>
#include "mlx4_ib.h"

enum {
	MLX4_IB_VENDOR_CLASS1 = 0x9,
	MLX4_IB_VENDOR_CLASS2 = 0xa
};

#define MLX4_TUN_SEND_WRID_SHIFT 34
#define MLX4_TUN_QPN_SHIFT 32
#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)

#define MLX4_TUN_IS_RECV(a)  (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)

/* Port mgmt change event handling */

#define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
#define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
#define NUM_IDX_IN_PKEY_TBL_BLK 32
#define GUID_TBL_ENTRY_SIZE 8	   /* size in bytes */
#define GUID_TBL_BLK_NUM_ENTRIES 8
#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)

struct mlx4_mad_rcv_buf {
	struct ib_grh grh;
	u8 payload[256];
} __packed;

struct mlx4_mad_snd_buf {
	u8 payload[256];
} __packed;

struct mlx4_tunnel_mad {
	struct ib_grh grh;
	struct mlx4_ib_tunnel_header hdr;
	struct ib_mad mad;
} __packed;

struct mlx4_rcv_tunnel_mad {
	struct mlx4_rcv_tunnel_hdr hdr;
	struct ib_grh grh;
	struct ib_mad mad;
} __packed;

static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
				int block, u32 change_bitmap);

__be64 mlx4_ib_gen_node_guid(void)
{
#define NODE_GUID_HI	((u64) (((u64)IB_OPENIB_OUI) << 40))
	return cpu_to_be64(NODE_GUID_HI | prandom_u32());
}
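/*
 * The demux TID generator below ORs 0xff into the most-significant byte of
 * the transaction ID.  Elsewhere in this file that byte is used to carry the
 * slave number of a tunnelled MAD, with 255 reserved for dom0 (the master),
 * so TIDs produced here are never mistaken for a slave's (see
 * mlx4_ib_demux_mad()).
 */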
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
{
	return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
		cpu_to_be64(0xff00000000000000LL);
}

int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
		 int port, const struct ib_wc *in_wc,
		 const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	void *inbox;
	int err;
	u32 in_modifier = port;
	u8 op_modifier = 0;

	inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	inbox = inmailbox->buf;

	outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev->dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	memcpy(inbox, in_mad, 256);

	/*
	 * Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
		op_modifier |= 0x1;
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
		op_modifier |= 0x2;
	if (mlx4_is_mfunc(dev->dev) &&
	    (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
		op_modifier |= 0x8;

	if (in_wc) {
		struct {
			__be32	my_qpn;
			u32	reserved1;
			__be32	rqpn;
			u8	sl;
			u8	g_path;
			u16	reserved2[2];
			__be16	pkey;
			u32	reserved3[11];
			u8	grh[40];
		} *ext_info;

		memset(inbox + 256, 0, 256);
		ext_info = inbox + 256;

		ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
		ext_info->rqpn   = cpu_to_be32(in_wc->src_qp);
		ext_info->sl     = in_wc->sl << 4;
		ext_info->g_path = in_wc->dlid_path_bits |
			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
		ext_info->pkey   = cpu_to_be16(in_wc->pkey_index);

		if (in_grh)
			memcpy(ext_info->grh, in_grh, 40);

		op_modifier |= 0x4;

		in_modifier |= in_wc->slid << 16;
	}

	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
			   mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);

	if (!err)
		memcpy(response_mad, outmailbox->buf, 256);

	mlx4_free_cmd_mailbox(dev->dev, inmailbox);
	mlx4_free_cmd_mailbox(dev->dev, outmailbox);

	return err;
}

static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
{
	struct ib_ah *new_ah;
	struct ib_ah_attr ah_attr;
	unsigned long flags;

	if (!dev->send_agent[port_num - 1][0])
		return;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = lid;
	ah_attr.sl       = sl;
	ah_attr.port_num = port_num;

	new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
			      &ah_attr);
	if (IS_ERR(new_ah))
		return;

	spin_lock_irqsave(&dev->sm_lock, flags);
	if (dev->sm_ah[port_num - 1])
		ib_destroy_ah(dev->sm_ah[port_num - 1]);
	dev->sm_ah[port_num - 1] = new_ah;
	spin_unlock_irqrestore(&dev->sm_lock, flags);
}

/*
 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad *mad,
		      u16 prev_lid)
{
	struct ib_port_info *pinfo;
	u16 lid;
	__be16 *base;
	u32 bn, pkey_change_bitmap;
	int i;


	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
		switch (mad->mad_hdr.attr_id) {
		case IB_SMP_ATTR_PORT_INFO:
			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
			lid = be16_to_cpu(pinfo->lid);

			update_sm_ah(dev, port_num,
				     be16_to_cpu(pinfo->sm_lid),
				     pinfo->neighbormtu_mastersmsl & 0xf);

			if (pinfo->clientrereg_resv_subnetto & 0x80)
				handle_client_rereg_event(dev, port_num);

			if (prev_lid != lid)
				handle_lid_change_event(dev, port_num);
			break;

		case IB_SMP_ATTR_PKEY_TABLE:
			if (!mlx4_is_mfunc(dev->dev)) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				break;
			}

			/* at this point, we are running in the master.
			 * Slaves do not receive SMPs.
			 */
			bn  = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
			base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
			pkey_change_bitmap = 0;
			for (i = 0; i < 32; i++) {
				pr_debug("PKEY[%d] = x%x\n",
					 i + bn*32, be16_to_cpu(base[i]));
				if (be16_to_cpu(base[i]) !=
				    dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
					pkey_change_bitmap |= (1 << i);
					dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
						be16_to_cpu(base[i]);
				}
			}
			pr_debug("PKEY Change event: port=%d, "
				 "block=0x%x, change_bitmap=0x%x\n",
				 port_num, bn, pkey_change_bitmap);

			if (pkey_change_bitmap) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				if (!dev->sriov.is_going_down)
					__propagate_pkey_ev(dev, port_num, bn,
							    pkey_change_bitmap);
			}
			break;

		case IB_SMP_ATTR_GUID_INFO:
			/* paravirtualized master's guid is guid 0 -- does not change */
			if (!mlx4_is_master(dev->dev))
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_GID_CHANGE);
			/*if master, notify relevant slaves*/
			if (mlx4_is_master(dev->dev) &&
			    !dev->sriov.is_going_down) {
				bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
				mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
						(u8 *)(&((struct ib_smp *)mad)->data));
				mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
						(u8 *)(&((struct ib_smp *)mad)->data));
			}
			break;

		default:
			break;
		}
}

static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
				int block, u32 change_bitmap)
{
	int i, ix, slave, err;
	int have_event = 0;

	for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
		if (slave == mlx4_master_func_num(dev->dev))
			continue;
		if (!mlx4_is_slave_active(dev->dev, slave))
			continue;

		have_event = 0;
		for (i = 0; i < 32; i++) {
			if (!(change_bitmap & (1 << i)))
				continue;
			for (ix = 0;
			     ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
				if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
				    [ix] == i + 32 * block) {
					err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
					pr_debug("propagate_pkey_ev: slave %d,"
						 " port %d, ix %d (%d)\n",
						 slave, port_num, ix, err);
					have_event = 1;
					break;
				}
			}
			if (have_event)
				break;
		}
	}
}
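/*
 * Replace the NodeDescription in an outgoing SM GetResp with the local
 * ib_device's node_desc, under sm_lock so the description cannot change
 * mid-copy.
 */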
static void node_desc_override(struct ib_device *dev,
			       struct ib_mad *mad)
{
	unsigned long flags;

	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
	}
}

static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, const struct ib_mad *mad)
{
	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
	int ret;
	unsigned long flags;

	if (agent) {
		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
					      IB_MGMT_MAD_DATA, GFP_ATOMIC,
					      IB_MGMT_BASE_VERSION);
		if (IS_ERR(send_buf))
			return;
		/*
		 * We rely here on the fact that MLX QPs don't use the
		 * address handle after the send is posted (this is
		 * wrong following the IB spec strictly, but we know
		 * it's OK for our devices).
		 */
		spin_lock_irqsave(&dev->sm_lock, flags);
		memcpy(send_buf->mad, mad, sizeof *mad);
		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
			ret = ib_post_send_mad(send_buf, NULL);
		else
			ret = -EINVAL;
		spin_unlock_irqrestore(&dev->sm_lock, flags);

		if (ret)
			ib_free_send_mad(send_buf);
	}
}

static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
				    struct ib_sa_mad *sa_mad)
{
	int ret = 0;

	/* dispatch to different sa handlers */
	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
	case IB_SA_ATTR_MC_MEMBER_REC:
		ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
		break;
	default:
		break;
	}
	return ret;
}

int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int i;

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
			return i;
	}
	return -1;
}


static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
				   u8 port, u16 pkey, u16 *ix)
{
	int i, ret;
	u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
	u16 slot_pkey;

	if (slave == mlx4_master_func_num(dev->dev))
		return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);

	unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;

	for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
		if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)
			continue;

		pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];

		ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
		if (ret)
			continue;
		if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
			if (slot_pkey & 0x8000) {
				*ix = (u16) pkey_ix;
				return 0;
			} else {
				/* take first partial pkey index found */
				if (partial_ix == 0xFF)
					partial_ix = pkey_ix;
			}
		}
	}

	if (partial_ix < 0xFF) {
		*ix = (u16) partial_ix;
		return 0;
	}

	return -EINVAL;
}
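/*
 * Forward a MAD received on the wire to a slave (VF): wrap it in a
 * mlx4_rcv_tunnel_mad together with the relevant wc metadata (source QP,
 * pkey index, SLID or VLAN/MAC) and send it from the master's tunnel QP
 * to the slave's proxy SMI/GSI QP.
 */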
int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
			  enum ib_qp_type dest_qpt, struct ib_wc *wc,
			  struct ib_grh *grh, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_ud_wr wr;
	struct ib_send_wr *bad_wr;
	struct mlx4_ib_demux_pv_ctx *tun_ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct mlx4_rcv_tunnel_mad *tun_mad;
	struct ib_ah_attr attr;
	struct ib_ah *ah;
	struct ib_qp *src_qp = NULL;
	unsigned tun_tx_ix = 0;
	int dqpn;
	int ret = 0;
	u16 tun_pkey_ix;
	u16 cached_pkey;
	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

	if (dest_qpt > IB_QPT_GSI)
		return -EINVAL;

	tun_ctx = dev->sriov.demux[port-1].tun[slave];

	/* check if proxy qp created */
	if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	if (!dest_qpt)
		tun_qp = &tun_ctx->qp[0];
	else
		tun_qp = &tun_ctx->qp[1];

	/* compute P_Key index to put in tunnel header for slave */
	if (dest_qpt) {
		u16 pkey_ix;
		ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
		if (ret)
			return -EINVAL;

		ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
		if (ret)
			return -EINVAL;
		tun_pkey_ix = pkey_ix;
	} else
		tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];

	dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1;

	/* get tunnel tx data buf for slave */
	src_qp = tun_qp->qp;

	/* create ah. Just need an empty one with the port num for the post send.
	 * The driver will set the force loopback bit in post_send */
	memset(&attr, 0, sizeof attr);
	attr.port_num = port;
	if (is_eth) {
		memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
		attr.ah_flags = IB_AH_GRH;
	}
	ah = ib_create_ah(tun_ctx->pd, &attr);
	if (IS_ERR(ah))
		return -ENOMEM;

	/* allocate tunnel tx buf after pass failure returns */
	spin_lock(&tun_qp->tx_lock);
	if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
	    (MLX4_NUM_TUNNEL_BUFS - 1))
		ret = -EAGAIN;
	else
		tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
	spin_unlock(&tun_qp->tx_lock);
	if (ret)
		goto end;

	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
	if (tun_qp->tx_ring[tun_tx_ix].ah)
		ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
	tun_qp->tx_ring[tun_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   tun_qp->tx_ring[tun_tx_ix].buf.map,
				   sizeof (struct mlx4_rcv_tunnel_mad),
				   DMA_TO_DEVICE);

	/* copy over to tunnel buffer */
	if (grh)
		memcpy(&tun_mad->grh, grh, sizeof *grh);
	memcpy(&tun_mad->mad, mad, sizeof *mad);

	/* adjust tunnel data */
	tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
	tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
	tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;

	if (is_eth) {
		u16 vlan = 0;
		if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan,
						NULL)) {
			/* VST mode */
			if (vlan != wc->vlan_id)
				/* Packet vlan is not the VST-assigned vlan.
				 * Drop the packet.
				 */
				goto out;
			else
				/* Remove the vlan tag before forwarding
				 * the packet to the VF.
				 */
				vlan = 0xffff;
		} else {
			vlan = wc->vlan_id;
		}

		tun_mad->hdr.sl_vid = cpu_to_be16(vlan);
		memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4);
		memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
	} else {
		tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
		tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
	}

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      tun_qp->tx_ring[tun_tx_ix].buf.map,
				      sizeof (struct mlx4_rcv_tunnel_mad),
				      DMA_TO_DEVICE);

	list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_rcv_tunnel_mad);
	list.lkey = tun_ctx->pd->local_dma_lkey;

	wr.ah = ah;
	wr.port_num = port;
	wr.remote_qkey = IB_QP_SET_QKEY;
	wr.remote_qpn = dqpn;
	wr.wr.next = NULL;
	wr.wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
	wr.wr.sg_list = &list;
	wr.wr.num_sge = 1;
	wr.wr.opcode = IB_WR_SEND;
	wr.wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
	if (!ret)
		return 0;
out:
	spin_lock(&tun_qp->tx_lock);
	tun_qp->tx_ix_tail++;
	spin_unlock(&tun_qp->tx_lock);
	tun_qp->tx_ring[tun_tx_ix].ah = NULL;
end:
	ib_destroy_ah(ah);
	return ret;
}

static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
			     struct ib_wc *wc, struct ib_grh *grh,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err, other_port;
	int slave = -1;
	u8 *slave_id;
	int is_eth = 0;

	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		is_eth = 0;
	else
		is_eth = 1;

	if (is_eth) {
		if (!(wc->wc_flags & IB_WC_GRH)) {
			mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
			return -EINVAL;
		}
		if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) {
			mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
			return -EINVAL;
		}
		err = mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave);
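		/* If the GID was not found on this port and multi-function
		 * port bonding is active, the packet may have arrived on the
		 * other physical port; retry the lookup there and, on success,
		 * demux as if it had been received on that port.
		 */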
		if (err && mlx4_is_mf_bonded(dev->dev)) {
			other_port = (port == 1) ? 2 : 1;
			err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, grh->dgid.raw, &slave);
			if (!err) {
				port = other_port;
				pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n",
					 slave, grh->dgid.raw, port, other_port);
			}
		}
		if (err) {
			mlx4_ib_warn(ibdev, "failed matching grh\n");
			return -ENOENT;
		}
		if (slave >= dev->dev->caps.sqp_demux) {
			mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
				     slave, dev->dev->caps.sqp_demux);
			return -ENOENT;
		}

		if (mlx4_ib_demux_cm_handler(ibdev, port, NULL, mad))
			return 0;

		err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
		if (err)
			pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
				 slave, err);
		return 0;
	}

	/* Initially assume that this mad is for us */
	slave = mlx4_master_func_num(dev->dev);

	/* See if the slave id is encoded in a response mad */
	if (mad->mad_hdr.method & 0x80) {
		slave_id = (u8 *) &mad->mad_hdr.tid;
		slave = *slave_id;
		if (slave != 255) /*255 indicates the dom0*/
			*slave_id = 0; /* remap tid */
	}

	/* If a grh is present, we demux according to it */
	if (wc->wc_flags & IB_WC_GRH) {
		slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
		if (slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching grh\n");
			return -ENOENT;
		}
	}
	/* Class-specific handling */
	switch (mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
		/* 255 indicates the dom0 */
		if (slave != 255 && slave != mlx4_master_func_num(dev->dev)) {
			if (!mlx4_vf_smi_enabled(dev->dev, slave, port))
				return -EPERM;
			/* for a VF. drop unsolicited MADs */
			if (!(mad->mad_hdr.method & IB_MGMT_METHOD_RESP)) {
				mlx4_ib_warn(ibdev, "demux QP0. rejecting unsolicited mad for slave %d class 0x%x, method 0x%x\n",
					     slave, mad->mad_hdr.mgmt_class,
					     mad->mad_hdr.method);
				return -EINVAL;
			}
		}
		break;
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
					     (struct ib_sa_mad *) mad))
			return 0;
		break;
	case IB_MGMT_CLASS_CM:
		if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad))
			return 0;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
			return 0;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			pr_debug("dropping unsupported ingress mad from class:%d "
				 "for slave:%d\n", mad->mad_hdr.mgmt_class, slave);
			return 0;
		}
	}
	/* make sure a slave value of 255 that was not handled above does not fall through */
	if (slave >= dev->dev->caps.sqp_demux) {
		mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
			     slave, dev->dev->caps.sqp_demux);
		return -ENOENT;
	}

	err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
	if (err)
		pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
			 slave, err);
	return 0;
}

static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			  const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			  const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid, prev_lid = 0;
	int err;
	struct ib_port_attr pattr;

	if (in_wc && in_wc->qp->qp_num) {
		pr_debug("received MAD: slid:%d sqpn:%d "
			 "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
			 in_wc->slid, in_wc->src_qp,
			 in_wc->dlid_path_bits,
			 in_wc->qp->qp_num,
			 in_wc->wc_flags,
			 in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
			 be16_to_cpu(in_mad->mad_hdr.attr_id));
		if (in_wc->wc_flags & IB_WC_GRH) {
			pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->sgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->sgid.global.interface_id));
			pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->dgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->dgid.global.interface_id));
		}
	}

	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1   ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2   ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;

	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		prev_lid = pattr.lid;

	err = mlx4_MAD_IFC(to_mdev(ibdev),
			   (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
			   (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) |
			   MLX4_MAD_IFC_NET_VIEW,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	if (!out_mad->mad_hdr.status) {
		if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
			smp_snoop(ibdev, port_num, in_mad, prev_lid);
		/* slaves get node desc from FW */
		if (!mlx4_is_slave(to_mdev(ibdev)->dev))
			node_desc_override(ibdev, out_mad);
	}

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static void edit_counter(struct mlx4_counter *cnt, void *counters,
			 __be16 attr_id)
{
	switch (attr_id) {
	case IB_PMA_PORT_COUNTERS:
	{
		struct ib_pma_portcounters *pma_cnt =
			(struct ib_pma_portcounters *)counters;

		ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
				     (be64_to_cpu(cnt->tx_bytes) >> 2));
		ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
				     (be64_to_cpu(cnt->rx_bytes) >> 2));
		ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
				     be64_to_cpu(cnt->tx_frames));
		ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
				     be64_to_cpu(cnt->rx_frames));
		break;
	}
	case IB_PMA_PORT_COUNTERS_EXT:
	{
		struct ib_pma_portcounters_ext *pma_cnt_ext =
			(struct ib_pma_portcounters_ext *)counters;

		pma_cnt_ext->port_xmit_data =
			cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
		pma_cnt_ext->port_rcv_data =
			cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
		pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
		pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
		break;
	}
	}
}

static int iboe_process_mad_port_info(void *out_mad)
{
	struct ib_class_port_info cpi = {};

	cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
	memcpy(out_mad, &cpi, sizeof(cpi));
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
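/*
 * PMA handling for RoCE ports and for VFs: instead of querying the SMA,
 * read the HCA flow counters attached to this port and rewrite them into
 * the PortCounters / PortCountersExt layout expected by the PMA query.
 */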
static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			    const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx4_counter counter_stats;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct counter_index *tmp_counter;
	int err = IB_MAD_RESULT_FAILURE, stats_avail = 0;

	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
		return -EINVAL;

	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)
		return iboe_process_mad_port_info((void *)(out_mad->data + 40));

	memset(&counter_stats, 0, sizeof(counter_stats));
	mutex_lock(&dev->counters_table[port_num - 1].mutex);
	list_for_each_entry(tmp_counter,
			    &dev->counters_table[port_num - 1].counters_list,
			    list) {
		err = mlx4_get_counter_stats(dev->dev,
					     tmp_counter->index,
					     &counter_stats, 0);
		if (err) {
			err = IB_MAD_RESULT_FAILURE;
			stats_avail = 0;
			break;
		}
		stats_avail = 1;
	}
	mutex_unlock(&dev->counters_table[port_num - 1].mutex);
	if (stats_avail) {
		memset(out_mad->data, 0, sizeof out_mad->data);
		switch (counter_stats.counter_mode & 0xf) {
		case 0:
			edit_counter(&counter_stats,
				     (void *)(out_mad->data + 40),
				     in_mad->mad_hdr.attr_id);
			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
			break;
		default:
			err = IB_MAD_RESULT_FAILURE;
		}
	}

	return err;
}

int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;
	enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);

	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
			 *out_mad_size != sizeof(*out_mad)))
		return IB_MAD_RESULT_FAILURE;

	/* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA
	 * queries, should be called only by VFs and for that specific purpose
	 */
	if (link == IB_LINK_LAYER_INFINIBAND) {
		if (mlx4_is_slave(dev->dev) &&
		    (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
		     (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
		      in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
		      in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
			return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
						in_grh, in_mad, out_mad);

		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
				      in_grh, in_mad, out_mad);
	}

	if (link == IB_LINK_LAYER_ETHERNET)
		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
					in_grh, in_mad, out_mad);

	return -EINVAL;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	if (mad_send_wc->send_buf->context[0])
		ib_destroy_ah(mad_send_wc->send_buf->context[0]);
	ib_free_send_mad(mad_send_wc->send_buf);
}

int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;
	int ret;
	enum rdma_link_layer ll;

	for (p = 0; p < dev->num_ports; ++p) {
		ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
		for (q = 0; q <= 1; ++q) {
			if (ll == IB_LINK_LAYER_INFINIBAND) {
				agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
							      q ?
							      IB_QPT_GSI : IB_QPT_SMI,
							      NULL, 0, send_handler,
							      NULL, NULL, 0);
				if (IS_ERR(agent)) {
					ret = PTR_ERR(agent);
					goto err;
				}
				dev->send_agent[p][q] = agent;
			} else
				dev->send_agent[p][q] = NULL;
		}
	}

	return 0;

err:
	for (p = 0; p < dev->num_ports; ++p)
		for (q = 0; q <= 1; ++q)
			if (dev->send_agent[p][q])
				ib_unregister_mad_agent(dev->send_agent[p][q]);

	return ret;
}

void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;

	for (p = 0; p < dev->num_ports; ++p) {
		for (q = 0; q <= 1; ++q) {
			agent = dev->send_agent[p][q];
			if (agent) {
				dev->send_agent[p][q] = NULL;
				ib_unregister_mad_agent(agent);
			}
		}

		if (dev->sm_ah[p])
			ib_destroy_ah(dev->sm_ah[p]);
	}
}

static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
{
	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);

	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
		mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
					    MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
}

static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
{
	/* re-configure the alias-guid and mcg's */
	if (mlx4_is_master(dev->dev)) {
		mlx4_ib_invalidate_all_guid_record(dev, port_num);

		if (!dev->sriov.is_going_down) {
			mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
			mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
						    MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
		}
	}
	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
}

static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
			      struct mlx4_eqe *eqe)
{
	__propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),
			    GET_MASK_FROM_EQE(eqe));
}

static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
				      u32 guid_tbl_blk_num, u32 change_bitmap)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	u16 i;

	if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
		return;

	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad) {
		mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
		goto out;
	}

	guid_tbl_blk_num *= 4;

	for (i = 0; i < 4; i++) {
		if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))
			continue;
		memset(in_mad, 0, sizeof *in_mad);
		memset(out_mad, 0, sizeof *out_mad);

		in_mad->base_version  = 1;
		in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
		in_mad->class_version = 1;
		in_mad->method        = IB_MGMT_METHOD_GET;
		in_mad->attr_id       = IB_SMP_ATTR_GUID_INFO;
		in_mad->attr_mod      = cpu_to_be32(guid_tbl_blk_num + i);

		if (mlx4_MAD_IFC(dev,
				 MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW,
				 port_num, NULL, NULL, in_mad, out_mad)) {
			mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");
			goto out;
		}

		mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i,
						    port_num,
						    (u8 *)(&((struct ib_smp *)out_mad)->data));
		mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i,
						     port_num,
						     (u8 *)(&((struct ib_smp *)out_mad)->data));
	}

out:
	kfree(in_mad);
	kfree(out_mad);
	return;
}

void handle_port_mgmt_change_event(struct work_struct *work)
{
	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *dev = ew->ib_dev;
	struct mlx4_eqe *eqe = &(ew->ib_eqe);
	u8 port = eqe->event.port_mgmt_change.port;
	u32 changed_attr;
	u32 tbl_block;
	u32 change_bitmap;

	switch (eqe->subtype) {
	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
		changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);

		/* Update the SM ah - This should be done before handling
		   the other changed attributes so that MADs can be sent to the SM */
		if (changed_attr & MSTR_SM_CHANGE_MASK) {
			u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
			u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
			update_sm_ah(dev, port, lid, sl);
		}

		/* Check if it is a lid change event */
		if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
			handle_lid_change_event(dev, port);

		/* Generate GUID changed event */
		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
			if (mlx4_is_master(dev->dev)) {
				union ib_gid gid;
				int err = 0;

				if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
					err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
				else
					gid.global.subnet_prefix =
						eqe->event.port_mgmt_change.params.port_info.gid_prefix;
				if (err) {
					pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
						port, err);
				} else {
					pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
						 port,
						 (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
						 be64_to_cpu(gid.global.subnet_prefix));
					atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
						     be64_to_cpu(gid.global.subnet_prefix));
				}
			}
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
			/*if master, notify all slaves*/
			if (mlx4_is_master(dev->dev))
				mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
							    MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);
		}

		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
			handle_client_rereg_event(dev, port);
		break;

	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
		mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
		if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
			propagate_pkey_ev(dev, port, eqe);
		break;
	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
		/* paravirtualized master's guid is guid 0 -- does not change */
		if (!mlx4_is_master(dev->dev))
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
		/*if master, notify relevant slaves*/
		else if (!dev->sriov.is_going_down) {
			tbl_block = GET_BLK_PTR_FROM_EQE(eqe);
			change_bitmap = GET_MASK_FROM_EQE(eqe);
			handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
		}
		break;
	default:
		pr_warn("Unsupported subtype 0x%x for "
			"Port Management Change event\n", eqe->subtype);
	}

	kfree(ew);
}

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
			    enum ib_event_type type)
{
	struct ib_event event;

	event.device		= &dev->ib_dev;
	event.element.port_num	= port_num;
	event.event		= type;

	ib_dispatch_event(&event);
}
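/*
 * CQ completion handler for the paravirtualization (tunnel/proxy) QPs:
 * defer all processing to the demux context's workqueue, unless SR-IOV
 * teardown is in progress or this context is no longer active.
 */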
static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
{
	unsigned long flags;
	struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
		queue_work(ctx->wq, &ctx->work);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
				  struct mlx4_ib_demux_pv_qp *tun_qp,
				  int index)
{
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	int size;

	size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
		sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);

	sg_list.addr = tun_qp->ring[index].map;
	sg_list.length = size;
	sg_list.lkey = ctx->pd->local_dma_lkey;

	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;
	recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
		MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
	ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
				      size, DMA_FROM_DEVICE);
	return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
}

static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
					int slave, struct ib_sa_mad *sa_mad)
{
	int ret = 0;

	/* dispatch to different sa handlers */
	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
	case IB_SA_ATTR_MC_MEMBER_REC:
		ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);
		break;
	default:
		break;
	}
	return ret;
}

static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
{
	int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;

	return (qpn >= proxy_start && qpn <= proxy_start + 1);
}


int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
			 enum ib_qp_type dest_qpt, u16 pkey_index,
			 u32 remote_qpn, u32 qkey, struct ib_ah_attr *attr,
			 u8 *s_mac, u16 vlan_id, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_ud_wr wr;
	struct ib_send_wr *bad_wr;
	struct mlx4_ib_demux_pv_ctx *sqp_ctx;
	struct mlx4_ib_demux_pv_qp *sqp;
	struct mlx4_mad_snd_buf *sqp_mad;
	struct ib_ah *ah;
	struct ib_qp *send_qp = NULL;
	unsigned wire_tx_ix = 0;
	int ret = 0;
	u16 wire_pkey_ix;
	int src_qpnum;
	u8 sgid_index;


	sqp_ctx = dev->sriov.sqps[port-1];

	/* check if proxy qp created */
	if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	if (dest_qpt == IB_QPT_SMI) {
		src_qpnum = 0;
		sqp = &sqp_ctx->qp[0];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
	} else {
		src_qpnum = 1;
		sqp = &sqp_ctx->qp[1];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
	}

	send_qp = sqp->qp;

	/* create ah */
	sgid_index = attr->grh.sgid_index;
	attr->grh.sgid_index = 0;
	ah = ib_create_ah(sqp_ctx->pd, attr);
	if (IS_ERR(ah))
		return -ENOMEM;
	attr->grh.sgid_index = sgid_index;
	to_mah(ah)->av.ib.gid_index = sgid_index;
	/* get rid of force-loopback bit */
	to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
	spin_lock(&sqp->tx_lock);
	if (sqp->tx_ix_head - sqp->tx_ix_tail >=
	    (MLX4_NUM_TUNNEL_BUFS - 1))
		ret = -EAGAIN;
	else
		wire_tx_ix = (++sqp->tx_ix_head) &
			     (MLX4_NUM_TUNNEL_BUFS - 1);
	spin_unlock(&sqp->tx_lock);
	if (ret)
		goto out;

	sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
	if (sqp->tx_ring[wire_tx_ix].ah)
		ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
	sqp->tx_ring[wire_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   sqp->tx_ring[wire_tx_ix].buf.map,
				   sizeof (struct mlx4_mad_snd_buf),
				   DMA_TO_DEVICE);

	memcpy(&sqp_mad->payload, mad, sizeof *mad);

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      sqp->tx_ring[wire_tx_ix].buf.map,
				      sizeof (struct mlx4_mad_snd_buf),
				      DMA_TO_DEVICE);

	list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_mad_snd_buf);
	list.lkey = sqp_ctx->pd->local_dma_lkey;

	wr.ah = ah;
	wr.port_num = port;
	wr.pkey_index = wire_pkey_ix;
	wr.remote_qkey = qkey;
	wr.remote_qpn = remote_qpn;
	wr.wr.next = NULL;
	wr.wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
	wr.wr.sg_list = &list;
	wr.wr.num_sge = 1;
	wr.wr.opcode = IB_WR_SEND;
	wr.wr.send_flags = IB_SEND_SIGNALED;
	if (s_mac)
		memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
	if (vlan_id < 0x1000)
		vlan_id |= (attr->sl & 7) << 13;
	to_mah(ah)->av.eth.vlan = cpu_to_be16(vlan_id);


	ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
	if (!ret)
		return 0;

	spin_lock(&sqp->tx_lock);
	sqp->tx_ix_tail++;
	spin_unlock(&sqp->tx_lock);
	sqp->tx_ring[wire_tx_ix].ah = NULL;
out:
	ib_destroy_ah(ah);
	return ret;
}

static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
{
	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
		return slave;
	return mlx4_get_base_gid_ix(dev->dev, slave, port);
}

static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
				    struct ib_ah_attr *ah_attr)
{
	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
		ah_attr->grh.sgid_index = slave;
	else
		ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port);
}

static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
{
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
	struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
	int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
	struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
	struct mlx4_ib_ah ah;
	struct ib_ah_attr ah_attr;
	u8 *slave_id;
	int slave;
	int port;
	u16 vlan_id;

	/* Get slave that sent this packet */
	if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
	    wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX ||
	    (wc->src_qp & 0x1) != ctx->port - 1 ||
	    wc->src_qp & 0x4) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
		return;
	}
	slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8;
	if (slave != ctx->slave) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
			     "belongs to another slave\n", wc->src_qp);
		return;
	}

	/* Map transaction ID */
	ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
				   sizeof (struct mlx4_tunnel_mad),
				   DMA_FROM_DEVICE);
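	/* For request methods, stamp the slave number into the most-significant
	 * byte of the transaction ID, so the response coming back from the wire
	 * can be demultiplexed to this slave (mlx4_ib_demux_mad() clears it
	 * again before tunnelling the response).
	 */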
	switch (tunnel->mad.mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_REPORT:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_DELETE:
	case IB_SA_METHOD_GET_MULTI:
	case IB_SA_METHOD_GET_TRACE_TBL:
		slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
		if (*slave_id) {
			mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d "
				     "class:%d slave:%d\n", *slave_id,
				     tunnel->mad.mad_hdr.mgmt_class, slave);
			return;
		} else
			*slave_id = slave;
	default:
		/* nothing */;
	}

	/* Class-specific handling */
	switch (tunnel->mad.mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
		if (slave != mlx4_master_func_num(dev->dev) &&
		    !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port))
			return;
		break;
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
						 (struct ib_sa_mad *) &tunnel->mad))
			return;
		break;
	case IB_MGMT_CLASS_CM:
		if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
						 (struct ib_mad *) &tunnel->mad))
			return;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
		    tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
			return;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d "
				     "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave);
			return;
		}
	}

	/* We are using standard ib_core services to send the mad, so generate a
	 * standard address handle by decoding the tunnelled mlx4_ah fields */
	memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
	ah.ibah.device = ctx->ib_dev;

	port = be32_to_cpu(ah.av.ib.port_pd) >> 24;
	port = mlx4_slave_convert_port(dev->dev, slave, port);
	if (port < 0)
		return;
	ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));

	mlx4_ib_query_ah(&ah.ibah, &ah_attr);
	if (ah_attr.ah_flags & IB_AH_GRH)
		fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);

	memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
	vlan_id = be16_to_cpu(tunnel->hdr.vlan);
	/* if the slave has a default vlan, use it */
	mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
				    &vlan_id, &ah_attr.sl);

	mlx4_ib_send_to_wire(dev, slave, ctx->port,
			     is_proxy_qp0(dev, wc->src_qp, slave) ?
			     IB_QPT_SMI : IB_QPT_GSI,
			     be16_to_cpu(tunnel->hdr.pkey_index),
			     be32_to_cpu(tunnel->hdr.remote_qpn),
			     be32_to_cpu(tunnel->hdr.qkey),
			     &ah_attr, wc->smac, vlan_id, &tunnel->mad);
}

static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				 enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;

	if (qp_type > IB_QPT_GSI)
		return -EINVAL;

	tun_qp = &ctx->qp[qp_type];

	tun_qp->ring = kzalloc(sizeof (struct mlx4_ib_buf) * MLX4_NUM_TUNNEL_BUFS,
			       GFP_KERNEL);
	if (!tun_qp->ring)
		return -ENOMEM;

	tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
				  sizeof (struct mlx4_ib_tun_tx_buf),
				  GFP_KERNEL);
	if (!tun_qp->tx_ring) {
		kfree(tun_qp->ring);
		tun_qp->ring = NULL;
		return -ENOMEM;
	}

	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
		if (!tun_qp->ring[i].addr)
			goto err;
		tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
							tun_qp->ring[i].addr,
							rx_buf_size,
							DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
			kfree(tun_qp->ring[i].addr);
			goto err;
		}
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		tun_qp->tx_ring[i].buf.addr =
			kmalloc(tx_buf_size, GFP_KERNEL);
		if (!tun_qp->tx_ring[i].buf.addr)
			goto tx_err;
		tun_qp->tx_ring[i].buf.map =
			ib_dma_map_single(ctx->ib_dev,
					  tun_qp->tx_ring[i].buf.addr,
					  tx_buf_size,
					  DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ctx->ib_dev,
					 tun_qp->tx_ring[i].buf.map)) {
			kfree(tun_qp->tx_ring[i].buf.addr);
			goto tx_err;
		}
		tun_qp->tx_ring[i].ah = NULL;
	}
	spin_lock_init(&tun_qp->tx_lock);
	tun_qp->tx_ix_head = 0;
	tun_qp->tx_ix_tail = 0;
	tun_qp->proxy_qpt = qp_type;

	return 0;

tx_err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
	}
	kfree(tun_qp->tx_ring);
	tun_qp->tx_ring = NULL;
	i = MLX4_NUM_TUNNEL_BUFS;
err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}
	kfree(tun_qp->ring);
	tun_qp->ring = NULL;
	return -ENOMEM;
}

static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				    enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;

	if (qp_type > IB_QPT_GSI)
		return;

	tun_qp = &ctx->qp[qp_type];
	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}


	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}

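	/* The tx ring may still reference address handles from sends whose
	 * completions were never reaped; destroy them while unmapping.
	 */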
	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
		if (tun_qp->tx_ring[i].ah)
			ib_destroy_ah(tun_qp->tx_ring[i].ah);
	}
	kfree(tun_qp->tx_ring);
	kfree(tun_qp->ring);
}

static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
{
	struct mlx4_ib_demux_pv_ctx *ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct ib_wc wc;
	int ret;
	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
		tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_RECV:
				mlx4_ib_multiplex_mad(ctx, &wc);
				ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
							     wc.wr_id &
							     (MLX4_NUM_TUNNEL_BUFS - 1));
				if (ret)
					pr_err("Failed reposting tunnel "
					       "buf:%lld\n", wc.wr_id);
				break;
			case IB_WC_SEND:
				pr_debug("received tunnel send completion:"
					 "wrid=0x%llx, status=0x%x\n",
					 wc.wr_id, wc.status);
				ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&tun_qp->tx_lock);
				tun_qp->tx_ix_tail++;
				spin_unlock(&tun_qp->tx_lock);

				break;
			default:
				break;
			}
		} else {
			pr_debug("mlx4_ib: completion error in tunnel: %d."
				 " status = %d, wrid = 0x%llx\n",
				 ctx->slave, wc.status, wc.wr_id);
			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
				ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&tun_qp->tx_lock);
				tun_qp->tx_ix_tail++;
				spin_unlock(&tun_qp->tx_lock);
			}
		}
	}
}

static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct mlx4_ib_demux_pv_ctx *sqp = qp_context;

	/* It's worse than that! He's dead, Jim!
*/ 1689 pr_err("Fatal error (%d) on a MAD QP on port %d\n", 1690 event->event, sqp->port); 1691 } 1692 1693 static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx, 1694 enum ib_qp_type qp_type, int create_tun) 1695 { 1696 int i, ret; 1697 struct mlx4_ib_demux_pv_qp *tun_qp; 1698 struct mlx4_ib_qp_tunnel_init_attr qp_init_attr; 1699 struct ib_qp_attr attr; 1700 int qp_attr_mask_INIT; 1701 1702 if (qp_type > IB_QPT_GSI) 1703 return -EINVAL; 1704 1705 tun_qp = &ctx->qp[qp_type]; 1706 1707 memset(&qp_init_attr, 0, sizeof qp_init_attr); 1708 qp_init_attr.init_attr.send_cq = ctx->cq; 1709 qp_init_attr.init_attr.recv_cq = ctx->cq; 1710 qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; 1711 qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS; 1712 qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS; 1713 qp_init_attr.init_attr.cap.max_send_sge = 1; 1714 qp_init_attr.init_attr.cap.max_recv_sge = 1; 1715 if (create_tun) { 1716 qp_init_attr.init_attr.qp_type = IB_QPT_UD; 1717 qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP; 1718 qp_init_attr.port = ctx->port; 1719 qp_init_attr.slave = ctx->slave; 1720 qp_init_attr.proxy_qp_type = qp_type; 1721 qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | 1722 IB_QP_QKEY | IB_QP_PORT; 1723 } else { 1724 qp_init_attr.init_attr.qp_type = qp_type; 1725 qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP; 1726 qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY; 1727 } 1728 qp_init_attr.init_attr.port_num = ctx->port; 1729 qp_init_attr.init_attr.qp_context = ctx; 1730 qp_init_attr.init_attr.event_handler = pv_qp_event_handler; 1731 tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr); 1732 if (IS_ERR(tun_qp->qp)) { 1733 ret = PTR_ERR(tun_qp->qp); 1734 tun_qp->qp = NULL; 1735 pr_err("Couldn't create %s QP (%d)\n", 1736 create_tun ? "tunnel" : "special", ret); 1737 return ret; 1738 } 1739 1740 memset(&attr, 0, sizeof attr); 1741 attr.qp_state = IB_QPS_INIT; 1742 ret = 0; 1743 if (create_tun) 1744 ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave, 1745 ctx->port, IB_DEFAULT_PKEY_FULL, 1746 &attr.pkey_index); 1747 if (ret || !create_tun) 1748 attr.pkey_index = 1749 to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0]; 1750 attr.qkey = IB_QP1_QKEY; 1751 attr.port_num = ctx->port; 1752 ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT); 1753 if (ret) { 1754 pr_err("Couldn't change %s qp state to INIT (%d)\n", 1755 create_tun ? "tunnel" : "special", ret); 1756 goto err_qp; 1757 } 1758 attr.qp_state = IB_QPS_RTR; 1759 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE); 1760 if (ret) { 1761 pr_err("Couldn't change %s qp state to RTR (%d)\n", 1762 create_tun ? "tunnel" : "special", ret); 1763 goto err_qp; 1764 } 1765 attr.qp_state = IB_QPS_RTS; 1766 attr.sq_psn = 0; 1767 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN); 1768 if (ret) { 1769 pr_err("Couldn't change %s qp state to RTS (%d)\n", 1770 create_tun ? 
"tunnel" : "special", ret); 1771 goto err_qp; 1772 } 1773 1774 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) { 1775 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i); 1776 if (ret) { 1777 pr_err(" mlx4_ib_post_pv_buf error" 1778 " (err = %d, i = %d)\n", ret, i); 1779 goto err_qp; 1780 } 1781 } 1782 return 0; 1783 1784 err_qp: 1785 ib_destroy_qp(tun_qp->qp); 1786 tun_qp->qp = NULL; 1787 return ret; 1788 } 1789 1790 /* 1791 * IB MAD completion callback for real SQPs 1792 */ 1793 static void mlx4_ib_sqp_comp_worker(struct work_struct *work) 1794 { 1795 struct mlx4_ib_demux_pv_ctx *ctx; 1796 struct mlx4_ib_demux_pv_qp *sqp; 1797 struct ib_wc wc; 1798 struct ib_grh *grh; 1799 struct ib_mad *mad; 1800 1801 ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work); 1802 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); 1803 1804 while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) { 1805 sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)]; 1806 if (wc.status == IB_WC_SUCCESS) { 1807 switch (wc.opcode) { 1808 case IB_WC_SEND: 1809 ib_destroy_ah(sqp->tx_ring[wc.wr_id & 1810 (MLX4_NUM_TUNNEL_BUFS - 1)].ah); 1811 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah 1812 = NULL; 1813 spin_lock(&sqp->tx_lock); 1814 sqp->tx_ix_tail++; 1815 spin_unlock(&sqp->tx_lock); 1816 break; 1817 case IB_WC_RECV: 1818 mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *) 1819 (sqp->ring[wc.wr_id & 1820 (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload); 1821 grh = &(((struct mlx4_mad_rcv_buf *) 1822 (sqp->ring[wc.wr_id & 1823 (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh); 1824 mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad); 1825 if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id & 1826 (MLX4_NUM_TUNNEL_BUFS - 1))) 1827 pr_err("Failed reposting SQP " 1828 "buf:%lld\n", wc.wr_id); 1829 break; 1830 default: 1831 BUG_ON(1); 1832 break; 1833 } 1834 } else { 1835 pr_debug("mlx4_ib: completion error in tunnel: %d." 
1836 " status = %d, wrid = 0x%llx\n", 1837 ctx->slave, wc.status, wc.wr_id); 1838 if (!MLX4_TUN_IS_RECV(wc.wr_id)) { 1839 ib_destroy_ah(sqp->tx_ring[wc.wr_id & 1840 (MLX4_NUM_TUNNEL_BUFS - 1)].ah); 1841 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah 1842 = NULL; 1843 spin_lock(&sqp->tx_lock); 1844 sqp->tx_ix_tail++; 1845 spin_unlock(&sqp->tx_lock); 1846 } 1847 } 1848 } 1849 } 1850 1851 static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port, 1852 struct mlx4_ib_demux_pv_ctx **ret_ctx) 1853 { 1854 struct mlx4_ib_demux_pv_ctx *ctx; 1855 1856 *ret_ctx = NULL; 1857 ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL); 1858 if (!ctx) { 1859 pr_err("failed allocating pv resource context " 1860 "for port %d, slave %d\n", port, slave); 1861 return -ENOMEM; 1862 } 1863 1864 ctx->ib_dev = &dev->ib_dev; 1865 ctx->port = port; 1866 ctx->slave = slave; 1867 *ret_ctx = ctx; 1868 return 0; 1869 } 1870 1871 static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port) 1872 { 1873 if (dev->sriov.demux[port - 1].tun[slave]) { 1874 kfree(dev->sriov.demux[port - 1].tun[slave]); 1875 dev->sriov.demux[port - 1].tun[slave] = NULL; 1876 } 1877 } 1878 1879 static int create_pv_resources(struct ib_device *ibdev, int slave, int port, 1880 int create_tun, struct mlx4_ib_demux_pv_ctx *ctx) 1881 { 1882 int ret, cq_size; 1883 struct ib_cq_init_attr cq_attr = {}; 1884 1885 if (ctx->state != DEMUX_PV_STATE_DOWN) 1886 return -EEXIST; 1887 1888 ctx->state = DEMUX_PV_STATE_STARTING; 1889 /* have QP0 only if link layer is IB */ 1890 if (rdma_port_get_link_layer(ibdev, ctx->port) == 1891 IB_LINK_LAYER_INFINIBAND) 1892 ctx->has_smi = 1; 1893 1894 if (ctx->has_smi) { 1895 ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun); 1896 if (ret) { 1897 pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret); 1898 goto err_out; 1899 } 1900 } 1901 1902 ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun); 1903 if (ret) { 1904 pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret); 1905 goto err_out_qp0; 1906 } 1907 1908 cq_size = 2 * MLX4_NUM_TUNNEL_BUFS; 1909 if (ctx->has_smi) 1910 cq_size *= 2; 1911 1912 cq_attr.cqe = cq_size; 1913 ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler, 1914 NULL, ctx, &cq_attr); 1915 if (IS_ERR(ctx->cq)) { 1916 ret = PTR_ERR(ctx->cq); 1917 pr_err("Couldn't create tunnel CQ (%d)\n", ret); 1918 goto err_buf; 1919 } 1920 1921 ctx->pd = ib_alloc_pd(ctx->ib_dev); 1922 if (IS_ERR(ctx->pd)) { 1923 ret = PTR_ERR(ctx->pd); 1924 pr_err("Couldn't create tunnel PD (%d)\n", ret); 1925 goto err_cq; 1926 } 1927 1928 if (ctx->has_smi) { 1929 ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun); 1930 if (ret) { 1931 pr_err("Couldn't create %s QP0 (%d)\n", 1932 create_tun ? "tunnel for" : "", ret); 1933 goto err_pd; 1934 } 1935 } 1936 1937 ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun); 1938 if (ret) { 1939 pr_err("Couldn't create %s QP1 (%d)\n", 1940 create_tun ? 
"tunnel for" : "", ret); 1941 goto err_qp0; 1942 } 1943 1944 if (create_tun) 1945 INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker); 1946 else 1947 INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker); 1948 1949 ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq; 1950 1951 ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); 1952 if (ret) { 1953 pr_err("Couldn't arm tunnel cq (%d)\n", ret); 1954 goto err_wq; 1955 } 1956 ctx->state = DEMUX_PV_STATE_ACTIVE; 1957 return 0; 1958 1959 err_wq: 1960 ctx->wq = NULL; 1961 ib_destroy_qp(ctx->qp[1].qp); 1962 ctx->qp[1].qp = NULL; 1963 1964 1965 err_qp0: 1966 if (ctx->has_smi) 1967 ib_destroy_qp(ctx->qp[0].qp); 1968 ctx->qp[0].qp = NULL; 1969 1970 err_pd: 1971 ib_dealloc_pd(ctx->pd); 1972 ctx->pd = NULL; 1973 1974 err_cq: 1975 ib_destroy_cq(ctx->cq); 1976 ctx->cq = NULL; 1977 1978 err_buf: 1979 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun); 1980 1981 err_out_qp0: 1982 if (ctx->has_smi) 1983 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun); 1984 err_out: 1985 ctx->state = DEMUX_PV_STATE_DOWN; 1986 return ret; 1987 } 1988 1989 static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port, 1990 struct mlx4_ib_demux_pv_ctx *ctx, int flush) 1991 { 1992 if (!ctx) 1993 return; 1994 if (ctx->state > DEMUX_PV_STATE_DOWN) { 1995 ctx->state = DEMUX_PV_STATE_DOWNING; 1996 if (flush) 1997 flush_workqueue(ctx->wq); 1998 if (ctx->has_smi) { 1999 ib_destroy_qp(ctx->qp[0].qp); 2000 ctx->qp[0].qp = NULL; 2001 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1); 2002 } 2003 ib_destroy_qp(ctx->qp[1].qp); 2004 ctx->qp[1].qp = NULL; 2005 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1); 2006 ib_dealloc_pd(ctx->pd); 2007 ctx->pd = NULL; 2008 ib_destroy_cq(ctx->cq); 2009 ctx->cq = NULL; 2010 ctx->state = DEMUX_PV_STATE_DOWN; 2011 } 2012 } 2013 2014 static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave, 2015 int port, int do_init) 2016 { 2017 int ret = 0; 2018 2019 if (!do_init) { 2020 clean_vf_mcast(&dev->sriov.demux[port - 1], slave); 2021 /* for master, destroy real sqp resources */ 2022 if (slave == mlx4_master_func_num(dev->dev)) 2023 destroy_pv_resources(dev, slave, port, 2024 dev->sriov.sqps[port - 1], 1); 2025 /* destroy the tunnel qp resources */ 2026 destroy_pv_resources(dev, slave, port, 2027 dev->sriov.demux[port - 1].tun[slave], 1); 2028 return 0; 2029 } 2030 2031 /* create the tunnel qp resources */ 2032 ret = create_pv_resources(&dev->ib_dev, slave, port, 1, 2033 dev->sriov.demux[port - 1].tun[slave]); 2034 2035 /* for master, create the real sqp resources */ 2036 if (!ret && slave == mlx4_master_func_num(dev->dev)) 2037 ret = create_pv_resources(&dev->ib_dev, slave, port, 0, 2038 dev->sriov.sqps[port - 1]); 2039 return ret; 2040 } 2041 2042 void mlx4_ib_tunnels_update_work(struct work_struct *work) 2043 { 2044 struct mlx4_ib_demux_work *dmxw; 2045 2046 dmxw = container_of(work, struct mlx4_ib_demux_work, work); 2047 mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port, 2048 dmxw->do_init); 2049 kfree(dmxw); 2050 return; 2051 } 2052 2053 static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, 2054 struct mlx4_ib_demux_ctx *ctx, 2055 int port) 2056 { 2057 char name[12]; 2058 int ret = 0; 2059 int i; 2060 2061 ctx->tun = kcalloc(dev->dev->caps.sqp_demux, 2062 sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL); 2063 if (!ctx->tun) 2064 return -ENOMEM; 2065 2066 ctx->dev = dev; 2067 ctx->port = port; 2068 ctx->ib_dev = &dev->ib_dev; 2069 2070 for (i = 0; 2071 i < min(dev->dev->caps.sqp_demux, 2072 (u16)(dev->dev->persist->num_vfs 
                           + 1));
             i++) {
                struct mlx4_active_ports actv_ports =
                        mlx4_get_active_ports(dev->dev, i);

                if (!test_bit(port - 1, actv_ports.ports))
                        continue;

                ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
                if (ret) {
                        ret = -ENOMEM;
                        goto err_mcg;
                }
        }

        ret = mlx4_ib_mcg_port_init(ctx);
        if (ret) {
                pr_err("Failed initializing mcg para-virt (%d)\n", ret);
                goto err_mcg;
        }

        snprintf(name, sizeof name, "mlx4_ibt%d", port);
        ctx->wq = create_singlethread_workqueue(name);
        if (!ctx->wq) {
                pr_err("Failed to create tunnelling WQ for port %d\n", port);
                ret = -ENOMEM;
                goto err_wq;
        }

        snprintf(name, sizeof name, "mlx4_ibud%d", port);
        ctx->ud_wq = create_singlethread_workqueue(name);
        if (!ctx->ud_wq) {
                pr_err("Failed to create up/down WQ for port %d\n", port);
                ret = -ENOMEM;
                goto err_udwq;
        }

        return 0;

err_udwq:
        destroy_workqueue(ctx->wq);
        ctx->wq = NULL;

err_wq:
        mlx4_ib_mcg_port_cleanup(ctx, 1);
err_mcg:
        for (i = 0; i < dev->dev->caps.sqp_demux; i++)
                free_pv_object(dev, i, port);
        kfree(ctx->tun);
        ctx->tun = NULL;
        return ret;
}

static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
{
        if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
                sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
                flush_workqueue(sqp_ctx->wq);
                if (sqp_ctx->has_smi) {
                        ib_destroy_qp(sqp_ctx->qp[0].qp);
                        sqp_ctx->qp[0].qp = NULL;
                        mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
                }
                ib_destroy_qp(sqp_ctx->qp[1].qp);
                sqp_ctx->qp[1].qp = NULL;
                mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
                ib_dealloc_pd(sqp_ctx->pd);
                sqp_ctx->pd = NULL;
                ib_destroy_cq(sqp_ctx->cq);
                sqp_ctx->cq = NULL;
                sqp_ctx->state = DEMUX_PV_STATE_DOWN;
        }
}

static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
{
        int i;
        if (ctx) {
                struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
                mlx4_ib_mcg_port_cleanup(ctx, 1);
                for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
                        if (!ctx->tun[i])
                                continue;
                        if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
                                ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
                }
                flush_workqueue(ctx->wq);
                for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
                        destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
                        free_pv_object(dev, i, ctx->port);
                }
                kfree(ctx->tun);
                destroy_workqueue(ctx->ud_wq);
                destroy_workqueue(ctx->wq);
        }
}

static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
{
        int i;

        if (!mlx4_is_master(dev->dev))
                return;
        /* initialize or tear down tunnel QPs for the master */
        for (i = 0; i < dev->dev->caps.num_ports; i++)
                mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
        return;
}

int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
{
        int i = 0;
        int err;

        if (!mlx4_is_mfunc(dev->dev))
                return 0;

        dev->sriov.is_going_down = 0;
        spin_lock_init(&dev->sriov.going_down_lock);
        mlx4_ib_cm_paravirt_init(dev);

        mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");

        if (mlx4_is_slave(dev->dev)) {
                mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
                return 0;
        }

        for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
                if (i
                    == mlx4_master_func_num(dev->dev))
                        mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid);
                else
                        mlx4_put_slave_node_guid(dev->dev, i, mlx4_ib_gen_node_guid());
        }

        err = mlx4_ib_init_alias_guid_service(dev);
        if (err) {
                mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n");
                goto paravirt_err;
        }
        err = mlx4_ib_device_register_sysfs(dev);
        if (err) {
                mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n");
                goto sysfs_err;
        }

        mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
                     dev->dev->caps.sqp_demux);
        for (i = 0; i < dev->num_ports; i++) {
                union ib_gid gid;
                err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
                if (err)
                        goto demux_err;
                dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
                atomic64_set(&dev->sriov.demux[i].subnet_prefix,
                             be64_to_cpu(gid.global.subnet_prefix));
                err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
                                      &dev->sriov.sqps[i]);
                if (err)
                        goto demux_err;
                err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
                if (err)
                        goto free_pv;
        }
        mlx4_ib_master_tunnels(dev, 1);
        return 0;

free_pv:
        free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
demux_err:
        while (--i >= 0) {
                free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
                mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
        }
        mlx4_ib_device_unregister_sysfs(dev);

sysfs_err:
        mlx4_ib_destroy_alias_guid_service(dev);

paravirt_err:
        mlx4_ib_cm_paravirt_clean(dev, -1);

        return err;
}

void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
{
        int i;
        unsigned long flags;

        if (!mlx4_is_mfunc(dev->dev))
                return;

        spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
        dev->sriov.is_going_down = 1;
        spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
        if (mlx4_is_master(dev->dev)) {
                for (i = 0; i < dev->num_ports; i++) {
                        flush_workqueue(dev->sriov.demux[i].ud_wq);
                        mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
                        kfree(dev->sriov.sqps[i]);
                        dev->sriov.sqps[i] = NULL;
                        mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
                }

                mlx4_ib_cm_paravirt_clean(dev, -1);
                mlx4_ib_destroy_alias_guid_service(dev);
                mlx4_ib_device_unregister_sysfs(dev);
        }
}
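
/*
 * Illustrative sketch only (not part of the driver): one way a caller could
 * queue mlx4_ib_tunnels_update_work() to bring a slave's tunnel QPs up or
 * down asynchronously.  The struct mlx4_ib_demux_work fields used below are
 * exactly the ones consumed by mlx4_ib_tunnels_update_work() above; the
 * helper name queue_tunnels_update() and the choice of the per-port ud_wq
 * workqueue are assumptions for illustration, not the driver's actual call
 * site.
 *
 *	static int queue_tunnels_update(struct mlx4_ib_dev *dev, int slave,
 *					u8 port, int do_init)
 *	{
 *		struct mlx4_ib_demux_work *dmxw;
 *
 *		dmxw = kmalloc(sizeof(*dmxw), GFP_ATOMIC);
 *		if (!dmxw)
 *			return -ENOMEM;
 *
 *		INIT_WORK(&dmxw->work, mlx4_ib_tunnels_update_work);
 *		dmxw->dev = dev;
 *		dmxw->slave = slave;
 *		dmxw->port = port;
 *		dmxw->do_init = do_init;
 *		queue_work(dev->sriov.demux[port - 1].ud_wq, &dmxw->work);
 *		return 0;
 *	}
 *
 * The work item is freed by mlx4_ib_tunnels_update_work() itself once the
 * update completes, so the caller only allocates and queues it.
 */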