/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/route.h>
#include <net/ip6_route.h>
#include <net/flow.h>
#include "qedr.h"
#include "qedr_iw_cm.h"

static inline void
qedr_fill_sockaddr4(const struct qed_iwarp_cm_info *cm_info,
		    struct iw_cm_event *event)
{
	struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;

	laddr->sin_family = AF_INET;
	raddr->sin_family = AF_INET;

	laddr->sin_port = htons(cm_info->local_port);
	raddr->sin_port = htons(cm_info->remote_port);

	laddr->sin_addr.s_addr = htonl(cm_info->local_ip[0]);
	raddr->sin_addr.s_addr = htonl(cm_info->remote_ip[0]);
}

static inline void
qedr_fill_sockaddr6(const struct qed_iwarp_cm_info *cm_info,
		    struct iw_cm_event *event)
{
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
	struct sockaddr_in6 *raddr6 =
	    (struct sockaddr_in6 *)&event->remote_addr;
	int i;

	laddr6->sin6_family = AF_INET6;
	raddr6->sin6_family = AF_INET6;

	laddr6->sin6_port = htons(cm_info->local_port);
	raddr6->sin6_port = htons(cm_info->remote_port);

	for (i = 0; i < 4; i++) {
		laddr6->sin6_addr.in6_u.u6_addr32[i] =
		    htonl(cm_info->local_ip[i]);
		raddr6->sin6_addr.in6_u.u6_addr32[i] =
		    htonl(cm_info->remote_ip[i]);
	}
}

static void qedr_iw_free_qp(struct kref *ref)
{
	struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);

	complete(&qp->qp_rel_comp);
}

static void
qedr_iw_free_ep(struct kref *ref)
{
	struct qedr_iw_ep *ep = container_of(ref, struct qedr_iw_ep, refcnt);

	if (ep->qp)
		kref_put(&ep->qp->refcnt, qedr_iw_free_qp);

	if (ep->cm_id)
		ep->cm_id->rem_ref(ep->cm_id);

	kfree(ep);
}

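/*
 * Handle an incoming MPA connection request reported against a listener:
 * allocate an endpoint for the pending connection, stash the qed endpoint
 * context, translate the qed CM info into sockaddrs and deliver an
 * IW_CM_EVENT_CONNECT_REQUEST to the listener's iw_cm event handler.
 */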
static void
qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_listener *listener = (struct qedr_iw_listener *)context;
	struct qedr_dev *dev = listener->dev;
	struct iw_cm_event event;
	struct qedr_iw_ep *ep;

	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
	if (!ep)
		return;

	ep->dev = dev;
	ep->qed_context = params->ep_context;
	kref_init(&ep->refcnt);

	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.status = params->status;

	if (!IS_ENABLED(CONFIG_IPV6) ||
	    params->cm_info->ip_version == QED_TCP_IPV4)
		qedr_fill_sockaddr4(params->cm_info, &event);
	else
		qedr_fill_sockaddr6(params->cm_info, &event);

	event.provider_data = (void *)ep;
	event.private_data = (void *)params->cm_info->private_data;
	event.private_data_len = (u8)params->cm_info->private_data_len;
	event.ord = params->cm_info->ord;
	event.ird = params->cm_info->ird;

	listener->cm_id->event_handler(listener->cm_id, &event);
}

static void
qedr_iw_issue_event(void *context,
		    struct qed_iwarp_cm_event_params *params,
		    enum iw_cm_event_type event_type)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct iw_cm_event event;

	memset(&event, 0, sizeof(event));
	event.status = params->status;
	event.event = event_type;

	if (params->cm_info) {
		event.ird = params->cm_info->ird;
		event.ord = params->cm_info->ord;
		/* Only connect_request and reply have valid private data;
		 * for the rest of the events this may be leftovers from
		 * connection establishment. CONNECT_REQUEST is issued via
		 * qedr_iw_mpa_request.
		 */
		if (event_type == IW_CM_EVENT_CONNECT_REPLY) {
			event.private_data_len =
			    params->cm_info->private_data_len;
			event.private_data =
			    (void *)params->cm_info->private_data;
		}
	}

	if (ep->cm_id)
		ep->cm_id->event_handler(ep->cm_id, &event);
}

static void
qedr_iw_close_event(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;

	if (ep->cm_id)
		qedr_iw_issue_event(context, params, IW_CM_EVENT_CLOSE);

	kref_put(&ep->refcnt, qedr_iw_free_ep);
}

static void
qedr_iw_qp_event(void *context,
		 struct qed_iwarp_cm_event_params *params,
		 enum ib_event_type ib_event, char *str)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;
	struct ib_qp *ibqp = &ep->qp->ibqp;
	struct ib_event event;

	DP_NOTICE(dev, "QP error received: %s\n", str);

	if (ibqp->event_handler) {
		event.event = ib_event;
		event.device = ibqp->device;
		event.element.qp = ibqp;
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

struct qedr_discon_work {
	struct work_struct work;
	struct qedr_iw_ep *ep;
	enum qed_iwarp_event_type event;
	int status;
};

static void qedr_iw_disconnect_worker(struct work_struct *work)
{
	struct qedr_discon_work *dwork =
	    container_of(work, struct qedr_discon_work, work);
	struct qed_rdma_modify_qp_in_params qp_params = { 0 };
	struct qedr_iw_ep *ep = dwork->ep;
	struct qedr_dev *dev = ep->dev;
	struct qedr_qp *qp = ep->qp;
	struct iw_cm_event event;

	/* The qp won't be released until we release the ep.
	 * The ep's refcnt was increased before calling this
	 * function, therefore it is safe to access qp.
	 */
	if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
			     &qp->iwarp_cm_flags))
		goto out;

	memset(&event, 0, sizeof(event));
	event.status = dwork->status;
	event.event = IW_CM_EVENT_DISCONNECT;

	/* Success means a graceful disconnect was requested; modifying
	 * to SQD is translated to a graceful disconnect. Otherwise a
	 * reset is sent.
	 */
	if (dwork->status)
		qp_params.new_state = QED_ROCE_QP_STATE_ERR;
	else
		qp_params.new_state = QED_ROCE_QP_STATE_SQD;

	if (ep->cm_id)
		ep->cm_id->event_handler(ep->cm_id, &event);

	SET_FIELD(qp_params.modify_flags,
		  QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);

	dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params);

	complete(&ep->qp->iwarp_cm_comp);
out:
	kfree(dwork);
	kref_put(&ep->refcnt, qedr_iw_free_ep);
}

static void
qedr_iw_disconnect_event(void *context,
			 struct qed_iwarp_cm_event_params *params)
{
	struct qedr_discon_work *work;
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	/* We can't get a close event before disconnect, but since
	 * we're scheduling a work queue we need to make sure close
	 * won't delete the ep, so we increase the refcnt.
	 */
	kref_get(&ep->refcnt);

	work->ep = ep;
	work->event = params->event;
	work->status = params->status;

	INIT_WORK(&work->work, qedr_iw_disconnect_worker);
	queue_work(dev->iwarp_wq, &work->work);
}

static void
qedr_iw_passive_complete(void *context,
			 struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;

	/* We will only reach the following state if MPA_REJECT was called on
	 * passive. In this case there will be no associated QP.
	 */
	if ((params->status == -ECONNREFUSED) && (!ep->qp)) {
		DP_DEBUG(dev, QEDR_MSG_IWARP,
			 "PASSIVE connection refused releasing ep...\n");
		kref_put(&ep->refcnt, qedr_iw_free_ep);
		return;
	}

	complete(&ep->qp->iwarp_cm_comp);
	qedr_iw_issue_event(context, params, IW_CM_EVENT_ESTABLISHED);

	if (params->status < 0)
		qedr_iw_close_event(context, params);
}

static void
qedr_iw_active_complete(void *context,
			struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;

	complete(&ep->qp->iwarp_cm_comp);
	qedr_iw_issue_event(context, params, IW_CM_EVENT_CONNECT_REPLY);

	if (params->status < 0)
		kref_put(&ep->refcnt, qedr_iw_free_ep);
}

static int
qedr_iw_mpa_reply(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;
	struct qed_iwarp_send_rtr_in rtr_in;

	rtr_in.ep_context = params->ep_context;

	return dev->ops->iwarp_send_rtr(dev->rdma_ctx, &rtr_in);
}

static int
qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;

	switch (params->event) {
	case QED_IWARP_EVENT_MPA_REQUEST:
		qedr_iw_mpa_request(context, params);
		break;
	case QED_IWARP_EVENT_ACTIVE_MPA_REPLY:
		qedr_iw_mpa_reply(context, params);
		break;
	case QED_IWARP_EVENT_PASSIVE_COMPLETE:
		qedr_iw_passive_complete(context, params);
		break;
	case QED_IWARP_EVENT_ACTIVE_COMPLETE:
		qedr_iw_active_complete(context, params);
		break;
	case QED_IWARP_EVENT_DISCONNECT:
		qedr_iw_disconnect_event(context, params);
		break;
	case QED_IWARP_EVENT_CLOSE:
		qedr_iw_close_event(context, params);
		break;
	case QED_IWARP_EVENT_RQ_EMPTY:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_RQ_EMPTY");
		break;
	case QED_IWARP_EVENT_IRQ_FULL:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_IRQ_FULL");
		break;
	case QED_IWARP_EVENT_LLP_TIMEOUT:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_LLP_TIMEOUT");
		break;
	case QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
				 "QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR");
		break;
	case QED_IWARP_EVENT_CQ_OVERFLOW:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_CQ_OVERFLOW");
		break;
	case QED_IWARP_EVENT_QP_CATASTROPHIC:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_QP_CATASTROPHIC");
		break;
	case QED_IWARP_EVENT_LOCAL_ACCESS_ERROR:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
				 "QED_IWARP_EVENT_LOCAL_ACCESS_ERROR");
		break;
	case QED_IWARP_EVENT_REMOTE_OPERATION_ERROR:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_REMOTE_OPERATION_ERROR");
		break;
	case QED_IWARP_EVENT_TERMINATE_RECEIVED:
		DP_NOTICE(dev, "Got terminate message\n");
		break;
	default:
		DP_NOTICE(dev, "Unknown event received %d\n", params->event);
		break;
	}
	return 0;
}

static u16 qedr_iw_get_vlan_ipv4(struct qedr_dev *dev, u32 *addr)
{
	struct net_device *ndev;
	u16 vlan_id = 0;

	ndev = ip_dev_find(&init_net, htonl(addr[0]));

	if (ndev) {
		vlan_id = rdma_vlan_dev_vlan_id(ndev);
		dev_put(ndev);
	}
	if (vlan_id == 0xffff)
		vlan_id = 0;
	return vlan_id;
}

static u16 qedr_iw_get_vlan_ipv6(u32 *addr)
{
	struct net_device *ndev = NULL;
	struct in6_addr laddr6;
	u16 vlan_id = 0;
	int i;

	if (!IS_ENABLED(CONFIG_IPV6))
		return vlan_id;

	for (i = 0; i < 4; i++)
		laddr6.in6_u.u6_addr32[i] = htonl(addr[i]);

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ndev) {
		if (ipv6_chk_addr(&init_net, &laddr6, ndev, 1)) {
			vlan_id = rdma_vlan_dev_vlan_id(ndev);
			break;
		}
	}

	rcu_read_unlock();
	if (vlan_id == 0xffff)
		vlan_id = 0;

	return vlan_id;
}

static int
qedr_addr4_resolve(struct qedr_dev *dev,
		   struct sockaddr_in *src_in,
		   struct sockaddr_in *dst_in, u8 *dst_mac)
{
	__be32 src_ip = src_in->sin_addr.s_addr;
	__be32 dst_ip = dst_in->sin_addr.s_addr;
	struct neighbour *neigh = NULL;
	struct rtable *rt = NULL;
	int rc = 0;

	rt = ip_route_output(&init_net, dst_ip, src_ip, 0, 0,
			     RT_SCOPE_UNIVERSE);
	if (IS_ERR(rt)) {
		DP_ERR(dev, "ip_route_output returned error\n");
		return -EINVAL;
	}

	neigh = dst_neigh_lookup(&rt->dst, &dst_ip);

	if (neigh) {
		rcu_read_lock();
		if (neigh->nud_state & NUD_VALID) {
			ether_addr_copy(dst_mac, neigh->ha);
			DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac);
		} else {
			neigh_event_send(neigh, NULL);
		}
		rcu_read_unlock();
		neigh_release(neigh);
	}

	ip_rt_put(rt);

	return rc;
}

static int
qedr_addr6_resolve(struct qedr_dev *dev,
		   struct sockaddr_in6 *src_in,
		   struct sockaddr_in6 *dst_in, u8 *dst_mac)
{
	struct neighbour *neigh = NULL;
	struct dst_entry *dst;
	struct flowi6 fl6;
	int rc = 0;

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = dst_in->sin6_addr;
	fl6.saddr = src_in->sin6_addr;

	dst = ip6_route_output(&init_net, NULL, &fl6);

	if ((!dst) || dst->error) {
		if (dst) {
			DP_ERR(dev,
			       "ip6_route_output returned dst->error = %d\n",
			       dst->error);
			dst_release(dst);
		}
		return -EINVAL;
	}
	neigh = dst_neigh_lookup(dst, &fl6.daddr);
	if (neigh) {
		rcu_read_lock();
		if (neigh->nud_state & NUD_VALID) {
			ether_addr_copy(dst_mac, neigh->ha);
			DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac);
		} else {
			neigh_event_send(neigh, NULL);
		}
		rcu_read_unlock();
		neigh_release(neigh);
	}

	dst_release(dst);

	return rc;
}

static struct qedr_qp *qedr_iw_load_qp(struct qedr_dev *dev, u32 qpn)
{
	struct qedr_qp *qp;

	xa_lock(&dev->qps);
	qp = xa_load(&dev->qps, qpn);
	if (qp)
		kref_get(&qp->refcnt);
	xa_unlock(&dev->qps);

	return qp;
}

int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct qedr_dev *dev = get_qedr_dev(cm_id->device);
	struct qed_iwarp_connect_out out_params;
	struct qed_iwarp_connect_in in_params;
	struct qed_iwarp_cm_info *cm_info;
	struct sockaddr_in6 *laddr6;
	struct sockaddr_in6 *raddr6;
	struct sockaddr_in *laddr;
	struct sockaddr_in *raddr;
	struct qedr_iw_ep *ep;
	struct qedr_qp *qp;
	int rc = 0;
	int i;

	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
	raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
	raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;

	DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n",
		 ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port),
		 ntohs(raddr->sin_port));

	DP_DEBUG(dev, QEDR_MSG_IWARP,
		 "Connect source address: %pISpc, remote address: %pISpc\n",
		 &cm_id->local_addr, &cm_id->remote_addr);

	if (!laddr->sin_port || !raddr->sin_port)
		return -EINVAL;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->dev = dev;
	kref_init(&ep->refcnt);

	qp = qedr_iw_load_qp(dev, conn_param->qpn);
	if (!qp) {
		rc = -EINVAL;
		goto err;
	}

	ep->qp = qp;
	cm_id->add_ref(cm_id);
	ep->cm_id = cm_id;

	in_params.event_cb = qedr_iw_event_handler;
	in_params.cb_context = ep;

	cm_info = &in_params.cm_info;
	memset(cm_info->local_ip, 0, sizeof(cm_info->local_ip));
	memset(cm_info->remote_ip, 0, sizeof(cm_info->remote_ip));

	if (!IS_ENABLED(CONFIG_IPV6) ||
	    cm_id->remote_addr.ss_family == AF_INET) {
		cm_info->ip_version = QED_TCP_IPV4;

		cm_info->remote_ip[0] = ntohl(raddr->sin_addr.s_addr);
		cm_info->local_ip[0] = ntohl(laddr->sin_addr.s_addr);
		cm_info->remote_port = ntohs(raddr->sin_port);
		cm_info->local_port = ntohs(laddr->sin_port);
		cm_info->vlan = qedr_iw_get_vlan_ipv4(dev, cm_info->local_ip);

		rc = qedr_addr4_resolve(dev, laddr, raddr,
					(u8 *)in_params.remote_mac_addr);

		in_params.mss = dev->iwarp_max_mtu -
		    (sizeof(struct iphdr) + sizeof(struct tcphdr));

	} else {
		in_params.cm_info.ip_version = QED_TCP_IPV6;

		for (i = 0; i < 4; i++) {
			cm_info->remote_ip[i] =
			    ntohl(raddr6->sin6_addr.in6_u.u6_addr32[i]);
			cm_info->local_ip[i] =
			    ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]);
		}

		cm_info->local_port = ntohs(laddr6->sin6_port);
		cm_info->remote_port = ntohs(raddr6->sin6_port);

		in_params.mss = dev->iwarp_max_mtu -
		    (sizeof(struct ipv6hdr) + sizeof(struct tcphdr));

		cm_info->vlan = qedr_iw_get_vlan_ipv6(cm_info->local_ip);

		rc = qedr_addr6_resolve(dev, laddr6, raddr6,
					(u8 *)in_params.remote_mac_addr);
	}
	if (rc)
		goto err;

	DP_DEBUG(dev, QEDR_MSG_IWARP,
		 "ord = %d ird=%d private_data=%p private_data_len=%d rq_psn=%d\n",
		 conn_param->ord, conn_param->ird, conn_param->private_data,
		 conn_param->private_data_len, qp->rq_psn);

	cm_info->ord = conn_param->ord;
	cm_info->ird = conn_param->ird;
	cm_info->private_data = conn_param->private_data;
	cm_info->private_data_len = conn_param->private_data_len;
	in_params.qp = qp->qed_qp;
	memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);

	if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
			     &qp->iwarp_cm_flags)) {
		rc = -ENODEV;
		goto err;	/* QP already being destroyed */
	}

	rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
	if (rc) {
		complete(&qp->iwarp_cm_comp);
		goto err;
	}

	return rc;

err:
	kref_put(&ep->refcnt, qedr_iw_free_ep);
	return rc;
}

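/*
 * Create an iWARP listener for the address bound to cm_id. The qed core
 * is handed the IP address, TCP port and VLAN to listen on, together with
 * qedr_iw_event_handler() as the callback, so incoming connection requests
 * later surface as QED_IWARP_EVENT_MPA_REQUEST events.
 */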
int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct qedr_dev *dev = get_qedr_dev(cm_id->device);
	struct qedr_iw_listener *listener;
	struct qed_iwarp_listen_in iparams;
	struct qed_iwarp_listen_out oparams;
	struct sockaddr_in *laddr;
	struct sockaddr_in6 *laddr6;
	int rc;
	int i;

	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;

	DP_DEBUG(dev, QEDR_MSG_IWARP,
		 "Create Listener address: %pISpc\n", &cm_id->local_addr);

	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
	if (!listener)
		return -ENOMEM;

	listener->dev = dev;
	cm_id->add_ref(cm_id);
	listener->cm_id = cm_id;
	listener->backlog = backlog;

	iparams.cb_context = listener;
	iparams.event_cb = qedr_iw_event_handler;
	iparams.max_backlog = backlog;

	if (!IS_ENABLED(CONFIG_IPV6) ||
	    cm_id->local_addr.ss_family == AF_INET) {
		iparams.ip_version = QED_TCP_IPV4;
		memset(iparams.ip_addr, 0, sizeof(iparams.ip_addr));

		iparams.ip_addr[0] = ntohl(laddr->sin_addr.s_addr);
		iparams.port = ntohs(laddr->sin_port);
		iparams.vlan = qedr_iw_get_vlan_ipv4(dev, iparams.ip_addr);
	} else {
		iparams.ip_version = QED_TCP_IPV6;

		for (i = 0; i < 4; i++) {
			iparams.ip_addr[i] =
			    ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]);
		}

		iparams.port = ntohs(laddr6->sin6_port);

		iparams.vlan = qedr_iw_get_vlan_ipv6(iparams.ip_addr);
	}
	rc = dev->ops->iwarp_create_listen(dev->rdma_ctx, &iparams, &oparams);
	if (rc)
		goto err;

	listener->qed_handle = oparams.handle;
	cm_id->provider_data = listener;
	return rc;

err:
	cm_id->rem_ref(cm_id);
	kfree(listener);
	return rc;
}

int qedr_iw_destroy_listen(struct iw_cm_id *cm_id)
{
	struct qedr_iw_listener *listener = cm_id->provider_data;
	struct qedr_dev *dev = get_qedr_dev(cm_id->device);
	int rc = 0;

	if (listener->qed_handle)
		rc = dev->ops->iwarp_destroy_listen(dev->rdma_ctx,
						    listener->qed_handle);

	cm_id->rem_ref(cm_id);
	kfree(listener);
	return rc;
}

int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data;
	struct qedr_dev *dev = ep->dev;
	struct qedr_qp *qp;
	struct qed_iwarp_accept_in params;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);

	qp = qedr_iw_load_qp(dev, conn_param->qpn);
	if (!qp) {
		DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
		return -EINVAL;
	}

	ep->qp = qp;
	cm_id->add_ref(cm_id);
	ep->cm_id = cm_id;

	params.ep_context = ep->qed_context;
	params.cb_context = ep;
	params.qp = ep->qp->qed_qp;
	params.private_data = conn_param->private_data;
	params.private_data_len = conn_param->private_data_len;
	params.ird = conn_param->ird;
	params.ord = conn_param->ord;

	if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
			     &qp->iwarp_cm_flags)) {
		rc = -EINVAL;
		goto err;	/* QP already destroyed */
	}

	rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
	if (rc) {
		complete(&qp->iwarp_cm_comp);
		goto err;
	}

	return rc;

err:
	kref_put(&ep->refcnt, qedr_iw_free_ep);

	return rc;
}

int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data;
	struct qedr_dev *dev = ep->dev;
	struct qed_iwarp_reject_in params;

	params.ep_context = ep->qed_context;
	params.cb_context = ep;
	params.private_data = pdata;
	params.private_data_len = pdata_len;
	ep->qp = NULL;

	return dev->ops->iwarp_reject(dev->rdma_ctx, &params);
}

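/*
 * QP reference helpers used by the iw_cm core. The final kref_put()
 * invokes qedr_iw_free_qp(), which completes qp_rel_comp so that a
 * destroy path waiting on that completion can release the QP.
 */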
void qedr_iw_qp_add_ref(struct ib_qp *ibqp)
{
	struct qedr_qp *qp = get_qedr_qp(ibqp);

	kref_get(&qp->refcnt);
}

void qedr_iw_qp_rem_ref(struct ib_qp *ibqp)
{
	struct qedr_qp *qp = get_qedr_qp(ibqp);

	kref_put(&qp->refcnt, qedr_iw_free_qp);
}

struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);

	return xa_load(&dev->qps, qpn);
}