/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_CM

#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp6.h>

#include <rdma/ib_cm.h>
#include <rdma/ib_cache.h>
#include <linux/delay.h>

int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
		 "Max number of connected-mode QPs per interface "
		 "(applied only if shared receive queue is not available)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)

static struct ib_qp_attr ipoib_cm_err_attr = {
	.qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

static struct ib_send_wr ipoib_cm_rx_drain_wr = {
	.wr_id = IPOIB_CM_RX_DRAIN_WRID,
	.opcode = IB_WR_SEND,
};

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, struct ipoib_cm_rx_buf *rx_req)
{
	ipoib_dma_unmap_rx(priv, (struct ipoib_rx_buf *)rx_req);
}

static int ipoib_cm_post_receive_srq(struct ipoib_dev_priv *priv, int id)
{
	struct ib_recv_wr *bad_wr;
	struct ipoib_rx_buf *rx_req;
	struct mbuf *m;
	int ret;
	int i;

	rx_req = (struct ipoib_rx_buf *)&priv->cm.srq_ring[id];
	for (m = rx_req->mb, i = 0; m != NULL; m = m->m_next, i++) {
		priv->cm.rx_sge[i].addr = rx_req->mapping[i];
		priv->cm.rx_sge[i].length = m->m_len;
	}

	priv->cm.rx_wr.num_sge = i;
	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_dma_unmap_rx(priv, rx_req);
		m_freem(priv->cm.srq_ring[id].mb);
		priv->cm.srq_ring[id].mb = NULL;
	}

	return ret;
}

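/*
 * Non-SRQ variant of the post routine above: without a shared receive
 * queue each connection owns a private rx_ring and QP, so the caller
 * supplies per-connection WR and SGE templates instead of the shared
 * priv->cm.rx_wr/rx_sge pair used on the SRQ path.
 */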
static int ipoib_cm_post_receive_nonsrq(struct ipoib_dev_priv *priv,
					struct ipoib_cm_rx *rx,
					struct ib_recv_wr *wr,
					struct ib_sge *sge, int id)
{
	struct ipoib_rx_buf *rx_req;
	struct ib_recv_wr *bad_wr;
	struct mbuf *m;
	int ret;
	int i;

	rx_req = (struct ipoib_rx_buf *)&rx->rx_ring[id];
	for (m = rx_req->mb, i = 0; m != NULL; m = m->m_next, i++) {
		sge[i].addr = rx_req->mapping[i];
		sge[i].length = m->m_len;
	}

	wr->num_sge = i;
	wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	ret = ib_post_recv(rx->qp, wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
		ipoib_dma_unmap_rx(priv, rx_req);
		m_freem(rx->rx_ring[id].mb);
		rx->rx_ring[id].mb = NULL;
	}

	return ret;
}

static struct mbuf *
ipoib_cm_alloc_rx_mb(struct ipoib_dev_priv *priv, struct ipoib_cm_rx_buf *rx_req)
{
	return ipoib_alloc_map_mb(priv, (struct ipoib_rx_buf *)rx_req,
	    priv->cm.max_cm_mtu);
}

static void ipoib_cm_free_rx_ring(struct ipoib_dev_priv *priv,
				  struct ipoib_cm_rx_buf *rx_ring)
{
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (rx_ring[i].mb) {
			ipoib_cm_dma_unmap_rx(priv, &rx_ring[i]);
			m_freem(rx_ring[i].mb);
		}

	kfree(rx_ring);
}

static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
	struct ib_send_wr *bad_wr;
	struct ipoib_cm_rx *p;

	/* We only reserved 1 extra slot in the CQ for drain WRs, so
	 * make sure we have at most 1 outstanding WR. */
	if (list_empty(&priv->cm.rx_flush_list) ||
	    !list_empty(&priv->cm.rx_drain_list))
		return;

	/*
	 * QPs on the flush list are in the error state; this way, a
	 * "flush error" WC will be generated immediately for each WR
	 * we post.
	 */
	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
		ipoib_warn(priv, "failed to post drain wr\n");

	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}

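/*
 * Async QP event handler.  IB_EVENT_QP_LAST_WQE_REACHED is raised once
 * a QP that went into the error state has no receive WQEs left to
 * flush; at that point the connection is moved onto the flush list and
 * the drain state machine is kicked.
 */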
static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
	struct ipoib_cm_rx *p = ctx;
	struct ipoib_dev_priv *priv = p->priv;
	unsigned long flags;

	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
		return;

	spin_lock_irqsave(&priv->lock, flags);
	list_move(&p->list, &priv->cm.rx_flush_list);
	p->state = IPOIB_CM_RX_FLUSH;
	ipoib_cm_start_rx_drain(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static struct ib_qp *ipoib_cm_create_rx_qp(struct ipoib_dev_priv *priv,
					   struct ipoib_cm_rx *p)
{
	struct ib_qp_init_attr attr = {
		.event_handler = ipoib_cm_rx_event_handler,
		.send_cq = priv->recv_cq, /* For drain WR */
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = 1, /* For drain WR */
		.cap.max_send_sge = 1,
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = p,
	};

	if (!ipoib_cm_has_srq(priv)) {
		attr.cap.max_recv_wr = ipoib_recvq_size;
		attr.cap.max_recv_sge = priv->cm.num_frags;
	}

	return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_modify_rx_qp(struct ipoib_dev_priv *priv,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
				 unsigned psn)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
		return ret;
	}
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	/*
	 * Current Mellanox HCA firmware won't generate completions
	 * with error for drain WRs unless the QP has been moved to
	 * RTS first. This work-around leaves a window where a QP has
	 * moved to error asynchronously, but this will eventually get
	 * fixed in firmware, so let's not error out if modify QP
	 * fails.
	 */
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return 0;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return 0;
	}

	return 0;
}

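/*
 * Prime a reusable receive work request.  Only the lkey is constant;
 * the posting routines rewrite addr/length per mbuf fragment and bump
 * num_sge (initialized to 1 here) to the actual fragment count.
 */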
static void ipoib_cm_init_rx_wr(struct ipoib_dev_priv *priv,
				struct ib_recv_wr *wr,
				struct ib_sge *sge)
{
	int i;

	for (i = 0; i < IPOIB_CM_RX_SG; i++)
		sge[i].lkey = priv->mr->lkey;

	wr->next = NULL;
	wr->sg_list = sge;
	wr->num_sge = 1;
}

static int ipoib_cm_nonsrq_init_rx(struct ipoib_dev_priv *priv,
    struct ib_cm_id *cm_id, struct ipoib_cm_rx *rx)
{
	struct {
		struct ib_recv_wr wr;
		struct ib_sge sge[IPOIB_CM_RX_SG];
	} *t;
	int ret;
	int i;

	rx->rx_ring = kzalloc(ipoib_recvq_size * sizeof *rx->rx_ring, GFP_KERNEL);
	if (!rx->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		return -ENOMEM;
	}

	t = kmalloc(sizeof *t, GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_free;
	}

	ipoib_cm_init_rx_wr(priv, &t->wr, t->sge);

	spin_lock_irq(&priv->lock);

	if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
		spin_unlock_irq(&priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
		ret = -EINVAL;
		goto err_free;
	} else
		++priv->cm.nonsrq_conn_qp;

	spin_unlock_irq(&priv->lock);

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_mb(priv, &rx->rx_ring[i])) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			ret = -ENOMEM;
			goto err_count;
		}
		ret = ipoib_cm_post_receive_nonsrq(priv, rx, &t->wr, t->sge, i);
		if (ret) {
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
				   "failed for buf %d\n", i);
			ret = -EIO;
			goto err_count;
		}
	}

	rx->recv_count = ipoib_recvq_size;

	kfree(t);

	return 0;

err_count:
	spin_lock_irq(&priv->lock);
	--priv->cm.nonsrq_conn_qp;
	spin_unlock_irq(&priv->lock);

err_free:
	kfree(t);
	ipoib_cm_free_rx_ring(priv, rx->rx_ring);

	return ret;
}

static int ipoib_cm_send_rep(struct ipoib_dev_priv *priv, struct ib_cm_id *cm_id,
			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
			     unsigned psn)
{
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(priv->cm.max_cm_mtu);

	rep.private_data = &data;
	rep.private_data_len = sizeof data;
	rep.flow_control = 0;
	rep.rnr_retry_count = req->rnr_retry_count;
	rep.srq = ipoib_cm_has_srq(priv);
	rep.qp_num = qp->qp_num;
	rep.starting_psn = psn;
	return ib_send_cm_rep(cm_id, &rep);
}

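/*
 * Passive-side connection setup, run from the CM callback when a REQ
 * arrives: allocate the RC QP, walk it to RTS, set up the non-SRQ
 * receive ring if needed, and answer with a REP advertising our UD
 * QPN and maximum connected-mode MTU.
 */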
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_dev_priv *priv = cm_id->context;
	struct ipoib_cm_rx *p;
	unsigned psn;
	int ret;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->priv = priv;
	p->id = cm_id;
	cm_id->context = p;
	p->state = IPOIB_CM_RX_LIVE;
	p->jiffies = jiffies;
	INIT_LIST_HEAD(&p->list);

	p->qp = ipoib_cm_create_rx_qp(priv, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		goto err_qp;
	}

	psn = random() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(priv, cm_id, p->qp, psn);
	if (ret)
		goto err_modify;

	if (!ipoib_cm_has_srq(priv)) {
		ret = ipoib_cm_nonsrq_init_rx(priv, cm_id, p);
		if (ret)
			goto err_modify;
	}

	spin_lock_irq(&priv->lock);
	queue_delayed_work(ipoib_workqueue,
			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	/* Add this entry to the passive ids list head, but do not re-add
	 * it if IB_EVENT_QP_LAST_WQE_REACHED has moved it to the flush list. */
	p->jiffies = jiffies;
	if (p->state == IPOIB_CM_RX_LIVE)
		list_move(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);

	ret = ipoib_cm_send_rep(priv, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
	}
	return 0;

err_modify:
	ib_destroy_qp(p->qp);
err_qp:
	kfree(p);
	return ret;
}

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		ib_send_cm_drep(cm_id, NULL, 0);
		/* Fall through */
	case IB_CM_REJ_RECEIVED:
		p = cm_id->context;
		priv = p->priv;
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
		/* Fall through */
	default:
		return 0;
	}
}

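/*
 * Receive completion handler.  wr_ids were posted as
 * (index | IPOIB_OP_CM | IPOIB_OP_RECV), so masking the flag bits off
 * wc->wr_id recovers the ring slot; anything at or above
 * ipoib_recvq_size is either the special drain WR or a bogus id.
 */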
void ipoib_cm_handle_rx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
{
	struct ipoib_cm_rx_buf saverx;
	struct ipoib_cm_rx_buf *rx_ring;
	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
	struct ifnet *dev = priv->dev;
	struct mbuf *mb, *newmb;
	struct ipoib_cm_rx *p;
	int has_srq;
	u_short proto;

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
			spin_lock(&priv->lock);
			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
			ipoib_cm_start_rx_drain(priv);
			if (priv->cm.id != NULL)
				queue_work(ipoib_workqueue,
					   &priv->cm.rx_reap_task);
			spin_unlock(&priv->lock);
		} else
			ipoib_warn(priv, "cm recv completion event with wrid %d (>= %d)\n",
				   wr_id, ipoib_recvq_size);
		return;
	}

	p = wc->qp->qp_context;

	has_srq = ipoib_cm_has_srq(priv);
	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

	mb = rx_ring[wr_id].mb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv, "cm recv error "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);
		++dev->if_ierrors;
		if (has_srq)
			goto repost;
		else {
			if (!--p->recv_count) {
				spin_lock(&priv->lock);
				list_move(&p->list, &priv->cm.rx_reap_list);
				queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
				spin_unlock(&priv->lock);
			}
			return;
		}
	}

	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			p->jiffies = jiffies;
			/* Move this entry to the list head, but do not re-add
			 * it if it has been moved out of the list. */
			if (p->state == IPOIB_CM_RX_LIVE)
				list_move(&p->list, &priv->cm.passive_ids);
		}
	}

	memcpy(&saverx, &rx_ring[wr_id], sizeof(saverx));
	newmb = ipoib_cm_alloc_rx_mb(priv, &rx_ring[wr_id]);
	if (unlikely(!newmb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		++dev->if_ierrors;
		memcpy(&rx_ring[wr_id], &saverx, sizeof(saverx));
		goto repost;
	}

	ipoib_cm_dma_unmap_rx(priv, &saverx);

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_dma_mb(priv, mb, wc->byte_len);

	++dev->if_ipackets;
	dev->if_ibytes += mb->m_pkthdr.len;

	mb->m_pkthdr.rcvif = dev;
	proto = *mtod(mb, uint16_t *);
	m_adj(mb, IPOIB_ENCAP_LEN);

	IPOIB_MTAP_PROTO(dev, mb, proto);
	ipoib_demux(dev, mb, ntohs(proto));

repost:
	if (has_srq) {
		if (unlikely(ipoib_cm_post_receive_srq(priv, wr_id)))
			ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
				   "for buf %d\n", wr_id);
	} else {
		if (unlikely(ipoib_cm_post_receive_nonsrq(priv, p,
							  &priv->cm.rx_wr,
							  priv->cm.rx_sge,
							  wr_id))) {
			--p->recv_count;
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
				   "for buf %d\n", wr_id);
		}
	}
}

static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    struct ipoib_cm_tx_buf *tx_req,
			    unsigned int wr_id)
{
	struct ib_send_wr *bad_wr;
	struct mbuf *mb = tx_req->mb;
	u64 *mapping = tx_req->mapping;
	struct mbuf *m;
	int i;

	for (m = mb, i = 0; m != NULL; m = m->m_next, i++) {
		priv->tx_sge[i].addr = mapping[i];
		priv->tx_sge[i].length = m->m_len;
	}
	priv->tx_wr.num_sge = i;
	priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM;
	priv->tx_wr.opcode = IB_WR_SEND;

	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}

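/*
 * Queue one mbuf chain on a connected-mode QP.  Oversized packets are
 * not fragmented; they are handed to ipoib_cm_mb_too_long() so the
 * stack can emit an ICMP "fragmentation needed" / "packet too big"
 * error instead.
 */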
void ipoib_cm_send(struct ipoib_dev_priv *priv, struct mbuf *mb, struct ipoib_cm_tx *tx)
{
	struct ipoib_cm_tx_buf *tx_req;
	struct ifnet *dev = priv->dev;

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
		while (ipoib_poll_tx(priv))
			; /* nothing */

	m_adj(mb, sizeof(struct ipoib_pseudoheader));
	if (unlikely(mb->m_pkthdr.len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   mb->m_pkthdr.len, tx->mtu);
		++dev->if_oerrors;
		ipoib_cm_mb_too_long(priv, mb, IPOIB_CM_MTU(tx->mtu));
		return;
	}

	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, mb->m_pkthdr.len, tx->qp->qp_num);

	/*
	 * We put the mb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
	tx_req->mb = mb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, (struct ipoib_tx_buf *)tx_req,
	    priv->cm.num_frags))) {
		++dev->if_oerrors;
		if (tx_req->mb)
			m_freem(tx_req->mb);
		return;
	}

	if (unlikely(post_send(priv, tx, tx_req, tx->tx_head & (ipoib_sendq_size - 1)))) {
		ipoib_warn(priv, "post_send failed\n");
		++dev->if_oerrors;
		ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);
		m_freem(mb);
	} else {
		++tx->tx_head;

		if (++priv->tx_outstanding == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
				  tx->qp->qp_num);
			if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
				ipoib_warn(priv, "request notify on send CQ failed\n");
			dev->if_drv_flags |= IFF_DRV_OACTIVE;
		}
	}
}

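/*
 * Send completion handler: unmap and free the finished mbuf, reopen
 * the interface queue once the ring drains back to half full, and
 * schedule the connection for reaping on any error besides a flush.
 */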
void ipoib_cm_handle_tx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
{
	struct ipoib_cm_tx *tx = wc->qp->qp_context;
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
	struct ifnet *dev = priv->dev;
	struct ipoib_cm_tx_buf *tx_req;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (>= %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &tx->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	++dev->if_opackets;
	dev->if_obytes += tx_req->mb->m_pkthdr.len;

	m_freem(tx_req->mb);

	++tx->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    (dev->if_drv_flags & IFF_DRV_OACTIVE) != 0 &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		dev->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_path *path;

		ipoib_dbg(priv, "failed cm send event "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);

		path = tx->path;

		if (path) {
			path->cm = NULL;
			rb_erase(&path->rb_node, &priv->path_tree);
			list_del(&path->list);
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
	}
}

int ipoib_cm_dev_open(struct ipoib_dev_priv *priv)
{
	int ret;

	if (!IPOIB_CM_SUPPORTED(IF_LLADDR(priv->dev)))
		return 0;

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, priv);
	if (IS_ERR(priv->cm.id)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);
		goto err_cm;
	}

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
			   0, NULL);
	if (ret) {
		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
		goto err_listen;
	}

	return 0;

err_listen:
	ib_destroy_cm_id(priv->cm.id);
err_cm:
	priv->cm.id = NULL;
	return ret;
}

static void ipoib_cm_free_rx_reap_list(struct ipoib_dev_priv *priv)
{
	struct ipoib_cm_rx *rx, *n;
	LIST_HEAD(list);

	spin_lock_irq(&priv->lock);
	list_splice_init(&priv->cm.rx_reap_list, &list);
	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(rx, n, &list, list) {
		ib_destroy_cm_id(rx->id);
		ib_destroy_qp(rx->qp);
		if (!ipoib_cm_has_srq(priv)) {
			ipoib_cm_free_rx_ring(priv, rx->rx_ring);
			spin_lock_irq(&priv->lock);
			--priv->cm.nonsrq_conn_qp;
			spin_unlock_irq(&priv->lock);
		}
		kfree(rx);
	}
}

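/*
 * Tear down the passive side: destroy the listening CM ID so no new
 * REQs arrive, force every live RX QP into the error state, then give
 * the drain protocol up to five seconds before declaring the hardware
 * wedged and reaping everything unconditionally.
 */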
void ipoib_cm_dev_stop(struct ipoib_dev_priv *priv)
{
	struct ipoib_cm_rx *p;
	unsigned long begin;
	int ret;

	if (!IPOIB_CM_SUPPORTED(IF_LLADDR(priv->dev)) || !priv->cm.id)
		return;

	ib_destroy_cm_id(priv->cm.id);
	priv->cm.id = NULL;

	cancel_work_sync(&priv->cm.rx_reap_task);

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	/* Wait for all RX to be drained */
	begin = jiffies;

	while (!list_empty(&priv->cm.rx_error_list) ||
	       !list_empty(&priv->cm.rx_flush_list) ||
	       !list_empty(&priv->cm.rx_drain_list)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "RX drain timing out\n");

			/*
			 * Assume the HW is wedged and just free up everything.
			 */
			list_splice_init(&priv->cm.rx_flush_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_error_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_drain_list,
					 &priv->cm.rx_reap_list);
			break;
		}
		spin_unlock_irq(&priv->lock);
		msleep(1);
		ipoib_drain_cq(priv);
		spin_lock_irq(&priv->lock);
	}

	spin_unlock_irq(&priv->lock);

	ipoib_cm_free_rx_reap_list(priv);

	cancel_delayed_work(&priv->cm.stale_task);
}

static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = p->priv;
	struct ipoib_cm_data *data = event->private_data;
	struct ifqueue mbqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	struct mbuf *mb;

	ipoib_dbg(priv, "cm rep handler\n");
	p->mtu = be32_to_cpu(data->mtu);

	if (p->mtu <= IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
			   p->mtu, IPOIB_ENCAP_LEN);
		return -EINVAL;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return ret;
	}

	bzero(&mbqueue, sizeof(mbqueue));

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	if (p->path)
		for (;;) {
			_IF_DEQUEUE(&p->path->queue, mb);
			if (mb == NULL)
				break;
			_IF_ENQUEUE(&mbqueue, mb);
		}
	spin_unlock_irq(&priv->lock);

	for (;;) {
		struct ifnet *dev = p->priv->dev;
		_IF_DEQUEUE(&mbqueue, mb);
		if (mb == NULL)
			break;
		mb->m_pkthdr.rcvif = dev;
		if (dev->if_transmit(dev, mb))
			ipoib_warn(priv, "if_transmit failed "
				   "to requeue packet\n");
	}

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
		return ret;
	}
	return 0;
}

static struct ib_qp *ipoib_cm_create_tx_qp(struct ipoib_dev_priv *priv,
					   struct ipoib_cm_tx *tx)
{
	struct ib_qp_init_attr attr = {
		.send_cq = priv->send_cq,
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = ipoib_sendq_size,
		.cap.max_send_sge = priv->cm.num_frags,
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = tx
	};

	return ib_create_qp(priv->pd, &attr);
}

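/*
 * Active-side REQ.  The CM service ID is the fixed IPoIB-CM IETF
 * prefix OR'ed with the peer's UD QPN (per the IPoIB connected-mode
 * service-ID convention), and the private data advertises our own
 * QPN and maximum CM MTU, mirroring what the passive side sends back
 * in its REP.
 */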
static int ipoib_cm_send_req(struct ipoib_dev_priv *priv,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     u32 qpn,
			     struct ib_sa_path_rec *pathrec)
{
	struct ipoib_cm_data data = {};
	struct ib_cm_req_param req = {};

	ipoib_dbg(priv, "cm send req\n");

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(priv->cm.max_cm_mtu);

	req.primary_path = pathrec;
	req.alternate_path = NULL;
	req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
	req.qp_num = qp->qp_num;
	req.qp_type = qp->qp_type;
	req.private_data = &data;
	req.private_data_len = sizeof data;
	req.flow_control = 0;

	req.starting_psn = 0; /* FIXME */

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req.responder_resources = 4;
	req.remote_cm_response_timeout = 20;
	req.local_cm_response_timeout = 20;
	req.retry_count = 0; /* RFC draft warns against retries */
	req.rnr_retry_count = 0; /* RFC draft warns against retries */
	req.max_cm_retries = 15;
	req.srq = ipoib_cm_has_srq(priv);
	return ib_send_cm_req(id, &req);
}

static int ipoib_cm_modify_tx_init(struct ipoib_dev_priv *priv,
				   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
	if (ret) {
		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = priv->port;
	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
		return ret;
	}
	return 0;
}

static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = p->priv;
	int ret;

	p->tx_ring = kzalloc(ipoib_sendq_size * sizeof *p->tx_ring, GFP_KERNEL);
	if (!p->tx_ring) {
		ipoib_warn(priv, "failed to allocate tx ring\n");
		ret = -ENOMEM;
		goto err_tx;
	}

	p->qp = ipoib_cm_create_tx_qp(p->priv, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
		goto err_qp;
	}

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
		goto err_id;
	}

	ret = ipoib_cm_modify_tx_init(p->priv, p->id, p->qp);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx qp to INIT: %d\n", ret);
		goto err_modify;
	}

	ret = ipoib_cm_send_req(p->priv, p->id, p->qp, qpn, pathrec);
	if (ret) {
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
		goto err_send_cm;
	}

	ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
		  p->qp->qp_num, pathrec->dgid.raw, qpn);

	return 0;

err_send_cm:
err_modify:
	ib_destroy_cm_id(p->id);
err_id:
	p->id = NULL;
	ib_destroy_qp(p->qp);
err_qp:
	p->qp = NULL;
	kfree(p->tx_ring);
err_tx:
	return ret;
}

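/*
 * Destroy an active connection.  In-flight sends get up to five
 * seconds to complete; whatever is still outstanding after that is
 * force-unmapped and freed so tx_outstanding stays balanced before
 * the QP and ring are released.
 */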
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
	struct ipoib_dev_priv *priv = p->priv;
	struct ifnet *dev = priv->dev;
	struct ipoib_cm_tx_buf *tx_req;
	unsigned long begin;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	if (p->path)
		ipoib_path_free(priv, p->path);

	if (p->id)
		ib_destroy_cm_id(p->id);

	if (p->tx_ring) {
		/* Wait for all sends to complete */
		begin = jiffies;
		while ((int) p->tx_tail - (int) p->tx_head < 0) {
			if (time_after(jiffies, begin + 5 * HZ)) {
				ipoib_warn(priv, "timing out; %d sends not completed\n",
					   p->tx_head - p->tx_tail);
				goto timeout;
			}

			msleep(1);
		}
	}

timeout:
	while ((int) p->tx_tail - (int) p->tx_head < 0) {
		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
		ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);
		m_freem(tx_req->mb);
		++p->tx_tail;
		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
		    (dev->if_drv_flags & IFF_DRV_OACTIVE) != 0 &&
		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	if (p->qp)
		ib_destroy_qp(p->qp);

	kfree(p->tx_ring);
	kfree(p);
}

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = tx->priv;
	struct ipoib_path *path;
	unsigned long flags;
	int ret;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
		if (ret)
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		spin_lock_irqsave(&priv->lock, flags);
		path = tx->path;

		if (path) {
			path->cm = NULL;
			tx->path = NULL;
			rb_erase(&path->rb_node, &priv->path_tree);
			list_del(&path->list);
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		if (path)
			ipoib_path_free(tx->priv, path);
		break;
	default:
		break;
	}

	return 0;
}

struct ipoib_cm_tx *ipoib_cm_create_tx(struct ipoib_dev_priv *priv,
				       struct ipoib_path *path)
{
	struct ipoib_cm_tx *tx;

	tx = kzalloc(sizeof *tx, GFP_ATOMIC);
	if (!tx)
		return NULL;

	ipoib_dbg(priv, "Creating cm tx\n");
	path->cm = tx;
	tx->path = path;
	tx->priv = priv;
	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(ipoib_workqueue, &priv->cm.start_task);
	return tx;
}

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = tx->priv;

	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
		spin_lock(&priv->lock);
		list_move(&tx->list, &priv->cm.reap_list);
		spin_unlock(&priv->lock);
		queue_work(ipoib_workqueue, &priv->cm.reap_task);
		ipoib_dbg(priv, "Reap connection for gid %pI6\n",
			  tx->path->pathrec.dgid.raw);
		tx->path = NULL;
	}
}

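/*
 * Deferred connection bring-up.  ipoib_cm_create_tx() only queues the
 * connection on start_list; this work item performs the blocking
 * QP/CM setup, dropping priv->lock around each ipoib_cm_tx_init()
 * call and unwinding the path entry if setup fails.
 */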
static void ipoib_cm_tx_start(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.start_task);
	struct ipoib_path *path;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	int ret;

	struct ib_sa_path_rec pathrec;
	u32 qpn;

	ipoib_dbg(priv, "cm start task\n");
	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		path = p->path;
		qpn = IPOIB_QPN(path->hwaddr);
		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);

		spin_unlock_irqrestore(&priv->lock, flags);

		ret = ipoib_cm_tx_init(p, qpn, &pathrec);

		spin_lock_irqsave(&priv->lock, flags);

		if (ret) {
			path = p->path;
			if (path) {
				path->cm = NULL;
				rb_erase(&path->rb_node, &priv->path_tree);
				list_del(&path->list);
				ipoib_path_free(priv, path);
			}
			list_del(&p->list);
			kfree(p);
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_cm_tx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.reap_task);
	struct ipoib_cm_tx *p;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		list_del(&p->list);
		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_cm_tx_destroy(p);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_cm_mb_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.mb_task);
	struct mbuf *mb;
	unsigned long flags;
	unsigned mtu = priv->mcast_mtu;
	uint16_t proto;

	spin_lock_irqsave(&priv->lock, flags);

	for (;;) {
		IF_DEQUEUE(&priv->cm.mb_queue, mb);
		if (mb == NULL)
			break;
		spin_unlock_irqrestore(&priv->lock, flags);

		proto = htons(*mtod(mb, uint16_t *));
		m_adj(mb, IPOIB_ENCAP_LEN);
		if (proto == ETHERTYPE_IP)
			icmp_error(mb, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0, mtu);
#if defined(INET6)
		else if (proto == ETHERTYPE_IPV6)
			icmp6_error(mb, ICMP6_PACKET_TOO_BIG, 0, mtu);
#endif
		else
			m_freem(mb);

		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

void
ipoib_cm_mb_too_long(struct ipoib_dev_priv *priv, struct mbuf *mb, unsigned int mtu)
{
	int e = priv->cm.mb_queue.ifq_len;

	IF_ENQUEUE(&priv->cm.mb_queue, mb);
	if (e == 0)
		queue_work(ipoib_workqueue, &priv->cm.mb_task);
}

static void ipoib_cm_rx_reap(struct work_struct *work)
{
	ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
						cm.rx_reap_task));
}

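/*
 * Stale-connection sweeper.  passive_ids is kept in LRU order (fresh
 * activity moves an entry to the head), so scanning from the tail can
 * stop at the first entry younger than IPOIB_CM_RX_TIMEOUT; everything
 * older is pushed into the error state for the drain machinery to reap.
 */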
static void ipoib_cm_stale_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;
	int ret;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* List is sorted by LRU, start from tail,
		 * stop when we see a recently used entry */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
			break;
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	if (!list_empty(&priv->cm.passive_ids))
		queue_delayed_work(ipoib_workqueue,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	spin_unlock_irq(&priv->lock);
}

static void ipoib_cm_create_srq(struct ipoib_dev_priv *priv, int max_sge)
{
	struct ib_srq_init_attr srq_init_attr = {
		.attr = {
			.max_wr = ipoib_recvq_size,
			.max_sge = max_sge
		}
	};

	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
	if (IS_ERR(priv->cm.srq)) {
		if (PTR_ERR(priv->cm.srq) != -ENOSYS)
			printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
			       priv->ca->name, PTR_ERR(priv->cm.srq));
		priv->cm.srq = NULL;
		return;
	}

	priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
				    GFP_KERNEL);
	if (!priv->cm.srq_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		ib_destroy_srq(priv->cm.srq);
		priv->cm.srq = NULL;
		return;
	}
}

int ipoib_cm_dev_init(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;
	int i, ret;
	struct ib_device_attr attr;

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_LIST_HEAD(&priv->cm.rx_error_list);
	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.mb_task, ipoib_cm_mb_reap);
	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

	bzero(&priv->cm.mb_queue, sizeof(priv->cm.mb_queue));
	mtx_init(&priv->cm.mb_queue.ifq_mtx,
	    dev->if_xname, "if send queue", MTX_DEF);

	ret = ib_query_device(priv->ca, &attr);
	if (ret) {
		printk(KERN_WARNING "ib_query_device() failed with %d\n", ret);
		return ret;
	}

	ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge);

	attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge);
	ipoib_cm_create_srq(priv, attr.max_srq_sge);
	if (ipoib_cm_has_srq(priv)) {
		priv->cm.max_cm_mtu = attr.max_srq_sge * MJUMPAGESIZE;
		priv->cm.num_frags = attr.max_srq_sge;
		ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
			  priv->cm.max_cm_mtu, priv->cm.num_frags);
	} else {
		priv->cm.max_cm_mtu = IPOIB_CM_MAX_MTU;
		priv->cm.num_frags = IPOIB_CM_RX_SG;
	}

	ipoib_cm_init_rx_wr(priv, &priv->cm.rx_wr, priv->cm.rx_sge);

	if (ipoib_cm_has_srq(priv)) {
		for (i = 0; i < ipoib_recvq_size; ++i) {
			if (!ipoib_cm_alloc_rx_mb(priv, &priv->cm.srq_ring[i])) {
				ipoib_warn(priv, "failed to allocate "
					   "receive buffer %d\n", i);
				ipoib_cm_dev_cleanup(priv);
				return -ENOMEM;
			}

			if (ipoib_cm_post_receive_srq(priv, i)) {
				ipoib_warn(priv, "ipoib_cm_post_receive_srq "
					   "failed for buf %d\n", i);
				ipoib_cm_dev_cleanup(priv);
				return -EIO;
			}
		}
	}

	IF_LLADDR(priv->dev)[0] = IPOIB_FLAGS_RC;
	return 0;
}

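/*
 * Undo ipoib_cm_dev_init().  Safe to call on a partially initialized
 * device: it returns early if the SRQ or its ring was never allocated.
 */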
void ipoib_cm_dev_cleanup(struct ipoib_dev_priv *priv)
{
	int ret;

	if (!priv->cm.srq)
		return;

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	ret = ib_destroy_srq(priv->cm.srq);
	if (ret)
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)
		return;

	ipoib_cm_free_rx_ring(priv, priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;

	mtx_destroy(&priv->cm.mb_queue.ifq_mtx);
}

#endif /* CONFIG_INFINIBAND_IPOIB_CM */