/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/ratelimit.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"

/*
 * Convert IB-specific error message to RDS error message and call core
 * completion handler.
 */
static void rds_ib_send_complete(struct rds_message *rm,
				 int wc_status,
				 void (*complete)(struct rds_message *rm, int status))
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	complete(rm, notify_status);
}

static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
				   struct rm_data_op *op,
				   int wc_status)
{
	if (op->op_nents)
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				DMA_TO_DEVICE);
}
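/*
 * Note on mapping lifetime (a reader's sketch, not new behaviour): the
 * data scatterlist is mapped exactly once, in rds_ib_xmit() below via
 * ib_dma_map_sg(..., DMA_TO_DEVICE), and unmapped here when the send
 * completion for the final fragment hands the message back to us. The
 * map/unmap calls therefore pair 1:1 per message, not per fragment.
 */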
static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
				   struct rm_rdma_op *op,
				   int wc_status)
{
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	/* If the user asked for a completion notification on this
	 * message, we can implement three different semantics:
	 *  1.	Notify when we received the ACK on the RDS message
	 *	that was queued with the RDMA. This provides reliable
	 *	notification of RDMA status at the expense of a one-way
	 *	packet delay.
	 *  2.	Notify when the IB stack gives us the completion event for
	 *	the RDMA operation.
	 *  3.	Notify when the IB stack gives us the completion event for
	 *	the accompanying RDS messages.
	 * Here, we implement approach #3. To implement approach #2,
	 * we would need to take an event for the rdma WR. To implement #1,
	 * don't call rds_rdma_send_complete at all, and fall back to the notify
	 * handling in the ACK processing code.
	 *
	 * Note: There's no need to explicitly sync any RDMA buffers using
	 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
	 * operation itself unmapped the RDMA buffers, which takes care
	 * of syncing.
	 */
	rds_ib_send_complete(container_of(op, struct rds_message, rdma),
			     wc_status, rds_rdma_send_complete);

	if (op->op_write)
		rds_stats_add(s_send_rdma_bytes, op->op_bytes);
	else
		rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
}

static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
				     struct rm_atomic_op *op,
				     int wc_status)
{
	/* unmap atomic recvbuf */
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
				DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	rds_ib_send_complete(container_of(op, struct rds_message, atomic),
			     wc_status, rds_atomic_send_complete);

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
		rds_ib_stats_inc(s_ib_atomic_cswp);
	else
		rds_ib_stats_inc(s_ib_atomic_fadd);
}

/*
 * Unmap the resources associated with a struct send_work.
 *
 * Returns the rm for no better reason than that the caller, the event
 * handler, needs it, and currently it can only be recovered by switching
 * on wr.opcode.
 */
static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
						struct rds_ib_send_work *send,
						int wc_status)
{
	struct rds_message *rm = NULL;

	/* In the error case, wc.opcode sometimes contains garbage */
	switch (send->s_wr.opcode) {
	case IB_WR_SEND:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, data);
			rds_ib_send_unmap_data(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_READ:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, rdma);
			rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
	case IB_WR_ATOMIC_CMP_AND_SWP:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, atomic);
			rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
		}
		break;
	default:
		printk_ratelimited(KERN_NOTICE
				   "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
				   __func__, send->s_wr.opcode);
		break;
	}

	send->s_wr.opcode = 0xdead;

	return rm;
}

void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_op = NULL;

		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.ex.imm_data = 0;

		sge = &send->s_sge[0];
		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_pd->local_dma_lkey;

		send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
	}
}

void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		if (send->s_op && send->s_wr.opcode != 0xdead)
			rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
	}
}
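/*
 * A note on the 0xdead sentinel above (descriptive only): entries are
 * unmapped exactly once. rds_ib_send_unmap_op() stamps s_wr.opcode with
 * 0xdead after unmapping, so when the ring is torn down,
 * rds_ib_send_clear_ring() can skip entries whose completions were
 * already processed and flush only the rest with IB_WC_WR_FLUSH_ERR.
 */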
/*
 * The only fast path caller always has a non-zero nr, so we don't
 * bother testing nr before performing the atomic sub.
 */
static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
{
	if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
	    waitqueue_active(&rds_ib_ring_empty_wait))
		wake_up(&rds_ib_ring_empty_wait);
	BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
}

/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path.  As the sender allocs and potentially
 * unallocs the next free entry in the ring, it doesn't alter which entry is
 * the next to be freed, which is all this is concerned with.
 */
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
{
	struct rds_message *rm = NULL;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i = 0;
	int nr_sig = 0;

	rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
		 (unsigned long long)wc->wr_id, wc->status,
		 ib_wc_status_msg(wc->status), wc->byte_len,
		 be32_to_cpu(wc->ex.imm_data));
	rds_ib_stats_inc(s_ib_tx_cq_event);

	if (wc->wr_id == RDS_IB_ACK_WR_ID) {
		if (time_after(jiffies, ic->i_ack_queued + HZ / 2))
			rds_ib_stats_inc(s_ib_tx_stalled);
		rds_ib_ack_send_complete(ic);
		return;
	}

	oldest = rds_ib_ring_oldest(&ic->i_send_ring);

	completed = rds_ib_ring_completed(&ic->i_send_ring, wc->wr_id, oldest);

	for (i = 0; i < completed; i++) {
		send = &ic->i_sends[oldest];
		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
			nr_sig++;

		rm = rds_ib_send_unmap_op(ic, send, wc->status);

		if (time_after(jiffies, send->s_queued + HZ / 2))
			rds_ib_stats_inc(s_ib_tx_stalled);

		if (send->s_op) {
			if (send->s_op == rm->m_final_op) {
				/* If anyone waited for this message to get
				 * flushed out, wake them up now
				 */
				rds_message_unmapped(rm);
			}
			rds_message_put(rm);
			send->s_op = NULL;
		}

		oldest = (oldest + 1) % ic->i_send_ring.w_nr;
	}

	rds_ib_ring_free(&ic->i_send_ring, completed);
	rds_ib_sub_signaled(ic, nr_sig);
	nr_sig = 0;

	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
	    test_bit(0, &conn->c_map_queued))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	/* We expect errors as the qp is drained during shutdown */
	if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
		rds_ib_conn_error(conn, "send completion on %pI4 had status %u (%s), disconnecting and reconnecting\n",
				  &conn->c_faddr, wc->status,
				  ib_wc_status_msg(wc->status));
	}
}
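/*
 * A worked example of the completion arithmetic above (illustrative
 * numbers, assuming rds_ib_ring_completed() returns the modular distance
 * from the oldest outstanding entry to the completed wr_id): with
 * w_nr == 256, oldest == 250 and wc->wr_id == 3, one signaled completion
 * retires the 10 entries 250..255 and 0..3, since unsignaled WRs complete
 * implicitly once a later signaled WR on the same QP completes.
 */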
/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  - send credits: this tells us how many WRs we're allowed
 *    to submit without overrunning the receiver's queue. For
 *    each SEND WR we post, we decrement this by one.
 *
 *  - posted credits: this tells us how many WRs we recently
 *    posted to the receive queue. This value is transferred
 *    to the peer as a "credit update" in an RDS header field.
 *    Every time we transmit credits to the peer, we subtract
 *    the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter. Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("wanted=%u credits=%u posted=%u\n",
		 wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants the posted
	 * credits advertised regardless of whether any send credits are
	 * available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}
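/*
 * Sketch of the packed-counter layout this relies on (assuming the
 * IB_GET_*() and IB_SET_*() macros in ib.h keep send credits in the low
 * 16 bits and posted credits in the high 16 bits of ic->i_credits):
 *
 *	oldval = atomic_read(&ic->i_credits);
 *	avail  = IB_GET_SEND_CREDITS(oldval);	// low half
 *	posted = IB_GET_POST_CREDITS(oldval);	// high half
 *
 * Because both halves live in one word, the single atomic_cmpxchg()
 * above debits send credits and claims posted credits in one step; if a
 * credit update arrives concurrently (the receive path does atomic_add),
 * the cmpxchg fails and we simply retry with the fresh value.
 */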
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("credits=%u current=%u%s\n",
		 credits,
		 IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
		 test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_ib_stats_inc(s_ib_rx_credit_updates);
}

void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad infinitum)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and have to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}
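/*
 * Example of the batching above (illustrative): posting receive buffers
 * one at a time calls rds_ib_advertise_credits(conn, 1) repeatedly; the
 * IB_ACK_REQUESTED flag is only raised once 16 or more posted credits
 * have accumulated, so a single ACK then carries the whole batch to the
 * peer instead of 16 separate updates.
 */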
static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
					     struct rds_ib_send_work *send,
					     bool notify)
{
	/*
	 * We want to delay signaling completions just enough to get
	 * the batching benefits but not so much that we create dead time
	 * on the wire.
	 */
	if (ic->i_unsignaled_wrs-- == 0 || notify) {
		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
		send->s_wr.send_flags |= IB_SEND_SIGNALED;
		return 1;
	}
	return 0;
}
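/*
 * Signaling cadence in practice (descriptive sketch): with
 * rds_ib_sysctl_max_unsig_wrs == N, roughly one WR in every N + 1 is
 * posted with IB_SEND_SIGNALED; the intervening unsignaled WRs are
 * retired in bulk by rds_ib_send_cqe_handler() when the next signaled
 * completion arrives, which is why i_signaled_sends only counts the
 * signaled ones.
 */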
/*
 * This can be called multiple times for a given message.  The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests.  We translate the scatterlist into a series
 * of work requests that fragment the message.  These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core sets RDS_IN_XMIT to only enter this function once
 * per connection.  This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
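/*
 * A worked fragmentation example (illustrative numbers only): assuming
 * RDS_FRAG_SIZE is 4KB, a 10KB message needs ceil(10KB, 4KB) = 3 work
 * requests. Each WR carries two SGEs - SGE 0 points at that ring slot's
 * copy of the rds_header, SGE 1 at up to RDS_FRAG_SIZE bytes of payload -
 * so the frags are 4KB, 4KB and 2KB, and only the last WR has s_op set
 * to hand the message to the completion handler.
 */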
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc = 0;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int bytes_sent = 0;
	int ret;
	int flow_controlled = 0;
	int nr_sig = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Do not send cong updates to IB loopback */
	if (conn->c_loopback
	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
		scat = &rm->data.op_sg[sg];
		ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
		return sizeof(struct rds_header) + ret;
	}

	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	if (ic->i_flowctl) {
		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled = 1;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_ib_stats_inc(s_ib_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* map the message the first time we see it */
	if (!ic->i_data_op) {
		if (rm->data.op_nents) {
			rm->data.op_count = ib_dma_map_sg(dev,
							  rm->data.op_sg,
							  rm->data.op_nents,
							  DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
			if (rm->data.op_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->data.op_count = 0;
		}

		rds_message_addref(rm);
		rm->data.op_dmasg = 0;
		rm->data.op_dmaoff = 0;
		ic->i_data_op = &rm->data;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has an RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->rdma.op_active) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
			rds_message_add_extension(&rm->m_inc.i_hdr,
						  RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
							    rds_rdma_cookie_key(rm->m_rdma_cookie),
							    rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_ib_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		if (ic->i_flowctl) {
			rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
			adv_credits += posted;
			BUG_ON(adv_credits > 255);
		}
	}

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->rdma.op_active && rm->rdma.op_fence)
		send_flags = IB_SEND_FENCE;

	/* Each frag gets a header. Msgs may be 0 bytes */
	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &ic->i_data_op->op_sg[rm->data.op_dmasg];
	i = 0;
	do {
		unsigned int len = 0;

		/* Set up the header */
		send->s_wr.send_flags = send_flags;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.num_sge = 1;
		send->s_wr.next = NULL;
		send->s_queued = jiffies;
		send->s_op = NULL;

		send->s_sge[0].addr = ic->i_send_hdrs_dma
			+ (pos * sizeof(struct rds_header));
		send->s_sge[0].length = sizeof(struct rds_header);

		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));

		/* Set up the data, if present */
		if (i < work_alloc
		    && scat != &rm->data.op_sg[rm->data.op_count]) {
			len = min(RDS_FRAG_SIZE,
				  ib_sg_dma_len(dev, scat) - rm->data.op_dmaoff);
			send->s_wr.num_sge = 2;

			send->s_sge[1].addr = ib_sg_dma_address(dev, scat);
			send->s_sge[1].addr += rm->data.op_dmaoff;
			send->s_sge[1].length = len;

			bytes_sent += len;
			rm->data.op_dmaoff += len;
			if (rm->data.op_dmaoff == ib_sg_dma_len(dev, scat)) {
				scat++;
				rm->data.op_dmasg++;
				rm->data.op_dmaoff = 0;
			}
		}

		rds_ib_set_wr_signal_state(ic, send, 0);

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1))
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;

		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
			nr_sig++;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		if (ic->i_flowctl && adv_credits) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_ib_stats_inc(s_ib_tx_credit_updates);
		}

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
		send = &ic->i_sends[pos];
		i++;

	} while (i < work_alloc
		 && scat != &rm->data.op_sg[rm->data.op_count]);

	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		bytes_sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->data.op_sg[rm->data.op_count]) {
		prev->s_op = ic->i_data_op;
		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
		if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED)) {
			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
			prev->s_wr.send_flags |= IB_SEND_SIGNALED;
			nr_sig++;
		}
		ic->i_data_op = NULL;
	}

	/* Put back wrs & credits we didn't use */
	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_ib_send_add_credits(conn, credit_alloc - i);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		if (prev->s_op) {
			ic->i_data_op = prev->s_op;
			prev->s_op = NULL;
		}

		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
		goto out;
	}

	ret = bytes_sent;
out:
	BUG_ON(adv_credits);
	return ret;
}
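/*
 * Sketch of the caller's contract (simplified; see rds_send_xmit in the
 * RDS core): the core may hand us one message across several calls,
 * advancing its cursor by our return value each time, roughly
 *
 *	ret = conn->c_trans->xmit(conn, rm, hdr_off, sg, off);
 *	// on success, ret bytes (header + data) are consumed and
 *	// hdr_off/sg/off are advanced before the next call
 *
 * which is why bytes_sent counts the header exactly once (hdr_off == 0).
 */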
/*
 * Issue atomic operation.
 * A simplified version of the rdma case: we always map one SG, and
 * only 8 bytes, for the return value from the atomic operation.
 */
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct ib_send_wr *failed_wr;
	struct rds_ib_device *rds_ibdev;
	u32 pos;
	u32 work_alloc;
	int ret;
	int nr_sig = 0;

	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
	if (work_alloc != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	/* address of send request in ring */
	send = &ic->i_sends[pos];
	send->s_queued = jiffies;

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
		send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
		send->s_atomic_wr.compare_add = op->op_m_cswp.compare;
		send->s_atomic_wr.swap = op->op_m_cswp.swap;
		send->s_atomic_wr.compare_add_mask = op->op_m_cswp.compare_mask;
		send->s_atomic_wr.swap_mask = op->op_m_cswp.swap_mask;
	} else { /* FADD */
		send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
		send->s_atomic_wr.compare_add = op->op_m_fadd.add;
		send->s_atomic_wr.swap = 0;
		send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
		send->s_atomic_wr.swap_mask = 0;
	}
	nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
	send->s_atomic_wr.wr.num_sge = 1;
	send->s_atomic_wr.wr.next = NULL;
	send->s_atomic_wr.remote_addr = op->op_remote_addr;
	send->s_atomic_wr.rkey = op->op_rkey;
	send->s_op = op;
	rds_message_addref(container_of(send->s_op, struct rds_message, atomic));

	/* map 8 byte retval buffer to the device */
	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
	if (ret != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
		ret = -ENOMEM; /* XXX ? */
		goto out;
	}

	/* Convert our struct scatterlist to struct ib_sge */
	send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;

	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
		 send->s_sge[0].addr, send->s_sge[0].length);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &send->s_atomic_wr.wr;
	ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr);
	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
		 send, &send->s_atomic_wr, ret, failed_wr);
	BUG_ON(failed_wr != &send->s_atomic_wr.wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &send->s_atomic_wr.wr)) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &send->s_atomic_wr.wr);
	}

out:
	return ret;
}
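/*
 * Background sketch on the masked atomic verbs used above (my reading of
 * the IB masked-atomic extensions, not something this file defines):
 * MASKED_ATOMIC_CMP_AND_SWP compares and swaps only the bits selected by
 * compare_add_mask/swap_mask, and MASKED_ATOMIC_FETCH_AND_ADD stops
 * carries from propagating across the bit positions indicated by
 * nocarry_mask, so several small fields packed into one 64-bit word can
 * be updated independently.
 */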
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->op_remote_addr;
	u32 max_sge = ic->rds_ibdev->max_sge;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	int sent;
	int ret;
	int num_sge;
	int nr_sig = 0;

	/* map the op the first time we see it */
	if (!op->op_mapped) {
		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
					     op->op_sg, op->op_nents, (op->op_write) ?
					     DMA_TO_DEVICE : DMA_FROM_DEVICE);
		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
		if (op->op_count == 0) {
			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
			ret = -ENOMEM; /* XXX ? */
			goto out;
		}

		op->op_mapped = 1;
	}

	/*
	 * Instead of knowing how to return a partial rdma read/write we insist that there
	 * be enough work requests to send the entire message.
	 */
	i = ceil(op->op_count, max_sge);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &op->op_sg[0];
	sent = 0;
	num_sge = op->op_count;

	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;
		send->s_op = NULL;

		nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);

		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
		send->s_rdma_wr.remote_addr = remote_addr;
		send->s_rdma_wr.rkey = op->op_rkey;

		if (num_sge > max_sge) {
			send->s_rdma_wr.wr.num_sge = max_sge;
			num_sge -= max_sge;
		} else {
			send->s_rdma_wr.wr.num_sge = num_sge;
		}

		send->s_rdma_wr.wr.next = NULL;

		if (prev)
			prev->s_rdma_wr.wr.next = &send->s_rdma_wr.wr;

		for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
		     scat != &op->op_sg[op->op_count]; j++) {
			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
			send->s_sge[j].addr =
				ib_sg_dma_address(ic->i_cm_id->device, scat);
			send->s_sge[j].length = len;
			send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

			remote_addr += len;
			scat++;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_rdma_wr.wr,
			 send->s_rdma_wr.wr.num_sge,
			 send->s_rdma_wr.wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* give a reference to the last op */
	if (scat == &op->op_sg[op->op_count]) {
		prev->s_op = op;
		rds_message_addref(container_of(op, struct rds_message, rdma));
	}

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &first->s_rdma_wr.wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_rdma_wr.wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_rdma_wr.wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &first->s_rdma_wr.wr)) {
		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &first->s_rdma_wr.wr);
	}

out:
	return ret;
}
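/*
 * Worked example of the WR splitting above (illustrative numbers): with
 * op_count == 30 mapped SG entries and max_sge == 8, ceil(30, 8) == 4
 * work requests are allocated and chained via wr.next, carrying 8, 8, 8
 * and 6 SGEs; remote_addr advances by each SGE's length, so the four WRs
 * address consecutive ranges of the remote buffer.
 */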
void rds_ib_xmit_path_complete(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_ib_attempt_ack(ic);
}