/*
 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"

static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);

void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
        struct rds_ib_recv_work *recv;
        u32 i;

        for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
                struct ib_sge *sge;

                recv->r_ibinc = NULL;
                recv->r_frag = NULL;

                recv->r_wr.next = NULL;
                recv->r_wr.wr_id = i;
                recv->r_wr.sg_list = recv->r_sge;
                recv->r_wr.num_sge = RDS_IB_RECV_SGE;

                sge = &recv->r_sge[0];
                sge->addr = ic->i_recv_hdrs_dma[i];
                sge->length = sizeof(struct rds_header);
                sge->lkey = ic->i_pd->local_dma_lkey;

                sge = &recv->r_sge[1];
                sge->addr = 0;
                sge->length = RDS_FRAG_SIZE;
                sge->lkey = ic->i_pd->local_dma_lkey;
        }
}

/*
 * The entire 'from' list, including the from element itself, is put on
 * to the tail of the 'to' list.
 */
static void list_splice_entire_tail(struct list_head *from,
                                    struct list_head *to)
{
        struct list_head *from_last = from->prev;

        list_splice_tail(from_last, to);
        list_add_tail(from_last, to);
}

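/* Pull whatever the lockless put path has staged on cache->xfer over to
 * cache->ready, where rds_ib_recv_cache_get() can consume it.  The xchg()
 * leaves xfer NULL, so concurrent puts simply start a new batch.
 */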
static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
{
        struct list_head *tmp;

        tmp = xchg(&cache->xfer, NULL);
        if (tmp) {
                if (cache->ready)
                        list_splice_entire_tail(tmp, cache->ready);
                else
                        cache->ready = tmp;
        }
}

static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache, gfp_t gfp)
{
        struct rds_ib_cache_head *head;
        int cpu;

        cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp);
        if (!cache->percpu)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                head = per_cpu_ptr(cache->percpu, cpu);
                head->first = NULL;
                head->count = 0;
        }
        cache->xfer = NULL;
        cache->ready = NULL;

        return 0;
}

int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp)
{
        int ret;

        ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp);
        if (!ret) {
                ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp);
                if (ret)
                        free_percpu(ic->i_cache_incs.percpu);
        }

        return ret;
}

static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
                                          struct list_head *caller_list)
{
        struct rds_ib_cache_head *head;
        int cpu;

        for_each_possible_cpu(cpu) {
                head = per_cpu_ptr(cache->percpu, cpu);
                if (head->first) {
                        list_splice_entire_tail(head->first, caller_list);
                        head->first = NULL;
                }
        }

        if (cache->ready) {
                list_splice_entire_tail(cache->ready, caller_list);
                cache->ready = NULL;
        }
}

void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
{
        struct rds_ib_incoming *inc;
        struct rds_ib_incoming *inc_tmp;
        struct rds_page_frag *frag;
        struct rds_page_frag *frag_tmp;
        LIST_HEAD(list);

        rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
        rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
        free_percpu(ic->i_cache_incs.percpu);

        list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
                list_del(&inc->ii_cache_entry);
                WARN_ON(!list_empty(&inc->ii_frags));
                kmem_cache_free(rds_ib_incoming_slab, inc);
                atomic_dec(&rds_ib_allocation);
        }

        rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
        rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
        free_percpu(ic->i_cache_frags.percpu);

        list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
                list_del(&frag->f_cache_entry);
                WARN_ON(!list_empty(&frag->f_item));
                kmem_cache_free(rds_ib_frag_slab, frag);
        }
}

/* fwd decl */
static void rds_ib_recv_cache_put(struct list_head *new_item,
                                  struct rds_ib_refill_cache *cache);
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);

/* Recycle frag and attached recv buffer f_sg */
static void rds_ib_frag_free(struct rds_ib_connection *ic,
                             struct rds_page_frag *frag)
{
        rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));

        rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
        atomic_add(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
        rds_ib_stats_add(s_ib_recv_added_to_cache, RDS_FRAG_SIZE);
}

/* Recycle inc after freeing attached frags */
void rds_ib_inc_free(struct rds_incoming *inc)
{
        struct rds_ib_incoming *ibinc;
        struct rds_page_frag *frag;
        struct rds_page_frag *pos;
        struct rds_ib_connection *ic = inc->i_conn->c_transport_data;

        ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);

        /* Free attached frags */
        list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
                list_del_init(&frag->f_item);
                rds_ib_frag_free(ic, frag);
        }
        BUG_ON(!list_empty(&ibinc->ii_frags));

        rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
        rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
}

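/* Release whatever is still attached to one ring entry: drop the inc
 * reference, then unmap and recycle the data frag.  Used when the
 * receive ring is torn down.
 */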
static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
                                  struct rds_ib_recv_work *recv)
{
        if (recv->r_ibinc) {
                rds_inc_put(&recv->r_ibinc->ii_inc);
                recv->r_ibinc = NULL;
        }
        if (recv->r_frag) {
                ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
                rds_ib_frag_free(ic, recv->r_frag);
                recv->r_frag = NULL;
        }
}

void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
{
        u32 i;

        for (i = 0; i < ic->i_recv_ring.w_nr; i++)
                rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
}

static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
                                                     gfp_t slab_mask)
{
        struct rds_ib_incoming *ibinc;
        struct list_head *cache_item;
        int avail_allocs;

        cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
        if (cache_item) {
                ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
        } else {
                avail_allocs = atomic_add_unless(&rds_ib_allocation,
                                                 1, rds_ib_sysctl_max_recv_allocation);
                if (!avail_allocs) {
                        rds_ib_stats_inc(s_ib_rx_alloc_limit);
                        return NULL;
                }
                ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
                if (!ibinc) {
                        atomic_dec(&rds_ib_allocation);
                        return NULL;
                }
                rds_ib_stats_inc(s_ib_rx_total_incs);
        }
        INIT_LIST_HEAD(&ibinc->ii_frags);
        rds_inc_init(&ibinc->ii_inc, ic->conn, &ic->conn->c_faddr);

        return ibinc;
}

static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
                                                    gfp_t slab_mask, gfp_t page_mask)
{
        struct rds_page_frag *frag;
        struct list_head *cache_item;
        int ret;

        cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
        if (cache_item) {
                frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
                atomic_sub(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
                rds_ib_stats_add(s_ib_recv_removed_from_cache, RDS_FRAG_SIZE);
        } else {
                frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
                if (!frag)
                        return NULL;

                sg_init_table(&frag->f_sg, 1);
                ret = rds_page_remainder_alloc(&frag->f_sg,
                                               RDS_FRAG_SIZE, page_mask);
                if (ret) {
                        kmem_cache_free(rds_ib_frag_slab, frag);
                        return NULL;
                }
                rds_ib_stats_inc(s_ib_rx_total_frags);
        }

        INIT_LIST_HEAD(&frag->f_item);

        return frag;
}

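/* Prepare a single ring entry for posting: make sure it has an inc (unless
 * it still carries one from a partially received message), attach a freshly
 * allocated or recycled frag, DMA-map it, and point the two SGEs at the
 * header slot and the frag.  Returns 0 on success or -ENOMEM.
 */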
static int rds_ib_recv_refill_one(struct rds_connection *conn,
                                  struct rds_ib_recv_work *recv, gfp_t gfp)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct ib_sge *sge;
        int ret = -ENOMEM;
        gfp_t slab_mask = gfp;
        gfp_t page_mask = gfp;

        if (gfp & __GFP_DIRECT_RECLAIM) {
                slab_mask = GFP_KERNEL;
                page_mask = GFP_HIGHUSER;
        }

        if (!ic->i_cache_incs.ready)
                rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
        if (!ic->i_cache_frags.ready)
                rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);

        /*
         * ibinc was taken from recv if recv contained the start of a message.
         * recvs that were continuations will still have this allocated.
         */
        if (!recv->r_ibinc) {
                recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
                if (!recv->r_ibinc)
                        goto out;
        }

        WARN_ON(recv->r_frag); /* leak! */
        recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
        if (!recv->r_frag)
                goto out;

        ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
                            1, DMA_FROM_DEVICE);
        WARN_ON(ret != 1);

        sge = &recv->r_sge[0];
        sge->addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs];
        sge->length = sizeof(struct rds_header);

        sge = &recv->r_sge[1];
        sge->addr = sg_dma_address(&recv->r_frag->f_sg);
        sge->length = sg_dma_len(&recv->r_frag->f_sg);

        ret = 0;
out:
        return ret;
}

static int acquire_refill(struct rds_connection *conn)
{
        return test_and_set_bit(RDS_RECV_REFILL, &conn->c_flags) == 0;
}

static void release_refill(struct rds_connection *conn)
{
        clear_bit(RDS_RECV_REFILL, &conn->c_flags);
        smp_mb__after_atomic();

        /* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
         * hot path and finding waiters is very rare.  We don't want to walk
         * the system-wide hashed waitqueue buckets in the fast path only to
         * almost never find waiters.
         */
        if (waitqueue_active(&conn->c_waitq))
                wake_up_all(&conn->c_waitq);
}

/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets.
 */
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_recv_work *recv;
        unsigned int posted = 0;
        int ret = 0;
        bool can_wait = !!(gfp & __GFP_DIRECT_RECLAIM);
        bool must_wake = false;
        u32 pos;

        /* the goal here is to just make sure that someone, somewhere
         * is posting buffers.  If we can't get the refill lock,
         * let them do their thing
         */
        if (!acquire_refill(conn))
                return;

        while ((prefill || rds_conn_up(conn)) &&
               rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
                if (pos >= ic->i_recv_ring.w_nr) {
                        printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
                               pos);
                        break;
                }

                recv = &ic->i_recvs[pos];
                ret = rds_ib_recv_refill_one(conn, recv, gfp);
                if (ret) {
                        must_wake = true;
                        break;
                }

                rdsdebug("recv %p ibinc %p page %p addr %lu\n", recv,
                         recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
                         (long)sg_dma_address(&recv->r_frag->f_sg));

                /* XXX when can this fail? */
                ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, NULL);
                if (ret) {
                        rds_ib_conn_error(conn, "recv post on "
                               "%pI6c returned %d, disconnecting and "
                               "reconnecting\n", &conn->c_faddr,
                               ret);
                        break;
                }

                posted++;

                if ((posted > 128 && need_resched()) || posted > 8192) {
                        must_wake = true;
                        break;
                }
        }

        /* We're doing flow control - update the window. */
        if (ic->i_flowctl && posted)
                rds_ib_advertise_credits(conn, posted);

        if (ret)
                rds_ib_ring_unalloc(&ic->i_recv_ring, 1);

        release_refill(conn);

        /* if we're called from the softirq handler, we'll be GFP_NOWAIT.
         * in this case the ring being low is going to lead to more interrupts
         * and we can safely let the softirq code take care of it unless the
         * ring is completely empty.
         *
         * if we're called from krdsd, we'll be GFP_KERNEL.  In this case
         * we might have raced with the softirq code while we had the refill
         * lock held.  Use rds_ib_ring_low() instead of ring_empty to decide
         * if we should requeue.
         */
        if (rds_conn_up(conn) &&
            (must_wake ||
            (can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
            rds_ib_ring_empty(&ic->i_recv_ring))) {
                queue_delayed_work(rds_wq, &conn->c_recv_w, 1);
        }
        if (can_wait)
                cond_resched();
}

/*
 * We want to recycle several types of recv allocations, like incs and frags.
 * To use this, the *_free() function passes in the ptr to a list_head within
 * the recyclee, as well as the cache to put it on.
 *
 * First, we put the memory on a percpu list. When this reaches a certain size,
 * we move it to an intermediate non-percpu list in a lockless manner, with some
 * xchg/cmpxchg wizardry.
 *
 * N.B. Instead of a list_head as the anchor, we use a single pointer, which can
 * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
 * list_empty() will return true even when one element is actually present.
 */
static void rds_ib_recv_cache_put(struct list_head *new_item,
                                  struct rds_ib_refill_cache *cache)
{
        unsigned long flags;
        struct list_head *old, *chpfirst;

        local_irq_save(flags);

        chpfirst = __this_cpu_read(cache->percpu->first);
        if (!chpfirst)
                INIT_LIST_HEAD(new_item);
        else /* put on front */
                list_add_tail(new_item, chpfirst);

        __this_cpu_write(cache->percpu->first, new_item);
        __this_cpu_inc(cache->percpu->count);

        if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
                goto end;

        /*
         * Return our per-cpu first list to the cache's xfer by atomically
         * grabbing the current xfer list, appending it to our per-cpu list,
         * and then atomically returning that entire list back to the
         * cache's xfer list as long as it's still empty.
         */
        do {
                old = xchg(&cache->xfer, NULL);
                if (old)
                        list_splice_entire_tail(old, chpfirst);
                old = cmpxchg(&cache->xfer, NULL, chpfirst);
        } while (old);

        __this_cpu_write(cache->percpu->first, NULL);
        __this_cpu_write(cache->percpu->count, 0);
end:
        local_irq_restore(flags);
}

static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
{
        struct list_head *head = cache->ready;

        if (head) {
                if (!list_empty(head)) {
                        cache->ready = head->next;
                        list_del_init(head);
                } else
                        cache->ready = NULL;
        }

        return head;
}

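/* Copy the payload of a received message out of its frag list and into the
 * caller's iov_iter.  Returns the number of bytes copied, or -EFAULT if a
 * copy to the user address fails.
 */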
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
        struct rds_ib_incoming *ibinc;
        struct rds_page_frag *frag;
        unsigned long to_copy;
        unsigned long frag_off = 0;
        int copied = 0;
        int ret;
        u32 len;

        ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
        frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
        len = be32_to_cpu(inc->i_hdr.h_len);

        while (iov_iter_count(to) && copied < len) {
                if (frag_off == RDS_FRAG_SIZE) {
                        frag = list_entry(frag->f_item.next,
                                          struct rds_page_frag, f_item);
                        frag_off = 0;
                }
                to_copy = min_t(unsigned long, iov_iter_count(to),
                                RDS_FRAG_SIZE - frag_off);
                to_copy = min_t(unsigned long, to_copy, len - copied);

                /* XXX needs + offset for multiple recvs per page */
                rds_stats_add(s_copy_to_user, to_copy);
                ret = copy_page_to_iter(sg_page(&frag->f_sg),
                                        frag->f_sg.offset + frag_off,
                                        to_copy,
                                        to);
                if (ret != to_copy)
                        return -EFAULT;

                frag_off += to_copy;
                copied += to_copy;
        }

        return copied;
}

/* ic starts out kzalloc()ed */
void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
{
        struct ib_send_wr *wr = &ic->i_ack_wr;
        struct ib_sge *sge = &ic->i_ack_sge;

        sge->addr = ic->i_ack_dma;
        sge->length = sizeof(struct rds_header);
        sge->lkey = ic->i_pd->local_dma_lkey;

        wr->sg_list = sge;
        wr->num_sge = 1;
        wr->opcode = IB_WR_SEND;
        wr->wr_id = RDS_IB_ACK_WR_ID;
        wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}

/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received.  The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory.  This creates a
 * potential message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed.  This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue.  To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time.  This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight.  This might not be good enough.
 *
 * This is implemented by having a long-lived send_wr and sge which point to a
 * statically allocated ack frame.  This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 * room for it beyond the ring size.  Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */
#ifndef KERNEL_HAS_ATOMIC64
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
{
        unsigned long flags;

        spin_lock_irqsave(&ic->i_ack_lock, flags);
        ic->i_ack_next = seq;
        if (ack_required)
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
        unsigned long flags;
        u64 seq;

        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

        spin_lock_irqsave(&ic->i_ack_lock, flags);
        seq = ic->i_ack_next;
        spin_unlock_irqrestore(&ic->i_ack_lock, flags);

        return seq;
}
#else
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
{
        atomic64_set(&ic->i_ack_next, seq);
        if (ack_required) {
                smp_mb__before_atomic();
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        }
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        smp_mb__after_atomic();

        return atomic64_read(&ic->i_ack_next);
}
#endif

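/* Build and post the single long-lived ACK frame described above: sync the
 * ack header for the CPU, fill in the latest ACK sequence and any credit
 * advertisement, re-checksum it and hand it to the QP.  If the post fails,
 * the WR is released and another ACK is forced.
 */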
static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
{
        struct rds_header *hdr = ic->i_ack;
        u64 seq;
        int ret;

        seq = rds_ib_get_ack(ic);

        rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);

        ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev, ic->i_ack_dma,
                                   sizeof(*hdr), DMA_TO_DEVICE);
        rds_message_populate_header(hdr, 0, 0, 0);
        hdr->h_ack = cpu_to_be64(seq);
        hdr->h_credit = adv_credits;
        rds_message_make_checksum(hdr);
        ib_dma_sync_single_for_device(ic->rds_ibdev->dev, ic->i_ack_dma,
                                      sizeof(*hdr), DMA_TO_DEVICE);

        ic->i_ack_queued = jiffies;

        ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, NULL);
        if (unlikely(ret)) {
                /* Failed to send. Release the WR, and
                 * force another ACK.
                 */
                clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

                rds_ib_stats_inc(s_ib_ack_send_failure);

                rds_ib_conn_error(ic->conn, "sending ack failed\n");
        } else
                rds_ib_stats_inc(s_ib_ack_sent);
}

/*
 * There are 3 ways of getting acknowledgements to the peer:
 *  1. We call rds_ib_attempt_ack from the recv completion handler
 *     to send an ACK-only frame.
 *     However, there can be only one such frame in the send queue
 *     at any time, so we may have to postpone it.
 *  2. When another (data) packet is transmitted while there's
 *     an ACK in the queue, we piggyback the ACK sequence number
 *     on the data packet.
 *  3. If the ACK WR is done sending, we get called from the
 *     send queue completion handler, and check whether there's
 *     another ACK pending (postponed because the WR was on the
 *     queue). If so, we transmit it.
 *
 * We maintain 2 variables:
 *  -  i_ack_flags, which keeps track of whether the ACK WR
 *     is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 *  -  i_ack_next, which is the last sequence number we received
 *
 * Potentially, send queue and receive queue handlers can run concurrently.
 * It would be nice to not have to use a spinlock to synchronize things,
 * but the one problem that rules this out is that 64bit updates are
 * not atomic on all platforms. Things would be a lot simpler if
 * we had atomic64 or maybe cmpxchg64 everywhere.
 *
 * Reconnecting complicates this picture just slightly. When we
 * reconnect, we may be seeing duplicate packets. The peer
 * is retransmitting them, because it hasn't seen an ACK for
 * them. It is important that we ACK these.
 *
 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 * this flag set *MUST* be acknowledged immediately.
 */

/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_ib_attempt_ack(struct rds_ib_connection *ic)
{
        unsigned int adv_credits;

        if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
                return;

        if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
                rds_ib_stats_inc(s_ib_ack_send_delayed);
                return;
        }

        /* Can we get a send credit? */
        if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
                rds_ib_stats_inc(s_ib_tx_throttle);
                clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
                return;
        }

        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        rds_ib_send_ack(ic, adv_credits);
}

/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
{
        clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
        rds_ib_attempt_ack(ic);
}

/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
{
        if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
                rds_ib_stats_inc(s_ib_ack_send_piggybacked);
        return rds_ib_get_ack(ic);
}

/*
 * It's kind of lame that we're copying from the posted receive pages into
 * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
 * them.  But receiving new congestion bitmaps should be a *rare* event, so
 * hopefully we won't need to invest that complexity in making it more
 * efficient.  By copying we can share a simpler core with TCP which has to
 * copy.
 */
static void rds_ib_cong_recv(struct rds_connection *conn,
                             struct rds_ib_incoming *ibinc)
{
        struct rds_cong_map *map;
        unsigned int map_off;
        unsigned int map_page;
        struct rds_page_frag *frag;
        unsigned long frag_off;
        unsigned long to_copy;
        unsigned long copied;
        __le64 uncongested = 0;
        void *addr;

        /* catch completely corrupt packets */
        if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
                return;

        map = conn->c_fcong;
        map_page = 0;
        map_off = 0;

        frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
        frag_off = 0;

        copied = 0;

        while (copied < RDS_CONG_MAP_BYTES) {
                __le64 *src, *dst;
                unsigned int k;

                to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
                BUG_ON(to_copy & 7); /* Must be 64bit aligned. */

                addr = kmap_atomic(sg_page(&frag->f_sg));

                src = addr + frag->f_sg.offset + frag_off;
                dst = (void *)map->m_page_addrs[map_page] + map_off;
                for (k = 0; k < to_copy; k += 8) {
                        /* Record ports that became uncongested, ie
                         * bits that changed from 1 to 0. */
                        uncongested |= ~(*src) & *dst;
                        *dst++ = *src++;
                }
                kunmap_atomic(addr);

                copied += to_copy;

                map_off += to_copy;
                if (map_off == PAGE_SIZE) {
                        map_off = 0;
                        map_page++;
                }

                frag_off += to_copy;
                if (frag_off == RDS_FRAG_SIZE) {
                        frag = list_entry(frag->f_item.next,
                                          struct rds_page_frag, f_item);
                        frag_off = 0;
                }
        }

        /* the congestion map is in little endian order */
        rds_cong_map_updated(map, le64_to_cpu(uncongested));
}

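/* Called for each successful receive completion.  Validates the RDS header,
 * records the piggybacked ACK and credits, and either collects the frag on
 * the connection's current inc or, once the last fragment has arrived,
 * hands the completed message to rds_recv_incoming() (or to the congestion
 * map code for RDS_FLAG_CONG_BITMAP messages).
 */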
static void rds_ib_process_recv(struct rds_connection *conn,
                                struct rds_ib_recv_work *recv, u32 data_len,
                                struct rds_ib_ack_state *state)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_incoming *ibinc = ic->i_ibinc;
        struct rds_header *ihdr, *hdr;
        dma_addr_t dma_addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs];

        /* XXX shut down the connection if port 0,0 are seen? */

        rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
                 data_len);

        if (data_len < sizeof(struct rds_header)) {
                rds_ib_conn_error(conn, "incoming message "
                       "from %pI6c didn't include a "
                       "header, disconnecting and "
                       "reconnecting\n",
                       &conn->c_faddr);
                return;
        }
        data_len -= sizeof(struct rds_header);

        ihdr = ic->i_recv_hdrs[recv - ic->i_recvs];

        ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev, dma_addr,
                                   sizeof(*ihdr), DMA_FROM_DEVICE);
        /* Validate the checksum. */
        if (!rds_message_verify_checksum(ihdr)) {
                rds_ib_conn_error(conn, "incoming message "
                       "from %pI6c has corrupted header - "
                       "forcing a reconnect\n",
                       &conn->c_faddr);
                rds_stats_inc(s_recv_drop_bad_checksum);
                goto done;
        }

        /* Process the ACK sequence which comes with every packet */
        state->ack_recv = be64_to_cpu(ihdr->h_ack);
        state->ack_recv_valid = 1;

        /* Process the credits update if there was one */
        if (ihdr->h_credit)
                rds_ib_send_add_credits(conn, ihdr->h_credit);

        if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
                /* This is an ACK-only packet. The reason it gets
                 * special treatment here is that historically, ACKs
                 * were rather special beasts.
                 */
                rds_ib_stats_inc(s_ib_ack_received);

                /*
                 * Usually the frags make their way on to incs and are then freed as
                 * the inc is freed.  We don't go that route, so we have to drop the
                 * page ref ourselves.  We can't just leave the page on the recv
                 * because that confuses the dma mapping of pages and each recv's use
                 * of a partial page.
                 *
                 * FIXME: Fold this into the code path below.
                 */
                rds_ib_frag_free(ic, recv->r_frag);
                recv->r_frag = NULL;
                goto done;
        }

        /*
         * If we don't already have an inc on the connection then this
         * fragment has a header and starts a message.  Copy its header
         * into the inc and save the inc so we can hang upcoming fragments
         * off its list.
         */
        if (!ibinc) {
                ibinc = recv->r_ibinc;
                recv->r_ibinc = NULL;
                ic->i_ibinc = ibinc;

                hdr = &ibinc->ii_inc.i_hdr;
                ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] =
                                local_clock();
                memcpy(hdr, ihdr, sizeof(*hdr));
                ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
                ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_START] =
                                local_clock();

                rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
                         ic->i_recv_data_rem, hdr->h_flags);
        } else {
                hdr = &ibinc->ii_inc.i_hdr;
                /* We can't just use memcmp here; fragments of a
                 * single message may carry different ACKs */
                if (hdr->h_sequence != ihdr->h_sequence ||
                    hdr->h_len != ihdr->h_len ||
                    hdr->h_sport != ihdr->h_sport ||
                    hdr->h_dport != ihdr->h_dport) {
                        rds_ib_conn_error(conn,
                                "fragment header mismatch; forcing reconnect\n");
                        goto done;
                }
        }

        list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
        recv->r_frag = NULL;

        if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
                ic->i_recv_data_rem -= RDS_FRAG_SIZE;
        else {
                ic->i_recv_data_rem = 0;
                ic->i_ibinc = NULL;

                if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP) {
                        rds_ib_cong_recv(conn, ibinc);
                } else {
                        rds_recv_incoming(conn, &conn->c_faddr, &conn->c_laddr,
                                          &ibinc->ii_inc, GFP_ATOMIC);
                        state->ack_next = be64_to_cpu(hdr->h_sequence);
                        state->ack_next_valid = 1;
                }

                /* Evaluate the ACK_REQUIRED flag *after* we received
                 * the complete frame, and after bumping the next_rx
                 * sequence. */
                if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
                        rds_stats_inc(s_recv_ack_required);
                        state->ack_required = 1;
                }

                rds_inc_put(&ibinc->ii_inc);
        }
done:
        ib_dma_sync_single_for_device(ic->rds_ibdev->dev, dma_addr,
                                      sizeof(*ihdr), DMA_FROM_DEVICE);
}

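/* Receive completion handler: unmap the oldest ring entry, process it if the
 * work completion was successful, release its ring slot and kick off a
 * GFP_NOWAIT refill when the ring is running low.
 */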
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
                             struct ib_wc *wc,
                             struct rds_ib_ack_state *state)
{
        struct rds_connection *conn = ic->conn;
        struct rds_ib_recv_work *recv;

        rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
                 (unsigned long long)wc->wr_id, wc->status,
                 ib_wc_status_msg(wc->status), wc->byte_len,
                 be32_to_cpu(wc->ex.imm_data));

        rds_ib_stats_inc(s_ib_rx_cq_event);
        recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
        ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1,
                        DMA_FROM_DEVICE);

        /* Also process recvs in connecting state because it is possible
         * to get a recv completion _before_ the rdmacm ESTABLISHED
         * event is processed.
         */
        if (wc->status == IB_WC_SUCCESS) {
                rds_ib_process_recv(conn, recv, wc->byte_len, state);
        } else {
                /* We expect errors as the qp is drained during shutdown */
                if (rds_conn_up(conn) || rds_conn_connecting(conn))
                        rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c, %d> had status %u (%s), vendor err 0x%x, disconnecting and reconnecting\n",
                                          &conn->c_laddr, &conn->c_faddr,
                                          conn->c_tos, wc->status,
                                          ib_wc_status_msg(wc->status),
                                          wc->vendor_err);
        }

        /* rds_ib_process_recv() doesn't always consume the frag, and
         * we might not have called it at all if the wc didn't indicate
         * success. We already unmapped the frag's pages, though, and
         * the following rds_ib_ring_free() call tells the refill path
         * that it will not find an allocated frag here. Make sure we
         * keep that promise by freeing a frag that's still on the ring.
         */
        if (recv->r_frag) {
                rds_ib_frag_free(ic, recv->r_frag);
                recv->r_frag = NULL;
        }
        rds_ib_ring_free(&ic->i_recv_ring, 1);

        /* If we ever end up with a really empty receive ring, we're
         * in deep trouble, as the sender will definitely see RNR
         * timeouts. */
        if (rds_ib_ring_empty(&ic->i_recv_ring))
                rds_ib_stats_inc(s_ib_rx_ring_empty);

        if (rds_ib_ring_low(&ic->i_recv_ring)) {
                rds_ib_recv_refill(conn, 0, GFP_NOWAIT | __GFP_NOWARN);
                rds_ib_stats_inc(s_ib_rx_refill_from_cq);
        }
}

int rds_ib_recv_path(struct rds_conn_path *cp)
{
        struct rds_connection *conn = cp->cp_conn;
        struct rds_ib_connection *ic = conn->c_transport_data;

        rdsdebug("conn %p\n", conn);
        if (rds_conn_up(conn)) {
                rds_ib_attempt_ack(ic);
                rds_ib_recv_refill(conn, 0, GFP_KERNEL);
                rds_ib_stats_inc(s_ib_rx_refill_from_thread);
        }

        return 0;
}

int rds_ib_recv_init(void)
{
        struct sysinfo si;
        int ret = -ENOMEM;

        /* Default to 30% of all available RAM for recv memory */
        si_meminfo(&si);
        rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

        rds_ib_incoming_slab =
                kmem_cache_create_usercopy("rds_ib_incoming",
                                           sizeof(struct rds_ib_incoming),
                                           0, SLAB_HWCACHE_ALIGN,
                                           offsetof(struct rds_ib_incoming,
                                                    ii_inc.i_usercopy),
                                           sizeof(struct rds_inc_usercopy),
                                           NULL);
        if (!rds_ib_incoming_slab)
                goto out;

        rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
                                             sizeof(struct rds_page_frag),
                                             0, SLAB_HWCACHE_ALIGN, NULL);
        if (!rds_ib_frag_slab) {
                kmem_cache_destroy(rds_ib_incoming_slab);
                rds_ib_incoming_slab = NULL;
        } else
                ret = 0;
out:
        return ret;
}

void rds_ib_recv_exit(void)
{
        WARN_ON(atomic_read(&rds_ib_allocation));

        kmem_cache_destroy(rds_ib_incoming_slab);
        kmem_cache_destroy(rds_ib_frag_slab);
}