1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * RDMA Transport Layer
4 *
5 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
6 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
7 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
8 */
9
10 #undef pr_fmt
11 #define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
12
13 #include <linux/module.h>
14 #include <linux/rculist.h>
15 #include <linux/random.h>
16
17 #include "rtrs-clt.h"
18 #include "rtrs-log.h"
19 #include "rtrs-clt-trace.h"
20
21 #define RTRS_CONNECT_TIMEOUT_MS 30000
22 /*
23 * Wait a bit before trying to reconnect after a failure
24 * in order to give the server time to finish its cleanup, which
25 * otherwise leads to "false positive" failed reconnect attempts
26 */
27 #define RTRS_RECONNECT_BACKOFF 1000
28 /*
29 * Wait for additional random time between 0 and 8 seconds
30 * before starting to reconnect to avoid clients reconnecting
31 * all at once in case of a major network outage
32 */
33 #define RTRS_RECONNECT_SEED 8
34
35 #define FIRST_CONN 0x01
36 /* limit to 128 * 4k = 512k max IO */
37 #define RTRS_MAX_SEGMENTS 128
38
39 MODULE_DESCRIPTION("RDMA Transport Client");
40 MODULE_LICENSE("GPL");
41
42 static const struct rtrs_rdma_dev_pd_ops dev_pd_ops;
43 static struct rtrs_rdma_dev_pd dev_pd = {
44 .ops = &dev_pd_ops
45 };
46
47 static struct workqueue_struct *rtrs_wq;
48 static const struct class rtrs_clt_dev_class = {
49 .name = "rtrs-client",
50 };
51
52 static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt)
53 {
54 struct rtrs_clt_path *clt_path;
55 bool connected = false;
56
57 rcu_read_lock();
58 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry)
59 if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED) {
60 connected = true;
61 break;
62 }
63 rcu_read_unlock();
64
65 return connected;
66 }
67
68 static struct rtrs_permit *
69 __rtrs_get_permit(struct rtrs_clt_sess *clt, enum rtrs_clt_con_type con_type)
70 {
71 size_t max_depth = clt->queue_depth;
72 struct rtrs_permit *permit;
73 int bit;
74
75 /*
76 * Adapted from null_blk get_tag(). Callers from different cpus may
77 * grab the same bit, since find_first_zero_bit is not atomic.
78 * But then the test_and_set_bit_lock will fail for all the
79 * callers but one, so that they will loop again.
80 * This way an explicit spinlock is not required.
81 */
82 do {
83 bit = find_first_zero_bit(clt->permits_map, max_depth);
84 if (bit >= max_depth)
85 return NULL;
86 } while (test_and_set_bit_lock(bit, clt->permits_map));
87
88 permit = get_permit(clt, bit);
89 WARN_ON(permit->mem_id != bit);
90 permit->cpu_id = raw_smp_processor_id();
91 permit->con_type = con_type;
92
93 return permit;
94 }
95
96 static inline void __rtrs_put_permit(struct rtrs_clt_sess *clt,
97 struct rtrs_permit *permit)
98 {
99 clear_bit_unlock(permit->mem_id, clt->permits_map);
100 }
101
102 /**
103 * rtrs_clt_get_permit() - allocates permit for future RDMA operation
104 * @clt: Current session
105 * @con_type: Type of connection to use with the permit
106 * @can_wait: Wait type
107 *
108 * Description:
109 * Allocates permit for the following RDMA operation. Permit is used
110 * to preallocate all resources and to propagate memory pressure
111 * up earlier.
112 *
113 * Context:
114 * Can sleep if @can_wait == RTRS_PERMIT_WAIT
115 */
116 struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *clt,
117 enum rtrs_clt_con_type con_type,
118 enum wait_type can_wait)
119 {
120 struct rtrs_permit *permit;
121 DEFINE_WAIT(wait);
122
123 permit = __rtrs_get_permit(clt, con_type);
124 if (permit || !can_wait)
125 return permit;
126
127 do {
128 prepare_to_wait(&clt->permits_wait, &wait,
129 TASK_UNINTERRUPTIBLE);
130 permit = __rtrs_get_permit(clt, con_type);
131 if (permit)
132 break;
133
134 io_schedule();
135 } while (1);
136
137 finish_wait(&clt->permits_wait, &wait);
138
139 return permit;
140 }
141 EXPORT_SYMBOL(rtrs_clt_get_permit);
142
143 /**
144 * rtrs_clt_put_permit() - puts allocated permit
145 * @clt: Current session
146 * @permit: Permit to be freed
147 *
148 * Context:
149 * Does not matter
150 */
151 void rtrs_clt_put_permit(struct rtrs_clt_sess *clt,
152 struct rtrs_permit *permit)
153 {
154 if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))
155 return;
156
157 __rtrs_put_permit(clt, permit);
158
159 /*
160 * rtrs_clt_get_permit() adds itself to the &clt->permits_wait list
161 * before calling schedule(). So if rtrs_clt_get_permit() is sleeping
162 * it must have added itself to &clt->permits_wait before
163 * __rtrs_put_permit() finished.
164 * Hence it is safe to guard wake_up() with a waitqueue_active() test.
165 */
166 if (waitqueue_active(&clt->permits_wait))
167 wake_up(&clt->permits_wait);
168 }
169 EXPORT_SYMBOL(rtrs_clt_put_permit);
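
/*
 * Minimal usage sketch for the permit API above. This is illustrative
 * only: the upper-layer caller and its error handling are assumptions,
 * not taken from this file.
 *
 *	permit = rtrs_clt_get_permit(clt, RTRS_IO_CON, RTRS_PERMIT_WAIT);
 *	if (!permit)
 *		return -EAGAIN;	(can only happen with a no-wait type)
 *	(post the request that uses the permit)
 *	rtrs_clt_put_permit(clt, permit);
 */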
170
171 /**
172 * rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit
173 * @clt_path: client path pointer
174 * @permit: permit for the allocation of the RDMA buffer
175 * Note:
176 * IO connections start from 1.
177 * Connection 0 is reserved for user messages.
178 */
179 static
180 struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_path *clt_path,
181 struct rtrs_permit *permit)
182 {
183 int id = 0;
184
185 if (permit->con_type == RTRS_IO_CON)
186 id = (permit->cpu_id % (clt_path->s.irq_con_num - 1)) + 1;
187
188 return to_clt_con(clt_path->s.con[id]);
189 }
190
191 /**
192 * rtrs_clt_change_state() - change the session state through session state
193 * machine.
194 *
195 * @clt_path: client path to change the state of.
196 * @new_state: state to change to.
197 *
198 * Returns true if the path's state is changed to the new state, otherwise returns false.
199 *
200 * Locks:
201 * state_wq lock must be held.
202 */
203 static bool rtrs_clt_change_state(struct rtrs_clt_path *clt_path,
204 enum rtrs_clt_state new_state)
205 {
206 enum rtrs_clt_state old_state;
207 bool changed = false;
208
209 lockdep_assert_held(&clt_path->state_wq.lock);
210
211 old_state = clt_path->state;
212 switch (new_state) {
213 case RTRS_CLT_CONNECTING:
214 switch (old_state) {
215 case RTRS_CLT_RECONNECTING:
216 changed = true;
217 fallthrough;
218 default:
219 break;
220 }
221 break;
222 case RTRS_CLT_RECONNECTING:
223 switch (old_state) {
224 case RTRS_CLT_CONNECTED:
225 case RTRS_CLT_CONNECTING_ERR:
226 case RTRS_CLT_CLOSED:
227 changed = true;
228 fallthrough;
229 default:
230 break;
231 }
232 break;
233 case RTRS_CLT_CONNECTED:
234 switch (old_state) {
235 case RTRS_CLT_CONNECTING:
236 changed = true;
237 fallthrough;
238 default:
239 break;
240 }
241 break;
242 case RTRS_CLT_CONNECTING_ERR:
243 switch (old_state) {
244 case RTRS_CLT_CONNECTING:
245 changed = true;
246 fallthrough;
247 default:
248 break;
249 }
250 break;
251 case RTRS_CLT_CLOSING:
252 switch (old_state) {
253 case RTRS_CLT_CONNECTING:
254 case RTRS_CLT_CONNECTING_ERR:
255 case RTRS_CLT_RECONNECTING:
256 case RTRS_CLT_CONNECTED:
257 changed = true;
258 fallthrough;
259 default:
260 break;
261 }
262 break;
263 case RTRS_CLT_CLOSED:
264 switch (old_state) {
265 case RTRS_CLT_CLOSING:
266 changed = true;
267 fallthrough;
268 default:
269 break;
270 }
271 break;
272 case RTRS_CLT_DEAD:
273 switch (old_state) {
274 case RTRS_CLT_CLOSED:
275 changed = true;
276 fallthrough;
277 default:
278 break;
279 }
280 break;
281 default:
282 break;
283 }
284 if (changed) {
285 clt_path->state = new_state;
286 wake_up_locked(&clt_path->state_wq);
287 }
288
289 return changed;
290 }
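
/*
 * Summary of the transitions allowed by the switch above (old state ->
 * new state); any combination not listed is rejected and the state is
 * left unchanged:
 *
 *	RECONNECTING                            -> CONNECTING
 *	CONNECTED, CONNECTING_ERR, CLOSED       -> RECONNECTING
 *	CONNECTING                              -> CONNECTED
 *	CONNECTING                              -> CONNECTING_ERR
 *	CONNECTING, CONNECTING_ERR,
 *	RECONNECTING, CONNECTED                 -> CLOSING
 *	CLOSING                                 -> CLOSED
 *	CLOSED                                  -> DEAD
 */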
291
292 static bool rtrs_clt_change_state_from_to(struct rtrs_clt_path *clt_path,
293 enum rtrs_clt_state old_state,
294 enum rtrs_clt_state new_state)
295 {
296 bool changed = false;
297
298 spin_lock_irq(&clt_path->state_wq.lock);
299 if (clt_path->state == old_state)
300 changed = rtrs_clt_change_state(clt_path, new_state);
301 spin_unlock_irq(&clt_path->state_wq.lock);
302
303 return changed;
304 }
305
306 static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path);
307 static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
308 {
309 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
310
311 trace_rtrs_rdma_error_recovery(clt_path);
312
313 if (rtrs_clt_change_state_from_to(clt_path,
314 RTRS_CLT_CONNECTED,
315 RTRS_CLT_RECONNECTING)) {
316 queue_work(rtrs_wq, &clt_path->err_recovery_work);
317 } else {
318 /*
319 * An error can happen while we are still establishing a new
320 * connection, so notify the waiter with the error state; the waiter
321 * is responsible for cleaning up the rest and reconnecting if needed.
322 */
323 rtrs_clt_change_state_from_to(clt_path,
324 RTRS_CLT_CONNECTING,
325 RTRS_CLT_CONNECTING_ERR);
326 }
327 }
328
329 static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
330 {
331 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
332
333 if (wc->status != IB_WC_SUCCESS) {
334 rtrs_err_rl(con->c.path, "Failed IB_WR_REG_MR: %s\n",
335 ib_wc_status_msg(wc->status));
336 rtrs_rdma_error_recovery(con);
337 }
338 }
339
340 static struct ib_cqe fast_reg_cqe = {
341 .done = rtrs_clt_fast_reg_done
342 };
343
344 static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
345 bool notify, bool can_wait);
346
347 static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
348 {
349 struct rtrs_clt_io_req *req =
350 container_of(wc->wr_cqe, typeof(*req), inv_cqe);
351 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
352
353 if (wc->status != IB_WC_SUCCESS) {
354 rtrs_err_rl(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n",
355 ib_wc_status_msg(wc->status));
356 rtrs_rdma_error_recovery(con);
357 }
358 req->mr->need_inval = false;
359 if (req->need_inv_comp)
360 complete(&req->inv_comp);
361 else
362 /* Complete request from INV callback */
363 complete_rdma_req(req, req->inv_errno, true, false);
364 }
365
366 static int rtrs_inv_rkey(struct rtrs_clt_io_req *req)
367 {
368 struct rtrs_clt_con *con = req->con;
369 struct ib_send_wr wr = {
370 .opcode = IB_WR_LOCAL_INV,
371 .wr_cqe = &req->inv_cqe,
372 .send_flags = IB_SEND_SIGNALED,
373 .ex.invalidate_rkey = req->mr->rkey,
374 };
375 req->inv_cqe.done = rtrs_clt_inv_rkey_done;
376
377 return ib_post_send(con->c.qp, &wr, NULL);
378 }
379
380 static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
381 bool notify, bool can_wait)
382 {
383 struct rtrs_clt_con *con = req->con;
384 struct rtrs_clt_path *clt_path;
385 int err;
386
387 if (!req->in_use)
388 return;
389 if (WARN_ON(!req->con))
390 return;
391 clt_path = to_clt_path(con->c.path);
392
393 if (req->sg_cnt) {
394 if (req->mr->need_inval) {
395 /*
396 * We are here to invalidate read/write requests
397 * ourselves. In the normal scenario the server
398 * sends INV for all read requests and we do local
399 * invalidation for write requests ourselves; since
400 * we are here, one of three things has happened:
401 *
402 * 1. this is failover, when errno != 0
403 * and can_wait == 1,
404 *
405 * 2. something totally bad happened and
406 * server forgot to send INV, so we
407 * should do that ourselves.
408 *
409 * 3. write request finishes, we need to do local
410 * invalidate
411 */
412
413 if (can_wait) {
414 req->need_inv_comp = true;
415 } else {
416 /* This should be IO path, so always notify */
417 WARN_ON(!notify);
418 /* Save errno for INV callback */
419 req->inv_errno = errno;
420 }
421
422 refcount_inc(&req->ref);
423 err = rtrs_inv_rkey(req);
424 if (err) {
425 rtrs_err_rl(con->c.path, "Send INV WR key=%#x: %pe\n",
426 req->mr->rkey, ERR_PTR(err));
427 } else if (can_wait) {
428 wait_for_completion(&req->inv_comp);
429 }
430 if (!refcount_dec_and_test(&req->ref))
431 return;
432 }
433 ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
434 req->sg_cnt, req->dir);
435 }
436 if (!refcount_dec_and_test(&req->ref))
437 return;
438 if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
439 atomic_dec(&clt_path->stats->inflight);
440
441 req->in_use = false;
442 req->con = NULL;
443
444 if (errno) {
445 rtrs_err_rl(con->c.path,
446 "IO %s request failed: error=%pe path=%s [%s:%u] notify=%d\n",
447 req->dir == DMA_TO_DEVICE ? "write" : "read", ERR_PTR(errno),
448 kobject_name(&clt_path->kobj), clt_path->hca_name,
449 clt_path->hca_port, notify);
450 }
451
452 if (notify)
453 req->conf(req->priv, errno);
454 }
455
456 static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
457 struct rtrs_clt_io_req *req,
458 struct rtrs_rbuf *rbuf, u32 off,
459 u32 imm, struct ib_send_wr *wr)
460 {
461 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
462 enum ib_send_flags flags;
463 struct ib_sge sge;
464
465 if (!req->sg_size) {
466 rtrs_wrn(con->c.path,
467 "Doing RDMA Write failed, no data supplied\n");
468 return -EINVAL;
469 }
470
471 /* user data and user message in the first list element */
472 sge.addr = req->iu->dma_addr;
473 sge.length = req->sg_size;
474 sge.lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
475
476 /*
477 * From time to time we have to post signalled sends,
478 * or send queue will fill up and only QP reset can help.
479 */
480 flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
481 0 : IB_SEND_SIGNALED;
482
483 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
484 req->iu->dma_addr,
485 req->sg_size, DMA_TO_DEVICE);
486
487 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
488 rbuf->rkey, rbuf->addr + off,
489 imm, flags, wr, NULL);
490 }
491
492 static void process_io_rsp(struct rtrs_clt_path *clt_path, u32 msg_id,
493 s16 errno, bool w_inval)
494 {
495 struct rtrs_clt_io_req *req;
496
497 if (WARN_ON(msg_id >= clt_path->queue_depth))
498 return;
499
500 req = &clt_path->reqs[msg_id];
501 /* Drop need_inv if server responded with send with invalidation */
502 req->mr->need_inval &= !w_inval;
503 complete_rdma_req(req, errno, true, false);
504 }
505
506 static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
507 {
508 struct rtrs_iu *iu;
509 int err;
510 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
511
512 WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
513 iu = container_of(wc->wr_cqe, struct rtrs_iu,
514 cqe);
515 err = rtrs_iu_post_recv(&con->c, iu);
516 if (err) {
517 rtrs_err(con->c.path, "post iu failed %pe\n", ERR_PTR(err));
518 rtrs_rdma_error_recovery(con);
519 }
520 }
521
522 static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
523 {
524 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
525 struct rtrs_msg_rkey_rsp *msg;
526 u32 imm_type, imm_payload;
527 bool w_inval = false;
528 struct rtrs_iu *iu;
529 u32 buf_id;
530 int err;
531
532 WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
533
534 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
535
536 if (wc->byte_len < sizeof(*msg)) {
537 rtrs_err(con->c.path, "rkey response is malformed: size %d\n",
538 wc->byte_len);
539 goto out;
540 }
541 ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
542 iu->size, DMA_FROM_DEVICE);
543 msg = iu->buf;
544 if (le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP) {
545 rtrs_err(clt_path->clt,
546 "rkey response is malformed: type %d\n",
547 le16_to_cpu(msg->type));
548 goto out;
549 }
550 buf_id = le16_to_cpu(msg->buf_id);
551 if (WARN_ON(buf_id >= clt_path->queue_depth))
552 goto out;
553
554 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
555 if (imm_type == RTRS_IO_RSP_IMM ||
556 imm_type == RTRS_IO_RSP_W_INV_IMM) {
557 u32 msg_id;
558
559 w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
560 rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
561
562 if (WARN_ON(buf_id != msg_id))
563 goto out;
564 clt_path->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
565 process_io_rsp(clt_path, msg_id, err, w_inval);
566 }
567 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, iu->dma_addr,
568 iu->size, DMA_FROM_DEVICE);
569 return rtrs_clt_recv_done(con, wc);
570 out:
571 rtrs_rdma_error_recovery(con);
572 }
573
574 static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
575
576 static struct ib_cqe io_comp_cqe = {
577 .done = rtrs_clt_rdma_done
578 };
579
580 /*
581 * Post x2 empty WRs: first is for this RDMA with IMM,
582 * second is for RECV with INV, which happened earlier.
583 */
584 static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
585 {
586 struct ib_recv_wr wr_arr[2], *wr;
587 int i;
588
589 memset(wr_arr, 0, sizeof(wr_arr));
590 for (i = 0; i < ARRAY_SIZE(wr_arr); i++) {
591 wr = &wr_arr[i];
592 wr->wr_cqe = cqe;
593 if (i)
594 /* Chain backwards */
595 wr->next = &wr_arr[i - 1];
596 }
597
598 return ib_post_recv(con->qp, wr, NULL);
599 }
600
601 static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
602 {
603 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
604 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
605 u32 imm_type, imm_payload;
606 bool w_inval = false;
607 int err;
608
609 if (wc->status != IB_WC_SUCCESS) {
610 if (wc->status != IB_WC_WR_FLUSH_ERR) {
611 rtrs_err(clt_path->clt, "RDMA failed: %s\n",
612 ib_wc_status_msg(wc->status));
613 rtrs_rdma_error_recovery(con);
614 }
615 return;
616 }
617 rtrs_clt_update_wc_stats(con);
618
619 switch (wc->opcode) {
620 case IB_WC_RECV_RDMA_WITH_IMM:
621 /*
622 * post_recv() RDMA write completions of IO reqs (read/write)
623 * and hb
624 */
625 if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))
626 return;
627 clt_path->s.hb_missed_cnt = 0;
628 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
629 &imm_type, &imm_payload);
630 if (imm_type == RTRS_IO_RSP_IMM ||
631 imm_type == RTRS_IO_RSP_W_INV_IMM) {
632 u32 msg_id;
633
634 w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
635 rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
636
637 process_io_rsp(clt_path, msg_id, err, w_inval);
638 } else if (imm_type == RTRS_HB_MSG_IMM) {
639 WARN_ON(con->c.cid);
640 rtrs_send_hb_ack(&clt_path->s);
641 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
642 return rtrs_clt_recv_done(con, wc);
643 } else if (imm_type == RTRS_HB_ACK_IMM) {
644 WARN_ON(con->c.cid);
645 clt_path->s.hb_cur_latency =
646 ktime_sub(ktime_get(), clt_path->s.hb_last_sent);
647 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
648 return rtrs_clt_recv_done(con, wc);
649 } else {
650 rtrs_wrn(con->c.path, "Unknown IMM type %u\n",
651 imm_type);
652 }
653 if (w_inval)
654 /*
655 * Post x2 empty WRs: first is for this RDMA with IMM,
656 * second is for RECV with INV, which happened earlier.
657 */
658 err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
659 else
660 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
661 if (err) {
662 rtrs_err(con->c.path, "rtrs_post_recv_empty(): %pe\n",
663 ERR_PTR(err));
664 rtrs_rdma_error_recovery(con);
665 }
666 break;
667 case IB_WC_RECV:
668 /*
669 * Key invalidations from server side
670 */
671 clt_path->s.hb_missed_cnt = 0;
672 WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
673 wc->wc_flags & IB_WC_WITH_IMM));
674 WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
675 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
676 if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
677 return rtrs_clt_recv_done(con, wc);
678
679 return rtrs_clt_rkey_rsp_done(con, wc);
680 }
681 break;
682 case IB_WC_RDMA_WRITE:
683 /*
684 * post_send() RDMA write completions of IO reqs (read/write)
685 * and hb.
686 */
687 break;
688
689 default:
690 rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode);
691 return;
692 }
693 }
694
695 static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
696 {
697 int err, i;
698 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
699
700 for (i = 0; i < q_size; i++) {
701 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
702 struct rtrs_iu *iu = &con->rsp_ius[i];
703
704 err = rtrs_iu_post_recv(&con->c, iu);
705 } else {
706 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
707 }
708 if (err)
709 return err;
710 }
711
712 return 0;
713 }
714
715 static int post_recv_path(struct rtrs_clt_path *clt_path)
716 {
717 size_t q_size = 0;
718 int err, cid;
719
720 for (cid = 0; cid < clt_path->s.con_num; cid++) {
721 if (cid == 0)
722 q_size = SERVICE_CON_QUEUE_DEPTH;
723 else
724 q_size = clt_path->queue_depth;
725
726 /*
727 * x2 for RDMA read responses + FR key invalidations,
728 * RDMA writes do not require any FR registrations.
729 */
730 q_size *= 2;
731
732 err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size);
733 if (err) {
734 rtrs_err(clt_path->clt, "post_recv_io(), err: %pe\n",
735 ERR_PTR(err));
736 return err;
737 }
738 }
739
740 return 0;
741 }
742
743 struct path_it {
744 int i;
745 struct list_head skip_list;
746 struct rtrs_clt_sess *clt;
747 struct rtrs_clt_path *(*next_path)(struct path_it *it);
748 };
749
750 /*
751 * rtrs_clt_get_next_path_or_null - get clt path from the list or return NULL
752 * @head: the head for the list.
753 * @clt_path: The element to take the next clt_path from.
754 *
755 * The next clt path is returned in round-robin fashion, i.e. the head will
756 * be skipped, but if the list is observed as empty, NULL is returned.
757 *
758 * This function may safely run concurrently with the _rcu list-mutation
759 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
760 */
761 static inline struct rtrs_clt_path *
762 rtrs_clt_get_next_path_or_null(struct list_head *head, struct rtrs_clt_path *clt_path)
763 {
764 return list_next_or_null_rcu(head, &clt_path->s.entry, typeof(*clt_path), s.entry) ?:
765 list_next_or_null_rcu(head,
766 READ_ONCE((&clt_path->s.entry)->next),
767 typeof(*clt_path), s.entry);
768 }
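
/*
 * Example of the round-robin behaviour above: with paths_list = A -> B -> C,
 * passing clt_path == B returns C, and passing clt_path == C wraps around
 * (the list head is skipped) and returns A. With a single path A, A is
 * returned again; an empty list yields NULL.
 */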
769
770 /**
771 * get_next_path_rr() - Returns path in round-robin fashion.
772 * @it: the path pointer
773 *
774 * Related to @MP_POLICY_RR
775 *
776 * Locks:
777 * rcu_read_lock() must be held.
778 */
779 static struct rtrs_clt_path *get_next_path_rr(struct path_it *it)
780 {
781 struct rtrs_clt_path __rcu **ppcpu_path;
782 struct rtrs_clt_path *path;
783 struct rtrs_clt_sess *clt;
784
785 /*
786 * Assert that rcu lock must be held
787 */
788 RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu read lock held");
789
790 clt = it->clt;
791
792 /*
793 * Here we use two RCU objects: @paths_list and @pcpu_path
794 * pointer. See rtrs_clt_remove_path_from_arr() for details
795 * how that is handled.
796 */
797
798 ppcpu_path = this_cpu_ptr(clt->pcpu_path);
799 path = rcu_dereference(*ppcpu_path);
800 if (!path)
801 path = list_first_or_null_rcu(&clt->paths_list,
802 typeof(*path), s.entry);
803 else
804 path = rtrs_clt_get_next_path_or_null(&clt->paths_list, path);
805
806 rcu_assign_pointer(*ppcpu_path, path);
807
808 return path;
809 }
810
811 /**
812 * get_next_path_min_inflight() - Returns path with minimal inflight count.
813 * @it: the path pointer
814 *
815 * Related to @MP_POLICY_MIN_INFLIGHT
816 *
817 * Locks:
818 * rcu_read_lock() must be held.
819 */
820 static struct rtrs_clt_path *get_next_path_min_inflight(struct path_it *it)
821 {
822 struct rtrs_clt_path *min_path = NULL;
823 struct rtrs_clt_sess *clt = it->clt;
824 struct rtrs_clt_path *clt_path;
825 int min_inflight = INT_MAX;
826 int inflight;
827
828 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
829 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
830 continue;
831
832 if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
833 continue;
834
835 inflight = atomic_read(&clt_path->stats->inflight);
836
837 if (inflight < min_inflight) {
838 min_inflight = inflight;
839 min_path = clt_path;
840 }
841 }
842
843 /*
844 * add the path to the skip list, so that next time we can get
845 * a different one
846 */
847 if (min_path)
848 list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
849
850 return min_path;
851 }
852
853 /**
854 * get_next_path_min_latency() - Returns path with minimal latency.
855 * @it: the path pointer
856 *
857 * Return: a path with the lowest latency or NULL if all paths are tried
858 *
859 * Locks:
860 * rcu_read_lock() must be held.
861 *
862 * Related to @MP_POLICY_MIN_LATENCY
863 *
864 * This DOES skip an already-tried path.
865 * There is a skip-list to skip a path if the path has tried but failed.
866 * It will try the minimum latency path and then the second minimum latency
867 * path and so on. Finally it will return NULL if all paths are tried.
868 * Therefore the caller MUST check the returned
869 * path is NULL and trigger the IO error.
870 */
871 static struct rtrs_clt_path *get_next_path_min_latency(struct path_it *it)
872 {
873 struct rtrs_clt_path *min_path = NULL;
874 struct rtrs_clt_sess *clt = it->clt;
875 struct rtrs_clt_path *clt_path;
876 ktime_t min_latency = KTIME_MAX;
877 ktime_t latency;
878
879 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
880 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
881 continue;
882
883 if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
884 continue;
885
886 latency = clt_path->s.hb_cur_latency;
887
888 if (latency < min_latency) {
889 min_latency = latency;
890 min_path = clt_path;
891 }
892 }
893
894 /*
895 * add the path to the skip list, so that next time we can get
896 * a different one
897 */
898 if (min_path)
899 list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
900
901 return min_path;
902 }
903
904 static inline void path_it_init(struct path_it *it, struct rtrs_clt_sess *clt)
905 {
906 INIT_LIST_HEAD(&it->skip_list);
907 it->clt = clt;
908 it->i = 0;
909
910 if (clt->mp_policy == MP_POLICY_RR)
911 it->next_path = get_next_path_rr;
912 else if (clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
913 it->next_path = get_next_path_min_inflight;
914 else
915 it->next_path = get_next_path_min_latency;
916 }
917
918 static inline void path_it_deinit(struct path_it *it)
919 {
920 struct list_head *skip, *tmp;
921 /*
922 * The skip_list is used only for the MIN_INFLIGHT and MIN_LATENCY policies.
923 * We need to remove paths from it, so that next IO can insert
924 * paths (->mp_skip_entry) into a skip_list again.
925 */
926 list_for_each_safe(skip, tmp, &it->skip_list)
927 list_del_init(skip);
928 }
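
/*
 * Typical iteration pattern for the path_it helpers above, as used by
 * rtrs_clt_failover_req() below (sketch only, error handling omitted):
 *
 *	rcu_read_lock();
 *	for (path_it_init(&it, clt);
 *	     (path = it.next_path(&it)) && it.i < it.clt->paths_num;
 *	     it.i++) {
 *		(try the path, break on success)
 *	}
 *	path_it_deinit(&it);
 *	rcu_read_unlock();
 */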
929
930 /**
931 * rtrs_clt_init_req() - Initialize an rtrs_clt_io_req holding information
932 * about an inflight IO.
933 * The user buffer holding user control message (not data) is copied into
934 * the corresponding buffer of rtrs_iu (req->iu->buf), which later on will
935 * also hold the control message of rtrs.
936 * @req: an io request holding information about IO.
937 * @clt_path: client path
938 * @conf: confirmation callback function to notify the upper layer.
939 * @permit: permit for allocation of RDMA remote buffer
940 * @priv: private pointer
941 * @vec: kernel vector containing control message
942 * @usr_len: length of the user message
943 * @sg: scatter list for IO data
944 * @sg_cnt: number of scatter list entries
945 * @data_len: length of the IO data
946 * @dir: direction of the IO.
947 */
948 static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
949 struct rtrs_clt_path *clt_path,
950 void (*conf)(void *priv, int errno),
951 struct rtrs_permit *permit, void *priv,
952 const struct kvec *vec, size_t usr_len,
953 struct scatterlist *sg, size_t sg_cnt,
954 size_t data_len, int dir)
955 {
956 struct iov_iter iter;
957 size_t len;
958
959 req->permit = permit;
960 req->in_use = true;
961 req->usr_len = usr_len;
962 req->data_len = data_len;
963 req->sglist = sg;
964 req->sg_cnt = sg_cnt;
965 req->priv = priv;
966 req->dir = dir;
967 req->con = rtrs_permit_to_clt_con(clt_path, permit);
968 req->conf = conf;
969 req->mr->need_inval = false;
970 req->need_inv_comp = false;
971 req->inv_errno = 0;
972 refcount_set(&req->ref, 1);
973 req->mp_policy = clt_path->clt->mp_policy;
974
975 iov_iter_kvec(&iter, ITER_SOURCE, vec, 1, usr_len);
976 len = _copy_from_iter(req->iu->buf, usr_len, &iter);
977 WARN_ON(len != usr_len);
978
979 reinit_completion(&req->inv_comp);
980 }
981
982 static struct rtrs_clt_io_req *
983 rtrs_clt_get_req(struct rtrs_clt_path *clt_path,
984 void (*conf)(void *priv, int errno),
985 struct rtrs_permit *permit, void *priv,
986 const struct kvec *vec, size_t usr_len,
987 struct scatterlist *sg, size_t sg_cnt,
988 size_t data_len, int dir)
989 {
990 struct rtrs_clt_io_req *req;
991
992 req = &clt_path->reqs[permit->mem_id];
993 rtrs_clt_init_req(req, clt_path, conf, permit, priv, vec, usr_len,
994 sg, sg_cnt, data_len, dir);
995 return req;
996 }
997
998 static struct rtrs_clt_io_req *
999 rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path,
1000 struct rtrs_clt_io_req *fail_req)
1001 {
1002 struct rtrs_clt_io_req *req;
1003 struct kvec vec = {
1004 .iov_base = fail_req->iu->buf,
1005 .iov_len = fail_req->usr_len
1006 };
1007
1008 req = &alive_path->reqs[fail_req->permit->mem_id];
1009 rtrs_clt_init_req(req, alive_path, fail_req->conf, fail_req->permit,
1010 fail_req->priv, &vec, fail_req->usr_len,
1011 fail_req->sglist, fail_req->sg_cnt,
1012 fail_req->data_len, fail_req->dir);
1013 return req;
1014 }
1015
1016 static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
1017 struct rtrs_clt_io_req *req,
1018 struct rtrs_rbuf *rbuf, bool fr_en,
1019 u32 count, u32 size, u32 imm,
1020 struct ib_send_wr *wr,
1021 struct ib_send_wr *tail)
1022 {
1023 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1024 struct ib_sge *sge = req->sge;
1025 enum ib_send_flags flags;
1026 struct scatterlist *sg;
1027 size_t num_sge;
1028 int i;
1029 struct ib_send_wr *ptail = NULL;
1030
1031 if (fr_en) {
1032 i = 0;
1033 sge[i].addr = req->mr->iova;
1034 sge[i].length = req->mr->length;
1035 sge[i].lkey = req->mr->lkey;
1036 i++;
1037 num_sge = 2;
1038 ptail = tail;
1039 } else {
1040 for_each_sg(req->sglist, sg, count, i) {
1041 sge[i].addr = sg_dma_address(sg);
1042 sge[i].length = sg_dma_len(sg);
1043 sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
1044 }
1045 num_sge = 1 + count;
1046 }
1047 sge[i].addr = req->iu->dma_addr;
1048 sge[i].length = size;
1049 sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
1050
1051 /*
1052 * From time to time we have to post signalled sends,
1053 * or send queue will fill up and only QP reset can help.
1054 */
1055 flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
1056 0 : IB_SEND_SIGNALED;
1057
1058 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
1059 req->iu->dma_addr,
1060 size, DMA_TO_DEVICE);
1061
1062 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
1063 rbuf->rkey, rbuf->addr, imm,
1064 flags, wr, ptail);
1065 }
1066
1067 static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
1068 {
1069 int nr;
1070
1071 /* Align the MR to a 4K page size to match the block virt boundary */
1072 nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
1073 if (nr != count)
1074 return nr < 0 ? nr : -EINVAL;
1075 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
1076
1077 return nr;
1078 }
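
/*
 * On success the MR above covers the whole sglist as one virtually
 * contiguous, 4K-aligned region, so callers can describe the buffer to
 * the peer with a single { iova, rkey, length } triple (see
 * rtrs_clt_read_req() below); bumping the rkey distinguishes this
 * registration from any stale one the peer may still hold.
 */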
1079
1080 static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
1081 {
1082 struct rtrs_clt_con *con = req->con;
1083 struct rtrs_path *s = con->c.path;
1084 struct rtrs_clt_path *clt_path = to_clt_path(s);
1085 struct rtrs_msg_rdma_write *msg;
1086
1087 struct rtrs_rbuf *rbuf;
1088 int ret, count = 0;
1089 u32 imm, buf_id;
1090 struct ib_reg_wr rwr;
1091 struct ib_send_wr *wr = NULL;
1092 bool fr_en = false;
1093
1094 const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
1095
1096 if (tsize > clt_path->chunk_size) {
1097 rtrs_wrn(s, "Write request failed, size too big %zu > %d\n",
1098 tsize, clt_path->chunk_size);
1099 return -EMSGSIZE;
1100 }
1101 if (req->sg_cnt) {
1102 count = ib_dma_map_sg(clt_path->s.dev->ib_dev, req->sglist,
1103 req->sg_cnt, req->dir);
1104 if (!count) {
1105 rtrs_wrn(s, "Write request failed, map failed\n");
1106 return -EINVAL;
1107 }
1108 }
1109 /* put rtrs msg after sg and user message */
1110 msg = req->iu->buf + req->usr_len;
1111 msg->type = cpu_to_le16(RTRS_MSG_WRITE);
1112 msg->usr_len = cpu_to_le16(req->usr_len);
1113
1114 /* rtrs message on server side will be after user data and message */
1115 imm = req->permit->mem_off + req->data_len + req->usr_len;
1116 imm = rtrs_to_io_req_imm(imm);
1117 buf_id = req->permit->mem_id;
1118 req->sg_size = tsize;
1119 rbuf = &clt_path->rbufs[buf_id];
1120
1121 if (count) {
1122 ret = rtrs_map_sg_fr(req, count);
1123 if (ret < 0) {
1124 rtrs_err_rl(s,
1125 "Write request failed, failed to map fast reg. data, err: %pe\n",
1126 ERR_PTR(ret));
1127 ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
1128 req->sg_cnt, req->dir);
1129 return ret;
1130 }
1131 rwr = (struct ib_reg_wr) {
1132 .wr.opcode = IB_WR_REG_MR,
1133 .wr.wr_cqe = &fast_reg_cqe,
1134 .mr = req->mr,
1135 .key = req->mr->rkey,
1136 .access = (IB_ACCESS_LOCAL_WRITE),
1137 };
1138 wr = &rwr.wr;
1139 fr_en = true;
1140 req->mr->need_inval = true;
1141 }
1142 /*
1143 * Update stats now; after the request is successfully sent it is
1144 * no longer safe to touch it.
1145 */
1146 rtrs_clt_update_all_stats(req, WRITE);
1147
1148 ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count,
1149 req->usr_len + sizeof(*msg),
1150 imm, wr, NULL);
1151 if (ret) {
1152 rtrs_err_rl(s,
1153 "Write request failed: error=%pe path=%s [%s:%u]\n",
1154 ERR_PTR(ret), kobject_name(&clt_path->kobj),
1155 clt_path->hca_name, clt_path->hca_port);
1156 if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
1157 atomic_dec(&clt_path->stats->inflight);
1158 if (req->mr->need_inval) {
1159 req->mr->need_inval = false;
1160 refcount_dec(&req->ref);
1161 }
1162 if (req->sg_cnt)
1163 ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
1164 req->sg_cnt, req->dir);
1165 }
1166
1167 return ret;
1168 }
1169
1170 static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
1171 {
1172 struct rtrs_clt_con *con = req->con;
1173 struct rtrs_path *s = con->c.path;
1174 struct rtrs_clt_path *clt_path = to_clt_path(s);
1175 struct rtrs_msg_rdma_read *msg;
1176 struct rtrs_ib_dev *dev = clt_path->s.dev;
1177
1178 struct ib_reg_wr rwr;
1179 struct ib_send_wr *wr = NULL;
1180
1181 int ret, count = 0;
1182 u32 imm, buf_id;
1183
1184 const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
1185
1186 if (tsize > clt_path->chunk_size) {
1187 rtrs_wrn(s,
1188 "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
1189 tsize, clt_path->chunk_size);
1190 return -EMSGSIZE;
1191 }
1192
1193 if (req->sg_cnt) {
1194 count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt,
1195 req->dir);
1196 if (!count) {
1197 rtrs_wrn(s,
1198 "Read request failed, dma map failed\n");
1199 return -EINVAL;
1200 }
1201 }
1202 /* put our message into req->buf after the user message */
1203 msg = req->iu->buf + req->usr_len;
1204 msg->type = cpu_to_le16(RTRS_MSG_READ);
1205 msg->usr_len = cpu_to_le16(req->usr_len);
1206
1207 if (count) {
1208 ret = rtrs_map_sg_fr(req, count);
1209 if (ret < 0) {
1210 rtrs_err_rl(s,
1211 "Read request failed, failed to map fast reg. data, err: %pe\n",
1212 ERR_PTR(ret));
1213 ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt,
1214 req->dir);
1215 return ret;
1216 }
1217 rwr = (struct ib_reg_wr) {
1218 .wr.opcode = IB_WR_REG_MR,
1219 .wr.wr_cqe = &fast_reg_cqe,
1220 .mr = req->mr,
1221 .key = req->mr->rkey,
1222 .access = (IB_ACCESS_LOCAL_WRITE |
1223 IB_ACCESS_REMOTE_WRITE),
1224 };
1225 wr = &rwr.wr;
1226
1227 msg->sg_cnt = cpu_to_le16(1);
1228 msg->flags = cpu_to_le16(RTRS_MSG_NEED_INVAL_F);
1229
1230 msg->desc[0].addr = cpu_to_le64(req->mr->iova);
1231 msg->desc[0].key = cpu_to_le32(req->mr->rkey);
1232 msg->desc[0].len = cpu_to_le32(req->mr->length);
1233
1234 /* Further invalidation is required */
1235 req->mr->need_inval = !!RTRS_MSG_NEED_INVAL_F;
1236
1237 } else {
1238 msg->sg_cnt = 0;
1239 msg->flags = 0;
1240 }
1241 /*
1242 * rtrs message will be after the space reserved for disk data and
1243 * user message
1244 */
1245 imm = req->permit->mem_off + req->data_len + req->usr_len;
1246 imm = rtrs_to_io_req_imm(imm);
1247 buf_id = req->permit->mem_id;
1248
1249 req->sg_size = sizeof(*msg);
1250 req->sg_size += le16_to_cpu(msg->sg_cnt) * sizeof(struct rtrs_sg_desc);
1251 req->sg_size += req->usr_len;
1252
1253 /*
1254 * Update stats now; after the request is successfully sent it is
1255 * no longer safe to touch it.
1256 */
1257 rtrs_clt_update_all_stats(req, READ);
1258
1259 ret = rtrs_post_send_rdma(req->con, req, &clt_path->rbufs[buf_id],
1260 req->data_len, imm, wr);
1261 if (ret) {
1262 rtrs_err_rl(s,
1263 "Read request failed: error=%pe path=%s [%s:%u]\n",
1264 ERR_PTR(ret), kobject_name(&clt_path->kobj),
1265 clt_path->hca_name, clt_path->hca_port);
1266 if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
1267 atomic_dec(&clt_path->stats->inflight);
1268 req->mr->need_inval = false;
1269 if (req->sg_cnt)
1270 ib_dma_unmap_sg(dev->ib_dev, req->sglist,
1271 req->sg_cnt, req->dir);
1272 }
1273
1274 return ret;
1275 }
1276
1277 /**
1278 * rtrs_clt_failover_req() - Try to find an active path for a failed request
1279 * @clt: clt context
1280 * @fail_req: a failed io request.
1281 */
1282 static int rtrs_clt_failover_req(struct rtrs_clt_sess *clt,
1283 struct rtrs_clt_io_req *fail_req)
1284 {
1285 struct rtrs_clt_path *alive_path;
1286 struct rtrs_clt_io_req *req;
1287 int err = -ECONNABORTED;
1288 struct path_it it;
1289
1290 rcu_read_lock();
1291 for (path_it_init(&it, clt);
1292 (alive_path = it.next_path(&it)) && it.i < it.clt->paths_num;
1293 it.i++) {
1294 if (READ_ONCE(alive_path->state) != RTRS_CLT_CONNECTED)
1295 continue;
1296 req = rtrs_clt_get_copy_req(alive_path, fail_req);
1297 if (req->dir == DMA_TO_DEVICE)
1298 err = rtrs_clt_write_req(req);
1299 else
1300 err = rtrs_clt_read_req(req);
1301 if (err) {
1302 req->in_use = false;
1303 continue;
1304 }
1305 /* Success path */
1306 rtrs_clt_inc_failover_cnt(alive_path->stats);
1307 break;
1308 }
1309 path_it_deinit(&it);
1310 rcu_read_unlock();
1311
1312 return err;
1313 }
1314
1315 static void fail_all_outstanding_reqs(struct rtrs_clt_path *clt_path)
1316 {
1317 struct rtrs_clt_sess *clt = clt_path->clt;
1318 struct rtrs_clt_io_req *req;
1319 int i, err;
1320
1321 if (!clt_path->reqs)
1322 return;
1323 for (i = 0; i < clt_path->queue_depth; ++i) {
1324 req = &clt_path->reqs[i];
1325 if (!req->in_use)
1326 continue;
1327
1328 /*
1329 * Safely (without notification) complete failed request.
1330 * After completion this request is still usable and can
1331 * be failed over to another path.
1332 */
1333 complete_rdma_req(req, -ECONNABORTED, false, true);
1334
1335 err = rtrs_clt_failover_req(clt, req);
1336 if (err)
1337 /* Failover failed, notify anyway */
1338 req->conf(req->priv, err);
1339 }
1340 }
1341
1342 static void free_path_reqs(struct rtrs_clt_path *clt_path)
1343 {
1344 struct rtrs_clt_io_req *req;
1345 int i;
1346
1347 if (!clt_path->reqs)
1348 return;
1349 for (i = 0; i < clt_path->queue_depth; ++i) {
1350 req = &clt_path->reqs[i];
1351 if (req->mr)
1352 ib_dereg_mr(req->mr);
1353 kfree(req->sge);
1354 rtrs_iu_free(req->iu, clt_path->s.dev->ib_dev, 1);
1355 }
1356 kfree(clt_path->reqs);
1357 clt_path->reqs = NULL;
1358 }
1359
1360 static int alloc_path_reqs(struct rtrs_clt_path *clt_path)
1361 {
1362 struct ib_device *ib_dev = clt_path->s.dev->ib_dev;
1363 struct rtrs_clt_io_req *req;
1364 enum ib_mr_type mr_type;
1365 int i, err = -ENOMEM;
1366
1367 clt_path->reqs = kzalloc_objs(*clt_path->reqs, clt_path->queue_depth);
1368 if (!clt_path->reqs)
1369 return -ENOMEM;
1370
1371 if (ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
1372 mr_type = IB_MR_TYPE_SG_GAPS;
1373 else
1374 mr_type = IB_MR_TYPE_MEM_REG;
1375
1376 for (i = 0; i < clt_path->queue_depth; ++i) {
1377 req = &clt_path->reqs[i];
1378 req->iu = rtrs_iu_alloc(1, clt_path->max_hdr_size, GFP_KERNEL,
1379 clt_path->s.dev->ib_dev,
1380 DMA_TO_DEVICE,
1381 rtrs_clt_rdma_done);
1382 if (!req->iu)
1383 goto out;
1384
1385 req->sge = kzalloc_objs(*req->sge, 2);
1386 if (!req->sge)
1387 goto out;
1388
1389 req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd, mr_type,
1390 clt_path->max_pages_per_mr);
1391 if (IS_ERR(req->mr)) {
1392 err = PTR_ERR(req->mr);
1393 pr_err("Failed to alloc clt_path->max_pages_per_mr %d: %pe\n",
1394 clt_path->max_pages_per_mr, req->mr);
1395 req->mr = NULL;
1396 goto out;
1397 }
1398
1399 init_completion(&req->inv_comp);
1400 }
1401
1402 return 0;
1403
1404 out:
1405 free_path_reqs(clt_path);
1406
1407 return err;
1408 }
1409
1410 static int alloc_permits(struct rtrs_clt_sess *clt)
1411 {
1412 unsigned int chunk_bits;
1413 int err, i;
1414
1415 clt->permits_map = bitmap_zalloc(clt->queue_depth, GFP_KERNEL);
1416 if (!clt->permits_map) {
1417 err = -ENOMEM;
1418 goto out_err;
1419 }
1420 clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL);
1421 if (!clt->permits) {
1422 err = -ENOMEM;
1423 goto err_map;
1424 }
1425 chunk_bits = ilog2(clt->queue_depth - 1) + 1;
1426 for (i = 0; i < clt->queue_depth; i++) {
1427 struct rtrs_permit *permit;
1428
1429 permit = get_permit(clt, i);
1430 permit->mem_id = i;
1431 permit->mem_off = i << (MAX_IMM_PAYL_BITS - chunk_bits);
1432 }
1433
1434 return 0;
1435
1436 err_map:
1437 bitmap_free(clt->permits_map);
1438 clt->permits_map = NULL;
1439 out_err:
1440 return err;
1441 }
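
/*
 * Worked example for the mem_off packing above (hypothetical numbers):
 * with queue_depth = 128, chunk_bits = ilog2(127) + 1 = 7, so
 * permit->mem_off = i << (MAX_IMM_PAYL_BITS - 7). The permit index is
 * thus carried in the top chunk_bits of the immediate payload, leaving
 * the low bits free for the offset inside the chunk (see how imm is
 * built in rtrs_clt_write_req() and rtrs_clt_read_req()).
 */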
1442
1443 static void free_permits(struct rtrs_clt_sess *clt)
1444 {
1445 if (clt->permits_map)
1446 wait_event(clt->permits_wait,
1447 bitmap_empty(clt->permits_map, clt->queue_depth));
1448
1449 bitmap_free(clt->permits_map);
1450 clt->permits_map = NULL;
1451 kfree(clt->permits);
1452 clt->permits = NULL;
1453 }
1454
1455 static void query_fast_reg_mode(struct rtrs_clt_path *clt_path)
1456 {
1457 struct ib_device *ib_dev;
1458 u64 max_pages_per_mr;
1459 int mr_page_shift;
1460
1461 ib_dev = clt_path->s.dev->ib_dev;
1462
1463 /*
1464 * Use the smallest page size supported by the HCA, down to a
1465 * minimum of 4096 bytes. We're unlikely to build large sglists
1466 * out of smaller entries.
1467 */
1468 mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1);
1469 max_pages_per_mr = ib_dev->attrs.max_mr_size;
1470 do_div(max_pages_per_mr, (1ull << mr_page_shift));
1471 max_pages_per_mr = min_not_zero((u32)max_pages_per_mr, U32_MAX);
1472 clt_path->max_pages_per_mr =
1473 min3(clt_path->max_pages_per_mr, (u32)max_pages_per_mr,
1474 ib_dev->attrs.max_fast_reg_page_list_len);
1475 clt_path->clt->max_segments =
1476 min(clt_path->max_pages_per_mr, clt_path->clt->max_segments);
1477 }
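
/*
 * Worked example for the clamping above (hypothetical HCA limits): if
 * page_size_cap has 4K as its smallest supported page size, then
 * mr_page_shift = 12; an HCA reporting max_mr_size = 4 GiB gives
 * 4 GiB >> 12 = 1M pages, which is then clamped by
 * max_fast_reg_page_list_len and by the RTRS_MAX_SEGMENTS value that
 * max_pages_per_mr starts from.
 */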
1478
1479 static bool rtrs_clt_change_state_get_old(struct rtrs_clt_path *clt_path,
1480 enum rtrs_clt_state new_state,
1481 enum rtrs_clt_state *old_state)
1482 {
1483 bool changed;
1484
1485 spin_lock_irq(&clt_path->state_wq.lock);
1486 if (old_state)
1487 *old_state = clt_path->state;
1488 changed = rtrs_clt_change_state(clt_path, new_state);
1489 spin_unlock_irq(&clt_path->state_wq.lock);
1490
1491 return changed;
1492 }
1493
1494 static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
1495 {
1496 struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
1497 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1498
1499 rtrs_err(con->c.path, "HB err handler for path=%s\n", kobject_name(&clt_path->kobj));
1500 rtrs_rdma_error_recovery(con);
1501 }
1502
1503 static void rtrs_clt_init_hb(struct rtrs_clt_path *clt_path)
1504 {
1505 rtrs_init_hb(&clt_path->s, &io_comp_cqe,
1506 RTRS_HB_INTERVAL_MS,
1507 RTRS_HB_MISSED_MAX,
1508 rtrs_clt_hb_err_handler,
1509 rtrs_wq);
1510 }
1511
1512 static void rtrs_clt_reconnect_work(struct work_struct *work);
1513 static void rtrs_clt_close_work(struct work_struct *work);
1514
1515 static void rtrs_clt_err_recovery_work(struct work_struct *work)
1516 {
1517 struct rtrs_clt_path *clt_path;
1518 struct rtrs_clt_sess *clt;
1519 int delay_ms;
1520
1521 clt_path = container_of(work, struct rtrs_clt_path, err_recovery_work);
1522 clt = clt_path->clt;
1523 delay_ms = clt->reconnect_delay_sec * 1000;
1524 rtrs_clt_stop_and_destroy_conns(clt_path);
1525 queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
1526 msecs_to_jiffies(delay_ms +
1527 get_random_u32_below(RTRS_RECONNECT_SEED)));
1528 }
1529
1530 static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
1531 const struct rtrs_addr *path,
1532 size_t con_num, u32 nr_poll_queues)
1533 {
1534 struct rtrs_clt_path *clt_path;
1535 int err = -ENOMEM;
1536 int cpu;
1537 size_t total_con;
1538
1539 clt_path = kzalloc_obj(*clt_path);
1540 if (!clt_path)
1541 goto err;
1542
1543 /*
1544 * irqmode and poll
1545 * +1: Extra connection for user messages
1546 */
1547 total_con = con_num + nr_poll_queues + 1;
1548 clt_path->s.con = kzalloc_objs(*clt_path->s.con, total_con);
1549 if (!clt_path->s.con)
1550 goto err_free_path;
1551
1552 clt_path->s.con_num = total_con;
1553 clt_path->s.irq_con_num = con_num + 1;
1554
1555 clt_path->stats = kzalloc_obj(*clt_path->stats);
1556 if (!clt_path->stats)
1557 goto err_free_con;
1558
1559 mutex_init(&clt_path->init_mutex);
1560 uuid_gen(&clt_path->s.uuid);
1561 memcpy(&clt_path->s.dst_addr, path->dst,
1562 rdma_addr_size((struct sockaddr *)path->dst));
1563
1564 /*
1565 * rdma_resolve_addr() passes src_addr to cma_bind_addr, which
1566 * checks the sa_family to be non-zero. If user passed src_addr=NULL
1567 * the sess->src_addr will contain only zeros, which is then fine.
1568 */
1569 if (path->src)
1570 memcpy(&clt_path->s.src_addr, path->src,
1571 rdma_addr_size((struct sockaddr *)path->src));
1572 strscpy(clt_path->s.sessname, clt->sessname,
1573 sizeof(clt_path->s.sessname));
1574 clt_path->clt = clt;
1575 clt_path->max_pages_per_mr = RTRS_MAX_SEGMENTS;
1576 init_waitqueue_head(&clt_path->state_wq);
1577 clt_path->state = RTRS_CLT_CONNECTING;
1578 atomic_set(&clt_path->connected_cnt, 0);
1579 INIT_WORK(&clt_path->close_work, rtrs_clt_close_work);
1580 INIT_WORK(&clt_path->err_recovery_work, rtrs_clt_err_recovery_work);
1581 INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work);
1582 rtrs_clt_init_hb(clt_path);
1583
1584 clt_path->mp_skip_entry = alloc_percpu(typeof(*clt_path->mp_skip_entry));
1585 if (!clt_path->mp_skip_entry)
1586 goto err_free_stats;
1587
1588 for_each_possible_cpu(cpu)
1589 INIT_LIST_HEAD(per_cpu_ptr(clt_path->mp_skip_entry, cpu));
1590
1591 err = rtrs_clt_init_stats(clt_path->stats);
1592 if (err)
1593 goto err_free_percpu;
1594
1595 return clt_path;
1596
1597 err_free_percpu:
1598 free_percpu(clt_path->mp_skip_entry);
1599 err_free_stats:
1600 kfree(clt_path->stats);
1601 err_free_con:
1602 kfree(clt_path->s.con);
1603 err_free_path:
1604 kfree(clt_path);
1605 err:
1606 return ERR_PTR(err);
1607 }
1608
1609 void free_path(struct rtrs_clt_path *clt_path)
1610 {
1611 free_percpu(clt_path->mp_skip_entry);
1612 mutex_destroy(&clt_path->init_mutex);
1613 kfree(clt_path->s.con);
1614 kfree(clt_path->rbufs);
1615 kfree(clt_path);
1616 }
1617
1618 static int create_con(struct rtrs_clt_path *clt_path, unsigned int cid)
1619 {
1620 struct rtrs_clt_con *con;
1621
1622 con = kzalloc_obj(*con);
1623 if (!con)
1624 return -ENOMEM;
1625
1626 /* Map first two connections to the first CPU */
1627 con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
1628 con->c.cid = cid;
1629 con->c.path = &clt_path->s;
1630 /* Align with srv, init as 1 */
1631 atomic_set(&con->c.wr_cnt, 1);
1632 mutex_init(&con->con_mutex);
1633
1634 clt_path->s.con[cid] = &con->c;
1635
1636 return 0;
1637 }
1638
1639 static void destroy_con(struct rtrs_clt_con *con)
1640 {
1641 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1642
1643 clt_path->s.con[con->c.cid] = NULL;
1644 mutex_destroy(&con->con_mutex);
1645 kfree(con);
1646 }
1647
1648 static int create_con_cq_qp(struct rtrs_clt_con *con)
1649 {
1650 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1651 u32 max_send_wr, max_recv_wr, cq_num, max_send_sge, wr_limit;
1652 int err, cq_vector;
1653 struct rtrs_msg_rkey_rsp *rsp;
1654
1655 lockdep_assert_held(&con->con_mutex);
1656 if (con->c.cid == 0) {
1657 max_send_sge = 1;
1658 /* We must be the first here */
1659 if (WARN_ON(clt_path->s.dev))
1660 return -EINVAL;
1661
1662 /*
1663 * The whole session uses device from user connection.
1664 * Be careful not to close user connection before ib dev
1665 * is gracefully put.
1666 */
1667 clt_path->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
1668 &dev_pd);
1669 if (!clt_path->s.dev) {
1670 rtrs_wrn(clt_path->clt,
1671 "rtrs_ib_dev_find_get_or_add(): no memory\n");
1672 return -ENOMEM;
1673 }
1674 clt_path->s.dev_ref = 1;
1675 query_fast_reg_mode(clt_path);
1676 wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
1677 /*
1678 * Two (request + registration) completions for send
1679 * Two for recv if always_invalidate is set on server
1680 * or one for recv.
1681 * + 2 for drain and heartbeat
1682 * in case qp gets into error state.
1683 */
1684 max_send_wr =
1685 min_t(int, wr_limit, SERVICE_CON_QUEUE_DEPTH * 2 + 2);
1686 max_recv_wr = max_send_wr;
1687 } else {
1688 /*
1689 * Here we assume that session members are correctly set.
1690 * This is always true if user connection (cid == 0) is
1691 * established first.
1692 */
1693 if (WARN_ON(!clt_path->s.dev))
1694 return -EINVAL;
1695 if (WARN_ON(!clt_path->queue_depth))
1696 return -EINVAL;
1697
1698 wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
1699 /* Shared between connections */
1700 clt_path->s.dev_ref++;
1701 max_send_wr = min_t(int, wr_limit,
1702 /* QD * (REQ + RSP + FR REGS or INVS) + drain */
1703 clt_path->queue_depth * 4 + 1);
1704 max_recv_wr = min_t(int, wr_limit,
1705 clt_path->queue_depth * 3 + 1);
1706 max_send_sge = 2;
1707 }
1708 atomic_set(&con->c.sq_wr_avail, max_send_wr);
1709 cq_num = max_send_wr + max_recv_wr;
1710 /* alloc iu to recv new rkey reply when server reports flags set */
1711 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
1712 con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp),
1713 GFP_KERNEL,
1714 clt_path->s.dev->ib_dev,
1715 DMA_FROM_DEVICE,
1716 rtrs_clt_rdma_done);
1717 if (!con->rsp_ius)
1718 return -ENOMEM;
1719 con->queue_num = cq_num;
1720 }
1721 cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors;
1722 if (con->c.cid >= clt_path->s.irq_con_num)
1723 err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
1724 cq_vector, cq_num, max_send_wr,
1725 max_recv_wr, IB_POLL_DIRECT);
1726 else
1727 err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
1728 cq_vector, cq_num, max_send_wr,
1729 max_recv_wr, IB_POLL_SOFTIRQ);
1730 /*
1731 * In case of error we do not bother to clean previous allocations,
1732 * since destroy_con_cq_qp() must be called.
1733 */
1734 return err;
1735 }
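
/*
 * Example of the WR budgeting above (illustrative numbers): for an IO
 * connection with queue_depth = 128 and a sufficiently large max_qp_wr,
 * the send queue is sized for 128 * 4 + 1 = 513 WRs (request, response
 * and FR registration or invalidation per request, plus one drain WR)
 * and the receive queue for 128 * 3 + 1 = 385 WRs; cq_num is the sum
 * of the two.
 */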
1736
1737 static void destroy_con_cq_qp(struct rtrs_clt_con *con)
1738 {
1739 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1740
1741 /*
1742 * Be careful here: destroy_con_cq_qp() can be called even
1743 * create_con_cq_qp() failed, see comments there.
1744 */
1745 lockdep_assert_held(&con->con_mutex);
1746 rtrs_cq_qp_destroy(&con->c);
1747 if (con->rsp_ius) {
1748 rtrs_iu_free(con->rsp_ius, clt_path->s.dev->ib_dev,
1749 con->queue_num);
1750 con->rsp_ius = NULL;
1751 con->queue_num = 0;
1752 }
1753 if (clt_path->s.dev_ref && !--clt_path->s.dev_ref) {
1754 rtrs_ib_dev_put(clt_path->s.dev);
1755 clt_path->s.dev = NULL;
1756 }
1757 }
1758
1759 static void stop_cm(struct rtrs_clt_con *con)
1760 {
1761 rdma_disconnect(con->c.cm_id);
1762 if (con->c.qp)
1763 ib_drain_qp(con->c.qp);
1764 }
1765
1766 static void destroy_cm(struct rtrs_clt_con *con)
1767 {
1768 rdma_destroy_id(con->c.cm_id);
1769 con->c.cm_id = NULL;
1770 }
1771
1772 static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
1773 {
1774 struct rtrs_path *s = con->c.path;
1775 int err;
1776
1777 mutex_lock(&con->con_mutex);
1778 err = create_con_cq_qp(con);
1779 mutex_unlock(&con->con_mutex);
1780 if (err) {
1781 rtrs_err(s, "create_con_cq_qp(), err: %pe\n", ERR_PTR(err));
1782 return err;
1783 }
1784 err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS);
1785 if (err)
1786 rtrs_err(s, "Resolving route failed, err: %pe\n", ERR_PTR(err));
1787
1788 return err;
1789 }
1790
1791 static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
1792 {
1793 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1794 struct rtrs_clt_sess *clt = clt_path->clt;
1795 struct rtrs_msg_conn_req msg;
1796 struct rdma_conn_param param;
1797
1798 int err;
1799
1800 param = (struct rdma_conn_param) {
1801 .retry_count = 7,
1802 .rnr_retry_count = 7,
1803 .private_data = &msg,
1804 .private_data_len = sizeof(msg),
1805 };
1806
1807 msg = (struct rtrs_msg_conn_req) {
1808 .magic = cpu_to_le16(RTRS_MAGIC),
1809 .version = cpu_to_le16(RTRS_PROTO_VER),
1810 .cid = cpu_to_le16(con->c.cid),
1811 .cid_num = cpu_to_le16(clt_path->s.con_num),
1812 .recon_cnt = cpu_to_le16(clt_path->s.recon_cnt),
1813 };
1814 msg.first_conn = clt_path->for_new_clt ? FIRST_CONN : 0;
1815 uuid_copy(&msg.sess_uuid, &clt_path->s.uuid);
1816 uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
1817
1818 err = rdma_connect_locked(con->c.cm_id, &param);
1819 if (err)
1820 rtrs_err(clt, "rdma_connect_locked(): %pe\n", ERR_PTR(err));
1821
1822 return err;
1823 }
1824
1825 static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
1826 struct rdma_cm_event *ev)
1827 {
1828 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1829 struct rtrs_clt_sess *clt = clt_path->clt;
1830 const struct rtrs_msg_conn_rsp *msg;
1831 u16 version, queue_depth;
1832 int errno;
1833 u8 len;
1834
1835 msg = ev->param.conn.private_data;
1836 len = ev->param.conn.private_data_len;
1837 if (len < sizeof(*msg)) {
1838 rtrs_err(clt, "Invalid RTRS connection response\n");
1839 return -ECONNRESET;
1840 }
1841 if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
1842 rtrs_err(clt, "Invalid RTRS magic\n");
1843 return -ECONNRESET;
1844 }
1845 version = le16_to_cpu(msg->version);
1846 if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
1847 rtrs_err(clt, "Unsupported major RTRS version: %d, expected %d\n",
1848 version >> 8, RTRS_PROTO_VER_MAJOR);
1849 return -ECONNRESET;
1850 }
1851 errno = le16_to_cpu(msg->errno);
1852 if (errno) {
1853 rtrs_err(clt, "Invalid RTRS message: errno %pe\n",
1854 ERR_PTR(errno));
1855 return -ECONNRESET;
1856 }
1857 if (con->c.cid == 0) {
1858 queue_depth = le16_to_cpu(msg->queue_depth);
1859
1860 if (clt_path->queue_depth > 0 && queue_depth != clt_path->queue_depth) {
1861 rtrs_err(clt, "Error: queue depth changed\n");
1862
1863 /*
1864 * Stop any more reconnection attempts
1865 */
1866 clt_path->reconnect_attempts = -1;
1867 rtrs_err(clt,
1868 "Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n");
1869 return -ECONNRESET;
1870 }
1871
1872 if (!clt_path->rbufs) {
1873 clt_path->rbufs = kzalloc_objs(*clt_path->rbufs,
1874 queue_depth);
1875 if (!clt_path->rbufs)
1876 return -ENOMEM;
1877 }
1878 clt_path->queue_depth = queue_depth;
1879 clt_path->s.signal_interval = min_not_zero(queue_depth,
1880 (unsigned short) SERVICE_CON_QUEUE_DEPTH);
1881 clt_path->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
1882 clt_path->max_io_size = le32_to_cpu(msg->max_io_size);
1883 clt_path->flags = le32_to_cpu(msg->flags);
1884 clt_path->chunk_size = clt_path->max_io_size + clt_path->max_hdr_size;
1885
1886 /*
1887 * Global IO size is always a minimum.
1888 		 * If the server sends us a slightly higher value during a
1889 		 * reconnection, the client does not care and uses the cached minimum.
1890 *
1891 		 * Since several sessions (paths) can be re-establishing
1892 		 * connections in parallel, take the lock.
1893 */
1894 mutex_lock(&clt->paths_mutex);
1895 clt->queue_depth = clt_path->queue_depth;
1896 clt->max_io_size = min_not_zero(clt_path->max_io_size,
1897 clt->max_io_size);
1898 mutex_unlock(&clt->paths_mutex);
1899
1900 /*
1901 * Cache the hca_port and hca_name for sysfs
1902 */
1903 clt_path->hca_port = con->c.cm_id->port_num;
1904 		scnprintf(clt_path->hca_name, sizeof(clt_path->hca_name), "%s",
1905 			  clt_path->s.dev->ib_dev->name);
1906 clt_path->s.src_addr = con->c.cm_id->route.addr.src_addr;
1907 /* set for_new_clt, to allow future reconnect on any path */
1908 clt_path->for_new_clt = 1;
1909 }
1910
1911 return 0;
1912 }
1913
flag_success_on_conn(struct rtrs_clt_con * con)1914 static inline void flag_success_on_conn(struct rtrs_clt_con *con)
1915 {
1916 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1917
1918 atomic_inc(&clt_path->connected_cnt);
1919 con->cm_err = 1;
1920 }
1921
rtrs_rdma_conn_rejected(struct rtrs_clt_con * con,struct rdma_cm_event * ev)1922 static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
1923 struct rdma_cm_event *ev)
1924 {
1925 struct rtrs_path *s = con->c.path;
1926 const struct rtrs_msg_conn_rsp *msg;
1927 const char *rej_msg;
1928 int status, errno = -ECONNRESET;
1929 u8 data_len;
1930
1931 status = ev->status;
1932 rej_msg = rdma_reject_msg(con->c.cm_id, status);
1933 msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len);
1934
1935 if (msg && data_len >= sizeof(*msg)) {
1936 errno = (int16_t)le16_to_cpu(msg->errno);
1937 if (errno == -EBUSY)
1938 rtrs_err(s,
1939 				 "Previous session still exists on the server, please reconnect later\n");
1940 else
1941 rtrs_err(s,
1942 "Connect rejected: status %d (%s), rtrs errno %pe\n",
1943 status, rej_msg, ERR_PTR(errno));
1944 } else {
1945 rtrs_err(s,
1946 "Connect rejected but with malformed message: status %d (%s)\n",
1947 status, rej_msg);
1948 }
1949
1950 return errno;
1951 }
1952
rtrs_clt_close_conns(struct rtrs_clt_path * clt_path,bool wait)1953 void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait)
1954 {
1955 trace_rtrs_clt_close_conns(clt_path);
1956
1957 if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSING, NULL))
1958 queue_work(rtrs_wq, &clt_path->close_work);
1959 if (wait)
1960 flush_work(&clt_path->close_work);
1961 }
1962
flag_error_on_conn(struct rtrs_clt_con * con,int cm_err)1963 static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
1964 {
1965 if (con->cm_err == 1) {
1966 struct rtrs_clt_path *clt_path;
1967
1968 clt_path = to_clt_path(con->c.path);
1969 if (atomic_dec_and_test(&clt_path->connected_cnt))
1971 wake_up(&clt_path->state_wq);
1972 }
1973 con->cm_err = cm_err;
1974 }
1975
rtrs_clt_rdma_cm_handler(struct rdma_cm_id * cm_id,struct rdma_cm_event * ev)1976 static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
1977 struct rdma_cm_event *ev)
1978 {
1979 struct rtrs_clt_con *con = cm_id->context;
1980 struct rtrs_path *s = con->c.path;
1981 struct rtrs_clt_path *clt_path = to_clt_path(s);
1982 int cm_err = 0;
1983
1984 switch (ev->event) {
1985 case RDMA_CM_EVENT_ADDR_RESOLVED:
1986 cm_err = rtrs_rdma_addr_resolved(con);
1987 break;
1988 case RDMA_CM_EVENT_ROUTE_RESOLVED:
1989 cm_err = rtrs_rdma_route_resolved(con);
1990 break;
1991 case RDMA_CM_EVENT_ESTABLISHED:
1992 cm_err = rtrs_rdma_conn_established(con, ev);
1993 if (!cm_err) {
1994 /*
1995 * Report success and wake up. Here we abuse state_wq,
1996 * i.e. wake up without state change, but we set cm_err.
1997 */
1998 flag_success_on_conn(con);
1999 wake_up(&clt_path->state_wq);
2000 return 0;
2001 }
2002 break;
2003 case RDMA_CM_EVENT_REJECTED:
2004 cm_err = rtrs_rdma_conn_rejected(con, ev);
2005 break;
2006 case RDMA_CM_EVENT_DISCONNECTED:
2007 /* No message for disconnecting */
2008 cm_err = -ECONNRESET;
2009 break;
2010 case RDMA_CM_EVENT_CONNECT_ERROR:
2011 case RDMA_CM_EVENT_UNREACHABLE:
2012 case RDMA_CM_EVENT_ADDR_CHANGE:
2013 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2014 if (ev->status < 0) {
2015 rtrs_wrn(s, "CM error (CM event: %s, err: %pe)\n",
2016 rdma_event_msg(ev->event), ERR_PTR(ev->status));
2017 } else if (ev->status > 0) {
2018 rtrs_wrn(s, "CM error (CM event: %s, err: %s)\n",
2019 rdma_event_msg(ev->event),
2020 rdma_reject_msg(cm_id, ev->status));
2021 }
2022 cm_err = -ECONNRESET;
2023 break;
2024 case RDMA_CM_EVENT_ADDR_ERROR:
2025 case RDMA_CM_EVENT_ROUTE_ERROR:
2026 if (ev->status < 0) {
2027 rtrs_wrn(s, "CM error (CM event: %s, err: %pe)\n",
2028 rdma_event_msg(ev->event),
2029 ERR_PTR(ev->status));
2030 } else if (ev->status > 0) {
2031 rtrs_wrn(s, "CM error (CM event: %s, err: %s)\n",
2032 rdma_event_msg(ev->event),
2033 rdma_reject_msg(cm_id, ev->status));
2034 }
2035 cm_err = -EHOSTUNREACH;
2036 break;
2037 case RDMA_CM_EVENT_DEVICE_REMOVAL:
2038 /*
2039 * Device removal is a special case. Queue close and return 0.
2040 */
2041 if (ev->status < 0) {
2042 rtrs_wrn_rl(s, "CM event: %s, status: %pe\n",
2043 rdma_event_msg(ev->event),
2044 ERR_PTR(ev->status));
2045 } else if (ev->status > 0) {
2046 rtrs_wrn_rl(s, "CM event: %s, status: %s\n",
2047 rdma_event_msg(ev->event),
2048 rdma_reject_msg(cm_id, ev->status));
2049 }
2050 rtrs_clt_close_conns(clt_path, false);
2051 return 0;
2052 default:
2053 if (ev->status < 0) {
2054 rtrs_err(s, "Unexpected RDMA CM error (CM event: %s, err: %pe)\n",
2055 rdma_event_msg(ev->event), ERR_PTR(ev->status));
2056 } else if (ev->status > 0) {
2057 rtrs_err(s, "Unexpected RDMA CM error (CM event: %s, err: %s)\n",
2058 rdma_event_msg(ev->event),
2059 rdma_reject_msg(cm_id, ev->status));
2060 }
2061 cm_err = -ECONNRESET;
2062 break;
2063 }
2064
2065 if (cm_err) {
2066 /*
2067 * cm error makes sense only on connection establishing,
2068 * in other cases we rely on normal procedure of reconnecting.
2069 */
2070 flag_error_on_conn(con, cm_err);
2071 rtrs_rdma_error_recovery(con);
2072 }
2073
2074 return 0;
2075 }
2076
2077 /* The caller should do the cleanup in case of error */
create_cm(struct rtrs_clt_con * con)2078 static int create_cm(struct rtrs_clt_con *con)
2079 {
2080 struct rtrs_path *s = con->c.path;
2081 struct rtrs_clt_path *clt_path = to_clt_path(s);
2082 struct rdma_cm_id *cm_id;
2083 int err;
2084
2085 cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
2086 clt_path->s.dst_addr.ss_family == AF_IB ?
2087 RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC);
2088 if (IS_ERR(cm_id)) {
2089 rtrs_err(s, "Failed to create CM ID, err: %pe\n", cm_id);
2090 return PTR_ERR(cm_id);
2091 }
2092 con->c.cm_id = cm_id;
2093 con->cm_err = 0;
2094 /* allow the port to be reused */
2095 err = rdma_set_reuseaddr(cm_id, 1);
2096 if (err != 0) {
2097 rtrs_err(s, "Set address reuse failed, err: %pe\n", ERR_PTR(err));
2098 return err;
2099 }
2100 err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr,
2101 (struct sockaddr *)&clt_path->s.dst_addr,
2102 RTRS_CONNECT_TIMEOUT_MS);
2103 if (err) {
2104 rtrs_err(s, "Failed to resolve address, err: %pe\n", ERR_PTR(err));
2105 return err;
2106 }
2107 /*
2108 	 * Combine the connection status and session events. We need to wait
2109 	 * for either of two cases: cm_err holds something meaningful, or the
2110 	 * session state was changed to an error state by device removal.
2111 */
2112 err = wait_event_interruptible_timeout(
2113 clt_path->state_wq,
2114 con->cm_err || clt_path->state != RTRS_CLT_CONNECTING,
2115 msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
2116 if (err == 0 || err == -ERESTARTSYS) {
2117 if (err == 0)
2118 err = -ETIMEDOUT;
2119 		/* Timed out or interrupted */
2120 return err;
2121 }
2122 if (con->cm_err < 0)
2123 return con->cm_err;
2124 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING)
2125 /* Device removal */
2126 return -ECONNABORTED;
2127
2128 return 0;
2129 }
2130
rtrs_clt_path_up(struct rtrs_clt_path * clt_path)2131 static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path)
2132 {
2133 struct rtrs_clt_sess *clt = clt_path->clt;
2134 int up;
2135
2136 /*
2137 * We can fire RECONNECTED event only when all paths were
2138 * connected on rtrs_clt_open(), then each was disconnected
2139 	 * and the first one connected again. That is why we play this
2140 	 * game with the counter value.
2141 */
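	/*
	 * Worked example with two configured paths: paths_up starts at
	 * MAX_PATHS_NUM in alloc_clt(); the first path going up makes it
	 * MAX_PATHS_NUM + 1, the second MAX_PATHS_NUM + 2 == MAX_PATHS_NUM +
	 * paths_num, at which point the counter is clamped to paths_num.
	 * Only after all paths have gone down to zero does a newly connected
	 * path see up == 1 and fire RTRS_CLT_LINK_EV_RECONNECTED.
	 */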
2142
2143 mutex_lock(&clt->paths_ev_mutex);
2144 up = ++clt->paths_up;
2145 /*
2146 * Here it is safe to access paths num directly since up counter
2147 * is greater than MAX_PATHS_NUM only while rtrs_clt_open() is
2148 * in progress, thus paths removals are impossible.
2149 */
2150 if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num)
2151 clt->paths_up = clt->paths_num;
2152 else if (up == 1)
2153 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED);
2154 mutex_unlock(&clt->paths_ev_mutex);
2155
2156 /* Mark session as established */
2157 clt_path->established = true;
2158 clt_path->reconnect_attempts = 0;
2159 clt_path->stats->reconnects.successful_cnt++;
2160 }
2161
rtrs_clt_path_down(struct rtrs_clt_path * clt_path)2162 static void rtrs_clt_path_down(struct rtrs_clt_path *clt_path)
2163 {
2164 struct rtrs_clt_sess *clt = clt_path->clt;
2165
2166 if (!clt_path->established)
2167 return;
2168
2169 clt_path->established = false;
2170 mutex_lock(&clt->paths_ev_mutex);
2171 WARN_ON(!clt->paths_up);
2172 if (--clt->paths_up == 0)
2173 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED);
2174 mutex_unlock(&clt->paths_ev_mutex);
2175 }
2176
rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path * clt_path)2177 static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path)
2178 {
2179 struct rtrs_clt_con *con;
2180 unsigned int cid;
2181
2182 WARN_ON(READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED);
2183
2184 /*
2185 * Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes
2186 * exactly in between. Start destroying after it finishes.
2187 */
2188 mutex_lock(&clt_path->init_mutex);
2189 mutex_unlock(&clt_path->init_mutex);
2190
2191 /*
2192 * All IO paths must observe !CONNECTED state before we
2193 * free everything.
2194 */
2195 synchronize_rcu();
2196
2197 rtrs_stop_hb(&clt_path->s);
2198
2199 /*
2200 	 * The order is utterly crucial: first disconnect and complete all
2201 	 * rdma requests with an error (thus setting in_use=false for them),
2202 	 * then fail outstanding requests checking in_use for each, and
2203 	 * eventually notify the upper layer about the session disconnection.
2204 */
2205
2206 for (cid = 0; cid < clt_path->s.con_num; cid++) {
2207 if (!clt_path->s.con[cid])
2208 break;
2209 con = to_clt_con(clt_path->s.con[cid]);
2210 stop_cm(con);
2211 }
2212 fail_all_outstanding_reqs(clt_path);
2213 free_path_reqs(clt_path);
2214 rtrs_clt_path_down(clt_path);
2215
2216 /*
2217 * Wait for graceful shutdown, namely when peer side invokes
2218 * rdma_disconnect(). 'connected_cnt' is decremented only on
2219 	 * CM events, thus if the other side has crashed and hb has detected
2220 	 * something is wrong, we will be stuck here for exactly the timeout,
2221 	 * since CM does not fire anything. That is fine, we are not in a
2222 	 * hurry.
2223 */
2224 wait_event_timeout(clt_path->state_wq,
2225 !atomic_read(&clt_path->connected_cnt),
2226 msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
2227
2228 for (cid = 0; cid < clt_path->s.con_num; cid++) {
2229 if (!clt_path->s.con[cid])
2230 break;
2231 con = to_clt_con(clt_path->s.con[cid]);
2232 mutex_lock(&con->con_mutex);
2233 destroy_con_cq_qp(con);
2234 mutex_unlock(&con->con_mutex);
2235 destroy_cm(con);
2236 destroy_con(con);
2237 }
2238 }
2239
rtrs_clt_remove_path_from_arr(struct rtrs_clt_path * clt_path)2240 static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
2241 {
2242 struct rtrs_clt_sess *clt = clt_path->clt;
2243 struct rtrs_clt_path *next;
2244 bool wait_for_grace = false;
2245 int cpu;
2246
2247 mutex_lock(&clt->paths_mutex);
2248 list_del_rcu(&clt_path->s.entry);
2249
2250 /* Make sure everybody observes path removal. */
2251 synchronize_rcu();
2252
2253 /*
2254 * At this point nobody sees @sess in the list, but still we have
2255 * dangling pointer @pcpu_path which _can_ point to @sess. Since
2256 * nobody can observe @sess in the list, we guarantee that IO path
2257 * will not assign @sess to @pcpu_path, i.e. @pcpu_path can be equal
2258 * to @sess, but can never again become @sess.
2259 */
2260
2261 /*
2262 	 * Decrement the paths number only after the grace period, because
2263 	 * a caller of do_each_path() must first observe the list without
2264 	 * the path and only then the decremented paths number.
2265 *
2266 * Otherwise there can be the following situation:
2267 * o Two paths exist and IO is coming.
2268 * o One path is removed:
2269 * CPU#0 CPU#1
2270 * do_each_path(): rtrs_clt_remove_path_from_arr():
2271 * path = get_next_path()
2272 * ^^^ list_del_rcu(path)
2273 * [!CONNECTED path] clt->paths_num--
2274 * ^^^^^^^^^
2275 * load clt->paths_num from 2 to 1
2276 * ^^^^^^^^^
2277 * sees 1
2278 *
2279 * path is observed as !CONNECTED, but do_each_path() loop
2280 * ends, because expression i < clt->paths_num is false.
2281 */
2282 clt->paths_num--;
2283
2284 /*
2285 	 * Get the @next path after the current @sess which is going to be
2286 	 * removed. If @sess is the last element, then @next is NULL.
2287 */
2288 rcu_read_lock();
2289 next = rtrs_clt_get_next_path_or_null(&clt->paths_list, clt_path);
2290 rcu_read_unlock();
2291
2292 /*
2293 * @pcpu paths can still point to the path which is going to be
2294 * removed, so change the pointer manually.
2295 */
2296 for_each_possible_cpu(cpu) {
2297 struct rtrs_clt_path __rcu **ppcpu_path;
2298
2299 ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
2300 if (rcu_dereference_protected(*ppcpu_path,
2301 lockdep_is_held(&clt->paths_mutex)) != clt_path)
2302 /*
2303 * synchronize_rcu() was called just after deleting
2304 * entry from the list, thus IO code path cannot
2305 * change pointer back to the pointer which is going
2306 * to be removed, we are safe here.
2307 */
2308 continue;
2309
2310 /*
2311 * We race with IO code path, which also changes pointer,
2312 * thus we have to be careful not to overwrite it.
2313 */
2314 if (try_cmpxchg((struct rtrs_clt_path **)ppcpu_path, &clt_path,
2315 next))
2316 /*
2317 * @ppcpu_path was successfully replaced with @next,
2318 * that means that someone could also pick up the
2319 * @sess and dereferencing it right now, so wait for
2320 * a grace period is required.
2321 */
2322 wait_for_grace = true;
2323 }
2324 if (wait_for_grace)
2325 synchronize_rcu();
2326
2327 mutex_unlock(&clt->paths_mutex);
2328 }
2329
rtrs_clt_add_path_to_arr(struct rtrs_clt_path * clt_path)2330 static void rtrs_clt_add_path_to_arr(struct rtrs_clt_path *clt_path)
2331 {
2332 struct rtrs_clt_sess *clt = clt_path->clt;
2333
2334 mutex_lock(&clt->paths_mutex);
2335 clt->paths_num++;
2336
2337 list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
2338 mutex_unlock(&clt->paths_mutex);
2339 }
2340
rtrs_clt_close_work(struct work_struct * work)2341 static void rtrs_clt_close_work(struct work_struct *work)
2342 {
2343 struct rtrs_clt_path *clt_path;
2344
2345 clt_path = container_of(work, struct rtrs_clt_path, close_work);
2346
2347 cancel_work_sync(&clt_path->err_recovery_work);
2348 cancel_delayed_work_sync(&clt_path->reconnect_dwork);
2349 rtrs_clt_stop_and_destroy_conns(clt_path);
2350 rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSED, NULL);
2351 }
2352
init_conns(struct rtrs_clt_path * clt_path)2353 static int init_conns(struct rtrs_clt_path *clt_path)
2354 {
2355 unsigned int cid;
2356 int err, i;
2357
2358 /*
2359 	 * On every new set of session connections increase the reconnect
2360 	 * counter to avoid clashes with previous sessions that are not yet
2361 	 * closed on the server side.
2362 */
2363 clt_path->s.recon_cnt++;
2364
2365 /* Establish all RDMA connections */
2366 for (cid = 0; cid < clt_path->s.con_num; cid++) {
2367 err = create_con(clt_path, cid);
2368 if (err)
2369 goto destroy;
2370
2371 err = create_cm(to_clt_con(clt_path->s.con[cid]));
2372 if (err)
2373 goto destroy;
2374 }
2375
2376 /*
2377 * Set the cid to con_num - 1, since if we fail later, we want to stay in bounds.
2378 */
2379 cid = clt_path->s.con_num - 1;
2380
2381 err = alloc_path_reqs(clt_path);
2382 if (err)
2383 goto destroy;
2384
2385 return 0;
2386
2387 destroy:
2388 	/* Make sure we do the cleanup in the order the connections were created */
2389 for (i = 0; i <= cid; i++) {
2390 struct rtrs_clt_con *con;
2391
2392 if (!clt_path->s.con[i])
2393 break;
2394
2395 con = to_clt_con(clt_path->s.con[i]);
2396 if (con->c.cm_id) {
2397 stop_cm(con);
2398 mutex_lock(&con->con_mutex);
2399 destroy_con_cq_qp(con);
2400 mutex_unlock(&con->con_mutex);
2401 destroy_cm(con);
2402 }
2403 destroy_con(con);
2404 }
2405 /*
2406 	 * If we've never taken the async path and got an error, say,
2407 	 * doing rdma_resolve_addr(), switch to the CONNECTING_ERR state
2408 	 * manually to keep reconnecting.
2409 */
2410 rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
2411
2412 return err;
2413 }
2414
rtrs_clt_info_req_done(struct ib_cq * cq,struct ib_wc * wc)2415 static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
2416 {
2417 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
2418 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
2419 struct rtrs_iu *iu;
2420
2421 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
2422 rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
2423
2424 if (wc->status != IB_WC_SUCCESS) {
2425 rtrs_err(clt_path->clt, "Path info request send failed: %s\n",
2426 ib_wc_status_msg(wc->status));
2427 rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
2428 return;
2429 }
2430
2431 rtrs_clt_update_wc_stats(con);
2432 }
2433
process_info_rsp(struct rtrs_clt_path * clt_path,const struct rtrs_msg_info_rsp * msg)2434 static int process_info_rsp(struct rtrs_clt_path *clt_path,
2435 const struct rtrs_msg_info_rsp *msg)
2436 {
2437 unsigned int sg_cnt, total_len;
2438 int i, sgi;
2439
2440 sg_cnt = le16_to_cpu(msg->sg_cnt);
2441 if (!sg_cnt || (clt_path->queue_depth % sg_cnt)) {
2442 rtrs_err(clt_path->clt,
2443 			 "Incorrect sg_cnt %d, queue depth is not a multiple of it\n",
2444 sg_cnt);
2445 return -EINVAL;
2446 }
2447
2448 /*
2449 * Check if IB immediate data size is enough to hold the mem_id and
2450 * the offset inside the memory chunk.
2451 */
2452 if ((ilog2(sg_cnt - 1) + 1) + (ilog2(clt_path->chunk_size - 1) + 1) >
2453 MAX_IMM_PAYL_BITS) {
2454 rtrs_err(clt_path->clt,
2455 "RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n",
2456 MAX_IMM_PAYL_BITS, sg_cnt, clt_path->chunk_size);
2457 return -EINVAL;
2458 }
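	/*
	 * For illustration with made-up numbers: sg_cnt == 128 buffers need
	 * ilog2(127) + 1 == 7 bits for the mem_id, and a chunk_size of
	 * 132 KiB needs ilog2(135167) + 1 == 18 bits for the offset inside
	 * a chunk, i.e. 25 bits in total, which must fit in
	 * MAX_IMM_PAYL_BITS.
	 */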
2459 total_len = 0;
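	/*
	 * Each descriptor below covers a contiguous server-side region that
	 * is cut into chunk_size pieces, so after the loop every rbufs[i]
	 * holds the address and rkey of exactly one chunk.
	 */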
2460 for (sgi = 0, i = 0; sgi < sg_cnt && i < clt_path->queue_depth; sgi++) {
2461 const struct rtrs_sg_desc *desc = &msg->desc[sgi];
2462 u32 len, rkey;
2463 u64 addr;
2464
2465 addr = le64_to_cpu(desc->addr);
2466 rkey = le32_to_cpu(desc->key);
2467 len = le32_to_cpu(desc->len);
2468
2469 total_len += len;
2470
2471 if (!len || (len % clt_path->chunk_size)) {
2472 rtrs_err(clt_path->clt, "Incorrect [%d].len %d\n",
2473 sgi,
2474 len);
2475 return -EINVAL;
2476 }
2477 for ( ; len && i < clt_path->queue_depth; i++) {
2478 clt_path->rbufs[i].addr = addr;
2479 clt_path->rbufs[i].rkey = rkey;
2480
2481 len -= clt_path->chunk_size;
2482 addr += clt_path->chunk_size;
2483 }
2484 }
2485 /* Sanity check */
2486 if (sgi != sg_cnt || i != clt_path->queue_depth) {
2487 rtrs_err(clt_path->clt,
2488 "Incorrect sg vector, not fully mapped\n");
2489 return -EINVAL;
2490 }
2491 if (total_len != clt_path->chunk_size * clt_path->queue_depth) {
2492 rtrs_err(clt_path->clt, "Incorrect total_len %d\n", total_len);
2493 return -EINVAL;
2494 }
2495
2496 return 0;
2497 }
2498
rtrs_clt_info_rsp_done(struct ib_cq * cq,struct ib_wc * wc)2499 static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
2500 {
2501 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
2502 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
2503 struct rtrs_msg_info_rsp *msg;
2504 enum rtrs_clt_state state;
2505 struct rtrs_iu *iu;
2506 size_t rx_sz;
2507 int err;
2508
2509 state = RTRS_CLT_CONNECTING_ERR;
2510
2511 WARN_ON(con->c.cid);
2512 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
2513 if (wc->status != IB_WC_SUCCESS) {
2514 rtrs_err(clt_path->clt, "Path info response recv failed: %s\n",
2515 ib_wc_status_msg(wc->status));
2516 goto out;
2517 }
2518 WARN_ON(wc->opcode != IB_WC_RECV);
2519
2520 if (wc->byte_len < sizeof(*msg)) {
2521 rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
2522 wc->byte_len);
2523 goto out;
2524 }
2525 ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
2526 iu->size, DMA_FROM_DEVICE);
2527 msg = iu->buf;
2528 if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP) {
2529 rtrs_err(clt_path->clt, "Path info response is malformed: type %d\n",
2530 le16_to_cpu(msg->type));
2531 goto out;
2532 }
2533 rx_sz = sizeof(*msg);
2534 rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt);
2535 if (wc->byte_len < rx_sz) {
2536 rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
2537 wc->byte_len);
2538 goto out;
2539 }
2540 err = process_info_rsp(clt_path, msg);
2541 if (err)
2542 goto out;
2543
2544 err = post_recv_path(clt_path);
2545 if (err)
2546 goto out;
2547
2548 state = RTRS_CLT_CONNECTED;
2549
2550 out:
2551 rtrs_clt_update_wc_stats(con);
2552 rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
2553 rtrs_clt_change_state_get_old(clt_path, state, NULL);
2554 }
2555
rtrs_send_path_info(struct rtrs_clt_path * clt_path)2556 static int rtrs_send_path_info(struct rtrs_clt_path *clt_path)
2557 {
2558 struct rtrs_clt_con *usr_con = to_clt_con(clt_path->s.con[0]);
2559 struct rtrs_msg_info_req *msg;
2560 struct rtrs_iu *tx_iu, *rx_iu;
2561 size_t rx_sz;
2562 int err;
2563
2564 rx_sz = sizeof(struct rtrs_msg_info_rsp);
2565 rx_sz += sizeof(struct rtrs_sg_desc) * clt_path->queue_depth;
2566
2567 tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL,
2568 clt_path->s.dev->ib_dev, DMA_TO_DEVICE,
2569 rtrs_clt_info_req_done);
2570 rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, clt_path->s.dev->ib_dev,
2571 DMA_FROM_DEVICE, rtrs_clt_info_rsp_done);
2572 if (!tx_iu || !rx_iu) {
2573 err = -ENOMEM;
2574 goto out;
2575 }
2576 /* Prepare for getting info response */
2577 err = rtrs_iu_post_recv(&usr_con->c, rx_iu);
2578 if (err) {
2579 rtrs_err(clt_path->clt, "rtrs_iu_post_recv(), err: %pe\n", ERR_PTR(err));
2580 goto out;
2581 }
2582 rx_iu = NULL;
2583
2584 msg = tx_iu->buf;
2585 msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ);
2586 memcpy(msg->pathname, clt_path->s.sessname, sizeof(msg->pathname));
2587
2588 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
2589 tx_iu->dma_addr,
2590 tx_iu->size, DMA_TO_DEVICE);
2591
2592 /* Send info request */
2593 err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL);
2594 if (err) {
2595 rtrs_err(clt_path->clt, "rtrs_iu_post_send(), err: %pe\n", ERR_PTR(err));
2596 goto out;
2597 }
2598 tx_iu = NULL;
2599
2600 /* Wait for state change */
2601 wait_event_interruptible_timeout(clt_path->state_wq,
2602 clt_path->state != RTRS_CLT_CONNECTING,
2603 msecs_to_jiffies(
2604 RTRS_CONNECT_TIMEOUT_MS));
2605 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) {
2606 if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTING_ERR)
2607 err = -ECONNRESET;
2608 else
2609 err = -ETIMEDOUT;
2610 }
2611
2612 out:
2613 if (tx_iu)
2614 rtrs_iu_free(tx_iu, clt_path->s.dev->ib_dev, 1);
2615 if (rx_iu)
2616 rtrs_iu_free(rx_iu, clt_path->s.dev->ib_dev, 1);
2617 if (err)
2618 		/* If we've never taken the async path, e.g. because of alloc failures */
2619 rtrs_clt_change_state_get_old(clt_path,
2620 RTRS_CLT_CONNECTING_ERR, NULL);
2621
2622 return err;
2623 }
2624
2625 /**
2626 * init_path() - establishes all path connections and does handshake
2627 * @clt_path: client path.
2628  * In case of error a full close or reconnect procedure should be taken,
2629  * because the reconnect or close async works may already have been started.
2630 */
init_path(struct rtrs_clt_path * clt_path)2631 static int init_path(struct rtrs_clt_path *clt_path)
2632 {
2633 int err;
2634 char str[NAME_MAX];
2635 struct rtrs_addr path = {
2636 .src = &clt_path->s.src_addr,
2637 .dst = &clt_path->s.dst_addr,
2638 };
2639
2640 rtrs_addr_to_str(&path, str, sizeof(str));
2641
2642 mutex_lock(&clt_path->init_mutex);
2643 err = init_conns(clt_path);
2644 if (err) {
2645 rtrs_err(clt_path->clt,
2646 "init_conns() failed: err=%pe path=%s [%s:%u]\n",
2647 ERR_PTR(err), str, clt_path->hca_name, clt_path->hca_port);
2648 goto out;
2649 }
2650 err = rtrs_send_path_info(clt_path);
2651 if (err) {
2652 rtrs_err(clt_path->clt,
2653 "rtrs_send_path_info() failed: err=%pe path=%s [%s:%u]\n",
2654 ERR_PTR(err), str, clt_path->hca_name, clt_path->hca_port);
2655 goto out;
2656 }
2657 rtrs_clt_path_up(clt_path);
2658 rtrs_start_hb(&clt_path->s);
2659 out:
2660 mutex_unlock(&clt_path->init_mutex);
2661
2662 return err;
2663 }
2664
rtrs_clt_reconnect_work(struct work_struct * work)2665 static void rtrs_clt_reconnect_work(struct work_struct *work)
2666 {
2667 struct rtrs_clt_path *clt_path;
2668 struct rtrs_clt_sess *clt;
2669 int err;
2670
2671 clt_path = container_of(to_delayed_work(work), struct rtrs_clt_path,
2672 reconnect_dwork);
2673 clt = clt_path->clt;
2674
2675 trace_rtrs_clt_reconnect_work(clt_path);
2676
2677 if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING)
2678 return;
2679
2680 if (clt_path->reconnect_attempts >= clt->max_reconnect_attempts) {
2681 /* Close a path completely if max attempts is reached */
2682 rtrs_clt_close_conns(clt_path, false);
2683 return;
2684 }
2685 clt_path->reconnect_attempts++;
2686
2687 msleep(RTRS_RECONNECT_BACKOFF);
2688 if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING, NULL)) {
2689 err = init_path(clt_path);
2690 if (err)
2691 goto reconnect_again;
2692 }
2693
2694 return;
2695
2696 reconnect_again:
2697 if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_RECONNECTING, NULL)) {
2698 clt_path->stats->reconnects.fail_cnt++;
2699 queue_work(rtrs_wq, &clt_path->err_recovery_work);
2700 }
2701 }
2702
rtrs_clt_dev_release(struct device * dev)2703 static void rtrs_clt_dev_release(struct device *dev)
2704 {
2705 struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
2706 dev);
2707
2708 mutex_destroy(&clt->paths_ev_mutex);
2709 mutex_destroy(&clt->paths_mutex);
2710 kfree(clt);
2711 }
2712
alloc_clt(const char * sessname,size_t paths_num,u16 port,size_t pdu_sz,void * priv,void (* link_ev)(void * priv,enum rtrs_clt_link_ev ev),unsigned int reconnect_delay_sec,unsigned int max_reconnect_attempts)2713 static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num,
2714 u16 port, size_t pdu_sz, void *priv,
2715 void (*link_ev)(void *priv,
2716 enum rtrs_clt_link_ev ev),
2717 unsigned int reconnect_delay_sec,
2718 unsigned int max_reconnect_attempts)
2719 {
2720 struct rtrs_clt_sess *clt;
2721 int err;
2722
2723 if (!paths_num || paths_num > MAX_PATHS_NUM)
2724 return ERR_PTR(-EINVAL);
2725
2726 if (strlen(sessname) >= sizeof(clt->sessname))
2727 return ERR_PTR(-EINVAL);
2728
2729 clt = kzalloc_obj(*clt);
2730 if (!clt)
2731 return ERR_PTR(-ENOMEM);
2732
2733 clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path));
2734 if (!clt->pcpu_path) {
2735 kfree(clt);
2736 return ERR_PTR(-ENOMEM);
2737 }
2738
2739 clt->dev.class = &rtrs_clt_dev_class;
2740 clt->dev.release = rtrs_clt_dev_release;
2741 uuid_gen(&clt->paths_uuid);
2742 INIT_LIST_HEAD_RCU(&clt->paths_list);
2743 clt->paths_num = paths_num;
2744 clt->paths_up = MAX_PATHS_NUM;
2745 clt->port = port;
2746 clt->pdu_sz = pdu_sz;
2747 clt->max_segments = RTRS_MAX_SEGMENTS;
2748 clt->reconnect_delay_sec = reconnect_delay_sec;
2749 clt->max_reconnect_attempts = max_reconnect_attempts;
2750 clt->priv = priv;
2751 clt->link_ev = link_ev;
2752 clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
2753 strscpy(clt->sessname, sessname, sizeof(clt->sessname));
2754 init_waitqueue_head(&clt->permits_wait);
2755 mutex_init(&clt->paths_ev_mutex);
2756 mutex_init(&clt->paths_mutex);
2757 device_initialize(&clt->dev);
2758
2759 err = dev_set_name(&clt->dev, "%s", sessname);
2760 if (err)
2761 goto err_put;
2762
2763 /*
2764 * Suppress user space notification until
2765 * sysfs files are created
2766 */
2767 dev_set_uevent_suppress(&clt->dev, true);
2768 err = device_add(&clt->dev);
2769 if (err)
2770 goto err_put;
2771
2772 clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
2773 if (!clt->kobj_paths) {
2774 err = -ENOMEM;
2775 goto err_del;
2776 }
2777 err = rtrs_clt_create_sysfs_root_files(clt);
2778 if (err) {
2779 kobject_del(clt->kobj_paths);
2780 kobject_put(clt->kobj_paths);
2781 goto err_del;
2782 }
2783 dev_set_uevent_suppress(&clt->dev, false);
2784 kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
2785
2786 return clt;
2787 err_del:
2788 device_del(&clt->dev);
2789 err_put:
2790 free_percpu(clt->pcpu_path);
2791 put_device(&clt->dev);
2792 return ERR_PTR(err);
2793 }
2794
free_clt(struct rtrs_clt_sess * clt)2795 static void free_clt(struct rtrs_clt_sess *clt)
2796 {
2797 free_percpu(clt->pcpu_path);
2798
2799 /*
2800 	 * The release callback will free clt and destroy the mutexes on the last put
2801 */
2802 device_unregister(&clt->dev);
2803 }
2804
2805 /**
2806  * rtrs_clt_open() - Open a session to an RTRS server
2807 * @ops: holds the link event callback and the private pointer.
2808 * @pathname: name of the path to an RTRS server
2809 * @paths: Paths to be established defined by their src and dst addresses
2810 * @paths_num: Number of elements in the @paths array
2811 * @port: port to be used by the RTRS session
2812 * @pdu_sz: Size of extra payload which can be accessed after permit allocation.
2813 * @reconnect_delay_sec: time between reconnect tries
2814  * @max_reconnect_attempts: Number of times to reconnect on error before giving
2815  * up, 0 for disabled, -1 for forever
2816  * @nr_poll_queues: number of polling mode connections using the IB_POLL_DIRECT flag
2817 *
2818 * Starts session establishment with the rtrs_server. The function can block
2819 * up to ~2000ms before it returns.
2820 *
2821  * Return a valid pointer on success, otherwise an ERR_PTR()-encoded error.
2822 */
rtrs_clt_open(struct rtrs_clt_ops * ops,const char * pathname,const struct rtrs_addr * paths,size_t paths_num,u16 port,size_t pdu_sz,u8 reconnect_delay_sec,s16 max_reconnect_attempts,u32 nr_poll_queues)2823 struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops,
2824 const char *pathname,
2825 const struct rtrs_addr *paths,
2826 size_t paths_num, u16 port,
2827 size_t pdu_sz, u8 reconnect_delay_sec,
2828 s16 max_reconnect_attempts, u32 nr_poll_queues)
2829 {
2830 struct rtrs_clt_path *clt_path, *tmp;
2831 struct rtrs_clt_sess *clt;
2832 int err, i;
2833
2834 if (strchr(pathname, '/') || strchr(pathname, '.')) {
2835 		pr_err("pathname cannot contain '/' or '.'\n");
2836 err = -EINVAL;
2837 goto out;
2838 }
2839
2840 clt = alloc_clt(pathname, paths_num, port, pdu_sz, ops->priv,
2841 ops->link_ev,
2842 reconnect_delay_sec,
2843 max_reconnect_attempts);
2844 if (IS_ERR(clt)) {
2845 err = PTR_ERR(clt);
2846 goto out;
2847 }
2848 for (i = 0; i < paths_num; i++) {
2849 struct rtrs_clt_path *clt_path;
2850
2851 clt_path = alloc_path(clt, &paths[i], nr_cpu_ids,
2852 nr_poll_queues);
2853 if (IS_ERR(clt_path)) {
2854 err = PTR_ERR(clt_path);
2855 goto close_all_path;
2856 }
2857 if (!i)
2858 clt_path->for_new_clt = 1;
2859 list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
2860
2861 err = init_path(clt_path);
2862 if (err) {
2863 list_del_rcu(&clt_path->s.entry);
2864 rtrs_clt_close_conns(clt_path, true);
2865 free_percpu(clt_path->stats->pcpu_stats);
2866 kfree(clt_path->stats);
2867 free_path(clt_path);
2868 goto close_all_path;
2869 }
2870
2871 err = rtrs_clt_create_path_files(clt_path);
2872 if (err) {
2873 list_del_rcu(&clt_path->s.entry);
2874 rtrs_clt_close_conns(clt_path, true);
2875 free_percpu(clt_path->stats->pcpu_stats);
2876 kfree(clt_path->stats);
2877 free_path(clt_path);
2878 goto close_all_path;
2879 }
2880 }
2881 err = alloc_permits(clt);
2882 if (err)
2883 goto close_all_path;
2884
2885 return clt;
2886
2887 close_all_path:
2888 list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
2889 rtrs_clt_destroy_path_files(clt_path, NULL);
2890 rtrs_clt_close_conns(clt_path, true);
2891 kobject_put(&clt_path->kobj);
2892 }
2893 rtrs_clt_destroy_sysfs_root(clt);
2894 free_clt(clt);
2895
2896 out:
2897 return ERR_PTR(err);
2898 }
2899 EXPORT_SYMBOL(rtrs_clt_open);
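/*
 * Minimal usage sketch (illustrative only, not part of the driver; all
 * "my_" identifiers are hypothetical and error handling is reduced to the
 * bare minimum):
 *
 *	static void my_link_ev(void *priv, enum rtrs_clt_link_ev ev)
 *	{
 *		... react to RTRS_CLT_LINK_EV_RECONNECTED / _DISCONNECTED ...
 *	}
 *
 *	struct rtrs_clt_ops my_ops = {
 *		.priv	 = my_priv,
 *		.link_ev = my_link_ev,
 *	};
 *	struct rtrs_addr my_path = {
 *		.src = &my_src,		(pointers to struct sockaddr_storage)
 *		.dst = &my_dst,
 *	};
 *	struct rtrs_clt_sess *sess;
 *
 *	sess = rtrs_clt_open(&my_ops, "my-session", &my_path, 1, my_port,
 *			     sizeof(struct my_pdu), 5, 15, 0);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 */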
2900
2901 /**
2902 * rtrs_clt_close() - Close a path
2903  * rtrs_clt_close() - Close a session
2904 */
rtrs_clt_close(struct rtrs_clt_sess * clt)2905 void rtrs_clt_close(struct rtrs_clt_sess *clt)
2906 {
2907 struct rtrs_clt_path *clt_path, *tmp;
2908
2909 	/* First, forbid sysfs access */
2910 rtrs_clt_destroy_sysfs_root(clt);
2911
2912 /* Now it is safe to iterate over all paths without locks */
2913 list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
2914 rtrs_clt_close_conns(clt_path, true);
2915 rtrs_clt_destroy_path_files(clt_path, NULL);
2916 kobject_put(&clt_path->kobj);
2917 }
2918 free_permits(clt);
2919 free_clt(clt);
2920 }
2921 EXPORT_SYMBOL(rtrs_clt_close);
2922
rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path * clt_path)2923 int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *clt_path)
2924 {
2925 enum rtrs_clt_state old_state;
2926 int err = -EBUSY;
2927 bool changed;
2928
2929 changed = rtrs_clt_change_state_get_old(clt_path,
2930 RTRS_CLT_RECONNECTING,
2931 &old_state);
2932 if (changed) {
2933 clt_path->reconnect_attempts = 0;
2934 rtrs_clt_stop_and_destroy_conns(clt_path);
2935 queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0);
2936 }
2937 if (changed || old_state == RTRS_CLT_RECONNECTING) {
2938 /*
2939 * flush_delayed_work() queues pending work for immediate
2940 * execution, so do the flush if we have queued something
2941 * right now or work is pending.
2942 */
2943 flush_delayed_work(&clt_path->reconnect_dwork);
2944 err = (READ_ONCE(clt_path->state) ==
2945 RTRS_CLT_CONNECTED ? 0 : -ENOTCONN);
2946 }
2947
2948 return err;
2949 }
2950
rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path * clt_path,const struct attribute * sysfs_self)2951 int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *clt_path,
2952 const struct attribute *sysfs_self)
2953 {
2954 enum rtrs_clt_state old_state;
2955 bool changed;
2956
2957 /*
2958 * Continue stopping path till state was changed to DEAD or
2959 * state was observed as DEAD:
2960 * 1. State was changed to DEAD - we were fast and nobody
2961 * invoked rtrs_clt_reconnect(), which can again start
2962 * reconnecting.
2963 * 2. State was observed as DEAD - we have someone in parallel
2964 * removing the path.
2965 */
2966 do {
2967 rtrs_clt_close_conns(clt_path, true);
2968 changed = rtrs_clt_change_state_get_old(clt_path,
2969 RTRS_CLT_DEAD,
2970 &old_state);
2971 } while (!changed && old_state != RTRS_CLT_DEAD);
2972
2973 if (changed) {
2974 rtrs_clt_remove_path_from_arr(clt_path);
2975 rtrs_clt_destroy_path_files(clt_path, sysfs_self);
2976 kobject_put(&clt_path->kobj);
2977 }
2978
2979 return 0;
2980 }
2981
rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess * clt,int value)2982 void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value)
2983 {
2984 clt->max_reconnect_attempts = (unsigned int)value;
2985 }
2986
rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess * clt)2987 int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt)
2988 {
2989 return (int)clt->max_reconnect_attempts;
2990 }
2991
2992 /**
2993 * rtrs_clt_request() - Request data transfer to/from server via RDMA.
2994 *
2995 * @dir: READ/WRITE
2996 * @ops: callback function to be called as confirmation, and the pointer.
2997 * @clt: Session
2998 * @permit: Preallocated permit
2999 * @vec: Message that is sent to server together with the request.
3000 * Sum of len of all @vec elements limited to <= IO_MSG_SIZE.
3001 * Since the msg is copied internally it can be allocated on stack.
3002 * @nr: Number of elements in @vec.
3003 * @data_len: length of data sent to/from server
3004 * @sg: Pages to be sent/received to/from server.
3005 * @sg_cnt: Number of elements in the @sg
3006 *
3007 * Return:
3008 * 0: Success
3009 * <0: Error
3010 *
3011 * On dir=READ rtrs client will request a data transfer from Server to client.
3012 * The data that the server will respond with will be stored in @sg when
3013 * the user receives an %RTRS_CLT_RDMA_EV_RDMA_REQUEST_WRITE_COMPL event.
3014 * On dir=WRITE rtrs client will rdma write data in sg to server side.
3015 */
rtrs_clt_request(int dir,struct rtrs_clt_req_ops * ops,struct rtrs_clt_sess * clt,struct rtrs_permit * permit,const struct kvec * vec,size_t nr,size_t data_len,struct scatterlist * sg,unsigned int sg_cnt)3016 int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
3017 struct rtrs_clt_sess *clt, struct rtrs_permit *permit,
3018 const struct kvec *vec, size_t nr, size_t data_len,
3019 struct scatterlist *sg, unsigned int sg_cnt)
3020 {
3021 struct rtrs_clt_io_req *req;
3022 struct rtrs_clt_path *clt_path;
3023
3024 enum dma_data_direction dma_dir;
3025 int err = -ECONNABORTED, i;
3026 size_t usr_len, hdr_len;
3027 struct path_it it;
3028
3029 /* Get kvec length */
3030 for (i = 0, usr_len = 0; i < nr; i++)
3031 usr_len += vec[i].iov_len;
3032
3033 if (dir == READ) {
3034 hdr_len = sizeof(struct rtrs_msg_rdma_read) +
3035 sg_cnt * sizeof(struct rtrs_sg_desc);
3036 dma_dir = DMA_FROM_DEVICE;
3037 } else {
3038 hdr_len = sizeof(struct rtrs_msg_rdma_write);
3039 dma_dir = DMA_TO_DEVICE;
3040 }
3041
3042 rcu_read_lock();
3043 for (path_it_init(&it, clt);
3044 (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
3045 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
3046 continue;
3047
3048 if (usr_len + hdr_len > clt_path->max_hdr_size) {
3049 rtrs_wrn_rl(clt_path->clt,
3050 "%s request failed, user message size is %zu and header length %zu, but max size is %u\n",
3051 dir == READ ? "Read" : "Write",
3052 usr_len, hdr_len, clt_path->max_hdr_size);
3053 err = -EMSGSIZE;
3054 break;
3055 }
3056 req = rtrs_clt_get_req(clt_path, ops->conf_fn, permit, ops->priv,
3057 vec, usr_len, sg, sg_cnt, data_len,
3058 dma_dir);
3059 if (dir == READ)
3060 err = rtrs_clt_read_req(req);
3061 else
3062 err = rtrs_clt_write_req(req);
3063 if (err) {
3064 req->in_use = false;
3065 continue;
3066 }
3067 /* Success path */
3068 break;
3069 }
3070 path_it_deinit(&it);
3071 rcu_read_unlock();
3072
3073 return err;
3074 }
3075 EXPORT_SYMBOL(rtrs_clt_request);
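/*
 * Minimal usage sketch for a WRITE request (illustrative only; "my_"
 * identifiers are hypothetical):
 *
 *	struct rtrs_clt_req_ops my_req_ops = {
 *		.priv	 = my_io,
 *		.conf_fn = my_io_done,	(completion callback)
 *	};
 *	struct kvec vec = {
 *		.iov_base = &my_msg,
 *		.iov_len  = sizeof(my_msg),
 *	};
 *	struct rtrs_permit *permit;
 *	int err;
 *
 *	permit = rtrs_clt_get_permit(sess, RTRS_IO_CON, RTRS_PERMIT_WAIT);
 *	err = rtrs_clt_request(WRITE, &my_req_ops, sess, permit, &vec, 1,
 *			       my_data_len, my_sgl, my_sg_cnt);
 */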
3076
rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess * clt,unsigned int index)3077 int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index)
3078 {
3079 /* If no path, return -1 for block layer not to try again */
3080 int cnt = -1;
3081 struct rtrs_con *con;
3082 struct rtrs_clt_path *clt_path;
3083 struct path_it it;
3084
3085 rcu_read_lock();
3086 for (path_it_init(&it, clt);
3087 (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
3088 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
3089 continue;
3090
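		/*
		 * con[0] carries the service/info traffic (see
		 * rtrs_send_path_info()), so poll queue @index maps to
		 * s.con[index + 1].
		 */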
3091 con = clt_path->s.con[index + 1];
3092 cnt = ib_process_cq_direct(con->cq, -1);
3093 if (cnt)
3094 break;
3095 }
3096 path_it_deinit(&it);
3097 rcu_read_unlock();
3098
3099 return cnt;
3100 }
3101 EXPORT_SYMBOL(rtrs_clt_rdma_cq_direct);
3102
3103 /**
3104 * rtrs_clt_query() - queries RTRS session attributes
3105  * @clt: session pointer
3106  * @attr: query results for session attributes.
3107 * Returns:
3108 * 0 on success
3109 * -ECOMM no connection to the server
3110 */
rtrs_clt_query(struct rtrs_clt_sess * clt,struct rtrs_attrs * attr)3111 int rtrs_clt_query(struct rtrs_clt_sess *clt, struct rtrs_attrs *attr)
3112 {
3113 if (!rtrs_clt_is_connected(clt))
3114 return -ECOMM;
3115
3116 attr->queue_depth = clt->queue_depth;
3117 attr->max_segments = clt->max_segments;
3118 /* Cap max_io_size to min of remote buffer size and the fr pages */
3119 attr->max_io_size = min_t(int, clt->max_io_size,
3120 clt->max_segments * SZ_4K);
3121
3122 return 0;
3123 }
3124 EXPORT_SYMBOL(rtrs_clt_query);
3125
rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess * clt,struct rtrs_addr * addr)3126 int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt,
3127 struct rtrs_addr *addr)
3128 {
3129 struct rtrs_clt_path *clt_path;
3130 int err;
3131
3132 clt_path = alloc_path(clt, addr, nr_cpu_ids, 0);
3133 if (IS_ERR(clt_path))
3134 return PTR_ERR(clt_path);
3135
3136 mutex_lock(&clt->paths_mutex);
3137 if (clt->paths_num == 0) {
3138 /*
3139 * When all the paths are removed for a session,
3140 * the addition of the first path is like a new session for
3141 * the storage server
3142 */
3143 clt_path->for_new_clt = 1;
3144 }
3145
3146 mutex_unlock(&clt->paths_mutex);
3147
3148 /*
3149 	 * It is totally safe to add a path in the CONNECTING state: incoming
3150 	 * IO will never grab it. Also it is very important to add the path
3151 	 * before init, since init fires the LINK_CONNECTED event.
3152 */
3153 rtrs_clt_add_path_to_arr(clt_path);
3154
3155 err = init_path(clt_path);
3156 if (err)
3157 goto close_path;
3158
3159 err = rtrs_clt_create_path_files(clt_path);
3160 if (err)
3161 goto close_path;
3162
3163 return 0;
3164
3165 close_path:
3166 rtrs_clt_remove_path_from_arr(clt_path);
3167 rtrs_clt_close_conns(clt_path, true);
3168 free_percpu(clt_path->stats->pcpu_stats);
3169 kfree(clt_path->stats);
3170 free_path(clt_path);
3171
3172 return err;
3173 }
3174
rtrs_clt_ib_event_handler(struct ib_event_handler * handler,struct ib_event * ibevent)3175 void rtrs_clt_ib_event_handler(struct ib_event_handler *handler,
3176 struct ib_event *ibevent)
3177 {
3178 struct ib_device *idev = ibevent->device;
3179 u32 port_num = ibevent->element.port_num;
3180
3181 pr_info("Handling event: %s (%d). HCA name: %s, port num: %u\n",
3182 ib_event_msg(ibevent->event), ibevent->event, idev->name, port_num);
3183 }
3184
3185
rtrs_clt_ib_dev_init(struct rtrs_ib_dev * dev)3186 static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
3187 {
3188 INIT_IB_EVENT_HANDLER(&dev->event_handler, dev->ib_dev,
3189 rtrs_clt_ib_event_handler);
3190 ib_register_event_handler(&dev->event_handler);
3191
3192 if (!(dev->ib_dev->attrs.device_cap_flags &
3193 IB_DEVICE_MEM_MGT_EXTENSIONS)) {
3194 pr_err("Memory registrations not supported.\n");
3195 return -ENOTSUPP;
3196 }
3197
3198 return 0;
3199 }
3200
rtrs_clt_ib_dev_deinit(struct rtrs_ib_dev * dev)3201 static void rtrs_clt_ib_dev_deinit(struct rtrs_ib_dev *dev)
3202 {
3203 ib_unregister_event_handler(&dev->event_handler);
3204 }
3205
3206
3207 static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
3208 .init = rtrs_clt_ib_dev_init,
3209 .deinit = rtrs_clt_ib_dev_deinit
3210 };
3211
rtrs_client_init(void)3212 static int __init rtrs_client_init(void)
3213 {
3214 int ret = 0;
3215
3216 rtrs_rdma_dev_pd_init(0, &dev_pd);
3217 ret = class_register(&rtrs_clt_dev_class);
3218 if (ret) {
3219 pr_err("Failed to create rtrs-client dev class\n");
3220 return ret;
3221 }
3222 rtrs_wq = alloc_workqueue("rtrs_client_wq", 0, 0);
3223 if (!rtrs_wq) {
3224 class_unregister(&rtrs_clt_dev_class);
3225 return -ENOMEM;
3226 }
3227
3228 return 0;
3229 }
3230
rtrs_client_exit(void)3231 static void __exit rtrs_client_exit(void)
3232 {
3233 destroy_workqueue(rtrs_wq);
3234 class_unregister(&rtrs_clt_dev_class);
3235 rtrs_rdma_dev_pd_deinit(&dev_pd);
3236 }
3237
3238 module_init(rtrs_client_init);
3239 module_exit(rtrs_client_exit);
3240