1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
6 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
7 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
17 #include "rtrs-clt.h"
18 #include "rtrs-log.h"
19 #include "rtrs-clt-trace.h"
49 .name = "rtrs-client",
58 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) in rtrs_clt_is_connected()
59 if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED) { in rtrs_clt_is_connected()
71 size_t max_depth = clt->queue_depth; in __rtrs_get_permit()
83 bit = find_first_zero_bit(clt->permits_map, max_depth); in __rtrs_get_permit()
86 } while (test_and_set_bit_lock(bit, clt->permits_map)); in __rtrs_get_permit()
89 WARN_ON(permit->mem_id != bit); in __rtrs_get_permit()
90 permit->cpu_id = raw_smp_processor_id(); in __rtrs_get_permit()
91 permit->con_type = con_type; in __rtrs_get_permit()
99 clear_bit_unlock(permit->mem_id, clt->permits_map); in __rtrs_put_permit()
103 * rtrs_clt_get_permit() - allocates permit for future RDMA operation
110 * to preallocate all resources and to propagate memory pressure
128 prepare_to_wait(&clt->permits_wait, &wait, in rtrs_clt_get_permit()
137 finish_wait(&clt->permits_wait, &wait); in rtrs_clt_get_permit()
144 * rtrs_clt_put_permit() - puts allocated permit
154 if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map))) in rtrs_clt_put_permit()
160 * rtrs_clt_get_permit() adds itself to the &clt->permits_wait list in rtrs_clt_put_permit()
162 * it must have added itself to &clt->permits_wait before in rtrs_clt_put_permit()
166 if (waitqueue_active(&clt->permits_wait)) in rtrs_clt_put_permit()
167 wake_up(&clt->permits_wait); in rtrs_clt_put_permit()
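/*
 * A minimal caller-side sketch of the permit API above, assuming the
 * rtrs_clt_get_permit()/rtrs_clt_put_permit() prototypes exported through
 * rtrs.h (session, connection type, wait mode) and the RTRS_PERMIT_WAIT /
 * RTRS_PERMIT_NOWAIT wait types; my_issue_io() and the elided submission
 * step are hypothetical.
 */
static int my_issue_io(struct rtrs_clt_sess *sess)
{
	struct rtrs_permit *permit;

	/* Sleep until a permit (i.e. a server-side chunk) becomes free. */
	permit = rtrs_clt_get_permit(sess, RTRS_IO_CON, RTRS_PERMIT_WAIT);
	if (!permit)
		return -EBUSY;	/* only reachable with RTRS_PERMIT_NOWAIT */

	/* ... fill and submit the request using this permit ... */

	/* Once the request is confirmed, the permit must be returned. */
	rtrs_clt_put_permit(sess, permit);
	return 0;
}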
172 * rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit
185 if (permit->con_type == RTRS_IO_CON) in rtrs_permit_to_clt_con()
186 id = (permit->cpu_id % (clt_path->s.irq_con_num - 1)) + 1; in rtrs_permit_to_clt_con()
188 return to_clt_con(clt_path->s.con[id]); in rtrs_permit_to_clt_con()
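/*
 * A worked example of the mapping above: with s.irq_con_num == 5 (one user
 * connection at index 0 plus four per-CPU IO connections), IO permits taken
 * on CPUs 0..7 resolve to con[1], con[2], con[3], con[4], con[1], ... while
 * user (admin) permits use con[0].  The "+ 1" keeps connection 0 reserved
 * for the user connection.
 */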
192 * rtrs_clt_change_state() - change the session state through session state
209 lockdep_assert_held(&clt_path->state_wq.lock); in rtrs_clt_change_state()
211 old_state = clt_path->state; in rtrs_clt_change_state()
285 clt_path->state = new_state; in rtrs_clt_change_state()
286 wake_up_locked(&clt_path->state_wq); in rtrs_clt_change_state()
298 spin_lock_irq(&clt_path->state_wq.lock); in rtrs_clt_change_state_from_to()
299 if (clt_path->state == old_state) in rtrs_clt_change_state_from_to()
301 spin_unlock_irq(&clt_path->state_wq.lock); in rtrs_clt_change_state_from_to()
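/*
 * The helper above is effectively a compare-and-swap on the path state taken
 * under state_wq.lock, so sleepers on state_wq always observe a consistent
 * transition.  For example, the error-recovery path just below queues
 * err_recovery_work only if it wins the CONNECTED -> RECONNECTING change;
 * a losing caller knows some other context already owns the state change.
 */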
309 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_rdma_error_recovery()
316 queue_work(rtrs_wq, &clt_path->err_recovery_work); in rtrs_rdma_error_recovery()
329 static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc) in rtrs_clt_fast_reg_done() argument
331 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); in rtrs_clt_fast_reg_done()
333 if (wc->status != IB_WC_SUCCESS) { in rtrs_clt_fast_reg_done()
334 rtrs_err_rl(con->c.path, "Failed IB_WR_REG_MR: %s\n", in rtrs_clt_fast_reg_done()
335 ib_wc_status_msg(wc->status)); in rtrs_clt_fast_reg_done()
347 static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) in rtrs_clt_inv_rkey_done() argument
350 container_of(wc->wr_cqe, typeof(*req), inv_cqe); in rtrs_clt_inv_rkey_done()
351 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); in rtrs_clt_inv_rkey_done()
353 if (wc->status != IB_WC_SUCCESS) { in rtrs_clt_inv_rkey_done()
354 rtrs_err_rl(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n", in rtrs_clt_inv_rkey_done()
355 ib_wc_status_msg(wc->status)); in rtrs_clt_inv_rkey_done()
358 req->mr->need_inval = false; in rtrs_clt_inv_rkey_done()
359 if (req->need_inv_comp) in rtrs_clt_inv_rkey_done()
360 complete(&req->inv_comp); in rtrs_clt_inv_rkey_done()
363 complete_rdma_req(req, req->inv_errno, true, false); in rtrs_clt_inv_rkey_done()
368 struct rtrs_clt_con *con = req->con; in rtrs_inv_rkey()
371 .wr_cqe = &req->inv_cqe, in rtrs_inv_rkey()
373 .ex.invalidate_rkey = req->mr->rkey, in rtrs_inv_rkey()
375 req->inv_cqe.done = rtrs_clt_inv_rkey_done; in rtrs_inv_rkey()
377 return ib_post_send(con->c.qp, &wr, NULL); in rtrs_inv_rkey()
383 struct rtrs_clt_con *con = req->con; in complete_rdma_req()
387 if (!req->in_use) in complete_rdma_req()
389 if (WARN_ON(!req->con)) in complete_rdma_req()
391 clt_path = to_clt_path(con->c.path); in complete_rdma_req()
393 if (req->sg_cnt) { in complete_rdma_req()
394 if (req->mr->need_inval) { in complete_rdma_req()
414 req->need_inv_comp = true; in complete_rdma_req()
419 req->inv_errno = errno; in complete_rdma_req()
422 refcount_inc(&req->ref); in complete_rdma_req()
425 rtrs_err_rl(con->c.path, "Send INV WR key=%#x: %d\n", in complete_rdma_req()
426 req->mr->rkey, err); in complete_rdma_req()
428 wait_for_completion(&req->inv_comp); in complete_rdma_req()
430 if (!refcount_dec_and_test(&req->ref)) in complete_rdma_req()
433 ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist, in complete_rdma_req()
434 req->sg_cnt, req->dir); in complete_rdma_req()
436 if (!refcount_dec_and_test(&req->ref)) in complete_rdma_req()
438 if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) in complete_rdma_req()
439 atomic_dec(&clt_path->stats->inflight); in complete_rdma_req()
441 req->in_use = false; in complete_rdma_req()
442 req->con = NULL; in complete_rdma_req()
445 rtrs_err_rl(con->c.path, in complete_rdma_req()
447 req->dir == DMA_TO_DEVICE ? "write" : "read", errno, in complete_rdma_req()
448 kobject_name(&clt_path->kobj), clt_path->hca_name, in complete_rdma_req()
449 clt_path->hca_port, notify); in complete_rdma_req()
453 req->conf(req->priv, errno); in complete_rdma_req()
461 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_post_send_rdma()
465 if (!req->sg_size) { in rtrs_post_send_rdma()
466 rtrs_wrn(con->c.path, in rtrs_post_send_rdma()
467 "Doing RDMA Write failed, no data supplied\n"); in rtrs_post_send_rdma()
468 return -EINVAL; in rtrs_post_send_rdma()
472 sge.addr = req->iu->dma_addr; in rtrs_post_send_rdma()
473 sge.length = req->sg_size; in rtrs_post_send_rdma()
474 sge.lkey = clt_path->s.dev->ib_pd->local_dma_lkey; in rtrs_post_send_rdma()
480 flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ? in rtrs_post_send_rdma()
483 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, in rtrs_post_send_rdma()
484 req->iu->dma_addr, in rtrs_post_send_rdma()
485 req->sg_size, DMA_TO_DEVICE); in rtrs_post_send_rdma()
487 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1, in rtrs_post_send_rdma()
488 rbuf->rkey, rbuf->addr + off, in rtrs_post_send_rdma()
497 if (WARN_ON(msg_id >= clt_path->queue_depth)) in process_io_rsp()
500 req = &clt_path->reqs[msg_id]; in process_io_rsp()
502 req->mr->need_inval &= !w_inval; in process_io_rsp()
506 static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc) in rtrs_clt_recv_done() argument
510 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_clt_recv_done()
512 WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0); in rtrs_clt_recv_done()
513 iu = container_of(wc->wr_cqe, struct rtrs_iu, in rtrs_clt_recv_done()
515 err = rtrs_iu_post_recv(&con->c, iu); in rtrs_clt_recv_done()
517 rtrs_err(con->c.path, "post iu failed %d\n", err); in rtrs_clt_recv_done()
522 static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc) in rtrs_clt_rkey_rsp_done() argument
524 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_clt_rkey_rsp_done()
532 WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0); in rtrs_clt_rkey_rsp_done()
534 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); in rtrs_clt_rkey_rsp_done()
536 if (wc->byte_len < sizeof(*msg)) { in rtrs_clt_rkey_rsp_done()
537 rtrs_err(con->c.path, "rkey response is malformed: size %d\n", in rtrs_clt_rkey_rsp_done()
538 wc->byte_len); in rtrs_clt_rkey_rsp_done()
541 ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr, in rtrs_clt_rkey_rsp_done()
542 iu->size, DMA_FROM_DEVICE); in rtrs_clt_rkey_rsp_done()
543 msg = iu->buf; in rtrs_clt_rkey_rsp_done()
544 if (le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP) { in rtrs_clt_rkey_rsp_done()
545 rtrs_err(clt_path->clt, in rtrs_clt_rkey_rsp_done()
547 le16_to_cpu(msg->type)); in rtrs_clt_rkey_rsp_done()
550 buf_id = le16_to_cpu(msg->buf_id); in rtrs_clt_rkey_rsp_done()
551 if (WARN_ON(buf_id >= clt_path->queue_depth)) in rtrs_clt_rkey_rsp_done()
554 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload); in rtrs_clt_rkey_rsp_done()
564 clt_path->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey); in rtrs_clt_rkey_rsp_done()
567 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, iu->dma_addr, in rtrs_clt_rkey_rsp_done()
568 iu->size, DMA_FROM_DEVICE); in rtrs_clt_rkey_rsp_done()
569 return rtrs_clt_recv_done(con, wc); in rtrs_clt_rkey_rsp_done()
574 static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
592 wr->wr_cqe = cqe; in rtrs_post_recv_empty_x2()
595 wr->next = &wr_arr[i - 1]; in rtrs_post_recv_empty_x2()
598 return ib_post_recv(con->qp, wr, NULL); in rtrs_post_recv_empty_x2()
601 static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) in rtrs_clt_rdma_done() argument
603 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); in rtrs_clt_rdma_done()
604 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_clt_rdma_done()
609 if (wc->status != IB_WC_SUCCESS) { in rtrs_clt_rdma_done()
610 if (wc->status != IB_WC_WR_FLUSH_ERR) { in rtrs_clt_rdma_done()
611 rtrs_err(clt_path->clt, "RDMA failed: %s\n", in rtrs_clt_rdma_done()
612 ib_wc_status_msg(wc->status)); in rtrs_clt_rdma_done()
619 switch (wc->opcode) { in rtrs_clt_rdma_done()
625 if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done)) in rtrs_clt_rdma_done()
627 clt_path->s.hb_missed_cnt = 0; in rtrs_clt_rdma_done()
628 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), in rtrs_clt_rdma_done()
639 WARN_ON(con->c.cid); in rtrs_clt_rdma_done()
640 rtrs_send_hb_ack(&clt_path->s); in rtrs_clt_rdma_done()
641 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) in rtrs_clt_rdma_done()
642 return rtrs_clt_recv_done(con, wc); in rtrs_clt_rdma_done()
644 WARN_ON(con->c.cid); in rtrs_clt_rdma_done()
645 clt_path->s.hb_cur_latency = in rtrs_clt_rdma_done()
646 ktime_sub(ktime_get(), clt_path->s.hb_last_sent); in rtrs_clt_rdma_done()
647 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) in rtrs_clt_rdma_done()
648 return rtrs_clt_recv_done(con, wc); in rtrs_clt_rdma_done()
650 rtrs_wrn(con->c.path, "Unknown IMM type %u\n", in rtrs_clt_rdma_done()
658 err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe); in rtrs_clt_rdma_done()
660 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); in rtrs_clt_rdma_done()
662 rtrs_err(con->c.path, "rtrs_post_recv_empty(): %d\n", in rtrs_clt_rdma_done()
671 clt_path->s.hb_missed_cnt = 0; in rtrs_clt_rdma_done()
672 WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE || in rtrs_clt_rdma_done()
673 wc->wc_flags & IB_WC_WITH_IMM)); in rtrs_clt_rdma_done()
674 WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done); in rtrs_clt_rdma_done()
675 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) { in rtrs_clt_rdma_done()
676 if (wc->wc_flags & IB_WC_WITH_INVALIDATE) in rtrs_clt_rdma_done()
677 return rtrs_clt_recv_done(con, wc); in rtrs_clt_rdma_done()
679 return rtrs_clt_rkey_rsp_done(con, wc); in rtrs_clt_rdma_done()
690 rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode); in rtrs_clt_rdma_done()
698 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in post_recv_io()
701 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) { in post_recv_io()
702 struct rtrs_iu *iu = &con->rsp_ius[i]; in post_recv_io()
704 err = rtrs_iu_post_recv(&con->c, iu); in post_recv_io()
706 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); in post_recv_io()
720 for (cid = 0; cid < clt_path->s.con_num; cid++) { in post_recv_path()
724 q_size = clt_path->queue_depth; in post_recv_path()
732 err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size); in post_recv_path()
734 rtrs_err(clt_path->clt, "post_recv_io(), err: %d\n", in post_recv_path()
751 * rtrs_clt_get_next_path_or_null - get clt path from the list or return NULL
755  * The next clt path is returned in round-robin fashion, i.e. the head will be skipped,
758 * This function may safely run concurrently with the _rcu list-mutation
764 return list_next_or_null_rcu(head, &clt_path->s.entry, typeof(*clt_path), s.entry) ?: in rtrs_clt_get_next_path_or_null()
766 READ_ONCE((&clt_path->s.entry)->next), in rtrs_clt_get_next_path_or_null()
771 * get_next_path_rr() - Returns path in round-robin fashion.
788 RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu read lock held"); in get_next_path_rr()
790 clt = it->clt; in get_next_path_rr()
798 ppcpu_path = this_cpu_ptr(clt->pcpu_path); in get_next_path_rr()
801 path = list_first_or_null_rcu(&clt->paths_list, in get_next_path_rr()
804 path = rtrs_clt_get_next_path_or_null(&clt->paths_list, path); in get_next_path_rr()
812 * get_next_path_min_inflight() - Returns path with minimal inflight count.
823 struct rtrs_clt_sess *clt = it->clt; in get_next_path_min_inflight()
828 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) { in get_next_path_min_inflight()
829 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) in get_next_path_min_inflight()
832 if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry))) in get_next_path_min_inflight()
835 inflight = atomic_read(&clt_path->stats->inflight); in get_next_path_min_inflight()
848 list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list); in get_next_path_min_inflight()
854 * get_next_path_min_latency() - Returns path with minimal latency.
864 * This DOES skip an already-tried path.
865  * There is a skip-list to skip a path if the path has been tried but failed.
874 struct rtrs_clt_sess *clt = it->clt; in get_next_path_min_latency()
879 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) { in get_next_path_min_latency()
880 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) in get_next_path_min_latency()
883 if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry))) in get_next_path_min_latency()
886 latency = clt_path->s.hb_cur_latency; in get_next_path_min_latency()
899 list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list); in get_next_path_min_latency()
906 INIT_LIST_HEAD(&it->skip_list); in path_it_init()
907 it->clt = clt; in path_it_init()
908 it->i = 0; in path_it_init()
910 if (clt->mp_policy == MP_POLICY_RR) in path_it_init()
911 it->next_path = get_next_path_rr; in path_it_init()
912 else if (clt->mp_policy == MP_POLICY_MIN_INFLIGHT) in path_it_init()
913 it->next_path = get_next_path_min_inflight; in path_it_init()
915 it->next_path = get_next_path_min_latency; in path_it_init()
924 * paths (->mp_skip_entry) into a skip_list again. in path_it_deinit()
926 list_for_each_safe(skip, tmp, &it->skip_list) in path_it_deinit()
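/*
 * A condensed sketch of how the iterator above is driven by the request and
 * failover paths later in this file (see rtrs_clt_request() and
 * rtrs_clt_failover_req()); my_try_each_path() is a hypothetical wrapper and
 * the per-path submission step is elided.
 */
static int my_try_each_path(struct rtrs_clt_sess *clt)
{
	struct rtrs_clt_path *clt_path;
	struct path_it it;
	int err = -ECONNABORTED;

	rcu_read_lock();
	for (path_it_init(&it, clt);
	     (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num;
	     it.i++) {
		if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
			continue;
		/* ... try the request on clt_path, set err, break on success ... */
	}
	path_it_deinit(&it);
	rcu_read_unlock();

	return err;
}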
931 * rtrs_clt_init_req() - Initialize an rtrs_clt_io_req holding information
934 * the corresponding buffer of rtrs_iu (req->iu->buf), which later on will
959 req->permit = permit; in rtrs_clt_init_req()
960 req->in_use = true; in rtrs_clt_init_req()
961 req->usr_len = usr_len; in rtrs_clt_init_req()
962 req->data_len = data_len; in rtrs_clt_init_req()
963 req->sglist = sg; in rtrs_clt_init_req()
964 req->sg_cnt = sg_cnt; in rtrs_clt_init_req()
965 req->priv = priv; in rtrs_clt_init_req()
966 req->dir = dir; in rtrs_clt_init_req()
967 req->con = rtrs_permit_to_clt_con(clt_path, permit); in rtrs_clt_init_req()
968 req->conf = conf; in rtrs_clt_init_req()
969 req->mr->need_inval = false; in rtrs_clt_init_req()
970 req->need_inv_comp = false; in rtrs_clt_init_req()
971 req->inv_errno = 0; in rtrs_clt_init_req()
972 refcount_set(&req->ref, 1); in rtrs_clt_init_req()
973 req->mp_policy = clt_path->clt->mp_policy; in rtrs_clt_init_req()
976 len = _copy_from_iter(req->iu->buf, usr_len, &iter); in rtrs_clt_init_req()
979 reinit_completion(&req->inv_comp); in rtrs_clt_init_req()
992 req = &clt_path->reqs[permit->mem_id]; in rtrs_clt_get_req()
1004 .iov_base = fail_req->iu->buf, in rtrs_clt_get_copy_req()
1005 .iov_len = fail_req->usr_len in rtrs_clt_get_copy_req()
1008 req = &alive_path->reqs[fail_req->permit->mem_id]; in rtrs_clt_get_copy_req()
1009 rtrs_clt_init_req(req, alive_path, fail_req->conf, fail_req->permit, in rtrs_clt_get_copy_req()
1010 fail_req->priv, &vec, fail_req->usr_len, in rtrs_clt_get_copy_req()
1011 fail_req->sglist, fail_req->sg_cnt, in rtrs_clt_get_copy_req()
1012 fail_req->data_len, fail_req->dir); in rtrs_clt_get_copy_req()
1023 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_post_rdma_write_sg()
1024 struct ib_sge *sge = req->sge; in rtrs_post_rdma_write_sg()
1033 sge[i].addr = req->mr->iova; in rtrs_post_rdma_write_sg()
1034 sge[i].length = req->mr->length; in rtrs_post_rdma_write_sg()
1035 sge[i].lkey = req->mr->lkey; in rtrs_post_rdma_write_sg()
1040 for_each_sg(req->sglist, sg, count, i) { in rtrs_post_rdma_write_sg()
1043 sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey; in rtrs_post_rdma_write_sg()
1047 sge[i].addr = req->iu->dma_addr; in rtrs_post_rdma_write_sg()
1049 sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey; in rtrs_post_rdma_write_sg()
1055 flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ? in rtrs_post_rdma_write_sg()
1058 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, in rtrs_post_rdma_write_sg()
1059 req->iu->dma_addr, in rtrs_post_rdma_write_sg()
1062 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge, in rtrs_post_rdma_write_sg()
1063 rbuf->rkey, rbuf->addr, imm, in rtrs_post_rdma_write_sg()
1072 nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K); in rtrs_map_sg_fr()
1074 return nr < 0 ? nr : -EINVAL; in rtrs_map_sg_fr()
1075 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); in rtrs_map_sg_fr()
1082 struct rtrs_clt_con *con = req->con; in rtrs_clt_write_req()
1083 struct rtrs_path *s = con->c.path; in rtrs_clt_write_req()
1094 const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len; in rtrs_clt_write_req()
1096 if (tsize > clt_path->chunk_size) { in rtrs_clt_write_req()
1098 tsize, clt_path->chunk_size); in rtrs_clt_write_req()
1099 return -EMSGSIZE; in rtrs_clt_write_req()
1101 if (req->sg_cnt) { in rtrs_clt_write_req()
1102 count = ib_dma_map_sg(clt_path->s.dev->ib_dev, req->sglist, in rtrs_clt_write_req()
1103 req->sg_cnt, req->dir); in rtrs_clt_write_req()
1106 return -EINVAL; in rtrs_clt_write_req()
1110 msg = req->iu->buf + req->usr_len; in rtrs_clt_write_req()
1111 msg->type = cpu_to_le16(RTRS_MSG_WRITE); in rtrs_clt_write_req()
1112 msg->usr_len = cpu_to_le16(req->usr_len); in rtrs_clt_write_req()
1115 imm = req->permit->mem_off + req->data_len + req->usr_len; in rtrs_clt_write_req()
1117 buf_id = req->permit->mem_id; in rtrs_clt_write_req()
1118 req->sg_size = tsize; in rtrs_clt_write_req()
1119 rbuf = &clt_path->rbufs[buf_id]; in rtrs_clt_write_req()
1127 ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist, in rtrs_clt_write_req()
1128 req->sg_cnt, req->dir); in rtrs_clt_write_req()
1134 .mr = req->mr, in rtrs_clt_write_req()
1135 .key = req->mr->rkey, in rtrs_clt_write_req()
1140 req->mr->need_inval = true; in rtrs_clt_write_req()
1148 ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count, in rtrs_clt_write_req()
1149 req->usr_len + sizeof(*msg), in rtrs_clt_write_req()
1154 ret, kobject_name(&clt_path->kobj), clt_path->hca_name, in rtrs_clt_write_req()
1155 clt_path->hca_port); in rtrs_clt_write_req()
1156 if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) in rtrs_clt_write_req()
1157 atomic_dec(&clt_path->stats->inflight); in rtrs_clt_write_req()
1158 if (req->mr->need_inval) { in rtrs_clt_write_req()
1159 req->mr->need_inval = false; in rtrs_clt_write_req()
1160 refcount_dec(&req->ref); in rtrs_clt_write_req()
1162 if (req->sg_cnt) in rtrs_clt_write_req()
1163 ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist, in rtrs_clt_write_req()
1164 req->sg_cnt, req->dir); in rtrs_clt_write_req()
1172 struct rtrs_clt_con *con = req->con; in rtrs_clt_read_req()
1173 struct rtrs_path *s = con->c.path; in rtrs_clt_read_req()
1176 struct rtrs_ib_dev *dev = clt_path->s.dev; in rtrs_clt_read_req()
1184 const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len; in rtrs_clt_read_req()
1186 if (tsize > clt_path->chunk_size) { in rtrs_clt_read_req()
1189 tsize, clt_path->chunk_size); in rtrs_clt_read_req()
1190 return -EMSGSIZE; in rtrs_clt_read_req()
1193 if (req->sg_cnt) { in rtrs_clt_read_req()
1194 count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt, in rtrs_clt_read_req()
1195 req->dir); in rtrs_clt_read_req()
1199 return -EINVAL; in rtrs_clt_read_req()
1202 /* put our message into req->buf after user message*/ in rtrs_clt_read_req()
1203 msg = req->iu->buf + req->usr_len; in rtrs_clt_read_req()
1204 msg->type = cpu_to_le16(RTRS_MSG_READ); in rtrs_clt_read_req()
1205 msg->usr_len = cpu_to_le16(req->usr_len); in rtrs_clt_read_req()
1213 ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt, in rtrs_clt_read_req()
1214 req->dir); in rtrs_clt_read_req()
1220 .mr = req->mr, in rtrs_clt_read_req()
1221 .key = req->mr->rkey, in rtrs_clt_read_req()
1227 msg->sg_cnt = cpu_to_le16(1); in rtrs_clt_read_req()
1228 msg->flags = cpu_to_le16(RTRS_MSG_NEED_INVAL_F); in rtrs_clt_read_req()
1230 msg->desc[0].addr = cpu_to_le64(req->mr->iova); in rtrs_clt_read_req()
1231 msg->desc[0].key = cpu_to_le32(req->mr->rkey); in rtrs_clt_read_req()
1232 msg->desc[0].len = cpu_to_le32(req->mr->length); in rtrs_clt_read_req()
1235 req->mr->need_inval = !!RTRS_MSG_NEED_INVAL_F; in rtrs_clt_read_req()
1238 msg->sg_cnt = 0; in rtrs_clt_read_req()
1239 msg->flags = 0; in rtrs_clt_read_req()
1245 imm = req->permit->mem_off + req->data_len + req->usr_len; in rtrs_clt_read_req()
1247 buf_id = req->permit->mem_id; in rtrs_clt_read_req()
1249 req->sg_size = sizeof(*msg); in rtrs_clt_read_req()
1250 req->sg_size += le16_to_cpu(msg->sg_cnt) * sizeof(struct rtrs_sg_desc); in rtrs_clt_read_req()
1251 req->sg_size += req->usr_len; in rtrs_clt_read_req()
1259 ret = rtrs_post_send_rdma(req->con, req, &clt_path->rbufs[buf_id], in rtrs_clt_read_req()
1260 req->data_len, imm, wr); in rtrs_clt_read_req()
1264 ret, kobject_name(&clt_path->kobj), clt_path->hca_name, in rtrs_clt_read_req()
1265 clt_path->hca_port); in rtrs_clt_read_req()
1266 if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) in rtrs_clt_read_req()
1267 atomic_dec(&clt_path->stats->inflight); in rtrs_clt_read_req()
1268 req->mr->need_inval = false; in rtrs_clt_read_req()
1269 if (req->sg_cnt) in rtrs_clt_read_req()
1270 ib_dma_unmap_sg(dev->ib_dev, req->sglist, in rtrs_clt_read_req()
1271 req->sg_cnt, req->dir); in rtrs_clt_read_req()
1278 * rtrs_clt_failover_req() - Try to find an active path for a failed request
1287 int err = -ECONNABORTED; in rtrs_clt_failover_req()
1292 (alive_path = it.next_path(&it)) && it.i < it.clt->paths_num; in rtrs_clt_failover_req()
1294 if (READ_ONCE(alive_path->state) != RTRS_CLT_CONNECTED) in rtrs_clt_failover_req()
1297 if (req->dir == DMA_TO_DEVICE) in rtrs_clt_failover_req()
1302 req->in_use = false; in rtrs_clt_failover_req()
1306 rtrs_clt_inc_failover_cnt(alive_path->stats); in rtrs_clt_failover_req()
1317 struct rtrs_clt_sess *clt = clt_path->clt; in fail_all_outstanding_reqs()
1321 if (!clt_path->reqs) in fail_all_outstanding_reqs()
1323 for (i = 0; i < clt_path->queue_depth; ++i) { in fail_all_outstanding_reqs()
1324 req = &clt_path->reqs[i]; in fail_all_outstanding_reqs()
1325 if (!req->in_use) in fail_all_outstanding_reqs()
1333 complete_rdma_req(req, -ECONNABORTED, false, true); in fail_all_outstanding_reqs()
1338 req->conf(req->priv, err); in fail_all_outstanding_reqs()
1347 if (!clt_path->reqs) in free_path_reqs()
1349 for (i = 0; i < clt_path->queue_depth; ++i) { in free_path_reqs()
1350 req = &clt_path->reqs[i]; in free_path_reqs()
1351 if (req->mr) in free_path_reqs()
1352 ib_dereg_mr(req->mr); in free_path_reqs()
1353 kfree(req->sge); in free_path_reqs()
1354 rtrs_iu_free(req->iu, clt_path->s.dev->ib_dev, 1); in free_path_reqs()
1356 kfree(clt_path->reqs); in free_path_reqs()
1357 clt_path->reqs = NULL; in free_path_reqs()
1363 int i, err = -ENOMEM; in alloc_path_reqs()
1365 clt_path->reqs = kcalloc(clt_path->queue_depth, in alloc_path_reqs()
1366 sizeof(*clt_path->reqs), in alloc_path_reqs()
1368 if (!clt_path->reqs) in alloc_path_reqs()
1369 return -ENOMEM; in alloc_path_reqs()
1371 for (i = 0; i < clt_path->queue_depth; ++i) { in alloc_path_reqs()
1372 req = &clt_path->reqs[i]; in alloc_path_reqs()
1373 req->iu = rtrs_iu_alloc(1, clt_path->max_hdr_size, GFP_KERNEL, in alloc_path_reqs()
1374 clt_path->s.dev->ib_dev, in alloc_path_reqs()
1377 if (!req->iu) in alloc_path_reqs()
1380 req->sge = kcalloc(2, sizeof(*req->sge), GFP_KERNEL); in alloc_path_reqs()
1381 if (!req->sge) in alloc_path_reqs()
1384 req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd, in alloc_path_reqs()
1386 clt_path->max_pages_per_mr); in alloc_path_reqs()
1387 if (IS_ERR(req->mr)) { in alloc_path_reqs()
1388 err = PTR_ERR(req->mr); in alloc_path_reqs()
1389 pr_err("Failed to alloc clt_path->max_pages_per_mr %d: %pe\n", in alloc_path_reqs()
1390 clt_path->max_pages_per_mr, req->mr); in alloc_path_reqs()
1391 req->mr = NULL; in alloc_path_reqs()
1395 init_completion(&req->inv_comp); in alloc_path_reqs()
1411 clt->permits_map = bitmap_zalloc(clt->queue_depth, GFP_KERNEL); in alloc_permits()
1412 if (!clt->permits_map) { in alloc_permits()
1413 err = -ENOMEM; in alloc_permits()
1416 clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL); in alloc_permits()
1417 if (!clt->permits) { in alloc_permits()
1418 err = -ENOMEM; in alloc_permits()
1421 chunk_bits = ilog2(clt->queue_depth - 1) + 1; in alloc_permits()
1422 for (i = 0; i < clt->queue_depth; i++) { in alloc_permits()
1426 permit->mem_id = i; in alloc_permits()
1427 permit->mem_off = i << (MAX_IMM_PAYL_BITS - chunk_bits); in alloc_permits()
1433 bitmap_free(clt->permits_map); in alloc_permits()
1434 clt->permits_map = NULL; in alloc_permits()
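/*
 * A worked example of the permit layout above, assuming MAX_IMM_PAYL_BITS is
 * 28 as defined in rtrs-pri.h: with queue_depth == 512, chunk_bits =
 * ilog2(511) + 1 = 9, so mem_off = mem_id << 19.  The read/write paths later
 * add the offset inside the chunk (usr_len + data_len, always smaller than
 * the chunk size) to mem_off to form the immediate payload, which the server
 * splits back into chunk id and in-chunk offset.
 */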
1441 if (clt->permits_map) in free_permits()
1442 wait_event(clt->permits_wait, in free_permits()
1443 bitmap_empty(clt->permits_map, clt->queue_depth)); in free_permits()
1445 bitmap_free(clt->permits_map); in free_permits()
1446 clt->permits_map = NULL; in free_permits()
1447 kfree(clt->permits); in free_permits()
1448 clt->permits = NULL; in free_permits()
1457 ib_dev = clt_path->s.dev->ib_dev; in query_fast_reg_mode()
1464 mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1); in query_fast_reg_mode()
1465 max_pages_per_mr = ib_dev->attrs.max_mr_size; in query_fast_reg_mode()
1467 clt_path->max_pages_per_mr = in query_fast_reg_mode()
1468 min3(clt_path->max_pages_per_mr, (u32)max_pages_per_mr, in query_fast_reg_mode()
1469 ib_dev->attrs.max_fast_reg_page_list_len); in query_fast_reg_mode()
1470 clt_path->clt->max_segments = in query_fast_reg_mode()
1471 min(clt_path->max_pages_per_mr, clt_path->clt->max_segments); in query_fast_reg_mode()
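/*
 * A worked example for the page-shift calculation above: ffs() is 1-based,
 * so a device advertising page_size_cap = 0xfffff000 (4 KiB and larger)
 * gives ffs() - 1 = 12, i.e. 4 KiB MR pages; the max(12, ...) clamp keeps
 * the shift at 4 KiB even if the device reports support for smaller pages.
 */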
1480 spin_lock_irq(&clt_path->state_wq.lock); in rtrs_clt_change_state_get_old()
1482 *old_state = clt_path->state; in rtrs_clt_change_state_get_old()
1484 spin_unlock_irq(&clt_path->state_wq.lock); in rtrs_clt_change_state_get_old()
1492 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_clt_hb_err_handler()
1494 rtrs_err(con->c.path, "HB err handler for path=%s\n", kobject_name(&clt_path->kobj)); in rtrs_clt_hb_err_handler()
1500 rtrs_init_hb(&clt_path->s, &io_comp_cqe, in rtrs_clt_init_hb()
1517 clt = clt_path->clt; in rtrs_clt_err_recovery_work()
1518 delay_ms = clt->reconnect_delay_sec * 1000; in rtrs_clt_err_recovery_work()
1520 queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, in rtrs_clt_err_recovery_work()
1530 int err = -ENOMEM; in alloc_path()
1543 clt_path->s.con = kcalloc(total_con, sizeof(*clt_path->s.con), in alloc_path()
1545 if (!clt_path->s.con) in alloc_path()
1548 clt_path->s.con_num = total_con; in alloc_path()
1549 clt_path->s.irq_con_num = con_num + 1; in alloc_path()
1551 clt_path->stats = kzalloc(sizeof(*clt_path->stats), GFP_KERNEL); in alloc_path()
1552 if (!clt_path->stats) in alloc_path()
1555 mutex_init(&clt_path->init_mutex); in alloc_path()
1556 uuid_gen(&clt_path->s.uuid); in alloc_path()
1557 memcpy(&clt_path->s.dst_addr, path->dst, in alloc_path()
1558 rdma_addr_size((struct sockaddr *)path->dst)); in alloc_path()
1562 * checks the sa_family to be non-zero. If user passed src_addr=NULL in alloc_path()
1563 * the sess->src_addr will contain only zeros, which is then fine. in alloc_path()
1565 if (path->src) in alloc_path()
1566 memcpy(&clt_path->s.src_addr, path->src, in alloc_path()
1567 rdma_addr_size((struct sockaddr *)path->src)); in alloc_path()
1568 strscpy(clt_path->s.sessname, clt->sessname, in alloc_path()
1569 sizeof(clt_path->s.sessname)); in alloc_path()
1570 clt_path->clt = clt; in alloc_path()
1571 clt_path->max_pages_per_mr = RTRS_MAX_SEGMENTS; in alloc_path()
1572 init_waitqueue_head(&clt_path->state_wq); in alloc_path()
1573 clt_path->state = RTRS_CLT_CONNECTING; in alloc_path()
1574 atomic_set(&clt_path->connected_cnt, 0); in alloc_path()
1575 INIT_WORK(&clt_path->close_work, rtrs_clt_close_work); in alloc_path()
1576 INIT_WORK(&clt_path->err_recovery_work, rtrs_clt_err_recovery_work); in alloc_path()
1577 INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work); in alloc_path()
1580 clt_path->mp_skip_entry = alloc_percpu(typeof(*clt_path->mp_skip_entry)); in alloc_path()
1581 if (!clt_path->mp_skip_entry) in alloc_path()
1585 INIT_LIST_HEAD(per_cpu_ptr(clt_path->mp_skip_entry, cpu)); in alloc_path()
1587 err = rtrs_clt_init_stats(clt_path->stats); in alloc_path()
1594 free_percpu(clt_path->mp_skip_entry); in alloc_path()
1596 kfree(clt_path->stats); in alloc_path()
1598 kfree(clt_path->s.con); in alloc_path()
1607 free_percpu(clt_path->mp_skip_entry); in free_path()
1608 mutex_destroy(&clt_path->init_mutex); in free_path()
1609 kfree(clt_path->s.con); in free_path()
1610 kfree(clt_path->rbufs); in free_path()
1620 return -ENOMEM; in create_con()
1623 con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids; in create_con()
1624 con->c.cid = cid; in create_con()
1625 con->c.path = &clt_path->s; in create_con()
1627 atomic_set(&con->c.wr_cnt, 1); in create_con()
1628 mutex_init(&con->con_mutex); in create_con()
1630 clt_path->s.con[cid] = &con->c; in create_con()
1637 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in destroy_con()
1639 clt_path->s.con[con->c.cid] = NULL; in destroy_con()
1640 mutex_destroy(&con->con_mutex); in destroy_con()
1646 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in create_con_cq_qp()
1651 lockdep_assert_held(&con->con_mutex); in create_con_cq_qp()
1652 if (con->c.cid == 0) { in create_con_cq_qp()
1655 if (WARN_ON(clt_path->s.dev)) in create_con_cq_qp()
1656 return -EINVAL; in create_con_cq_qp()
1663 clt_path->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device, in create_con_cq_qp()
1665 if (!clt_path->s.dev) { in create_con_cq_qp()
1666 rtrs_wrn(clt_path->clt, in create_con_cq_qp()
1667 "rtrs_ib_dev_find_get_or_add(): no memory\n"); in create_con_cq_qp()
1668 return -ENOMEM; in create_con_cq_qp()
1670 clt_path->s.dev_ref = 1; in create_con_cq_qp()
1672 wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr; in create_con_cq_qp()
1689 if (WARN_ON(!clt_path->s.dev)) in create_con_cq_qp()
1690 return -EINVAL; in create_con_cq_qp()
1691 if (WARN_ON(!clt_path->queue_depth)) in create_con_cq_qp()
1692 return -EINVAL; in create_con_cq_qp()
1694 wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr; in create_con_cq_qp()
1696 clt_path->s.dev_ref++; in create_con_cq_qp()
1699 clt_path->queue_depth * 4 + 1); in create_con_cq_qp()
1701 clt_path->queue_depth * 3 + 1); in create_con_cq_qp()
1704 atomic_set(&con->c.sq_wr_avail, max_send_wr); in create_con_cq_qp()
1707 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) { in create_con_cq_qp()
1708 con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp), in create_con_cq_qp()
1710 clt_path->s.dev->ib_dev, in create_con_cq_qp()
1713 if (!con->rsp_ius) in create_con_cq_qp()
1714 return -ENOMEM; in create_con_cq_qp()
1715 con->queue_num = cq_num; in create_con_cq_qp()
1717 cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors; in create_con_cq_qp()
1718 if (con->c.cid >= clt_path->s.irq_con_num) in create_con_cq_qp()
1719 err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge, in create_con_cq_qp()
1723 err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge, in create_con_cq_qp()
1735 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in destroy_con_cq_qp()
1741 lockdep_assert_held(&con->con_mutex); in destroy_con_cq_qp()
1742 rtrs_cq_qp_destroy(&con->c); in destroy_con_cq_qp()
1743 if (con->rsp_ius) { in destroy_con_cq_qp()
1744 rtrs_iu_free(con->rsp_ius, clt_path->s.dev->ib_dev, in destroy_con_cq_qp()
1745 con->queue_num); in destroy_con_cq_qp()
1746 con->rsp_ius = NULL; in destroy_con_cq_qp()
1747 con->queue_num = 0; in destroy_con_cq_qp()
1749 if (clt_path->s.dev_ref && !--clt_path->s.dev_ref) { in destroy_con_cq_qp()
1750 rtrs_ib_dev_put(clt_path->s.dev); in destroy_con_cq_qp()
1751 clt_path->s.dev = NULL; in destroy_con_cq_qp()
1757 rdma_disconnect(con->c.cm_id); in stop_cm()
1758 if (con->c.qp) in stop_cm()
1759 ib_drain_qp(con->c.qp); in stop_cm()
1764 rdma_destroy_id(con->c.cm_id); in destroy_cm()
1765 con->c.cm_id = NULL; in destroy_cm()
1770 struct rtrs_path *s = con->c.path; in rtrs_rdma_addr_resolved()
1773 mutex_lock(&con->con_mutex); in rtrs_rdma_addr_resolved()
1775 mutex_unlock(&con->con_mutex); in rtrs_rdma_addr_resolved()
1780 err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS); in rtrs_rdma_addr_resolved()
1789 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_rdma_route_resolved()
1790 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_rdma_route_resolved()
1806 .cid = cpu_to_le16(con->c.cid), in rtrs_rdma_route_resolved()
1807 .cid_num = cpu_to_le16(clt_path->s.con_num), in rtrs_rdma_route_resolved()
1808 .recon_cnt = cpu_to_le16(clt_path->s.recon_cnt), in rtrs_rdma_route_resolved()
1810 msg.first_conn = clt_path->for_new_clt ? FIRST_CONN : 0; in rtrs_rdma_route_resolved()
1811 uuid_copy(&msg.sess_uuid, &clt_path->s.uuid); in rtrs_rdma_route_resolved()
1812 uuid_copy(&msg.paths_uuid, &clt->paths_uuid); in rtrs_rdma_route_resolved()
1814  		err = rdma_connect_locked(con->c.cm_id, &param); in rtrs_rdma_route_resolved()
1824 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_rdma_conn_established()
1825 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_rdma_conn_established()
1831 msg = ev->param.conn.private_data; in rtrs_rdma_conn_established()
1832 len = ev->param.conn.private_data_len; in rtrs_rdma_conn_established()
1835 return -ECONNRESET; in rtrs_rdma_conn_established()
1837 if (le16_to_cpu(msg->magic) != RTRS_MAGIC) { in rtrs_rdma_conn_established()
1839 return -ECONNRESET; in rtrs_rdma_conn_established()
1841 version = le16_to_cpu(msg->version); in rtrs_rdma_conn_established()
1845 return -ECONNRESET; in rtrs_rdma_conn_established()
1847 errno = le16_to_cpu(msg->errno); in rtrs_rdma_conn_established()
1851 return -ECONNRESET; in rtrs_rdma_conn_established()
1853 if (con->c.cid == 0) { in rtrs_rdma_conn_established()
1854 queue_depth = le16_to_cpu(msg->queue_depth); in rtrs_rdma_conn_established()
1856 if (clt_path->queue_depth > 0 && queue_depth != clt_path->queue_depth) { in rtrs_rdma_conn_established()
1862 clt_path->reconnect_attempts = -1; in rtrs_rdma_conn_established()
1864 "Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n"); in rtrs_rdma_conn_established()
1865 return -ECONNRESET; in rtrs_rdma_conn_established()
1868 if (!clt_path->rbufs) { in rtrs_rdma_conn_established()
1869 clt_path->rbufs = kcalloc(queue_depth, in rtrs_rdma_conn_established()
1870 sizeof(*clt_path->rbufs), in rtrs_rdma_conn_established()
1872 if (!clt_path->rbufs) in rtrs_rdma_conn_established()
1873 return -ENOMEM; in rtrs_rdma_conn_established()
1875 clt_path->queue_depth = queue_depth; in rtrs_rdma_conn_established()
1876 clt_path->s.signal_interval = min_not_zero(queue_depth, in rtrs_rdma_conn_established()
1878 clt_path->max_hdr_size = le32_to_cpu(msg->max_hdr_size); in rtrs_rdma_conn_established()
1879 clt_path->max_io_size = le32_to_cpu(msg->max_io_size); in rtrs_rdma_conn_established()
1880 clt_path->flags = le32_to_cpu(msg->flags); in rtrs_rdma_conn_established()
1881 clt_path->chunk_size = clt_path->max_io_size + clt_path->max_hdr_size; in rtrs_rdma_conn_established()
1886 * higher - client does not care and uses cached minimum. in rtrs_rdma_conn_established()
1891 mutex_lock(&clt->paths_mutex); in rtrs_rdma_conn_established()
1892 clt->queue_depth = clt_path->queue_depth; in rtrs_rdma_conn_established()
1893 clt->max_io_size = min_not_zero(clt_path->max_io_size, in rtrs_rdma_conn_established()
1894 clt->max_io_size); in rtrs_rdma_conn_established()
1895 mutex_unlock(&clt->paths_mutex); in rtrs_rdma_conn_established()
1900 clt_path->hca_port = con->c.cm_id->port_num; in rtrs_rdma_conn_established()
1901 scnprintf(clt_path->hca_name, sizeof(clt_path->hca_name), in rtrs_rdma_conn_established()
1902 clt_path->s.dev->ib_dev->name); in rtrs_rdma_conn_established()
1903 clt_path->s.src_addr = con->c.cm_id->route.addr.src_addr; in rtrs_rdma_conn_established()
1905 clt_path->for_new_clt = 1; in rtrs_rdma_conn_established()
1913 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in flag_success_on_conn()
1915 atomic_inc(&clt_path->connected_cnt); in flag_success_on_conn()
1916 con->cm_err = 1; in flag_success_on_conn()
1922 struct rtrs_path *s = con->c.path; in rtrs_rdma_conn_rejected()
1928 status = ev->status; in rtrs_rdma_conn_rejected()
1929 rej_msg = rdma_reject_msg(con->c.cm_id, status); in rtrs_rdma_conn_rejected()
1930 msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len); in rtrs_rdma_conn_rejected()
1933 errno = (int16_t)le16_to_cpu(msg->errno); in rtrs_rdma_conn_rejected()
1934 if (errno == -EBUSY) in rtrs_rdma_conn_rejected()
1947 return -ECONNRESET; in rtrs_rdma_conn_rejected()
1955 queue_work(rtrs_wq, &clt_path->close_work); in rtrs_clt_close_conns()
1957 flush_work(&clt_path->close_work); in rtrs_clt_close_conns()
1962 if (con->cm_err == 1) { in flag_error_on_conn()
1965 clt_path = to_clt_path(con->c.path); in flag_error_on_conn()
1966 if (atomic_dec_and_test(&clt_path->connected_cnt)) in flag_error_on_conn()
1968 wake_up(&clt_path->state_wq); in flag_error_on_conn()
1970 con->cm_err = cm_err; in flag_error_on_conn()
1976 struct rtrs_clt_con *con = cm_id->context; in rtrs_clt_rdma_cm_handler()
1977 struct rtrs_path *s = con->c.path; in rtrs_clt_rdma_cm_handler()
1981 switch (ev->event) { in rtrs_clt_rdma_cm_handler()
1996 wake_up(&clt_path->state_wq); in rtrs_clt_rdma_cm_handler()
2004 /* No message for disconnecting */ in rtrs_clt_rdma_cm_handler()
2005 cm_err = -ECONNRESET; in rtrs_clt_rdma_cm_handler()
2012 rdma_event_msg(ev->event), ev->status); in rtrs_clt_rdma_cm_handler()
2013 cm_err = -ECONNRESET; in rtrs_clt_rdma_cm_handler()
2018 rdma_event_msg(ev->event), ev->status); in rtrs_clt_rdma_cm_handler()
2019 cm_err = -EHOSTUNREACH; in rtrs_clt_rdma_cm_handler()
2025 rtrs_wrn_rl(s, "CM event: %s, status: %d\n", rdma_event_msg(ev->event), in rtrs_clt_rdma_cm_handler()
2026 ev->status); in rtrs_clt_rdma_cm_handler()
2031 rdma_event_msg(ev->event), ev->status); in rtrs_clt_rdma_cm_handler()
2032 cm_err = -ECONNRESET; in rtrs_clt_rdma_cm_handler()
2051 struct rtrs_path *s = con->c.path; in create_cm()
2057 clt_path->s.dst_addr.ss_family == AF_IB ? in create_cm()
2063 con->c.cm_id = cm_id; in create_cm()
2064 con->cm_err = 0; in create_cm()
2071 err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr, in create_cm()
2072 (struct sockaddr *)&clt_path->s.dst_addr, in create_cm()
2084 clt_path->state_wq, in create_cm()
2085 con->cm_err || clt_path->state != RTRS_CLT_CONNECTING, in create_cm()
2087 if (err == 0 || err == -ERESTARTSYS) { in create_cm()
2089 err = -ETIMEDOUT; in create_cm()
2093 if (con->cm_err < 0) in create_cm()
2094 return con->cm_err; in create_cm()
2095 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) in create_cm()
2097 return -ECONNABORTED; in create_cm()
2104 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_clt_path_up()
2114 mutex_lock(&clt->paths_ev_mutex); in rtrs_clt_path_up()
2115 up = ++clt->paths_up; in rtrs_clt_path_up()
2121 if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num) in rtrs_clt_path_up()
2122 clt->paths_up = clt->paths_num; in rtrs_clt_path_up()
2124 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED); in rtrs_clt_path_up()
2125 mutex_unlock(&clt->paths_ev_mutex); in rtrs_clt_path_up()
2128 clt_path->established = true; in rtrs_clt_path_up()
2129 clt_path->reconnect_attempts = 0; in rtrs_clt_path_up()
2130 clt_path->stats->reconnects.successful_cnt++; in rtrs_clt_path_up()
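/*
 * A worked example of the paths_up accounting above: paths_up starts at
 * MAX_PATHS_NUM (see alloc_clt() below), so the initial connects made by
 * rtrs_clt_open() push it to MAX_PATHS_NUM + 1, MAX_PATHS_NUM + 2, ... and
 * it is clamped back down to paths_num once the last path comes up.  Only
 * when the counter later climbs back from zero (all paths dropped, then one
 * reconnected) is RTRS_CLT_LINK_EV_RECONNECTED reported to the user, so the
 * initial connects never generate the event.
 */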
2135 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_clt_path_down()
2137 if (!clt_path->established) in rtrs_clt_path_down()
2140 clt_path->established = false; in rtrs_clt_path_down()
2141 mutex_lock(&clt->paths_ev_mutex); in rtrs_clt_path_down()
2142 WARN_ON(!clt->paths_up); in rtrs_clt_path_down()
2143 if (--clt->paths_up == 0) in rtrs_clt_path_down()
2144 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED); in rtrs_clt_path_down()
2145 mutex_unlock(&clt->paths_ev_mutex); in rtrs_clt_path_down()
2153 WARN_ON(READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED); in rtrs_clt_stop_and_destroy_conns()
2159 mutex_lock(&clt_path->init_mutex); in rtrs_clt_stop_and_destroy_conns()
2160 mutex_unlock(&clt_path->init_mutex); in rtrs_clt_stop_and_destroy_conns()
2168 rtrs_stop_hb(&clt_path->s); in rtrs_clt_stop_and_destroy_conns()
2177 for (cid = 0; cid < clt_path->s.con_num; cid++) { in rtrs_clt_stop_and_destroy_conns()
2178 if (!clt_path->s.con[cid]) in rtrs_clt_stop_and_destroy_conns()
2180 con = to_clt_con(clt_path->s.con[cid]); in rtrs_clt_stop_and_destroy_conns()
2195 wait_event_timeout(clt_path->state_wq, in rtrs_clt_stop_and_destroy_conns()
2196 !atomic_read(&clt_path->connected_cnt), in rtrs_clt_stop_and_destroy_conns()
2199 for (cid = 0; cid < clt_path->s.con_num; cid++) { in rtrs_clt_stop_and_destroy_conns()
2200 if (!clt_path->s.con[cid]) in rtrs_clt_stop_and_destroy_conns()
2202 con = to_clt_con(clt_path->s.con[cid]); in rtrs_clt_stop_and_destroy_conns()
2203 mutex_lock(&con->con_mutex); in rtrs_clt_stop_and_destroy_conns()
2205 mutex_unlock(&con->con_mutex); in rtrs_clt_stop_and_destroy_conns()
2213 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_clt_remove_path_from_arr()
2218 mutex_lock(&clt->paths_mutex); in rtrs_clt_remove_path_from_arr()
2219 list_del_rcu(&clt_path->s.entry); in rtrs_clt_remove_path_from_arr()
2244 * [!CONNECTED path] clt->paths_num-- in rtrs_clt_remove_path_from_arr()
2246 * load clt->paths_num from 2 to 1 in rtrs_clt_remove_path_from_arr()
2251 * ends, because expression i < clt->paths_num is false. in rtrs_clt_remove_path_from_arr()
2253 clt->paths_num--; in rtrs_clt_remove_path_from_arr()
2260 next = rtrs_clt_get_next_path_or_null(&clt->paths_list, clt_path); in rtrs_clt_remove_path_from_arr()
2270 ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu); in rtrs_clt_remove_path_from_arr()
2272 lockdep_is_held(&clt->paths_mutex)) != clt_path) in rtrs_clt_remove_path_from_arr()
2298 mutex_unlock(&clt->paths_mutex); in rtrs_clt_remove_path_from_arr()
2303 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_clt_add_path_to_arr()
2305 mutex_lock(&clt->paths_mutex); in rtrs_clt_add_path_to_arr()
2306 clt->paths_num++; in rtrs_clt_add_path_to_arr()
2308 list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list); in rtrs_clt_add_path_to_arr()
2309 mutex_unlock(&clt->paths_mutex); in rtrs_clt_add_path_to_arr()
2318 cancel_work_sync(&clt_path->err_recovery_work); in rtrs_clt_close_work()
2319 cancel_delayed_work_sync(&clt_path->reconnect_dwork); in rtrs_clt_close_work()
2334 clt_path->s.recon_cnt++; in init_conns()
2337 for (cid = 0; cid < clt_path->s.con_num; cid++) { in init_conns()
2342 err = create_cm(to_clt_con(clt_path->s.con[cid])); in init_conns()
2348 * Set the cid to con_num - 1, since if we fail later, we want to stay in bounds. in init_conns()
2350 cid = clt_path->s.con_num - 1; in init_conns()
2363 if (!clt_path->s.con[i]) in init_conns()
2366 con = to_clt_con(clt_path->s.con[i]); in init_conns()
2367 if (con->c.cm_id) { in init_conns()
2369 mutex_lock(&con->con_mutex); in init_conns()
2371 mutex_unlock(&con->con_mutex); in init_conns()
2386 static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc) in rtrs_clt_info_req_done() argument
2388 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); in rtrs_clt_info_req_done()
2389 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_clt_info_req_done()
2392 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); in rtrs_clt_info_req_done()
2393 rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1); in rtrs_clt_info_req_done()
2395 if (wc->status != IB_WC_SUCCESS) { in rtrs_clt_info_req_done()
2396 rtrs_err(clt_path->clt, "Path info request send failed: %s\n", in rtrs_clt_info_req_done()
2397 ib_wc_status_msg(wc->status)); in rtrs_clt_info_req_done()
2411 sg_cnt = le16_to_cpu(msg->sg_cnt); in process_info_rsp()
2412 if (!sg_cnt || (clt_path->queue_depth % sg_cnt)) { in process_info_rsp()
2413 rtrs_err(clt_path->clt, in process_info_rsp()
2416 return -EINVAL; in process_info_rsp()
2421 * the offset inside the memory chunk. in process_info_rsp()
2423 if ((ilog2(sg_cnt - 1) + 1) + (ilog2(clt_path->chunk_size - 1) + 1) > in process_info_rsp()
2425 rtrs_err(clt_path->clt, in process_info_rsp()
2427 MAX_IMM_PAYL_BITS, sg_cnt, clt_path->chunk_size); in process_info_rsp()
2428 return -EINVAL; in process_info_rsp()
2431 for (sgi = 0, i = 0; sgi < sg_cnt && i < clt_path->queue_depth; sgi++) { in process_info_rsp()
2432 const struct rtrs_sg_desc *desc = &msg->desc[sgi]; in process_info_rsp()
2436 addr = le64_to_cpu(desc->addr); in process_info_rsp()
2437 rkey = le32_to_cpu(desc->key); in process_info_rsp()
2438 len = le32_to_cpu(desc->len); in process_info_rsp()
2442 if (!len || (len % clt_path->chunk_size)) { in process_info_rsp()
2443 rtrs_err(clt_path->clt, "Incorrect [%d].len %d\n", in process_info_rsp()
2446 return -EINVAL; in process_info_rsp()
2448 for ( ; len && i < clt_path->queue_depth; i++) { in process_info_rsp()
2449 clt_path->rbufs[i].addr = addr; in process_info_rsp()
2450 clt_path->rbufs[i].rkey = rkey; in process_info_rsp()
2452 len -= clt_path->chunk_size; in process_info_rsp()
2453 addr += clt_path->chunk_size; in process_info_rsp()
2457 if (sgi != sg_cnt || i != clt_path->queue_depth) { in process_info_rsp()
2458 rtrs_err(clt_path->clt, in process_info_rsp()
2460 return -EINVAL; in process_info_rsp()
2462 if (total_len != clt_path->chunk_size * clt_path->queue_depth) { in process_info_rsp()
2463 rtrs_err(clt_path->clt, "Incorrect total_len %d\n", total_len); in process_info_rsp()
2464 return -EINVAL; in process_info_rsp()
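/*
 * A worked example of the loop above: if the server reports sg_cnt == 1 and
 * the path has queue_depth == 512, a single descriptor with
 * len == 512 * chunk_size fills rbufs[0..511] with one shared rkey and with
 * addresses addr, addr + chunk_size, addr + 2 * chunk_size, ..., so every
 * permit ends up mapped to one fixed server-side chunk.
 */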
2470 static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc) in rtrs_clt_info_rsp_done() argument
2472 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); in rtrs_clt_info_rsp_done()
2473 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_clt_info_rsp_done()
2482 WARN_ON(con->c.cid); in rtrs_clt_info_rsp_done()
2483 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); in rtrs_clt_info_rsp_done()
2484 if (wc->status != IB_WC_SUCCESS) { in rtrs_clt_info_rsp_done()
2485 rtrs_err(clt_path->clt, "Path info response recv failed: %s\n", in rtrs_clt_info_rsp_done()
2486 ib_wc_status_msg(wc->status)); in rtrs_clt_info_rsp_done()
2489 WARN_ON(wc->opcode != IB_WC_RECV); in rtrs_clt_info_rsp_done()
2491 if (wc->byte_len < sizeof(*msg)) { in rtrs_clt_info_rsp_done()
2492 rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n", in rtrs_clt_info_rsp_done()
2493 wc->byte_len); in rtrs_clt_info_rsp_done()
2496 ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr, in rtrs_clt_info_rsp_done()
2497 iu->size, DMA_FROM_DEVICE); in rtrs_clt_info_rsp_done()
2498 msg = iu->buf; in rtrs_clt_info_rsp_done()
2499 if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP) { in rtrs_clt_info_rsp_done()
2500 rtrs_err(clt_path->clt, "Path info response is malformed: type %d\n", in rtrs_clt_info_rsp_done()
2501 le16_to_cpu(msg->type)); in rtrs_clt_info_rsp_done()
2505 rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt); in rtrs_clt_info_rsp_done()
2506 if (wc->byte_len < rx_sz) { in rtrs_clt_info_rsp_done()
2507 rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n", in rtrs_clt_info_rsp_done()
2508 wc->byte_len); in rtrs_clt_info_rsp_done()
2523 rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1); in rtrs_clt_info_rsp_done()
2529 struct rtrs_clt_con *usr_con = to_clt_con(clt_path->s.con[0]); in rtrs_send_path_info()
2536 rx_sz += sizeof(struct rtrs_sg_desc) * clt_path->queue_depth; in rtrs_send_path_info()
2539 clt_path->s.dev->ib_dev, DMA_TO_DEVICE, in rtrs_send_path_info()
2541 rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, clt_path->s.dev->ib_dev, in rtrs_send_path_info()
2544 err = -ENOMEM; in rtrs_send_path_info()
2548 err = rtrs_iu_post_recv(&usr_con->c, rx_iu); in rtrs_send_path_info()
2550 rtrs_err(clt_path->clt, "rtrs_iu_post_recv(), err: %d\n", err); in rtrs_send_path_info()
2555 msg = tx_iu->buf; in rtrs_send_path_info()
2556 msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ); in rtrs_send_path_info()
2557 memcpy(msg->pathname, clt_path->s.sessname, sizeof(msg->pathname)); in rtrs_send_path_info()
2559 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, in rtrs_send_path_info()
2560 tx_iu->dma_addr, in rtrs_send_path_info()
2561 tx_iu->size, DMA_TO_DEVICE); in rtrs_send_path_info()
2564 err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL); in rtrs_send_path_info()
2566 rtrs_err(clt_path->clt, "rtrs_iu_post_send(), err: %d\n", err); in rtrs_send_path_info()
2572 wait_event_interruptible_timeout(clt_path->state_wq, in rtrs_send_path_info()
2573 clt_path->state != RTRS_CLT_CONNECTING, in rtrs_send_path_info()
2576 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) { in rtrs_send_path_info()
2577 if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTING_ERR) in rtrs_send_path_info()
2578 err = -ECONNRESET; in rtrs_send_path_info()
2580 err = -ETIMEDOUT; in rtrs_send_path_info()
2585 rtrs_iu_free(tx_iu, clt_path->s.dev->ib_dev, 1); in rtrs_send_path_info()
2587 rtrs_iu_free(rx_iu, clt_path->s.dev->ib_dev, 1); in rtrs_send_path_info()
2597 * init_path() - establishes all path connections and does handshake
2607 .src = &clt_path->s.src_addr, in init_path()
2608 .dst = &clt_path->s.dst_addr, in init_path()
2613 mutex_lock(&clt_path->init_mutex); in init_path()
2616 rtrs_err(clt_path->clt, in init_path()
2618 str, clt_path->hca_name, clt_path->hca_port); in init_path()
2623 rtrs_err(clt_path->clt, in init_path()
2625 err, str, clt_path->hca_name, clt_path->hca_port); in init_path()
2629 rtrs_start_hb(&clt_path->s); in init_path()
2631 mutex_unlock(&clt_path->init_mutex); in init_path()
2644 clt = clt_path->clt; in rtrs_clt_reconnect_work()
2648 if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING) in rtrs_clt_reconnect_work()
2651 if (clt_path->reconnect_attempts >= clt->max_reconnect_attempts) { in rtrs_clt_reconnect_work()
2656 clt_path->reconnect_attempts++; in rtrs_clt_reconnect_work()
2669 clt_path->stats->reconnects.fail_cnt++; in rtrs_clt_reconnect_work()
2670 queue_work(rtrs_wq, &clt_path->err_recovery_work); in rtrs_clt_reconnect_work()
2679 mutex_destroy(&clt->paths_ev_mutex); in rtrs_clt_dev_release()
2680 mutex_destroy(&clt->paths_mutex); in rtrs_clt_dev_release()
2695 return ERR_PTR(-EINVAL); in alloc_clt()
2697 if (strlen(sessname) >= sizeof(clt->sessname)) in alloc_clt()
2698 return ERR_PTR(-EINVAL); in alloc_clt()
2702 return ERR_PTR(-ENOMEM); in alloc_clt()
2704 clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path)); in alloc_clt()
2705 if (!clt->pcpu_path) { in alloc_clt()
2707 return ERR_PTR(-ENOMEM); in alloc_clt()
2710 clt->dev.class = &rtrs_clt_dev_class; in alloc_clt()
2711 clt->dev.release = rtrs_clt_dev_release; in alloc_clt()
2712 uuid_gen(&clt->paths_uuid); in alloc_clt()
2713 INIT_LIST_HEAD_RCU(&clt->paths_list); in alloc_clt()
2714 clt->paths_num = paths_num; in alloc_clt()
2715 clt->paths_up = MAX_PATHS_NUM; in alloc_clt()
2716 clt->port = port; in alloc_clt()
2717 clt->pdu_sz = pdu_sz; in alloc_clt()
2718 clt->max_segments = RTRS_MAX_SEGMENTS; in alloc_clt()
2719 clt->reconnect_delay_sec = reconnect_delay_sec; in alloc_clt()
2720 clt->max_reconnect_attempts = max_reconnect_attempts; in alloc_clt()
2721 clt->priv = priv; in alloc_clt()
2722 clt->link_ev = link_ev; in alloc_clt()
2723 clt->mp_policy = MP_POLICY_MIN_INFLIGHT; in alloc_clt()
2724 strscpy(clt->sessname, sessname, sizeof(clt->sessname)); in alloc_clt()
2725 init_waitqueue_head(&clt->permits_wait); in alloc_clt()
2726 mutex_init(&clt->paths_ev_mutex); in alloc_clt()
2727 mutex_init(&clt->paths_mutex); in alloc_clt()
2728 device_initialize(&clt->dev); in alloc_clt()
2730 err = dev_set_name(&clt->dev, "%s", sessname); in alloc_clt()
2738 dev_set_uevent_suppress(&clt->dev, true); in alloc_clt()
2739 err = device_add(&clt->dev); in alloc_clt()
2743 clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj); in alloc_clt()
2744 if (!clt->kobj_paths) { in alloc_clt()
2745 err = -ENOMEM; in alloc_clt()
2750 kobject_del(clt->kobj_paths); in alloc_clt()
2751 kobject_put(clt->kobj_paths); in alloc_clt()
2754 dev_set_uevent_suppress(&clt->dev, false); in alloc_clt()
2755 kobject_uevent(&clt->dev.kobj, KOBJ_ADD); in alloc_clt()
2759 device_del(&clt->dev); in alloc_clt()
2761 free_percpu(clt->pcpu_path); in alloc_clt()
2762 put_device(&clt->dev); in alloc_clt()
2768 free_percpu(clt->pcpu_path); in free_clt()
2773 device_unregister(&clt->dev); in free_clt()
2777 * rtrs_clt_open() - Open a path to an RTRS server
2786  *			    up, 0 for disabled, -1 for forever
2807 err = -EINVAL; in rtrs_clt_open()
2811 clt = alloc_clt(pathname, paths_num, port, pdu_sz, ops->priv, in rtrs_clt_open()
2812 ops->link_ev, in rtrs_clt_open()
2829 clt_path->for_new_clt = 1; in rtrs_clt_open()
2830 list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list); in rtrs_clt_open()
2834 list_del_rcu(&clt_path->s.entry); in rtrs_clt_open()
2836 free_percpu(clt_path->stats->pcpu_stats); in rtrs_clt_open()
2837 kfree(clt_path->stats); in rtrs_clt_open()
2844 list_del_rcu(&clt_path->s.entry); in rtrs_clt_open()
2846 free_percpu(clt_path->stats->pcpu_stats); in rtrs_clt_open()
2847 kfree(clt_path->stats); in rtrs_clt_open()
2859 list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) { in rtrs_clt_open()
2862 kobject_put(&clt_path->kobj); in rtrs_clt_open()
2873 * rtrs_clt_close() - Close a path
2884 list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) { in rtrs_clt_close()
2887 kobject_put(&clt_path->kobj); in rtrs_clt_close()
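/*
 * A minimal open/close sketch for the two entry points above, assuming the
 * rtrs_clt_open()/rtrs_clt_close() prototypes exported through rtrs.h (ops,
 * session name, struct rtrs_addr array, path count, port, pdu_sz, reconnect
 * delay, max reconnect attempts, number of poll queues); my_link_ev(),
 * my_open() and the literal session name are hypothetical.
 */
static void my_link_ev(void *priv, enum rtrs_clt_link_ev ev)
{
	/* RTRS_CLT_LINK_EV_RECONNECTED or RTRS_CLT_LINK_EV_DISCONNECTED */
}

static struct rtrs_clt_sess *my_open(struct rtrs_addr *paths, size_t path_cnt,
				     u16 port)
{
	struct rtrs_clt_ops ops = {
		.priv	 = NULL,
		.link_ev = my_link_ev,
	};

	/* Returns an ERR_PTR() on failure; pair with rtrs_clt_close(). */
	return rtrs_clt_open(&ops, "my_session", paths, path_cnt, port,
			     0 /* pdu_sz */, 5 /* reconnect_delay_sec */,
			     15 /* max_reconnect_attempts */,
			     0 /* nr_poll_queues */);
}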
2897 int err = -EBUSY; in rtrs_clt_reconnect_from_sysfs()
2904 clt_path->reconnect_attempts = 0; in rtrs_clt_reconnect_from_sysfs()
2906 queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0); in rtrs_clt_reconnect_from_sysfs()
2914 flush_delayed_work(&clt_path->reconnect_dwork); in rtrs_clt_reconnect_from_sysfs()
2915 err = (READ_ONCE(clt_path->state) == in rtrs_clt_reconnect_from_sysfs()
2916 RTRS_CLT_CONNECTED ? 0 : -ENOTCONN); in rtrs_clt_reconnect_from_sysfs()
2931 * 1. State was changed to DEAD - we were fast and nobody in rtrs_clt_remove_path_from_sysfs()
2934 * 2. State was observed as DEAD - we have someone in parallel in rtrs_clt_remove_path_from_sysfs()
2947 kobject_put(&clt_path->kobj); in rtrs_clt_remove_path_from_sysfs()
2955 clt->max_reconnect_attempts = (unsigned int)value; in rtrs_clt_set_max_reconnect_attempts()
2960 return (int)clt->max_reconnect_attempts; in rtrs_clt_get_max_reconnect_attempts()
2964 * rtrs_clt_request() - Request data transfer to/from server via RDMA.
2996 int err = -ECONNABORTED, i; in rtrs_clt_request()
3015 (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { in rtrs_clt_request()
3016 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) in rtrs_clt_request()
3019 if (usr_len + hdr_len > clt_path->max_hdr_size) { in rtrs_clt_request()
3020 rtrs_wrn_rl(clt_path->clt, in rtrs_clt_request()
3023 usr_len, hdr_len, clt_path->max_hdr_size); in rtrs_clt_request()
3024 err = -EMSGSIZE; in rtrs_clt_request()
3027 req = rtrs_clt_get_req(clt_path, ops->conf_fn, permit, ops->priv, in rtrs_clt_request()
3035 req->in_use = false; in rtrs_clt_request()
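/*
 * A caller-side sketch of rtrs_clt_request() as documented above, assuming
 * the kvec + struct rtrs_clt_req_ops based prototype from rtrs.h; struct
 * my_hdr, my_io_done() and my_submit_write() are hypothetical stand-ins for
 * the user protocol header and completion handling.
 */
static void my_io_done(void *priv, int errno)
{
	/* Invoked once the transfer is confirmed or has failed. */
}

static int my_submit_write(struct rtrs_clt_sess *sess,
			   struct rtrs_permit *permit, struct my_hdr *hdr,
			   struct scatterlist *sg, unsigned int sg_cnt,
			   size_t data_len)
{
	struct rtrs_clt_req_ops req_ops = {
		.priv	 = NULL,
		.conf_fn = my_io_done,
	};
	struct kvec vec = {
		.iov_base = hdr,
		.iov_len  = sizeof(*hdr),
	};

	return rtrs_clt_request(WRITE, &req_ops, sess, permit, &vec, 1,
				data_len, sg, sg_cnt);
}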
3050 /* If no path, return -1 for block layer not to try again */ in rtrs_clt_rdma_cq_direct()
3051 int cnt = -1; in rtrs_clt_rdma_cq_direct()
3058 (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { in rtrs_clt_rdma_cq_direct()
3059 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) in rtrs_clt_rdma_cq_direct()
3062 con = clt_path->s.con[index + 1]; in rtrs_clt_rdma_cq_direct()
3063 cnt = ib_process_cq_direct(con->cq, -1); in rtrs_clt_rdma_cq_direct()
3075 * rtrs_clt_query() - queries RTRS session attributes
3080 * -ECOMM no connection to the server
3085 return -ECOMM; in rtrs_clt_query()
3087 attr->queue_depth = clt->queue_depth; in rtrs_clt_query()
3088 attr->max_segments = clt->max_segments; in rtrs_clt_query()
3090 attr->max_io_size = min_t(int, clt->max_io_size, in rtrs_clt_query()
3091 clt->max_segments * SZ_4K); in rtrs_clt_query()
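/*
 * A short sketch of the query above, assuming struct rtrs_attrs from rtrs.h
 * exposes the three fields read here; my_log_attrs() and the pr_debug()
 * output are illustrative only.
 */
static void my_log_attrs(struct rtrs_clt_sess *sess)
{
	struct rtrs_attrs attrs;

	if (!rtrs_clt_query(sess, &attrs))
		pr_debug("rtrs: queue_depth %u, max_io_size %u, max_segments %u\n",
			 attrs.queue_depth, attrs.max_io_size,
			 attrs.max_segments);
}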
3107 mutex_lock(&clt->paths_mutex); in rtrs_clt_create_path_from_sysfs()
3108 if (clt->paths_num == 0) { in rtrs_clt_create_path_from_sysfs()
3114 clt_path->for_new_clt = 1; in rtrs_clt_create_path_from_sysfs()
3117 mutex_unlock(&clt->paths_mutex); in rtrs_clt_create_path_from_sysfs()
3139 free_percpu(clt_path->stats->pcpu_stats); in rtrs_clt_create_path_from_sysfs()
3140 kfree(clt_path->stats); in rtrs_clt_create_path_from_sysfs()
3149 pr_info("Handling event: %s (%d).\n", ib_event_msg(ibevent->event), in rtrs_clt_ib_event_handler()
3150 ibevent->event); in rtrs_clt_ib_event_handler()
3156 INIT_IB_EVENT_HANDLER(&dev->event_handler, dev->ib_dev, in rtrs_clt_ib_dev_init()
3158 ib_register_event_handler(&dev->event_handler); in rtrs_clt_ib_dev_init()
3160 if (!(dev->ib_dev->attrs.device_cap_flags & in rtrs_clt_ib_dev_init()
3162 pr_err("Memory registrations not supported.\n"); in rtrs_clt_ib_dev_init()
3163 return -ENOTSUPP; in rtrs_clt_ib_dev_init()
3171 ib_unregister_event_handler(&dev->event_handler); in rtrs_clt_ib_dev_deinit()
3187 pr_err("Failed to create rtrs-client dev class\n"); in rtrs_client_init()
3193 return -ENOMEM; in rtrs_client_init()