xref: /linux/drivers/infiniband/ulp/rtrs/rtrs-srv.c (revision 32a92f8c89326985e05dce8b22d3f0aa07a3e1bd)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * RDMA Transport Layer
4  *
5  * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
6  * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
7  * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
8  */
9 
10 #undef pr_fmt
11 #define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
12 
13 #include <linux/module.h>
14 
15 #include "rtrs-srv.h"
16 #include "rtrs-log.h"
17 #include <rdma/ib_cm.h>
18 #include <rdma/ib_verbs.h>
19 #include "rtrs-srv-trace.h"
20 
21 MODULE_DESCRIPTION("RDMA Transport Server");
22 MODULE_LICENSE("GPL");
23 
24 /* Must be power of 2, see mask from mr->page_size in ib_sg_to_pages() */
25 #define DEFAULT_MAX_CHUNK_SIZE (128 << 10)
26 #define DEFAULT_SESS_QUEUE_DEPTH 512
27 #define MAX_HDR_SIZE PAGE_SIZE
28 
29 static const struct rtrs_rdma_dev_pd_ops dev_pd_ops;
30 static struct rtrs_rdma_dev_pd dev_pd = {
31 	.ops = &dev_pd_ops
32 };
33 const struct class rtrs_dev_class = {
34 	.name = "rtrs-server",
35 };
36 static struct rtrs_srv_ib_ctx ib_ctx;
37 
38 static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
39 static int __read_mostly sess_queue_depth = DEFAULT_SESS_QUEUE_DEPTH;
40 
41 static bool always_invalidate = true;
42 module_param(always_invalidate, bool, 0444);
43 MODULE_PARM_DESC(always_invalidate,
44 		 "Invalidate memory registration for contiguous memory regions before accessing.");
45 
46 module_param_named(max_chunk_size, max_chunk_size, int, 0444);
47 MODULE_PARM_DESC(max_chunk_size,
48 		 "Max size for each IO request, in bytes (default: "
49 		 __stringify(DEFAULT_MAX_CHUNK_SIZE) ")");
50 
51 module_param_named(sess_queue_depth, sess_queue_depth, int, 0444);
52 MODULE_PARM_DESC(sess_queue_depth,
53 		 "Number of buffers for pending I/O requests to allocate per session. Maximum: "
54 		 __stringify(MAX_SESS_QUEUE_DEPTH) " (default: "
55 		 __stringify(DEFAULT_SESS_QUEUE_DEPTH) ")");
56 
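/*
 * Illustrative usage of the parameters above (the module name rtrs_server
 * is an assumption based on KBUILD_MODNAME; all three parameters are
 * 0444, i.e. read-only after load):
 *
 *	modprobe rtrs_server sess_queue_depth=256 max_chunk_size=65536
 */
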
57 static cpumask_t cq_affinity_mask = { CPU_BITS_ALL };
58 
59 static struct workqueue_struct *rtrs_wq;
60 
61 static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c)
62 {
63 	return container_of(c, struct rtrs_srv_con, c);
64 }
65 
66 static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
67 				  enum rtrs_srv_state new_state)
68 {
69 	enum rtrs_srv_state old_state;
70 	bool changed = false;
71 	unsigned long flags;
72 
73 	spin_lock_irqsave(&srv_path->state_lock, flags);
74 	old_state = srv_path->state;
75 	switch (new_state) {
76 	case RTRS_SRV_CONNECTED:
77 		if (old_state == RTRS_SRV_CONNECTING)
78 			changed = true;
79 		break;
80 	case RTRS_SRV_CLOSING:
81 		if (old_state == RTRS_SRV_CONNECTING ||
82 		    old_state == RTRS_SRV_CONNECTED)
83 			changed = true;
84 		break;
85 	case RTRS_SRV_CLOSED:
86 		if (old_state == RTRS_SRV_CLOSING)
87 			changed = true;
88 		break;
89 	default:
90 		break;
91 	}
92 	if (changed)
93 		srv_path->state = new_state;
94 	spin_unlock_irqrestore(&srv_path->state_lock, flags);
95 
96 	return changed;
97 }
98 
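/*
 * For reference, the transitions permitted by the switch above:
 *
 *	RTRS_SRV_CONNECTING -> RTRS_SRV_CONNECTED -> RTRS_SRV_CLOSING -> RTRS_SRV_CLOSED
 *	RTRS_SRV_CONNECTING ------------------------^
 *
 * Any other transition is ignored and the function returns false.
 */
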
99 static void free_id(struct rtrs_srv_op *id)
100 {
101 	if (!id)
102 		return;
103 	kfree(id);
104 }
105 
106 static void rtrs_srv_free_ops_ids(struct rtrs_srv_path *srv_path)
107 {
108 	struct rtrs_srv_sess *srv = srv_path->srv;
109 	int i;
110 
111 	if (srv_path->ops_ids) {
112 		for (i = 0; i < srv->queue_depth; i++)
113 			free_id(srv_path->ops_ids[i]);
114 		kfree(srv_path->ops_ids);
115 		srv_path->ops_ids = NULL;
116 	}
117 }
118 
119 static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
120 
121 static struct ib_cqe io_comp_cqe = {
122 	.done = rtrs_srv_rdma_done
123 };
124 
125 static inline void rtrs_srv_inflight_ref_release(struct percpu_ref *ref)
126 {
127 	struct rtrs_srv_path *srv_path = container_of(ref,
128 						      struct rtrs_srv_path,
129 						      ids_inflight_ref);
130 
131 	percpu_ref_exit(&srv_path->ids_inflight_ref);
132 	complete(&srv_path->complete_done);
133 }
134 
135 static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_path *srv_path)
136 {
137 	struct rtrs_srv_sess *srv = srv_path->srv;
138 	struct rtrs_srv_op *id;
139 	int i, ret;
140 
141 	srv_path->ops_ids = kzalloc_objs(*srv_path->ops_ids, srv->queue_depth);
142 	if (!srv_path->ops_ids)
143 		goto err;
144 
145 	for (i = 0; i < srv->queue_depth; ++i) {
146 		id = kzalloc_obj(*id);
147 		if (!id)
148 			goto err;
149 
150 		srv_path->ops_ids[i] = id;
151 	}
152 
153 	ret = percpu_ref_init(&srv_path->ids_inflight_ref,
154 			      rtrs_srv_inflight_ref_release, 0, GFP_KERNEL);
155 	if (ret) {
156 		pr_err("Percpu reference init failed\n");
157 		goto err;
158 	}
159 	init_completion(&srv_path->complete_done);
160 
161 	return 0;
162 
163 err:
164 	rtrs_srv_free_ops_ids(srv_path);
165 	return -ENOMEM;
166 }
167 
168 static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_path *srv_path)
169 {
170 	percpu_ref_get(&srv_path->ids_inflight_ref);
171 }
172 
173 static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_path *srv_path)
174 {
175 	percpu_ref_put(&srv_path->ids_inflight_ref);
176 }
177 
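/*
 * Every request handed to the upper layer must be bracketed by the two
 * helpers above; once rtrs_srv_close_work() has called percpu_ref_kill(),
 * the final put triggers rtrs_srv_inflight_ref_release(), which signals
 * complete_done. A minimal sketch of the expected pairing (illustrative,
 * not a real call site; hand_request_to_upper_layer() is hypothetical):
 *
 *	rtrs_srv_get_ops_ids(srv_path);
 *	err = hand_request_to_upper_layer(id);
 *	if (err)
 *		rtrs_srv_put_ops_ids(srv_path);
 */
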
178 static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
179 {
180 	struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
181 	struct rtrs_path *s = con->c.path;
182 	struct rtrs_srv_path *srv_path = to_srv_path(s);
183 
184 	if (wc->status != IB_WC_SUCCESS) {
185 		rtrs_err_rl(s, "REG MR failed: %s\n",
186 			  ib_wc_status_msg(wc->status));
187 		close_path(srv_path);
188 		return;
189 	}
190 }
191 
192 static struct ib_cqe local_reg_cqe = {
193 	.done = rtrs_srv_reg_mr_done
194 };
195 
196 static int rdma_write_sg(struct rtrs_srv_op *id)
197 {
198 	struct rtrs_path *s = id->con->c.path;
199 	struct rtrs_srv_path *srv_path = to_srv_path(s);
200 	dma_addr_t dma_addr = srv_path->dma_addr[id->msg_id];
201 	struct rtrs_srv_mr *srv_mr;
202 	struct ib_send_wr inv_wr;
203 	struct ib_rdma_wr imm_wr;
204 	struct ib_rdma_wr *wr = NULL;
205 	enum ib_send_flags flags;
206 	size_t sg_cnt;
207 	int err, offset;
208 	bool need_inval;
209 	struct ib_reg_wr rwr;
210 	struct ib_sge *plist;
211 	struct ib_sge list;
212 
213 	sg_cnt = le16_to_cpu(id->rd_msg->sg_cnt);
214 	need_inval = le16_to_cpu(id->rd_msg->flags) & RTRS_MSG_NEED_INVAL_F;
215 	if (sg_cnt != 1)
216 		return -EINVAL;
217 
218 	offset = 0;
219 
220 	wr		= &id->tx_wr;
221 	plist		= &id->tx_sg;
222 	plist->addr	= dma_addr + offset;
223 	plist->length	= le32_to_cpu(id->rd_msg->desc[0].len);
224 
225 	/* WR will fail with length error
226 	 * if this is 0
227 	 */
228 	if (plist->length == 0) {
229 		rtrs_err(s, "Invalid RDMA-Write sg list length 0\n");
230 		return -EINVAL;
231 	}
232 
233 	plist->lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
234 	offset += plist->length;
235 
236 	wr->wr.sg_list	= plist;
237 	wr->wr.num_sge	= 1;
238 	wr->remote_addr	= le64_to_cpu(id->rd_msg->desc[0].addr);
239 	wr->rkey	= le32_to_cpu(id->rd_msg->desc[0].key);
240 
241 	wr->wr.opcode = IB_WR_RDMA_WRITE;
242 	wr->wr.wr_cqe   = &io_comp_cqe;
243 	wr->wr.ex.imm_data = 0;
244 	wr->wr.send_flags  = 0;
245 
246 	if (need_inval && always_invalidate) {
247 		wr->wr.next = &rwr.wr;
248 		rwr.wr.next = &inv_wr;
249 		inv_wr.next = &imm_wr.wr;
250 	} else if (always_invalidate) {
251 		wr->wr.next = &rwr.wr;
252 		rwr.wr.next = &imm_wr.wr;
253 	} else if (need_inval) {
254 		wr->wr.next = &inv_wr;
255 		inv_wr.next = &imm_wr.wr;
256 	} else {
257 		wr->wr.next = &imm_wr.wr;
258 	}
259 	/*
260 	 * From time to time we have to post signaled sends,
261 	 * or the send queue will fill up and only a QP reset can help.
262 	 */
263 	flags = (atomic_inc_return(&id->con->c.wr_cnt) % s->signal_interval) ?
264 		0 : IB_SEND_SIGNALED;
265 
266 	if (need_inval) {
267 		inv_wr.sg_list = NULL;
268 		inv_wr.num_sge = 0;
269 		inv_wr.opcode = IB_WR_SEND_WITH_INV;
270 		inv_wr.wr_cqe   = &io_comp_cqe;
271 		inv_wr.send_flags = 0;
272 		inv_wr.ex.invalidate_rkey = wr->rkey;
273 	}
274 
275 	imm_wr.wr.next = NULL;
276 	if (always_invalidate) {
277 		struct rtrs_msg_rkey_rsp *msg;
278 
279 		srv_mr = &srv_path->mrs[id->msg_id];
280 		rwr.wr.opcode = IB_WR_REG_MR;
281 		rwr.wr.wr_cqe = &local_reg_cqe;
282 		rwr.wr.num_sge = 0;
283 		rwr.mr = srv_mr->mr;
284 		rwr.wr.send_flags = 0;
285 		rwr.key = srv_mr->mr->rkey;
286 		rwr.access = (IB_ACCESS_LOCAL_WRITE |
287 			      IB_ACCESS_REMOTE_WRITE);
288 		msg = srv_mr->iu->buf;
289 		msg->buf_id = cpu_to_le16(id->msg_id);
290 		msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
291 		msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
292 
293 		list.addr   = srv_mr->iu->dma_addr;
294 		list.length = sizeof(*msg);
295 		list.lkey   = srv_path->s.dev->ib_pd->local_dma_lkey;
296 		imm_wr.wr.sg_list = &list;
297 		imm_wr.wr.num_sge = 1;
298 		imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
299 		ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
300 					      srv_mr->iu->dma_addr,
301 					      srv_mr->iu->size, DMA_TO_DEVICE);
302 	} else {
303 		imm_wr.wr.sg_list = NULL;
304 		imm_wr.wr.num_sge = 0;
305 		imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
306 	}
307 	imm_wr.wr.send_flags = flags;
308 	imm_wr.wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id,
309 							     0, need_inval));
310 
311 	imm_wr.wr.wr_cqe   = &io_comp_cqe;
312 	ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, dma_addr,
313 				      offset, DMA_BIDIRECTIONAL);
314 
315 	err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL);
316 	if (err)
317 		rtrs_err(s,
318 			  "Posting RDMA-Write-Request to QP failed, err: %pe\n",
319 			  ERR_PTR(err));
320 
321 	return err;
322 }
323 
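/*
 * The chains posted by rdma_write_sg() above, depending on the two flags
 * (derived from the branches in the function):
 *
 *	need_inval && always_invalidate: WRITE -> REG_MR -> SEND_WITH_INV -> SEND_WITH_IMM
 *	always_invalidate only:          WRITE -> REG_MR -> SEND_WITH_IMM
 *	need_inval only:                 WRITE -> SEND_WITH_INV -> RDMA_WRITE_WITH_IMM
 *	neither:                         WRITE -> RDMA_WRITE_WITH_IMM
 */
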
324 /**
325  * send_io_resp_imm() - respond to client with empty IMM on failed READ/WRITE
326  *                      requests or on successful WRITE request.
327  * @con:	the connection to send back result
328  * @id:		the id associated with the IO
329  * @errno:	the error number of the IO.
330  *
331  * Return 0 on success, errno otherwise.
332  */
333 static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
334 			    int errno)
335 {
336 	struct rtrs_path *s = con->c.path;
337 	struct rtrs_srv_path *srv_path = to_srv_path(s);
338 	struct ib_send_wr inv_wr, *wr = NULL;
339 	struct ib_rdma_wr imm_wr;
340 	struct ib_reg_wr rwr;
341 	struct rtrs_srv_mr *srv_mr;
342 	bool need_inval = false;
343 	enum ib_send_flags flags;
344 	struct ib_sge list;
345 	u32 imm;
346 	int err;
347 
348 	if (id->dir == READ) {
349 		struct rtrs_msg_rdma_read *rd_msg = id->rd_msg;
350 		size_t sg_cnt;
351 
352 		need_inval = le16_to_cpu(rd_msg->flags) &
353 				RTRS_MSG_NEED_INVAL_F;
354 		sg_cnt = le16_to_cpu(rd_msg->sg_cnt);
355 
356 		if (need_inval) {
357 			if (sg_cnt) {
358 				inv_wr.wr_cqe   = &io_comp_cqe;
359 				inv_wr.sg_list = NULL;
360 				inv_wr.num_sge = 0;
361 				inv_wr.opcode = IB_WR_SEND_WITH_INV;
362 				inv_wr.send_flags = 0;
363 				/* Only one key is actually used */
364 				inv_wr.ex.invalidate_rkey =
365 					le32_to_cpu(rd_msg->desc[0].key);
366 			} else {
367 				WARN_ON_ONCE(1);
368 				need_inval = false;
369 			}
370 		}
371 	}
372 
373 	trace_send_io_resp_imm(id, need_inval, always_invalidate, errno);
374 
375 	if (need_inval && always_invalidate) {
376 		wr = &inv_wr;
377 		inv_wr.next = &rwr.wr;
378 		rwr.wr.next = &imm_wr.wr;
379 	} else if (always_invalidate) {
380 		wr = &rwr.wr;
381 		rwr.wr.next = &imm_wr.wr;
382 	} else if (need_inval) {
383 		wr = &inv_wr;
384 		inv_wr.next = &imm_wr.wr;
385 	} else {
386 		wr = &imm_wr.wr;
387 	}
388 	/*
389 	 * From time to time we have to post signaled sends,
390 	 * or the send queue will fill up and only a QP reset can help.
391 	 */
392 	flags = (atomic_inc_return(&con->c.wr_cnt) % s->signal_interval) ?
393 		0 : IB_SEND_SIGNALED;
394 	imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
395 	imm_wr.wr.next = NULL;
396 	if (always_invalidate) {
397 		struct rtrs_msg_rkey_rsp *msg;
398 
399 		srv_mr = &srv_path->mrs[id->msg_id];
400 		rwr.wr.next = &imm_wr.wr;
401 		rwr.wr.opcode = IB_WR_REG_MR;
402 		rwr.wr.wr_cqe = &local_reg_cqe;
403 		rwr.wr.num_sge = 0;
404 		rwr.wr.send_flags = 0;
405 		rwr.mr = srv_mr->mr;
406 		rwr.key = srv_mr->mr->rkey;
407 		rwr.access = (IB_ACCESS_LOCAL_WRITE |
408 			      IB_ACCESS_REMOTE_WRITE);
409 		msg = srv_mr->iu->buf;
410 		msg->buf_id = cpu_to_le16(id->msg_id);
411 		msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
412 		msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
413 
414 		list.addr   = srv_mr->iu->dma_addr;
415 		list.length = sizeof(*msg);
416 		list.lkey   = srv_path->s.dev->ib_pd->local_dma_lkey;
417 		imm_wr.wr.sg_list = &list;
418 		imm_wr.wr.num_sge = 1;
419 		imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
420 		ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
421 					      srv_mr->iu->dma_addr,
422 					      srv_mr->iu->size, DMA_TO_DEVICE);
423 	} else {
424 		imm_wr.wr.sg_list = NULL;
425 		imm_wr.wr.num_sge = 0;
426 		imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
427 	}
428 	imm_wr.wr.send_flags = flags;
429 	imm_wr.wr.wr_cqe   = &io_comp_cqe;
430 
431 	imm_wr.wr.ex.imm_data = cpu_to_be32(imm);
432 
433 	err = ib_post_send(id->con->c.qp, wr, NULL);
434 	if (err)
435 		rtrs_err_rl(s, "Posting RDMA-Reply to QP failed, err: %pe\n",
436 			    ERR_PTR(err));
437 
438 	return err;
439 }
440 
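/*
 * Note on the signaling policy used by both send paths above: with the
 * wr_cnt % signal_interval test, only every signal_interval-th work
 * request is posted with IB_SEND_SIGNALED; the completion handler then
 * returns the whole batch to sq_wr_avail in one atomic_add(), keeping CQ
 * traffic low without letting the send queue run dry.
 */
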
441 void close_path(struct rtrs_srv_path *srv_path)
442 {
443 	if (rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSING))
444 		queue_work(rtrs_wq, &srv_path->close_work);
445 	WARN_ON(srv_path->state != RTRS_SRV_CLOSING);
446 }
447 
448 static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state)
449 {
450 	switch (state) {
451 	case RTRS_SRV_CONNECTING:
452 		return "RTRS_SRV_CONNECTING";
453 	case RTRS_SRV_CONNECTED:
454 		return "RTRS_SRV_CONNECTED";
455 	case RTRS_SRV_CLOSING:
456 		return "RTRS_SRV_CLOSING";
457 	case RTRS_SRV_CLOSED:
458 		return "RTRS_SRV_CLOSED";
459 	default:
460 		return "UNKNOWN";
461 	}
462 }
463 
464 /**
465  * rtrs_srv_resp_rdma() - Finish an RDMA request
466  *
467  * @id:		Internal RTRS operation identifier
468  * @status:	Response Code sent to the other side for this operation.
469  *		0 = success, <0 error
470  * Context: any
471  *
472  * Finish an RDMA operation. A message is sent to the client and the
473  * corresponding memory areas will be released.
474  */
475 bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
476 {
477 	struct rtrs_srv_path *srv_path;
478 	struct rtrs_srv_con *con;
479 	struct rtrs_path *s;
480 	int err;
481 
482 	if (WARN_ON(!id))
483 		return true;
484 
485 	con = id->con;
486 	s = con->c.path;
487 	srv_path = to_srv_path(s);
488 
489 	id->status = status;
490 
491 	if (srv_path->state != RTRS_SRV_CONNECTED) {
492 		rtrs_err_rl(s,
493 			    "Sending I/O response failed, server path %s is disconnected, path state %s\n",
494 			    kobject_name(&srv_path->kobj),
495 			    rtrs_srv_state_str(srv_path->state));
496 		goto out;
497 	}
498 	if (always_invalidate) {
499 		struct rtrs_srv_mr *mr = &srv_path->mrs[id->msg_id];
500 
501 		ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey));
502 	}
503 	if (atomic_sub_return(1, &con->c.sq_wr_avail) < 0) {
504 		rtrs_err(s, "IB send queue full: srv_path=%s cid=%d\n",
505 			 kobject_name(&srv_path->kobj),
506 			 con->c.cid);
507 		atomic_add(1, &con->c.sq_wr_avail);
508 		spin_lock(&con->rsp_wr_wait_lock);
509 		list_add_tail(&id->wait_list, &con->rsp_wr_wait_list);
510 		spin_unlock(&con->rsp_wr_wait_lock);
511 		return false;
512 	}
513 
514 	if (status || id->dir == WRITE || !id->rd_msg->sg_cnt)
515 		err = send_io_resp_imm(con, id, status);
516 	else
517 		err = rdma_write_sg(id);
518 
519 	if (err) {
520 		rtrs_err_rl(s, "IO response failed: %pe: srv_path=%s\n",
521 			    ERR_PTR(err), kobject_name(&srv_path->kobj));
522 		close_path(srv_path);
523 	}
524 out:
525 	rtrs_srv_put_ops_ids(srv_path);
526 	return true;
527 }
528 EXPORT_SYMBOL(rtrs_srv_resp_rdma);
529 
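/*
 * Typical use of rtrs_srv_resp_rdma() by an upper layer (a minimal
 * sketch; rnbd-srv is the in-tree user). The id arrives via the rdma_ev()
 * callback registered by the upper layer:
 *
 *	static void my_process_done(struct rtrs_srv_op *id, int err)
 *	{
 *		// false means the reply was queued on the wr-wait list
 *		// and will be retried from the completion path
 *		rtrs_srv_resp_rdma(id, err);
 *	}
 *
 * my_process_done() is a hypothetical callback, not part of this file.
 */
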
530 /**
531  * rtrs_srv_set_sess_priv() - Set private pointer in rtrs_srv.
532  * @srv:	Session pointer
533  * @priv:	The private pointer that is associated with the session.
534  */
535 void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *srv, void *priv)
536 {
537 	srv->priv = priv;
538 }
539 EXPORT_SYMBOL(rtrs_srv_set_sess_priv);
540 
541 static void unmap_cont_bufs(struct rtrs_srv_path *srv_path)
542 {
543 	int i;
544 
545 	for (i = 0; i < srv_path->mrs_num; i++) {
546 		struct rtrs_srv_mr *srv_mr;
547 
548 		srv_mr = &srv_path->mrs[i];
549 
550 		if (always_invalidate)
551 			rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
552 
553 		ib_dereg_mr(srv_mr->mr);
554 		ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl,
555 				srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
556 		sg_free_table(&srv_mr->sgt);
557 	}
558 	kfree(srv_path->mrs);
559 }
560 
561 static int map_cont_bufs(struct rtrs_srv_path *srv_path)
562 {
563 	struct ib_device *ib_dev = srv_path->s.dev->ib_dev;
564 	struct rtrs_srv_sess *srv = srv_path->srv;
565 	struct rtrs_path *ss = &srv_path->s;
566 	int i, err, mrs_num;
567 	unsigned int chunk_bits;
568 	enum ib_mr_type mr_type;
569 	int chunks_per_mr = 1;
570 	struct sg_table *sgt;
571 	struct ib_mr *mr;
572 
573 	/*
574 	 * Here we map queue_depth chunks to MRs. First we have to
575 	 * figure out how many chunks we can map per MR.
576 	 */
577 	if (always_invalidate) {
578 		/*
579 		 * In order to invalidate each chunk of memory separately,
580 		 * we need one memory region per chunk.
581 		 */
582 		mrs_num = srv->queue_depth;
583 	} else {
584 		chunks_per_mr =
585 			srv_path->s.dev->ib_dev->attrs.max_fast_reg_page_list_len;
586 		mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr);
587 		chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num);
588 	}
589 
590 	srv_path->mrs = kzalloc_objs(*srv_path->mrs, mrs_num);
591 	if (!srv_path->mrs)
592 		return -ENOMEM;
593 
594 	for (srv_path->mrs_num = 0; srv_path->mrs_num < mrs_num;
595 	     srv_path->mrs_num++) {
596 		struct rtrs_srv_mr *srv_mr = &srv_path->mrs[srv_path->mrs_num];
597 		struct scatterlist *s;
598 		int nr, nr_sgt, chunks, ind;
599 
600 		sgt = &srv_mr->sgt;
601 		chunks = chunks_per_mr * srv_path->mrs_num;
602 		if (!always_invalidate)
603 			chunks_per_mr = min_t(int, chunks_per_mr,
604 					      srv->queue_depth - chunks);
605 
606 		err = sg_alloc_table(sgt, chunks_per_mr, GFP_KERNEL);
607 		if (err)
608 			goto err;
609 
610 		for_each_sg(sgt->sgl, s, chunks_per_mr, i)
611 			sg_set_page(s, srv->chunks[chunks + i],
612 				    max_chunk_size, 0);
613 
614 		nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
615 				   sgt->nents, DMA_BIDIRECTIONAL);
616 		if (!nr_sgt) {
617 			err = -EINVAL;
618 			goto free_sg;
619 		}
620 
621 		if (ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
622 			mr_type = IB_MR_TYPE_SG_GAPS;
623 		else
624 			mr_type = IB_MR_TYPE_MEM_REG;
625 
626 		mr = ib_alloc_mr(srv_path->s.dev->ib_pd, mr_type, nr_sgt);
627 		if (IS_ERR(mr)) {
628 			err = PTR_ERR(mr);
629 			goto unmap_sg;
630 		}
631 		nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt,
632 				  NULL, max_chunk_size);
633 		if (nr < nr_sgt) {
634 			err = nr < 0 ? nr : -EINVAL;
635 			goto dereg_mr;
636 		}
637 
638 		if (always_invalidate) {
639 			srv_mr->iu = rtrs_iu_alloc(1,
640 					sizeof(struct rtrs_msg_rkey_rsp),
641 					GFP_KERNEL, srv_path->s.dev->ib_dev,
642 					DMA_TO_DEVICE, rtrs_srv_rdma_done);
643 			if (!srv_mr->iu) {
644 				err = -ENOMEM;
645 				rtrs_err(ss, "rtrs_iu_alloc(), err: %pe\n", ERR_PTR(err));
646 				goto dereg_mr;
647 			}
648 		}
649 
650 		/*
651 		 * Cache DMA addresses by traversing sg entries.  If
652 		 * regions were merged, an inner loop is required to
653 		 * populate the DMA address array by traversing larger
654 		 * regions.
655 		 */
656 		ind = chunks;
657 		for_each_sg(sgt->sgl, s, nr_sgt, i) {
658 			unsigned int dma_len = sg_dma_len(s);
659 			u64 dma_addr = sg_dma_address(s);
660 			u64 dma_addr_end = dma_addr + dma_len;
661 
662 			do {
663 				srv_path->dma_addr[ind++] = dma_addr;
664 				dma_addr += max_chunk_size;
665 			} while (dma_addr < dma_addr_end);
666 		}
667 
668 		ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
669 		srv_mr->mr = mr;
670 	}
671 
672 	chunk_bits = ilog2(srv->queue_depth - 1) + 1;
673 	srv_path->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits);
674 
675 	return 0;
676 
677 dereg_mr:
678 	ib_dereg_mr(mr);
679 unmap_sg:
680 	ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl,
681 			sgt->nents, DMA_BIDIRECTIONAL);
682 free_sg:
683 	sg_free_table(sgt);
684 err:
685 	unmap_cont_bufs(srv_path);
686 
687 	return err;
688 }
689 
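/*
 * Example of the MR layout math in map_cont_bufs() (assuming the default
 * queue_depth of 512 and a device reporting
 * max_fast_reg_page_list_len = 256): the !always_invalidate case creates
 * DIV_ROUND_UP(512, 256) = 2 MRs covering 256 chunks each, while
 * always_invalidate creates one MR per chunk, i.e. 512 MRs.
 */
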
690 static void rtrs_srv_hb_err_handler(struct rtrs_con *c)
691 {
692 	struct rtrs_srv_con *con = container_of(c, typeof(*con), c);
693 	struct rtrs_srv_path *srv_path = to_srv_path(con->c.path);
694 
695 	rtrs_err(con->c.path, "HB err handler for path=%s\n", kobject_name(&srv_path->kobj));
696 	close_path(to_srv_path(c->path));
697 }
698 
699 static void rtrs_srv_init_hb(struct rtrs_srv_path *srv_path)
700 {
701 	rtrs_init_hb(&srv_path->s, &io_comp_cqe,
702 		      RTRS_HB_INTERVAL_MS,
703 		      RTRS_HB_MISSED_MAX,
704 		      rtrs_srv_hb_err_handler,
705 		      rtrs_wq);
706 }
707 
708 static void rtrs_srv_start_hb(struct rtrs_srv_path *srv_path)
709 {
710 	rtrs_start_hb(&srv_path->s);
711 }
712 
713 static void rtrs_srv_stop_hb(struct rtrs_srv_path *srv_path)
714 {
715 	rtrs_stop_hb(&srv_path->s);
716 }
717 
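/*
 * Heartbeats run only on connection 0 (see the WARN_ON(con->c.cid) checks
 * in rtrs_srv_rdma_done()): after RTRS_HB_MISSED_MAX unanswered beats at
 * RTRS_HB_INTERVAL_MS intervals, rtrs_srv_hb_err_handler() closes the
 * path.
 */
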
718 static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
719 {
720 	struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
721 	struct rtrs_path *s = con->c.path;
722 	struct rtrs_srv_path *srv_path = to_srv_path(s);
723 	struct rtrs_iu *iu;
724 
725 	iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
726 	rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
727 
728 	if (wc->status != IB_WC_SUCCESS) {
729 		rtrs_err(s, "Sess info response send failed: %s\n",
730 			  ib_wc_status_msg(wc->status));
731 		close_path(srv_path);
732 		return;
733 	}
734 	WARN_ON(wc->opcode != IB_WC_SEND);
735 }
736 
737 static int rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
738 {
739 	struct rtrs_srv_sess *srv = srv_path->srv;
740 	struct rtrs_srv_ctx *ctx = srv->ctx;
741 	int up, ret = 0;
742 
743 	mutex_lock(&srv->paths_ev_mutex);
744 	up = ++srv->paths_up;
745 	if (up == 1)
746 		ret = ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
747 	mutex_unlock(&srv->paths_ev_mutex);
748 
749 	/* Mark session as established */
750 	if (!ret)
751 		srv_path->established = true;
752 
753 	return ret;
754 }
755 
756 static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path)
757 {
758 	struct rtrs_srv_sess *srv = srv_path->srv;
759 	struct rtrs_srv_ctx *ctx = srv->ctx;
760 
761 	if (!srv_path->established)
762 		return;
763 
764 	srv_path->established = false;
765 	mutex_lock(&srv->paths_ev_mutex);
766 	WARN_ON(!srv->paths_up);
767 	if (--srv->paths_up == 0)
768 		ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_DISCONNECTED, srv->priv);
769 	mutex_unlock(&srv->paths_ev_mutex);
770 }
771 
772 static bool exist_pathname(struct rtrs_srv_ctx *ctx,
773 			   const char *pathname, const uuid_t *path_uuid)
774 {
775 	struct rtrs_srv_sess *srv;
776 	struct rtrs_srv_path *srv_path;
777 	bool found = false;
778 
779 	mutex_lock(&ctx->srv_mutex);
780 	list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
781 		mutex_lock(&srv->paths_mutex);
782 
783 		/* skip the client's own session (same uuid): it may add another path */
784 		if (uuid_equal(&srv->paths_uuid, path_uuid)) {
785 			mutex_unlock(&srv->paths_mutex);
786 			continue;
787 		}
788 
789 		list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
790 			if (strlen(srv_path->s.sessname) == strlen(pathname) &&
791 			    !strcmp(srv_path->s.sessname, pathname)) {
792 				found = true;
793 				break;
794 			}
795 		}
796 		mutex_unlock(&srv->paths_mutex);
797 		if (found)
798 			break;
799 	}
800 	mutex_unlock(&ctx->srv_mutex);
801 	return found;
802 }
803 
804 static int post_recv_path(struct rtrs_srv_path *srv_path);
805 static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno);
806 
807 static int process_info_req(struct rtrs_srv_con *con,
808 			    struct rtrs_msg_info_req *msg)
809 {
810 	struct rtrs_path *s = con->c.path;
811 	struct rtrs_srv_path *srv_path = to_srv_path(s);
812 	struct ib_send_wr *reg_wr = NULL;
813 	struct rtrs_msg_info_rsp *rsp;
814 	struct rtrs_iu *tx_iu;
815 	struct ib_reg_wr *rwr;
816 	int mri, err;
817 	size_t tx_sz;
818 
819 	err = post_recv_path(srv_path);
820 	if (err) {
821 		rtrs_err(s, "post_recv_path(), err: %pe\n", ERR_PTR(err));
822 		return err;
823 	}
824 
825 	if (strchr(msg->pathname, '/') || strchr(msg->pathname, '.')) {
826 		rtrs_err(s, "pathname cannot contain '/' or '.'\n");
827 		return -EINVAL;
828 	}
829 
830 	if (exist_pathname(srv_path->srv->ctx,
831 			   msg->pathname, &srv_path->srv->paths_uuid)) {
832 		rtrs_err(s, "pathname is duplicated: %s\n", msg->pathname);
833 		return -EPERM;
834 	}
835 	strscpy(srv_path->s.sessname, msg->pathname,
836 		sizeof(srv_path->s.sessname));
837 
838 	rwr = kzalloc_objs(*rwr, srv_path->mrs_num);
839 	if (!rwr)
840 		return -ENOMEM;
841 
842 	tx_sz  = sizeof(*rsp);
843 	tx_sz += sizeof(rsp->desc[0]) * srv_path->mrs_num;
844 	tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, srv_path->s.dev->ib_dev,
845 			       DMA_TO_DEVICE, rtrs_srv_info_rsp_done);
846 	if (!tx_iu) {
847 		err = -ENOMEM;
848 		goto rwr_free;
849 	}
850 
851 	rsp = tx_iu->buf;
852 	rsp->type = cpu_to_le16(RTRS_MSG_INFO_RSP);
853 	rsp->sg_cnt = cpu_to_le16(srv_path->mrs_num);
854 
855 	for (mri = 0; mri < srv_path->mrs_num; mri++) {
856 		struct ib_mr *mr = srv_path->mrs[mri].mr;
857 
858 		rsp->desc[mri].addr = cpu_to_le64(mr->iova);
859 		rsp->desc[mri].key  = cpu_to_le32(mr->rkey);
860 		rsp->desc[mri].len  = cpu_to_le32(mr->length);
861 
862 		/*
863 		 * Fill in reg MR request and chain them *backwards*
864 		 */
865 		rwr[mri].wr.next = mri ? &rwr[mri - 1].wr : NULL;
866 		rwr[mri].wr.opcode = IB_WR_REG_MR;
867 		rwr[mri].wr.wr_cqe = &local_reg_cqe;
868 		rwr[mri].wr.num_sge = 0;
869 		rwr[mri].wr.send_flags = 0;
870 		rwr[mri].mr = mr;
871 		rwr[mri].key = mr->rkey;
872 		rwr[mri].access = (IB_ACCESS_LOCAL_WRITE |
873 				   IB_ACCESS_REMOTE_WRITE);
874 		reg_wr = &rwr[mri].wr;
875 	}
876 
877 	err = rtrs_srv_create_path_files(srv_path);
878 	if (err)
879 		goto iu_free;
880 	kobject_get(&srv_path->kobj);
881 	get_device(&srv_path->srv->dev);
882 	if (!rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED)) {
883 		rtrs_err(s, "rtrs_srv_change_state() failed\n");
884 		err = -EINVAL;
885 		goto iu_free;
886 	}
887 
888 	rtrs_srv_start_hb(srv_path);
889 
890 	/*
891 	 * We do not count established connections here; we rely on the
892 	 * client, which sends the info request only after all of its
893 	 * connections have been successfully established.  Thus, simply
894 	 * notify the listener with a proper event if we are the first path.
895 	 */
896 	err = rtrs_srv_path_up(srv_path);
897 	if (err) {
898 		rtrs_err(s, "rtrs_srv_path_up(), err: %pe\n", ERR_PTR(err));
899 		goto iu_free;
900 	}
901 
902 	ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
903 				      tx_iu->dma_addr,
904 				      tx_iu->size, DMA_TO_DEVICE);
905 
906 	/*
907 	 * The path has reached CONNECTED state, so disable the
908 	 * zombie-connection timeout: it only applies while CONNECTING.
909 	 */
910 	srv_path->connection_timeout = 0;
911 
912 	/* Send info response */
913 	err = rtrs_iu_post_send(&con->c, tx_iu, tx_sz, reg_wr);
914 	if (err) {
915 		rtrs_err(s, "rtrs_iu_post_send(), err: %pe\n", ERR_PTR(err));
916 iu_free:
917 		rtrs_iu_free(tx_iu, srv_path->s.dev->ib_dev, 1);
918 	}
919 rwr_free:
920 	kfree(rwr);
921 
922 	return err;
923 }
924 
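/*
 * For reference, the shape of the info exchange handled above: the client
 * sends RTRS_MSG_INFO_REQ on connection 0 once all of its connections are
 * up; the server replies with RTRS_MSG_INFO_RSP carrying one
 * (addr, key, len) descriptor per MR, chained behind the IB_WR_REG_MR
 * requests so the rkeys are valid by the time the response lands.
 */
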
925 static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
926 {
927 	struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
928 	struct rtrs_path *s = con->c.path;
929 	struct rtrs_srv_path *srv_path = to_srv_path(s);
930 	struct rtrs_msg_info_req *msg;
931 	struct rtrs_iu *iu;
932 	int err;
933 
934 	WARN_ON(con->c.cid);
935 
936 	iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
937 	if (wc->status != IB_WC_SUCCESS) {
938 		rtrs_err(s, "Sess info request receive failed: %s\n",
939 			  ib_wc_status_msg(wc->status));
940 		goto close;
941 	}
942 	WARN_ON(wc->opcode != IB_WC_RECV);
943 
944 	if (wc->byte_len < sizeof(*msg)) {
945 		rtrs_err(s, "Sess info request is malformed: size %d\n",
946 			  wc->byte_len);
947 		goto close;
948 	}
949 	ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, iu->dma_addr,
950 				   iu->size, DMA_FROM_DEVICE);
951 	msg = iu->buf;
952 	if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ) {
953 		rtrs_err(s, "Sess info request is malformed: type %d\n",
954 			  le16_to_cpu(msg->type));
955 		goto close;
956 	}
957 	err = process_info_req(con, msg);
958 	if (err)
959 		goto close;
960 
961 	rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
962 	return;
963 close:
964 	rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
965 	close_path(srv_path);
966 }
967 
968 static int post_recv_info_req(struct rtrs_srv_con *con)
969 {
970 	struct rtrs_path *s = con->c.path;
971 	struct rtrs_srv_path *srv_path = to_srv_path(s);
972 	struct rtrs_iu *rx_iu;
973 	int err;
974 
975 	rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req),
976 			       GFP_KERNEL, srv_path->s.dev->ib_dev,
977 			       DMA_FROM_DEVICE, rtrs_srv_info_req_done);
978 	if (!rx_iu)
979 		return -ENOMEM;
980 	/* Prepare to receive the info request */
981 	err = rtrs_iu_post_recv(&con->c, rx_iu);
982 	if (err) {
983 		rtrs_err(s, "rtrs_iu_post_recv(), err: %pe\n", ERR_PTR(err));
984 		rtrs_iu_free(rx_iu, srv_path->s.dev->ib_dev, 1);
985 		return err;
986 	}
987 
988 	return 0;
989 }
990 
991 static int post_recv_io(struct rtrs_srv_con *con, size_t q_size)
992 {
993 	int i, err;
994 
995 	for (i = 0; i < q_size; i++) {
996 		err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
997 		if (err)
998 			return err;
999 	}
1000 
1001 	return 0;
1002 }
1003 
1004 static int post_recv_path(struct rtrs_srv_path *srv_path)
1005 {
1006 	struct rtrs_srv_sess *srv = srv_path->srv;
1007 	struct rtrs_path *s = &srv_path->s;
1008 	size_t q_size;
1009 	int err, cid;
1010 
1011 	for (cid = 0; cid < srv_path->s.con_num; cid++) {
1012 		if (cid == 0)
1013 			q_size = SERVICE_CON_QUEUE_DEPTH;
1014 		else
1015 			q_size = srv->queue_depth;
1016 		if (srv_path->state != RTRS_SRV_CONNECTING) {
1017 			rtrs_err(s, "Path state invalid. state %s\n",
1018 				 rtrs_srv_state_str(srv_path->state));
1019 			return -EIO;
1020 		}
1021 
1022 		if (!srv_path->s.con[cid]) {
1023 			rtrs_err(s, "Conn not set for %d\n", cid);
1024 			return -EIO;
1025 		}
1026 
1027 		err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size);
1028 		if (err) {
1029 			rtrs_err(s, "post_recv_io(), err: %pe\n", ERR_PTR(err));
1030 			return err;
1031 		}
1032 	}
1033 
1034 	return 0;
1035 }
1036 
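/*
 * Note: receive buffers are posted for every connection of the path in
 * one pass, and only while the path is still CONNECTING; q_size is
 * SERVICE_CON_QUEUE_DEPTH for connection 0 and queue_depth for the I/O
 * connections.
 */
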
1037 static void process_read(struct rtrs_srv_con *con,
1038 			 struct rtrs_msg_rdma_read *msg,
1039 			 u32 buf_id, u32 off)
1040 {
1041 	struct rtrs_path *s = con->c.path;
1042 	struct rtrs_srv_path *srv_path = to_srv_path(s);
1043 	struct rtrs_srv_sess *srv = srv_path->srv;
1044 	struct rtrs_srv_ctx *ctx = srv->ctx;
1045 	struct rtrs_srv_op *id;
1046 
1047 	size_t usr_len, data_len;
1048 	void *data;
1049 	int ret;
1050 
1051 	if (srv_path->state != RTRS_SRV_CONNECTED) {
1052 		rtrs_err_rl(s,
1053 			     "Processing read request failed, session is disconnected, sess state %s\n",
1054 			     rtrs_srv_state_str(srv_path->state));
1055 		return;
1056 	}
1057 	if (msg->sg_cnt != 1 && msg->sg_cnt != 0) {
1058 		rtrs_err_rl(s,
1059 			    "Processing read request failed, invalid message\n");
1060 		return;
1061 	}
1062 	rtrs_srv_get_ops_ids(srv_path);
1063 	rtrs_srv_update_rdma_stats(srv_path->stats, off, READ);
1064 	id = srv_path->ops_ids[buf_id];
1065 	id->con		= con;
1066 	id->dir		= READ;
1067 	id->msg_id	= buf_id;
1068 	id->rd_msg	= msg;
1069 	usr_len = le16_to_cpu(msg->usr_len);
1070 	data_len = off - usr_len;
1071 	data = page_address(srv->chunks[buf_id]);
1072 	ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len,
1073 			   data + data_len, usr_len);
1074 
1075 	if (ret) {
1076 		rtrs_err_rl(s,
1077 			     "Processing read request failed, user module cb reported for msg_id %d, err: %pe\n",
1078 			     buf_id, ERR_PTR(ret));
1079 		goto send_err_msg;
1080 	}
1081 
1082 	return;
1083 
1084 send_err_msg:
1085 	ret = send_io_resp_imm(con, id, ret);
1086 	if (ret < 0) {
1087 		rtrs_err_rl(s,
1088 			     "Sending err msg for failed RDMA-Write-Req failed, msg_id %d, err: %pe\n",
1089 			     buf_id, ERR_PTR(ret));
1090 		close_path(srv_path);
1091 	}
1092 	rtrs_srv_put_ops_ids(srv_path);
1093 }
1094 
1095 static void process_write(struct rtrs_srv_con *con,
1096 			  struct rtrs_msg_rdma_write *req,
1097 			  u32 buf_id, u32 off)
1098 {
1099 	struct rtrs_path *s = con->c.path;
1100 	struct rtrs_srv_path *srv_path = to_srv_path(s);
1101 	struct rtrs_srv_sess *srv = srv_path->srv;
1102 	struct rtrs_srv_ctx *ctx = srv->ctx;
1103 	struct rtrs_srv_op *id;
1104 
1105 	size_t data_len, usr_len;
1106 	void *data;
1107 	int ret;
1108 
1109 	if (srv_path->state != RTRS_SRV_CONNECTED) {
1110 		rtrs_err_rl(s,
1111 			     "Processing write request failed, session is disconnected, sess state %s\n",
1112 			     rtrs_srv_state_str(srv_path->state));
1113 		return;
1114 	}
1115 	rtrs_srv_get_ops_ids(srv_path);
1116 	rtrs_srv_update_rdma_stats(srv_path->stats, off, WRITE);
1117 	id = srv_path->ops_ids[buf_id];
1118 	id->con    = con;
1119 	id->dir    = WRITE;
1120 	id->msg_id = buf_id;
1121 
1122 	usr_len = le16_to_cpu(req->usr_len);
1123 	data_len = off - usr_len;
1124 	data = page_address(srv->chunks[buf_id]);
1125 	ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len,
1126 			       data + data_len, usr_len);
1127 	if (ret) {
1128 		rtrs_err_rl(s,
1129 			     "Processing write request failed, user module callback reports err: %pe\n",
1130 			     ERR_PTR(ret));
1131 		goto send_err_msg;
1132 	}
1133 
1134 	return;
1135 
1136 send_err_msg:
1137 	ret = send_io_resp_imm(con, id, ret);
1138 	if (ret < 0) {
1139 		rtrs_err_rl(s,
1140 			     "Processing write request failed, sending I/O response failed, msg_id %d, err: %pe\n",
1141 			     buf_id, ERR_PTR(ret));
1142 		close_path(srv_path);
1143 	}
1144 	rtrs_srv_put_ops_ids(srv_path);
1145 }
1146 
1147 static void process_io_req(struct rtrs_srv_con *con, void *msg,
1148 			   u32 id, u32 off)
1149 {
1150 	struct rtrs_path *s = con->c.path;
1151 	struct rtrs_srv_path *srv_path = to_srv_path(s);
1152 	struct rtrs_msg_rdma_hdr *hdr;
1153 	unsigned int type;
1154 
1155 	ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev,
1156 				   srv_path->dma_addr[id],
1157 				   max_chunk_size, DMA_BIDIRECTIONAL);
1158 	hdr = msg;
1159 	type = le16_to_cpu(hdr->type);
1160 
1161 	switch (type) {
1162 	case RTRS_MSG_WRITE:
1163 		process_write(con, msg, id, off);
1164 		break;
1165 	case RTRS_MSG_READ:
1166 		process_read(con, msg, id, off);
1167 		break;
1168 	default:
1169 		rtrs_err(s,
1170 			  "Processing I/O request failed, unknown message type received: 0x%02x\n",
1171 			  type);
1172 		goto err;
1173 	}
1174 
1175 	return;
1176 
1177 err:
1178 	close_path(srv_path);
1179 }
1180 
1181 static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
1182 {
1183 	struct rtrs_srv_mr *mr =
1184 		container_of(wc->wr_cqe, typeof(*mr), inv_cqe);
1185 	struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
1186 	struct rtrs_path *s = con->c.path;
1187 	struct rtrs_srv_path *srv_path = to_srv_path(s);
1188 	struct rtrs_srv_sess *srv = srv_path->srv;
1189 	u32 msg_id, off;
1190 	void *data;
1191 
1192 	if (wc->status != IB_WC_SUCCESS) {
1193 		rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n",
1194 			  ib_wc_status_msg(wc->status));
1195 		close_path(srv_path);
1196 	}
1197 	msg_id = mr->msg_id;
1198 	off = mr->msg_off;
1199 	data = page_address(srv->chunks[msg_id]) + off;
1200 	process_io_req(con, data, msg_id, off);
1201 }
1202 
1203 static int rtrs_srv_inv_rkey(struct rtrs_srv_con *con,
1204 			      struct rtrs_srv_mr *mr)
1205 {
1206 	struct ib_send_wr wr = {
1207 		.opcode		    = IB_WR_LOCAL_INV,
1208 		.wr_cqe		    = &mr->inv_cqe,
1209 		.send_flags	    = IB_SEND_SIGNALED,
1210 		.ex.invalidate_rkey = mr->mr->rkey,
1211 	};
1212 	mr->inv_cqe.done = rtrs_srv_inv_rkey_done;
1213 
1214 	return ib_post_send(con->c.qp, &wr, NULL);
1215 }
1216 
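/*
 * The LOCAL_INV posted above is signaled, and request processing resumes
 * from rtrs_srv_inv_rkey_done() once the rkey has been invalidated; in
 * always_invalidate mode every request therefore takes one extra trip
 * through the CQ before process_io_req() runs.
 */
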
1217 static void rtrs_rdma_process_wr_wait_list(struct rtrs_srv_con *con)
1218 {
1219 	spin_lock(&con->rsp_wr_wait_lock);
1220 	while (!list_empty(&con->rsp_wr_wait_list)) {
1221 		struct rtrs_srv_op *id;
1222 		int ret;
1223 
1224 		id = list_entry(con->rsp_wr_wait_list.next,
1225 				struct rtrs_srv_op, wait_list);
1226 		list_del(&id->wait_list);
1227 
1228 		spin_unlock(&con->rsp_wr_wait_lock);
1229 		ret = rtrs_srv_resp_rdma(id, id->status);
1230 		spin_lock(&con->rsp_wr_wait_lock);
1231 
1232 		if (!ret) {
1233 			list_add(&id->wait_list, &con->rsp_wr_wait_list);
1234 			break;
1235 		}
1236 	}
1237 	spin_unlock(&con->rsp_wr_wait_lock);
1238 }
1239 
1240 static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
1241 {
1242 	struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
1243 	struct rtrs_path *s = con->c.path;
1244 	struct rtrs_srv_path *srv_path = to_srv_path(s);
1245 	struct rtrs_srv_sess *srv = srv_path->srv;
1246 	u32 imm_type, imm_payload;
1247 	int err;
1248 
1249 	if (wc->status != IB_WC_SUCCESS) {
1250 		if (wc->status != IB_WC_WR_FLUSH_ERR) {
1251 			rtrs_err(s,
1252 				  "%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n",
1253 				  ib_wc_status_msg(wc->status), wc->wr_cqe,
1254 				  wc->opcode, wc->vendor_err, wc->byte_len);
1255 			close_path(srv_path);
1256 		}
1257 		return;
1258 	}
1259 
1260 	switch (wc->opcode) {
1261 	case IB_WC_RECV_RDMA_WITH_IMM:
1262 		/*
1263 		 * post_recv() RDMA write completions of IO reqs (read/write)
1264 		 * and hb
1265 		 */
1266 		if (WARN_ON(wc->wr_cqe != &io_comp_cqe))
1267 			return;
1268 		srv_path->s.hb_missed_cnt = 0;
1269 		err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
1270 		if (err) {
1271 			rtrs_err(s, "rtrs_post_recv(), err: %pe\n",
1272 				 ERR_PTR(err));
1273 			close_path(srv_path);
1274 			break;
1275 		}
1276 		rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
1277 			       &imm_type, &imm_payload);
1278 		if (imm_type == RTRS_IO_REQ_IMM) {
1279 			u32 msg_id, off;
1280 			void *data;
1281 
1282 			msg_id = imm_payload >> srv_path->mem_bits;
1283 			off = imm_payload & ((1 << srv_path->mem_bits) - 1);
1284 			if (msg_id >= srv->queue_depth || off >= max_chunk_size) {
1285 				rtrs_err(s, "Wrong msg_id %u, off %u\n",
1286 					  msg_id, off);
1287 				close_path(srv_path);
1288 				return;
1289 			}
1290 			if (always_invalidate) {
1291 				struct rtrs_srv_mr *mr = &srv_path->mrs[msg_id];
1292 
1293 				mr->msg_off = off;
1294 				mr->msg_id = msg_id;
1295 				err = rtrs_srv_inv_rkey(con, mr);
1296 				if (err) {
1297 					rtrs_err(s, "rtrs_srv_inv_rkey(), err: %pe\n",
1298 						 ERR_PTR(err));
1299 					close_path(srv_path);
1300 					break;
1301 				}
1302 			} else {
1303 				data = page_address(srv->chunks[msg_id]) + off;
1304 				process_io_req(con, data, msg_id, off);
1305 			}
1306 		} else if (imm_type == RTRS_HB_MSG_IMM) {
1307 			WARN_ON(con->c.cid);
1308 			rtrs_send_hb_ack(&srv_path->s);
1309 		} else if (imm_type == RTRS_HB_ACK_IMM) {
1310 			WARN_ON(con->c.cid);
1311 			srv_path->s.hb_missed_cnt = 0;
1312 		} else {
1313 			rtrs_wrn(s, "Unknown IMM type %u\n", imm_type);
1314 		}
1315 		break;
1316 	case IB_WC_RDMA_WRITE:
1317 	case IB_WC_SEND:
1318 		/*
1319 		 * post_send() RDMA write completions of IO reqs (read/write)
1320 		 * and hb.
1321 		 */
1322 		atomic_add(s->signal_interval, &con->c.sq_wr_avail);
1323 
1324 		if (!list_empty_careful(&con->rsp_wr_wait_list))
1325 			rtrs_rdma_process_wr_wait_list(con);
1326 
1327 		break;
1328 	default:
1329 		rtrs_wrn(s, "Unexpected WC type: %d\n", wc->opcode);
1330 		return;
1331 	}
1332 }
1333 
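/*
 * Worked example of the immediate-data decoding above, assuming the
 * default queue_depth of 512: map_cont_bufs() computes
 * chunk_bits = ilog2(511) + 1 = 9, leaving
 * mem_bits = MAX_IMM_PAYL_BITS - 9 low bits for the byte offset, so
 *
 *	msg_id = imm_payload >> srv_path->mem_bits;
 *	off    = imm_payload & ((1 << srv_path->mem_bits) - 1);
 *
 * recover the chunk index and the offset of the request header in it.
 */
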
1334 /**
1335  * rtrs_srv_get_path_name() - Get the name of a connected path.
1336  * @srv:	Session
1337  * @pathname:	Pathname buffer
1338  * @len:	Length of the pathname buffer
1339  */
1340 int rtrs_srv_get_path_name(struct rtrs_srv_sess *srv, char *pathname,
1341 			   size_t len)
1342 {
1343 	struct rtrs_srv_path *srv_path;
1344 	int err = -ENOTCONN;
1345 
1346 	mutex_lock(&srv->paths_mutex);
1347 	list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
1348 		if (srv_path->state != RTRS_SRV_CONNECTED)
1349 			continue;
1350 		strscpy(pathname, srv_path->s.sessname,
1351 			min_t(size_t, sizeof(srv_path->s.sessname), len));
1352 		err = 0;
1353 		break;
1354 	}
1355 	mutex_unlock(&srv->paths_mutex);
1356 
1357 	return err;
1358 }
1359 EXPORT_SYMBOL(rtrs_srv_get_path_name);
1360 
1361 /**
1362  * rtrs_srv_get_queue_depth() - Get rtrs_srv qdepth.
1363  * @srv:	Session
1364  */
1365 int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *srv)
1366 {
1367 	return srv->queue_depth;
1368 }
1369 EXPORT_SYMBOL(rtrs_srv_get_queue_depth);
1370 
1371 static int find_next_bit_ring(struct rtrs_srv_path *srv_path)
1372 {
1373 	struct ib_device *ib_dev = srv_path->s.dev->ib_dev;
1374 	int v;
1375 
1376 	v = cpumask_next(srv_path->cur_cq_vector, &cq_affinity_mask);
1377 	if (v >= nr_cpu_ids || v >= ib_dev->num_comp_vectors)
1378 		v = cpumask_first(&cq_affinity_mask);
1379 	return v;
1380 }
1381 
1382 static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_path *srv_path)
1383 {
1384 	srv_path->cur_cq_vector = find_next_bit_ring(srv_path);
1385 
1386 	return srv_path->cur_cq_vector;
1387 }
1388 
1389 static void rtrs_srv_dev_release(struct device *dev)
1390 {
1391 	struct rtrs_srv_sess *srv = container_of(dev, struct rtrs_srv_sess,
1392 						 dev);
1393 
1394 	kfree(srv);
1395 }
1396 
1397 static void free_srv(struct rtrs_srv_sess *srv)
1398 {
1399 	int i;
1400 
1401 	WARN_ON(refcount_read(&srv->refcount));
1402 	for (i = 0; i < srv->queue_depth; i++)
1403 		__free_pages(srv->chunks[i], get_order(max_chunk_size));
1404 	kfree(srv->chunks);
1405 	mutex_destroy(&srv->paths_mutex);
1406 	mutex_destroy(&srv->paths_ev_mutex);
1407 	/* last put to release the srv structure */
1408 	put_device(&srv->dev);
1409 }
1410 
1411 static struct rtrs_srv_sess *get_or_create_srv(struct rtrs_srv_ctx *ctx,
1412 					  const uuid_t *paths_uuid,
1413 					  bool first_conn)
1414 {
1415 	struct rtrs_srv_sess *srv;
1416 	int i;
1417 
1418 	mutex_lock(&ctx->srv_mutex);
1419 	list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
1420 		if (uuid_equal(&srv->paths_uuid, paths_uuid) &&
1421 		    refcount_inc_not_zero(&srv->refcount)) {
1422 			mutex_unlock(&ctx->srv_mutex);
1423 			return srv;
1424 		}
1425 	}
1426 	mutex_unlock(&ctx->srv_mutex);
1427 	/*
1428 	 * If this request is not the first connection request from the
1429 	 * client for this session then fail and return error.
1430 	 */
1431 	if (!first_conn) {
1432 		pr_err_ratelimited("Error: Not the first connection request for this session\n");
1433 		return ERR_PTR(-ENXIO);
1434 	}
1435 
1436 	/* need to allocate a new srv */
1437 	srv = kzalloc_obj(*srv);
1438 	if  (!srv)
1439 		return ERR_PTR(-ENOMEM);
1440 
1441 	INIT_LIST_HEAD(&srv->paths_list);
1442 	mutex_init(&srv->paths_mutex);
1443 	mutex_init(&srv->paths_ev_mutex);
1444 	uuid_copy(&srv->paths_uuid, paths_uuid);
1445 	srv->queue_depth = sess_queue_depth;
1446 	srv->ctx = ctx;
1447 	device_initialize(&srv->dev);
1448 	srv->dev.release = rtrs_srv_dev_release;
1449 
1450 	srv->chunks = kzalloc_objs(*srv->chunks, srv->queue_depth);
1451 	if (!srv->chunks)
1452 		goto err_free_srv;
1453 
1454 	for (i = 0; i < srv->queue_depth; i++) {
1455 		srv->chunks[i] = alloc_pages(GFP_KERNEL,
1456 					     get_order(max_chunk_size));
1457 		if (!srv->chunks[i])
1458 			goto err_free_chunks;
1459 	}
1460 	refcount_set(&srv->refcount, 1);
1461 	mutex_lock(&ctx->srv_mutex);
1462 	list_add(&srv->ctx_list, &ctx->srv_list);
1463 	mutex_unlock(&ctx->srv_mutex);
1464 
1465 	return srv;
1466 
1467 err_free_chunks:
1468 	while (i--)
1469 		__free_pages(srv->chunks[i], get_order(max_chunk_size));
1470 	kfree(srv->chunks);
1471 
1472 err_free_srv:
1473 	put_device(&srv->dev);
1474 	return ERR_PTR(-ENOMEM);
1475 }
1476 
1477 static void put_srv(struct rtrs_srv_sess *srv)
1478 {
1479 	if (refcount_dec_and_test(&srv->refcount)) {
1480 		struct rtrs_srv_ctx *ctx = srv->ctx;
1481 
1482 		WARN_ON(srv->dev.kobj.state_in_sysfs);
1483 
1484 		mutex_lock(&ctx->srv_mutex);
1485 		list_del(&srv->ctx_list);
1486 		mutex_unlock(&ctx->srv_mutex);
1487 		free_srv(srv);
1488 	}
1489 }
1490 
1491 static void __add_path_to_srv(struct rtrs_srv_sess *srv,
1492 			      struct rtrs_srv_path *srv_path)
1493 {
1494 	list_add_tail(&srv_path->s.entry, &srv->paths_list);
1495 	srv->paths_num++;
1496 	WARN_ON(srv->paths_num >= MAX_PATHS_NUM);
1497 }
1498 
1499 static void del_path_from_srv(struct rtrs_srv_path *srv_path)
1500 {
1501 	struct rtrs_srv_sess *srv = srv_path->srv;
1502 
1503 	if (WARN_ON(!srv))
1504 		return;
1505 
1506 	mutex_lock(&srv->paths_mutex);
1507 	list_del(&srv_path->s.entry);
1508 	WARN_ON(!srv->paths_num);
1509 	srv->paths_num--;
1510 	mutex_unlock(&srv->paths_mutex);
1511 }
1512 
1513 /* Like memcmp(): returns 0 if the addresses are the same; -ENOENT on unknown address family */
1514 static int sockaddr_cmp(const struct sockaddr *a, const struct sockaddr *b)
1515 {
1516 	switch (a->sa_family) {
1517 	case AF_IB:
1518 		return memcmp(&((struct sockaddr_ib *)a)->sib_addr,
1519 			      &((struct sockaddr_ib *)b)->sib_addr,
1520 			      sizeof(struct ib_addr)) &&
1521 			(b->sa_family == AF_IB);
1522 	case AF_INET:
1523 		return memcmp(&((struct sockaddr_in *)a)->sin_addr,
1524 			      &((struct sockaddr_in *)b)->sin_addr,
1525 			      sizeof(struct in_addr)) &&
1526 			(b->sa_family == AF_INET);
1527 	case AF_INET6:
1528 		return memcmp(&((struct sockaddr_in6 *)a)->sin6_addr,
1529 			      &((struct sockaddr_in6 *)b)->sin6_addr,
1530 			      sizeof(struct in6_addr)) &&
1531 			(b->sa_family == AF_INET6);
1532 	default:
1533 		return -ENOENT;
1534 	}
1535 }
1536 
1537 /* Let's close connections which have been waiting for more than 30 seconds */
1538 #define RTRS_MAX_CONN_TIMEOUT 30000
1539 
1540 static void rtrs_srv_check_close_path(struct rtrs_srv_path *srv_path)
1541 {
1542 	struct rtrs_path *s = &srv_path->s;
1543 
1544 	if (srv_path->state == RTRS_SRV_CONNECTING && srv_path->connection_timeout &&
1545 	   (jiffies_to_msecs(jiffies - srv_path->connection_timeout) > RTRS_MAX_CONN_TIMEOUT)) {
1546 		rtrs_err(s, "Closing zombie path\n");
1547 		close_path(srv_path);
1548 	}
1549 }
1550 
1551 static bool __is_path_w_addr_exists(struct rtrs_srv_sess *srv,
1552 				    struct rdma_addr *addr)
1553 {
1554 	struct rtrs_srv_path *srv_path;
1555 
1556 	list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
1557 		if (!sockaddr_cmp((struct sockaddr *)&srv_path->s.dst_addr,
1558 				  (struct sockaddr *)&addr->dst_addr) &&
1559 		    !sockaddr_cmp((struct sockaddr *)&srv_path->s.src_addr,
1560 				  (struct sockaddr *)&addr->src_addr)) {
1561 			rtrs_err((&srv_path->s),
1562 				 "Path (%s) with same addr exists (lifetime %u)\n",
1563 				 rtrs_srv_state_str(srv_path->state),
1564 				 (jiffies_to_msecs(jiffies - srv_path->connection_timeout)));
1565 			rtrs_srv_check_close_path(srv_path);
1566 			return true;
1567 		}
1568 	}
1569 
1570 	return false;
1571 }
1572 
1573 static void free_path(struct rtrs_srv_path *srv_path)
1574 {
1575 	if (srv_path->kobj.state_in_sysfs) {
1576 		kobject_del(&srv_path->kobj);
1577 		kobject_put(&srv_path->kobj);
1578 	} else {
1579 		free_percpu(srv_path->stats->rdma_stats);
1580 		kfree(srv_path->stats);
1581 		kfree(srv_path);
1582 	}
1583 }
1584 
1585 static void rtrs_srv_close_work(struct work_struct *work)
1586 {
1587 	struct rtrs_srv_path *srv_path;
1588 	struct rtrs_srv_con *con;
1589 	int i;
1590 
1591 	srv_path = container_of(work, typeof(*srv_path), close_work);
1592 
1593 	rtrs_srv_stop_hb(srv_path);
1594 
1595 	for (i = 0; i < srv_path->s.con_num; i++) {
1596 		if (!srv_path->s.con[i])
1597 			continue;
1598 		con = to_srv_con(srv_path->s.con[i]);
1599 		rdma_disconnect(con->c.cm_id);
1600 		ib_drain_qp(con->c.qp);
1601 	}
1602 
1603 	/*
1604 	 * Degrade ref count to the usual model with a single shared
1605 	 * atomic_t counter
1606 	 */
1607 	percpu_ref_kill(&srv_path->ids_inflight_ref);
1608 
1609 	/* Wait for all inflight requests to complete */
1610 	wait_for_completion(&srv_path->complete_done);
1611 
1612 	rtrs_srv_destroy_path_files(srv_path);
1613 
1614 	/* Notify upper layer if we are the last path */
1615 	rtrs_srv_path_down(srv_path);
1616 
1617 	unmap_cont_bufs(srv_path);
1618 	rtrs_srv_free_ops_ids(srv_path);
1619 
1620 	for (i = 0; i < srv_path->s.con_num; i++) {
1621 		if (!srv_path->s.con[i])
1622 			continue;
1623 		con = to_srv_con(srv_path->s.con[i]);
1624 		rtrs_cq_qp_destroy(&con->c);
1625 		rdma_destroy_id(con->c.cm_id);
1626 		kfree(con);
1627 	}
1628 	rtrs_ib_dev_put(srv_path->s.dev);
1629 
1630 	del_path_from_srv(srv_path);
1631 	put_srv(srv_path->srv);
1632 	srv_path->srv = NULL;
1633 	rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSED);
1634 
1635 	kfree(srv_path->dma_addr);
1636 	kfree(srv_path->s.con);
1637 	free_path(srv_path);
1638 }
1639 
1640 static int rtrs_rdma_do_accept(struct rtrs_srv_path *srv_path,
1641 			       struct rdma_cm_id *cm_id)
1642 {
1643 	struct rtrs_srv_sess *srv = srv_path->srv;
1644 	struct rtrs_msg_conn_rsp msg;
1645 	struct rdma_conn_param param;
1646 	int err;
1647 
1648 	param = (struct rdma_conn_param) {
1649 		.rnr_retry_count = 7,
1650 		.private_data = &msg,
1651 		.private_data_len = sizeof(msg),
1652 	};
1653 
1654 	msg = (struct rtrs_msg_conn_rsp) {
1655 		.magic = cpu_to_le16(RTRS_MAGIC),
1656 		.version = cpu_to_le16(RTRS_PROTO_VER),
1657 		.queue_depth = cpu_to_le16(srv->queue_depth),
1658 		.max_io_size = cpu_to_le32(max_chunk_size - MAX_HDR_SIZE),
1659 		.max_hdr_size = cpu_to_le32(MAX_HDR_SIZE),
1660 	};
1661 
1662 	if (always_invalidate)
1663 		msg.flags = cpu_to_le32(RTRS_MSG_NEW_RKEY_F);
1664 
1665 	err = rdma_accept(cm_id, &param);
1666 	if (err)
1667 		pr_err("rdma_accept(), err: %pe\n", ERR_PTR(err));
1668 
1669 	return err;
1670 }
1671 
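/*
 * The private_data blob sent above is what the client parses from the
 * connection-established event; e.g. with the defaults it advertises
 * queue_depth = 512 and max_io_size = max_chunk_size - MAX_HDR_SIZE
 * (128 KiB minus one page on 4 KiB-page systems).
 */
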
1672 static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno)
1673 {
1674 	struct rtrs_msg_conn_rsp msg;
1675 	int err;
1676 
1677 	msg = (struct rtrs_msg_conn_rsp) {
1678 		.magic = cpu_to_le16(RTRS_MAGIC),
1679 		.version = cpu_to_le16(RTRS_PROTO_VER),
1680 		.errno = cpu_to_le16(errno),
1681 	};
1682 
1683 	err = rdma_reject(cm_id, &msg, sizeof(msg), IB_CM_REJ_CONSUMER_DEFINED);
1684 	if (err)
1685 		pr_err("rdma_reject(), err: %pe\n", ERR_PTR(err));
1686 
1687 	/* Bounce errno back */
1688 	return errno;
1689 }
1690 
1691 static struct rtrs_srv_path *
1692 __find_path(struct rtrs_srv_sess *srv, const uuid_t *sess_uuid)
1693 {
1694 	struct rtrs_srv_path *srv_path;
1695 
1696 	list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
1697 		if (uuid_equal(&srv_path->s.uuid, sess_uuid))
1698 			return srv_path;
1699 	}
1700 
1701 	return NULL;
1702 }
1703 
1704 static int create_con(struct rtrs_srv_path *srv_path,
1705 		      struct rdma_cm_id *cm_id,
1706 		      unsigned int cid)
1707 {
1708 	struct rtrs_srv_sess *srv = srv_path->srv;
1709 	struct rtrs_path *s = &srv_path->s;
1710 	struct rtrs_srv_con *con;
1711 
1712 	u32 cq_num, max_send_wr, max_recv_wr, wr_limit;
1713 	int err, cq_vector;
1714 
1715 	con = kzalloc_obj(*con);
1716 	if (!con) {
1717 		err = -ENOMEM;
1718 		goto err;
1719 	}
1720 
1721 	spin_lock_init(&con->rsp_wr_wait_lock);
1722 	INIT_LIST_HEAD(&con->rsp_wr_wait_list);
1723 	con->c.cm_id = cm_id;
1724 	con->c.path = &srv_path->s;
1725 	con->c.cid = cid;
1726 	atomic_set(&con->c.wr_cnt, 1);
1727 	wr_limit = srv_path->s.dev->ib_dev->attrs.max_qp_wr;
1728 
1729 	if (con->c.cid == 0) {
1730 		/*
1731 		 * All receive and all send (each requiring invalidate)
1732 		 * + 2 for drain and heartbeat
1733 		 */
1734 		max_send_wr = min_t(int, wr_limit,
1735 				    SERVICE_CON_QUEUE_DEPTH * 2 + 2);
1736 		max_recv_wr = max_send_wr;
1737 		s->signal_interval = min_not_zero(srv->queue_depth,
1738 						  (size_t)SERVICE_CON_QUEUE_DEPTH);
1739 	} else {
1740 		/* when always_invalidate is enabled, we need linv+rinv+mr+imm */
1741 		if (always_invalidate)
1742 			max_send_wr =
1743 				min_t(int, wr_limit,
1744 				      srv->queue_depth * (1 + 4) + 1);
1745 		else
1746 			max_send_wr =
1747 				min_t(int, wr_limit,
1748 				      srv->queue_depth * (1 + 2) + 1);
1749 
1750 		max_recv_wr = srv->queue_depth + 1;
1751 	}
1752 	cq_num = max_send_wr + max_recv_wr;
1753 	atomic_set(&con->c.sq_wr_avail, max_send_wr);
1754 	cq_vector = rtrs_srv_get_next_cq_vector(srv_path);
1755 
1756 	/* TODO: SOFTIRQ can be faster, but be careful with softirq context */
1757 	err = rtrs_cq_qp_create(&srv_path->s, &con->c, 1, cq_vector, cq_num,
1758 				 max_send_wr, max_recv_wr,
1759 				 IB_POLL_WORKQUEUE);
1760 	if (err) {
1761 		rtrs_err(s, "rtrs_cq_qp_create(), err: %pe\n", ERR_PTR(err));
1762 		goto free_con;
1763 	}
1764 	if (con->c.cid == 0) {
1765 		err = post_recv_info_req(con);
1766 		if (err)
1767 			goto free_cqqp;
1768 	}
1769 	WARN_ON(srv_path->s.con[cid]);
1770 	srv_path->s.con[cid] = &con->c;
1771 
1772 	/*
1773 	 * Change context from server to current connection.  The other
1774 	 * way is to use cm_id->qp->qp_context, which does not work on OFED.
1775 	 */
1776 	cm_id->context = &con->c;
1777 
1778 	return 0;
1779 
1780 free_cqqp:
1781 	rtrs_cq_qp_destroy(&con->c);
1782 free_con:
1783 	kfree(con);
1784 
1785 err:
1786 	return err;
1787 }
1788 
__alloc_path(struct rtrs_srv_sess * srv,struct rdma_cm_id * cm_id,unsigned int con_num,unsigned int recon_cnt,const uuid_t * uuid)1789 static struct rtrs_srv_path *__alloc_path(struct rtrs_srv_sess *srv,
1790 					   struct rdma_cm_id *cm_id,
1791 					   unsigned int con_num,
1792 					   unsigned int recon_cnt,
1793 					   const uuid_t *uuid)
1794 {
1795 	struct rtrs_srv_path *srv_path;
1796 	int err = -ENOMEM;
1797 	char str[NAME_MAX];
1798 	struct rtrs_addr path;
1799 
1800 	if (srv->paths_num >= MAX_PATHS_NUM) {
1801 		err = -ECONNRESET;
1802 		goto err;
1803 	}
1804 	if (__is_path_w_addr_exists(srv, &cm_id->route.addr)) {
1805 		err = -EEXIST;
1806 		goto err;
1807 	}
1808 	srv_path = kzalloc_obj(*srv_path);
1809 	if (!srv_path)
1810 		goto err;
1811 
1812 	srv_path->stats = kzalloc_obj(*srv_path->stats);
1813 	if (!srv_path->stats)
1814 		goto err_free_sess;
1815 
1816 	srv_path->stats->rdma_stats = alloc_percpu(struct rtrs_srv_stats_rdma_stats);
1817 	if (!srv_path->stats->rdma_stats)
1818 		goto err_free_stats;
1819 
1820 	srv_path->stats->srv_path = srv_path;
1821 
1822 	srv_path->dma_addr = kzalloc_objs(*srv_path->dma_addr, srv->queue_depth);
1823 	if (!srv_path->dma_addr)
1824 		goto err_free_percpu;
1825 
1826 	srv_path->s.con = kzalloc_objs(*srv_path->s.con, con_num);
1827 	if (!srv_path->s.con)
1828 		goto err_free_dma_addr;
1829 
1830 	srv_path->state = RTRS_SRV_CONNECTING;
1831 	srv_path->srv = srv;
1832 	srv_path->cur_cq_vector = -1;
1833 	srv_path->s.dst_addr = cm_id->route.addr.dst_addr;
1834 	srv_path->s.src_addr = cm_id->route.addr.src_addr;
1835 
1836 	/* temporary name until the session name is received from the client */
1837 	path.src = &srv_path->s.src_addr;
1838 	path.dst = &srv_path->s.dst_addr;
1839 	rtrs_addr_to_str(&path, str, sizeof(str));
1840 	strscpy(srv_path->s.sessname, str, sizeof(srv_path->s.sessname));
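	/* yields a placeholder name of the form "<src addr>@<dst addr>" */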
1841 
1842 	srv_path->s.con_num = con_num;
1843 	srv_path->s.irq_con_num = con_num;
1844 	srv_path->s.recon_cnt = recon_cnt;
1845 	uuid_copy(&srv_path->s.uuid, uuid);
1846 	spin_lock_init(&srv_path->state_lock);
1847 	INIT_WORK(&srv_path->close_work, rtrs_srv_close_work);
1848 	rtrs_srv_init_hb(srv_path);
1849 	srv_path->connection_timeout = 0;
1850 
1851 	srv_path->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd);
1852 	if (!srv_path->s.dev) {
1853 		err = -ENOMEM;
1854 		goto err_free_con;
1855 	}
1856 	err = map_cont_bufs(srv_path);
1857 	if (err)
1858 		goto err_put_dev;
1859 
1860 	err = rtrs_srv_alloc_ops_ids(srv_path);
1861 	if (err)
1862 		goto err_unmap_bufs;
1863 
1864 	__add_path_to_srv(srv, srv_path);
1865 
1866 	return srv_path;
1867 
1868 err_unmap_bufs:
1869 	unmap_cont_bufs(srv_path);
1870 err_put_dev:
1871 	rtrs_ib_dev_put(srv_path->s.dev);
1872 err_free_con:
1873 	kfree(srv_path->s.con);
1874 err_free_dma_addr:
1875 	kfree(srv_path->dma_addr);
1876 err_free_percpu:
1877 	free_percpu(srv_path->stats->rdma_stats);
1878 err_free_stats:
1879 	kfree(srv_path->stats);
1880 err_free_sess:
1881 	kfree(srv_path);
1882 err:
1883 	return ERR_PTR(err);
1884 }
1885 
rtrs_rdma_connect(struct rdma_cm_id * cm_id,const struct rtrs_msg_conn_req * msg,size_t len)1886 static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
1887 			      const struct rtrs_msg_conn_req *msg,
1888 			      size_t len)
1889 {
1890 	struct rtrs_srv_ctx *ctx = cm_id->context;
1891 	struct rtrs_srv_path *srv_path;
1892 	struct rtrs_srv_sess *srv;
1893 
1894 	u16 version, con_num, cid;
1895 	u16 recon_cnt;
1896 	int err = -ECONNRESET;
1897 
1898 	if (len < sizeof(*msg)) {
1899 		pr_err("Invalid RTRS connection request\n");
1900 		goto reject_w_err;
1901 	}
1902 	if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
1903 		pr_err("Invalid RTRS magic\n");
1904 		goto reject_w_err;
1905 	}
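	/* Protocol version: major in the high byte, minor in the low byte. */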
1906 	version = le16_to_cpu(msg->version);
1907 	if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
1908 		pr_err("Unsupported major RTRS version: %d, expected %d\n",
1909 		       version >> 8, RTRS_PROTO_VER_MAJOR);
1910 		goto reject_w_err;
1911 	}
1912 	con_num = le16_to_cpu(msg->cid_num);
1913 	if (con_num > 4096) {
1914 		/* Sanity check */
1915 		pr_err("Too many connections requested: %d\n", con_num);
1916 		goto reject_w_err;
1917 	}
1918 	cid = le16_to_cpu(msg->cid);
1919 	if (cid >= con_num) {
1920 		/* Sanity check */
1921 		pr_err("Incorrect cid: %d >= %d\n", cid, con_num);
1922 		goto reject_w_err;
1923 	}
1924 	recon_cnt = le16_to_cpu(msg->recon_cnt);
1925 	srv = get_or_create_srv(ctx, &msg->paths_uuid, msg->first_conn);
1926 	if (IS_ERR(srv)) {
1927 		err = PTR_ERR(srv);
1928 		pr_err("get_or_create_srv(), error %pe\n", ERR_PTR(err));
1929 		goto reject_w_err;
1930 	}
1931 	mutex_lock(&srv->paths_mutex);
1932 	srv_path = __find_path(srv, &msg->sess_uuid);
1933 	if (srv_path) {
1934 		struct rtrs_path *s = &srv_path->s;
1935 
1936 		/* Session already holds a reference */
1937 		put_srv(srv);
1938 
1939 		if (srv_path->state != RTRS_SRV_CONNECTING) {
1940 			rtrs_err(s, "Session in wrong state: %s\n",
1941 				  rtrs_srv_state_str(srv_path->state));
1942 			mutex_unlock(&srv->paths_mutex);
1943 			goto reject_w_err;
1944 		}
1945 		/* Sanity checks */
1948 		if (con_num != s->con_num || cid >= s->con_num) {
1949 			rtrs_err(s, "Incorrect request: %d, %d\n",
1950 				  cid, con_num);
1951 			mutex_unlock(&srv->paths_mutex);
1952 			goto reject_w_err;
1953 		}
1954 		if (s->con[cid]) {
1955 			rtrs_err(s, "Connection (%s) already exists: %d (lifetime %ums)\n",
1956 				 rtrs_srv_state_str(srv_path->state), cid,
1957 				 (jiffies_to_msecs(jiffies - srv_path->connection_timeout)));
1958 			rtrs_srv_check_close_path(srv_path);
1959 			mutex_unlock(&srv->paths_mutex);
1960 			goto reject_w_err;
1961 		}
1962 	} else {
1963 		srv_path = __alloc_path(srv, cm_id, con_num, recon_cnt,
1964 				    &msg->sess_uuid);
1965 		if (IS_ERR(srv_path)) {
1966 			mutex_unlock(&srv->paths_mutex);
1967 			put_srv(srv);
1968 			err = PTR_ERR(srv_path);
1969 			pr_err("RTRS server session allocation failed: %pe\n", ERR_PTR(err));
1970 			goto reject_w_err;
1971 		}
1972 	}
1973 
1974 	/*
1975 	 * Starting any connection creation resets the path's connection timestamp.
1976 	 */
1977 	srv_path->connection_timeout = jiffies;
1978 
1979 	err = create_con(srv_path, cm_id, cid);
1980 	if (err) {
1981 		rtrs_err(&srv_path->s, "create_con(), error %pe\n", ERR_PTR(err));
1982 		rtrs_rdma_do_reject(cm_id, err);
1983 		/*
1984 		 * Since the session has other connections, we follow the normal
1985 		 * path through the workqueue, but still return an error to tell
1986 		 * cma.c to call rdma_destroy_id() for the current connection.
1987 		 */
1988 		goto close_and_return_err;
1989 	}
1990 	err = rtrs_rdma_do_accept(srv_path, cm_id);
1991 	if (err) {
1992 		rtrs_err(&srv_path->s, "rtrs_rdma_do_accept(), error %pe\n",
1993 			 ERR_PTR(err));
1994 		rtrs_rdma_do_reject(cm_id, err);
1995 		/*
1996 		 * Since the current connection was successfully added to the
1997 		 * session, we follow the normal path through the workqueue to
1998 		 * close the session, and return 0 to tell cma.c that we will
1999 		 * call rdma_destroy_id() ourselves.
2000 		 */
2001 		err = 0;
2002 		goto close_and_return_err;
2003 	}
2004 	mutex_unlock(&srv->paths_mutex);
2005 
2006 	return 0;
2007 
2008 reject_w_err:
2009 	return rtrs_rdma_do_reject(cm_id, err);
2010 
2011 close_and_return_err:
2012 	mutex_unlock(&srv->paths_mutex);
2013 	close_path(srv_path);
2014 
2015 	return err;
2016 }
2017 
rtrs_srv_rdma_cm_handler(struct rdma_cm_id * cm_id,struct rdma_cm_event * ev)2018 static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
2019 				     struct rdma_cm_event *ev)
2020 {
2021 	struct rtrs_srv_path *srv_path = NULL;
2022 	struct rtrs_path *s = NULL;
2023 	struct rtrs_con *c = NULL;
2024 
2025 	if (ev->event == RDMA_CM_EVENT_CONNECT_REQUEST)
2026 		/*
2027 		 * In case of error cma.c will destroy cm_id,
2028 		 * see cma_process_remove()
2029 		 */
2030 		return rtrs_rdma_connect(cm_id, ev->param.conn.private_data,
2031 					  ev->param.conn.private_data_len);
2032 
2033 	c = cm_id->context;
2034 	s = c->path;
2035 	srv_path = to_srv_path(s);
2036 
2037 	switch (ev->event) {
2038 	case RDMA_CM_EVENT_ESTABLISHED:
2039 		/* Nothing here */
2040 		break;
2041 	case RDMA_CM_EVENT_REJECTED:
2042 	case RDMA_CM_EVENT_CONNECT_ERROR:
2043 	case RDMA_CM_EVENT_UNREACHABLE:
2044 		if (ev->status < 0) {
2045 			rtrs_err(s, "CM error (CM event: %s, err: %pe)\n",
2046 					rdma_event_msg(ev->event),
2047 					ERR_PTR(ev->status));
2048 		} else if (ev->status > 0) {
2049 			rtrs_err(s, "CM error (CM event: %s, err: %s)\n",
2050 					rdma_event_msg(ev->event),
2051 					rdma_reject_msg(cm_id, ev->status));
2052 		}
2053 		fallthrough;
2054 	case RDMA_CM_EVENT_DISCONNECTED:
2055 	case RDMA_CM_EVENT_ADDR_CHANGE:
2056 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2057 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
2058 		close_path(srv_path);
2059 		break;
2060 	default:
2061 		if (ev->status < 0) {
2062 			pr_err("Ignoring unexpected CM event %s, err %pe\n",
2063 					rdma_event_msg(ev->event),
2064 					ERR_PTR(ev->status));
2065 		} else if (ev->status > 0) {
2066 			pr_err("Ignoring unexpected CM event %s, err %s\n",
2067 					rdma_event_msg(ev->event),
2068 					rdma_reject_msg(cm_id, ev->status));
2069 		}
2070 		break;
2071 	}
2072 
2073 	return 0;
2074 }
2075 
rtrs_srv_cm_init(struct rtrs_srv_ctx * ctx,struct sockaddr * addr,enum rdma_ucm_port_space ps)2076 static struct rdma_cm_id *rtrs_srv_cm_init(struct rtrs_srv_ctx *ctx,
2077 					    struct sockaddr *addr,
2078 					    enum rdma_ucm_port_space ps)
2079 {
2080 	struct rdma_cm_id *cm_id;
2081 	int ret;
2082 
2083 	cm_id = rdma_create_id(&init_net, rtrs_srv_rdma_cm_handler,
2084 			       ctx, ps, IB_QPT_RC);
2085 	if (IS_ERR(cm_id)) {
2086 		ret = PTR_ERR(cm_id);
2087 		pr_err("Creating id for RDMA connection failed, err: %pe\n",
2088 		       ERR_PTR(ret));
2089 		goto err_out;
2090 	}
2091 	ret = rdma_bind_addr(cm_id, addr);
2092 	if (ret) {
2093 		pr_err("Binding RDMA address failed, err: %pe\n", ERR_PTR(ret));
2094 		goto err_cm;
2095 	}
2096 	ret = rdma_listen(cm_id, 64);
2097 	if (ret) {
2098 		pr_err("Listening on RDMA connection failed, err: %pe\n",
2099 		       ERR_PTR(ret));
2100 		goto err_cm;
2101 	}
2102 
2103 	return cm_id;
2104 
2105 err_cm:
2106 	rdma_destroy_id(cm_id);
2107 err_out:
2108 
2109 	return ERR_PTR(ret);
2110 }
2111 
rtrs_srv_rdma_init(struct rtrs_srv_ctx * ctx,u16 port)2112 static int rtrs_srv_rdma_init(struct rtrs_srv_ctx *ctx, u16 port)
2113 {
2114 	struct sockaddr_in6 sin = {
2115 		.sin6_family	= AF_INET6,
2116 		.sin6_addr	= IN6ADDR_ANY_INIT,
2117 		.sin6_port	= htons(port),
2118 	};
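	/*
	 * For AF_IB the port is encoded into the service ID
	 * (RDMA_IB_IP_PS_IB | port) and matched exactly via the all-ones
	 * sib_sid_mask.
	 */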
2119 	struct sockaddr_ib sib = {
2120 		.sib_family			= AF_IB,
2121 		.sib_sid	= cpu_to_be64(RDMA_IB_IP_PS_IB | port),
2122 		.sib_sid_mask	= cpu_to_be64(0xffffffffffffffffULL),
2123 		.sib_pkey	= cpu_to_be16(0xffff),
2124 	};
2125 	struct rdma_cm_id *cm_ip, *cm_ib;
2126 	int ret;
2127 
2128 	/*
2129 	 * We accept both IPoIB and IB connections, so we need to keep
2130 	 * two CM IDs, one for each socket type and port space.
2131 	 * If the CM initialization of either ID fails, we abort
2132 	 * everything.
2133 	 */
2134 	cm_ip = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sin, RDMA_PS_TCP);
2135 	if (IS_ERR(cm_ip))
2136 		return PTR_ERR(cm_ip);
2137 
2138 	cm_ib = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sib, RDMA_PS_IB);
2139 	if (IS_ERR(cm_ib)) {
2140 		ret = PTR_ERR(cm_ib);
2141 		goto free_cm_ip;
2142 	}
2143 
2144 	ctx->cm_id_ip = cm_ip;
2145 	ctx->cm_id_ib = cm_ib;
2146 
2147 	return 0;
2148 
2149 free_cm_ip:
2150 	rdma_destroy_id(cm_ip);
2151 
2152 	return ret;
2153 }
2154 
alloc_srv_ctx(struct rtrs_srv_ops * ops)2155 static struct rtrs_srv_ctx *alloc_srv_ctx(struct rtrs_srv_ops *ops)
2156 {
2157 	struct rtrs_srv_ctx *ctx;
2158 
2159 	ctx = kzalloc_obj(*ctx);
2160 	if (!ctx)
2161 		return NULL;
2162 
2163 	ctx->ops = *ops;
2164 	mutex_init(&ctx->srv_mutex);
2165 	INIT_LIST_HEAD(&ctx->srv_list);
2166 
2167 	return ctx;
2168 }
2169 
free_srv_ctx(struct rtrs_srv_ctx * ctx)2170 static void free_srv_ctx(struct rtrs_srv_ctx *ctx)
2171 {
2172 	WARN_ON(!list_empty(&ctx->srv_list));
2173 	mutex_destroy(&ctx->srv_mutex);
2174 	kfree(ctx);
2175 }
2176 
rtrs_srv_add_one(struct ib_device * device)2177 static int rtrs_srv_add_one(struct ib_device *device)
2178 {
2179 	struct rtrs_srv_ctx *ctx;
2180 	int ret = 0;
2181 
2182 	mutex_lock(&ib_ctx.ib_dev_mutex);
2183 	if (ib_ctx.ib_dev_count)
2184 		goto out;
2185 
2186 	/*
2187 	 * Since our CM IDs are NOT bound to any ib device, we create them
2188 	 * only once.
2189 	 */
2190 	ctx = ib_ctx.srv_ctx;
2191 	ret = rtrs_srv_rdma_init(ctx, ib_ctx.port);
2192 	if (ret) {
2193 		/*
2194 		 * According to the ib core, if we fail here the error code is
2195 		 * ignored and no further calls to our ops are made.
2196 		 */
2198 		pr_err("Failed to initialize RDMA connection\n");
2199 		goto err_out;
2200 	}
2201 
2202 out:
2203 	/* Keep track of the number of ib devices added */
2206 	ib_ctx.ib_dev_count++;
2207 
2208 err_out:
2209 	mutex_unlock(&ib_ctx.ib_dev_mutex);
2210 	return ret;
2211 }
2212 
rtrs_srv_remove_one(struct ib_device * device,void * client_data)2213 static void rtrs_srv_remove_one(struct ib_device *device, void *client_data)
2214 {
2215 	struct rtrs_srv_ctx *ctx;
2216 
2217 	mutex_lock(&ib_ctx.ib_dev_mutex);
2218 	ib_ctx.ib_dev_count--;
2219 
2220 	if (ib_ctx.ib_dev_count)
2221 		goto out;
2222 
2223 	/*
2224 	 * Since our CM IDs are NOT bound to any ib device, we remove them
2225 	 * only once, when the last device is removed.
2226 	 */
2227 	ctx = ib_ctx.srv_ctx;
2228 	rdma_destroy_id(ctx->cm_id_ip);
2229 	rdma_destroy_id(ctx->cm_id_ib);
2230 
2231 out:
2232 	mutex_unlock(&ib_ctx.ib_dev_mutex);
2233 }
2234 
2235 static struct ib_client rtrs_srv_client = {
2236 	.name	= "rtrs_server",
2237 	.add	= rtrs_srv_add_one,
2238 	.remove	= rtrs_srv_remove_one
2239 };
2240 
2241 /**
2242  * rtrs_srv_open() - open RTRS server context
2243  * @ops:	callback functions
2244  * @port:	port to listen on
2245  *
2246  * Creates a server context with the specified callbacks.
2247  *
2248  * Return: a valid pointer on success, otherwise an ERR_PTR-encoded error.
2249  */
rtrs_srv_open(struct rtrs_srv_ops * ops,u16 port)2250 struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
2251 {
2252 	struct rtrs_srv_ctx *ctx;
2253 	int err;
2254 
2255 	ctx = alloc_srv_ctx(ops);
2256 	if (!ctx)
2257 		return ERR_PTR(-ENOMEM);
2258 
2259 	mutex_init(&ib_ctx.ib_dev_mutex);
2260 	ib_ctx.srv_ctx = ctx;
2261 	ib_ctx.port = port;
2262 
2263 	err = ib_register_client(&rtrs_srv_client);
2264 	if (err) {
2265 		free_srv_ctx(ctx);
2266 		return ERR_PTR(err);
2267 	}
2268 
2269 	return ctx;
2270 }
2271 EXPORT_SYMBOL(rtrs_srv_open);
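
/*
 * Minimal usage sketch for a ULP (hypothetical callbacks, error handling
 * elided):
 *
 *	static struct rtrs_srv_ops my_ops = {
 *		.rdma_ev = my_rdma_ev,
 *		.link_ev = my_link_ev,
 *	};
 *
 *	ctx = rtrs_srv_open(&my_ops, 1234);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...
 *	rtrs_srv_close(ctx);
 */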
2272 
close_paths(struct rtrs_srv_sess * srv)2273 static void close_paths(struct rtrs_srv_sess *srv)
2274 {
2275 	struct rtrs_srv_path *srv_path;
2276 
2277 	mutex_lock(&srv->paths_mutex);
2278 	list_for_each_entry(srv_path, &srv->paths_list, s.entry)
2279 		close_path(srv_path);
2280 	mutex_unlock(&srv->paths_mutex);
2281 }
2282 
close_ctx(struct rtrs_srv_ctx * ctx)2283 static void close_ctx(struct rtrs_srv_ctx *ctx)
2284 {
2285 	struct rtrs_srv_sess *srv;
2286 
2287 	mutex_lock(&ctx->srv_mutex);
2288 	list_for_each_entry(srv, &ctx->srv_list, ctx_list)
2289 		close_paths(srv);
2290 	mutex_unlock(&ctx->srv_mutex);
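	/* wait for the queued path close work to finish before ctx teardown */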
2291 	flush_workqueue(rtrs_wq);
2292 }
2293 
2294 /**
2295  * rtrs_srv_close() - close RTRS server context
2296  * @ctx: pointer to server context
2297  *
2298  * Closes RTRS server context with all client sessions.
2299  */
rtrs_srv_close(struct rtrs_srv_ctx * ctx)2300 void rtrs_srv_close(struct rtrs_srv_ctx *ctx)
2301 {
2302 	ib_unregister_client(&rtrs_srv_client);
2303 	mutex_destroy(&ib_ctx.ib_dev_mutex);
2304 	close_ctx(ctx);
2305 	free_srv_ctx(ctx);
2306 }
2307 EXPORT_SYMBOL(rtrs_srv_close);
2308 
check_module_params(void)2309 static int check_module_params(void)
2310 {
2311 	if (sess_queue_depth < 1 || sess_queue_depth > MAX_SESS_QUEUE_DEPTH) {
2312 		pr_err("Invalid sess_queue_depth value %d, has to be >= %d, <= %d.\n",
2313 		       sess_queue_depth, 1, MAX_SESS_QUEUE_DEPTH);
2314 		return -EINVAL;
2315 	}
2316 	if (max_chunk_size < MIN_CHUNK_SIZE || !is_power_of_2(max_chunk_size)) {
2317 		pr_err("Invalid max_chunk_size value %d, has to be >= %d and a power of two.\n",
2318 		       max_chunk_size, MIN_CHUNK_SIZE);
2319 		return -EINVAL;
2320 	}
2321 
2322 	/*
2323 	 * Check if IB immediate data size is enough to hold the mem_id and the
2324 	 * offset inside the memory chunk
2325 	 */
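	/*
	 * With the defaults this needs ilog2(512 - 1) + 1 = 9 bits for the
	 * buffer id plus ilog2(131072 - 1) + 1 = 17 bits for the offset,
	 * i.e. 26 bits of immediate payload.
	 */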
2326 	if ((ilog2(sess_queue_depth - 1) + 1) +
2327 	    (ilog2(max_chunk_size - 1) + 1) > MAX_IMM_PAYL_BITS) {
2328 		pr_err("RDMA immediate size (%d bits) not enough to encode %d buffers of size %dB. Reduce 'sess_queue_depth' or 'max_chunk_size' parameters.\n",
2329 		       MAX_IMM_PAYL_BITS, sess_queue_depth, max_chunk_size);
2330 		return -EINVAL;
2331 	}
2332 
2333 	return 0;
2334 }
2335 
rtrs_srv_ib_event_handler(struct ib_event_handler * handler,struct ib_event * ibevent)2336 void rtrs_srv_ib_event_handler(struct ib_event_handler *handler,
2337 			       struct ib_event *ibevent)
2338 {
2339 	struct ib_device *idev = ibevent->device;
2340 	u32 port_num = ibevent->element.port_num;
2341 
2342 	pr_info("Handling event: %s (%d). HCA name: %s, port num: %u\n",
2343 			ib_event_msg(ibevent->event), ibevent->event, idev->name, port_num);
2344 }
2345 
rtrs_srv_ib_dev_init(struct rtrs_ib_dev * dev)2346 static int rtrs_srv_ib_dev_init(struct rtrs_ib_dev *dev)
2347 {
2348 	INIT_IB_EVENT_HANDLER(&dev->event_handler, dev->ib_dev,
2349 			      rtrs_srv_ib_event_handler);
2350 	ib_register_event_handler(&dev->event_handler);
2351 
2352 	return 0;
2353 }
2354 
rtrs_srv_ib_dev_deinit(struct rtrs_ib_dev * dev)2355 static void rtrs_srv_ib_dev_deinit(struct rtrs_ib_dev *dev)
2356 {
2357 	ib_unregister_event_handler(&dev->event_handler);
2358 }
2359 
2360 
2361 static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
2362 	.init = rtrs_srv_ib_dev_init,
2363 	.deinit = rtrs_srv_ib_dev_deinit
2364 };
2365 
2366 
rtrs_server_init(void)2367 static int __init rtrs_server_init(void)
2368 {
2369 	int err;
2370 
2371 	pr_info("Loading module %s, proto %s: (max_chunk_size: %d (pure IO %ld, headers %ld), sess_queue_depth: %d, always_invalidate: %d)\n",
2372 		KBUILD_MODNAME, RTRS_PROTO_VER_STRING,
2373 		max_chunk_size, max_chunk_size - MAX_HDR_SIZE, MAX_HDR_SIZE,
2374 		sess_queue_depth, always_invalidate);
2375 
2376 	rtrs_rdma_dev_pd_init(0, &dev_pd);
2377 
2378 	err = check_module_params();
2379 	if (err) {
2380 		pr_err("Failed to load module, invalid module parameters, err: %pe\n",
2381 		       ERR_PTR(err));
2382 		return err;
2383 	}
2384 	err = class_register(&rtrs_dev_class);
2385 	if (err)
2386 		goto out_err;
2387 
2388 	rtrs_wq = alloc_workqueue("rtrs_server_wq", 0, 0);
2389 	if (!rtrs_wq) {
2390 		err = -ENOMEM;
2391 		goto out_dev_class;
2392 	}
2393 
2394 	return 0;
2395 
2396 out_dev_class:
2397 	class_unregister(&rtrs_dev_class);
2398 out_err:
2399 	return err;
2400 }
2401 
rtrs_server_exit(void)2402 static void __exit rtrs_server_exit(void)
2403 {
2404 	destroy_workqueue(rtrs_wq);
2405 	class_unregister(&rtrs_dev_class);
2406 	rtrs_rdma_dev_pd_deinit(&dev_pd);
2407 }
2408 
2409 module_init(rtrs_server_init);
2410 module_exit(rtrs_server_exit);
2411