// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Transport Layer
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt

#include <linux/module.h>
#include <linux/inet.h>

#include "rtrs-pri.h"
#include "rtrs-log.h"

MODULE_DESCRIPTION("RDMA Transport Core");
MODULE_LICENSE("GPL");

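/*
 * Allocate iu_num I/O units, each backed by a zeroed buffer of @size bytes
 * that is DMA-mapped for @dir. On any failure the entries initialized so
 * far are released again via rtrs_iu_free().
 */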
struct rtrs_iu *rtrs_iu_alloc(u32 iu_num, size_t size, gfp_t gfp_mask,
			      struct ib_device *dma_dev,
			      enum dma_data_direction dir,
			      void (*done)(struct ib_cq *cq, struct ib_wc *wc))
{
	struct rtrs_iu *ius, *iu;
	int i;

	ius = kzalloc_objs(*ius, iu_num, gfp_mask);
	if (!ius)
		return NULL;
	for (i = 0; i < iu_num; i++) {
		iu = &ius[i];
		iu->direction = dir;
		iu->buf = kzalloc(size, gfp_mask);
		if (!iu->buf)
			goto err;

		iu->dma_addr = ib_dma_map_single(dma_dev, iu->buf, size, dir);
		if (ib_dma_mapping_error(dma_dev, iu->dma_addr)) {
			kfree(iu->buf);
			goto err;
		}

		iu->cqe.done = done;
		iu->size = size;
	}
	return ius;
err:
	rtrs_iu_free(ius, dma_dev, i);
	return NULL;
}
EXPORT_SYMBOL_GPL(rtrs_iu_alloc);

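/* Unmap and free the buffers of @queue_num I/O units, then the array itself. */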
void rtrs_iu_free(struct rtrs_iu *ius, struct ib_device *ibdev, u32 queue_num)
{
	struct rtrs_iu *iu;
	int i;

	if (!ius)
		return;

	for (i = 0; i < queue_num; i++) {
		iu = &ius[i];
		ib_dma_unmap_single(ibdev, iu->dma_addr, iu->size, iu->direction);
		kfree(iu->buf);
	}
	kfree(ius);
}
EXPORT_SYMBOL_GPL(rtrs_iu_free);

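/* Post a single-SGE receive work request covering the whole iu buffer. */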
int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu)
{
	struct rtrs_path *path = con->path;
	struct ib_recv_wr wr;
	struct ib_sge list;

	list.addr   = iu->dma_addr;
	list.length = iu->size;
	list.lkey   = path->dev->ib_pd->local_dma_lkey;

	if (list.length == 0) {
		rtrs_wrn(con->path,
			 "Posting receive work request failed, sg list is empty\n");
		return -EINVAL;
	}
	wr = (struct ib_recv_wr) {
		.wr_cqe  = &iu->cqe,
		.sg_list = &list,
		.num_sge = 1,
	};

	return ib_post_recv(con->qp, &wr, NULL);
}
EXPORT_SYMBOL_GPL(rtrs_iu_post_recv);

int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe)
{
	struct ib_recv_wr wr;

	wr = (struct ib_recv_wr) {
		.wr_cqe = cqe,
	};

	return ib_post_recv(con->qp, &wr, NULL);
}
EXPORT_SYMBOL_GPL(rtrs_post_recv_empty);

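/*
 * Append @wr to the end of the optional @head chain and put @tail behind it,
 * then post the resulting chain of work requests to the QP in one call.
 */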
static int rtrs_post_send(struct ib_qp *qp, struct ib_send_wr *head,
			  struct ib_send_wr *wr, struct ib_send_wr *tail)
{
	if (head) {
		struct ib_send_wr *next = head;

		while (next->next)
			next = next->next;
		next->next = wr;
	} else {
		head = wr;
	}

	if (tail)
		wr->next = tail;

	return ib_post_send(qp, head, NULL);
}

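/* Post a signaled single-SGE send of the first @size bytes of the iu buffer. */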
int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
		      struct ib_send_wr *head)
{
	struct rtrs_path *path = con->path;
	struct ib_send_wr wr;
	struct ib_sge list;

	if (WARN_ON(size == 0))
		return -EINVAL;

	list.addr   = iu->dma_addr;
	list.length = size;
	list.lkey   = path->dev->ib_pd->local_dma_lkey;

	wr = (struct ib_send_wr) {
		.wr_cqe     = &iu->cqe,
		.sg_list    = &list,
		.num_sge    = 1,
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};

	return rtrs_post_send(con->qp, head, &wr, NULL);
}
EXPORT_SYMBOL_GPL(rtrs_iu_post_send);

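/*
 * Post an RDMA WRITE WITH IMMEDIATE for the given SGE list, optionally
 * chained between @head and @tail. Zero-length SGEs are rejected up front
 * since the operation would otherwise fail with a length error.
 */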
int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
				struct ib_sge *sge, unsigned int num_sge,
				u32 rkey, u64 rdma_addr, u32 imm_data,
				enum ib_send_flags flags,
				struct ib_send_wr *head,
				struct ib_send_wr *tail)
{
	struct ib_rdma_wr wr;
	int i;

	wr = (struct ib_rdma_wr) {
		.wr.wr_cqe	= &iu->cqe,
		.wr.sg_list	= sge,
		.wr.num_sge	= num_sge,
		.rkey		= rkey,
		.remote_addr	= rdma_addr,
		.wr.opcode	= IB_WR_RDMA_WRITE_WITH_IMM,
		.wr.ex.imm_data	= cpu_to_be32(imm_data),
		.wr.send_flags	= flags,
	};

	/*
	 * If one of the sges has 0 size, the operation will fail with a
	 * length error.
	 */
	for (i = 0; i < num_sge; i++)
		if (WARN_ONCE(sge[i].length == 0, "sg %d is zero length\n", i))
			return -EINVAL;

	return rtrs_post_send(con->qp, head, &wr.wr, tail);
}
EXPORT_SYMBOL_GPL(rtrs_iu_post_rdma_write_imm);

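/*
 * Post a zero-length RDMA WRITE WITH IMMEDIATE, used below for heartbeat
 * and heartbeat-ACK messages. Only every signal_interval-th work request
 * is posted signaled to limit completion overhead.
 */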
static int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con,
					   struct ib_cqe *cqe,
					   u32 imm_data,
					   struct ib_send_wr *head)
{
	struct ib_rdma_wr wr;
	struct rtrs_path *path = con->path;
	enum ib_send_flags sflags;

	atomic_dec_if_positive(&con->sq_wr_avail);
	sflags = (atomic_inc_return(&con->wr_cnt) % path->signal_interval) ?
		  0 : IB_SEND_SIGNALED;

	wr = (struct ib_rdma_wr) {
		.wr.wr_cqe	= cqe,
		.wr.send_flags	= sflags,
		.wr.opcode	= IB_WR_RDMA_WRITE_WITH_IMM,
		.wr.ex.imm_data	= cpu_to_be32(imm_data),
	};

	return rtrs_post_send(con->qp, head, &wr.wr, NULL);
}

static void qp_event_handler(struct ib_event *ev, void *ctx)
{
	struct rtrs_con *con = ctx;

	switch (ev->event) {
	case IB_EVENT_COMM_EST:
		rtrs_info(con->path, "QP event %s (%d) received\n",
			  ib_event_msg(ev->event), ev->event);
		rdma_notify(con->cm_id, IB_EVENT_COMM_EST);
		break;
	default:
		rtrs_info(con->path, "Unhandled QP event %s (%d) received\n",
			  ib_event_msg(ev->event), ev->event);
		break;
	}
}

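/*
 * Connections with cid >= irq_con_num are poll queues: they get a dedicated
 * CQ from ib_alloc_cq(), all others share CQs from the ib_cq_pool.
 */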
static bool is_pollqueue(struct rtrs_con *con)
{
	return con->cid >= con->path->irq_con_num;
}

static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
		     enum ib_poll_context poll_ctx)
{
	struct rdma_cm_id *cm_id = con->cm_id;
	struct ib_cq *cq;

	if (is_pollqueue(con))
		cq = ib_alloc_cq(cm_id->device, con, nr_cqe, cq_vector,
				 poll_ctx);
	else
		cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx);

	if (IS_ERR(cq)) {
		rtrs_err(con->path, "Creating completion queue failed, errno: %pe\n",
			 cq);
		return PTR_ERR(cq);
	}
	con->cq = cq;
	con->nr_cqe = nr_cqe;

	return 0;
}

static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
		     u32 max_send_wr, u32 max_recv_wr, u32 max_sge)
{
	struct ib_qp_init_attr init_attr = {};
	struct rdma_cm_id *cm_id = con->cm_id;
	int ret;

	init_attr.cap.max_send_wr = max_send_wr;
	init_attr.cap.max_recv_wr = max_recv_wr;
	init_attr.cap.max_recv_sge = 1;
	init_attr.event_handler = qp_event_handler;
	init_attr.qp_context = con;
	init_attr.cap.max_send_sge = max_sge;

	init_attr.qp_type = IB_QPT_RC;
	init_attr.send_cq = con->cq;
	init_attr.recv_cq = con->cq;
	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;

	ret = rdma_create_qp(cm_id, pd, &init_attr);
	if (ret) {
		rtrs_err(con->path, "Creating QP failed, err: %pe\n",
			 ERR_PTR(ret));
		return ret;
	}
	con->qp = cm_id->qp;

	return ret;
}

static void destroy_cq(struct rtrs_con *con)
{
	if (con->cq) {
		if (is_pollqueue(con))
			ib_free_cq(con->cq);
		else
			ib_cq_pool_put(con->cq, con->nr_cqe);
	}
	con->cq = NULL;
}

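/*
 * Create the completion queue and the RC queue pair of a connection; if QP
 * creation fails, the CQ is destroyed again before returning the error.
 */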
int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con,
		      u32 max_send_sge, int cq_vector, int nr_cqe,
		      u32 max_send_wr, u32 max_recv_wr,
		      enum ib_poll_context poll_ctx)
{
	int err;

	err = create_cq(con, cq_vector, nr_cqe, poll_ctx);
	if (err)
		return err;

	err = create_qp(con, path->dev->ib_pd, max_send_wr, max_recv_wr,
			max_send_sge);
	if (err) {
		destroy_cq(con);
		return err;
	}
	con->path = path;

	return 0;
}
EXPORT_SYMBOL_GPL(rtrs_cq_qp_create);

void rtrs_cq_qp_destroy(struct rtrs_con *con)
{
	if (con->qp) {
		rdma_destroy_qp(con->cm_id);
		con->qp = NULL;
	}
	destroy_cq(con);
}
EXPORT_SYMBOL_GPL(rtrs_cq_qp_destroy);

static void schedule_hb(struct rtrs_path *path)
{
	queue_delayed_work(path->hb_wq, &path->hb_dwork,
			   msecs_to_jiffies(path->hb_interval_ms));
}

void rtrs_send_hb_ack(struct rtrs_path *path)
{
	struct rtrs_con *usr_con = path->con[0];
	u32 imm;
	int err;

	imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0);
	err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm,
					     NULL);
	if (err) {
		rtrs_err(path, "send HB ACK failed, errno: %pe\n",
			 ERR_PTR(err));
		path->hb_err_handler(usr_con);
		return;
	}
}
EXPORT_SYMBOL_GPL(rtrs_send_hb_ack);

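/*
 * Heartbeat worker: invoke the error handler once more than hb_missed_max
 * heartbeats went unanswered; while a heartbeat is still outstanding only
 * reschedule, otherwise send a new one and reschedule.
 */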
static void hb_work(struct work_struct *work)
{
	struct rtrs_con *usr_con;
	struct rtrs_path *path;
	u32 imm;
	int err;

	path = container_of(to_delayed_work(work), typeof(*path), hb_dwork);
	usr_con = path->con[0];

	if (path->hb_missed_cnt > path->hb_missed_max) {
		rtrs_err(path, "HB missed max reached.\n");
		path->hb_err_handler(usr_con);
		return;
	}
	if (path->hb_missed_cnt++) {
		/* Reschedule work without sending hb */
		schedule_hb(path);
		return;
	}

	path->hb_last_sent = ktime_get();

	imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0);
	err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm,
					     NULL);
	if (err) {
		rtrs_err(path, "HB send failed, errno: %pe\n",
			 ERR_PTR(err));
		path->hb_err_handler(usr_con);
		return;
	}

	schedule_hb(path);
}

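/*
 * Set up the heartbeat state of a path; the delayed work is armed by
 * rtrs_start_hb() and cancelled by rtrs_stop_hb().
 */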
void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe,
		  unsigned int interval_ms, unsigned int missed_max,
		  void (*err_handler)(struct rtrs_con *con),
		  struct workqueue_struct *wq)
{
	path->hb_cqe = cqe;
	path->hb_interval_ms = interval_ms;
	path->hb_err_handler = err_handler;
	path->hb_wq = wq;
	path->hb_missed_max = missed_max;
	path->hb_missed_cnt = 0;
	INIT_DELAYED_WORK(&path->hb_dwork, hb_work);
}
EXPORT_SYMBOL_GPL(rtrs_init_hb);

void rtrs_start_hb(struct rtrs_path *path)
{
	schedule_hb(path);
}
EXPORT_SYMBOL_GPL(rtrs_start_hb);

void rtrs_stop_hb(struct rtrs_path *path)
{
	cancel_delayed_work_sync(&path->hb_dwork);
	path->hb_missed_cnt = 0;
}
EXPORT_SYMBOL_GPL(rtrs_stop_hb);

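/* Parse a textual IB GID plus port into an AF_IB sockaddr. */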
static int rtrs_str_gid_to_sockaddr(const char *addr, size_t len,
				    short port, struct sockaddr_storage *dst)
{
	struct sockaddr_ib *dst_ib = (struct sockaddr_ib *)dst;
	int ret;

	/*
	 * We can use some of the IPv6 functions since GID is a valid
	 * IPv6 address format
	 */
	ret = in6_pton(addr, len, dst_ib->sib_addr.sib_raw, '\0', NULL);
	if (ret == 0)
		return -EINVAL;

	dst_ib->sib_family = AF_IB;
	/*
	 * Use the same TCP server port number as the IB service ID
	 * on the IB port space range
	 */
	dst_ib->sib_sid = cpu_to_be64(RDMA_IB_IP_PS_IB | port);
	dst_ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
	dst_ib->sib_pkey = cpu_to_be16(0xffff);

	return 0;
}

/**
 * rtrs_str_to_sockaddr() - Convert rtrs address string to sockaddr
 * @addr:	String representation of an addr (IPv4, IPv6 or IB GID):
 *		- "ip:192.168.1.1"
 *		- "ip:fe80::200:5aee:feaa:20a2"
 *		- "gid:fe80::200:5aee:feaa:20a2"
 * @len:	String address length
 * @port:	Destination port
 * @dst:	Destination sockaddr structure
 *
 * Returns 0 if conversion successful. Non-zero on error.
 */
static int rtrs_str_to_sockaddr(const char *addr, size_t len,
				u16 port, struct sockaddr_storage *dst)
{
	if (strncmp(addr, "gid:", 4) == 0) {
		return rtrs_str_gid_to_sockaddr(addr + 4, len - 4, port, dst);
	} else if (strncmp(addr, "ip:", 3) == 0) {
		char port_str[8];
		char *cpy;
		int err;

		snprintf(port_str, sizeof(port_str), "%u", port);
		cpy = kstrndup(addr + 3, len - 3, GFP_KERNEL);
		err = cpy ? inet_pton_with_scope(&init_net, AF_UNSPEC,
						 cpy, port_str, dst) : -ENOMEM;
		kfree(cpy);

		return err;
	}
	return -EPROTONOSUPPORT;
}

/**
 * sockaddr_to_str() - convert sockaddr to a string.
 * @addr:	the sockaddr structure to be converted.
 * @buf:	string containing socket addr.
 * @len:	string length.
 *
 * The return value is the number of characters written into buf not
 * including the trailing '\0'. If len is 0 the function returns 0.
 */
int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len)
{
	switch (addr->sa_family) {
	case AF_IB:
		return scnprintf(buf, len, "gid:%pI6",
				 &((struct sockaddr_ib *)addr)->sib_addr.sib_raw);
	case AF_INET:
		return scnprintf(buf, len, "ip:%pI4",
				 &((struct sockaddr_in *)addr)->sin_addr);
	case AF_INET6:
		return scnprintf(buf, len, "ip:%pI6c",
				 &((struct sockaddr_in6 *)addr)->sin6_addr);
	}
	return scnprintf(buf, len, "<invalid address family>");
}
EXPORT_SYMBOL(sockaddr_to_str);

/**
 * rtrs_addr_to_str() - convert rtrs_addr to a string "src@dst"
 * @addr:	the rtrs_addr structure to be converted
 * @buf:	string containing source and destination addr of a path
 *		separated by '@', i.e. "ip:1.1.1.1@ip:1.1.1.2"
 * @len:	string length
 *
 * The return value is the number of characters written into buf not
 * including the trailing '\0'.
 */
int rtrs_addr_to_str(const struct rtrs_addr *addr, char *buf, size_t len)
{
	int cnt;

	cnt = sockaddr_to_str((struct sockaddr *)addr->src,
			      buf, len);
	cnt += scnprintf(buf + cnt, len - cnt, "@");
	sockaddr_to_str((struct sockaddr *)addr->dst,
			buf + cnt, len - cnt);
	return cnt;
}
EXPORT_SYMBOL(rtrs_addr_to_str);

/**
 * rtrs_addr_to_sockaddr() - convert path string "src,dst" or "src@dst"
 * to sockaddresses
 * @str:	string containing source and destination addr of a path
 *		separated by ',' or '@', i.e. "ip:1.1.1.1,ip:1.1.1.2" or
 *		"ip:1.1.1.1@ip:1.1.1.2". If str contains only one address,
 *		it is considered to be the destination.
 * @len:	string length
 * @port:	Destination port number.
 * @addr:	will be set to the source/destination address or to NULL
 *		if str doesn't contain any source address.
 *
 * Returns zero if conversion successful. Non-zero otherwise.
 */
int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port,
			  struct rtrs_addr *addr)
{
	const char *d;

	d = strchr(str, ',');
	if (!d)
		d = strchr(str, '@');
	if (d) {
		if (rtrs_str_to_sockaddr(str, d - str, 0, addr->src))
			return -EINVAL;
		d += 1;
		len -= d - str;
		str = d;

	} else {
		addr->src = NULL;
	}
	return rtrs_str_to_sockaddr(str, len, port, addr->dst);
}
EXPORT_SYMBOL(rtrs_addr_to_sockaddr);

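/*
 * Device/PD pool: rtrs_ib_dev_find_or_add() hands out one refcounted
 * rtrs_ib_dev (with its protection domain) per RDMA device, and
 * rtrs_ib_dev_put() drops the reference and frees it via dev_free().
 */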
void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags,
			   struct rtrs_rdma_dev_pd *pool)
{
	INIT_LIST_HEAD(&pool->list);
	mutex_init(&pool->mutex);
	pool->pd_flags = pd_flags;
}
EXPORT_SYMBOL(rtrs_rdma_dev_pd_init);

void rtrs_rdma_dev_pd_deinit(struct rtrs_rdma_dev_pd *pool)
{
	mutex_destroy(&pool->mutex);
	WARN_ON(!list_empty(&pool->list));
}
EXPORT_SYMBOL(rtrs_rdma_dev_pd_deinit);

static void dev_free(struct kref *ref)
{
	struct rtrs_rdma_dev_pd *pool;
	struct rtrs_ib_dev *dev;

	dev = container_of(ref, typeof(*dev), ref);
	pool = dev->pool;

	mutex_lock(&pool->mutex);
	list_del(&dev->entry);
	mutex_unlock(&pool->mutex);

	if (pool->ops && pool->ops->deinit)
		pool->ops->deinit(dev);

	ib_dealloc_pd(dev->ib_pd);
	kfree(dev);
}

int rtrs_ib_dev_put(struct rtrs_ib_dev *dev)
{
	return kref_put(&dev->ref, dev_free);
}
EXPORT_SYMBOL(rtrs_ib_dev_put);

static int rtrs_ib_dev_get(struct rtrs_ib_dev *dev)
{
	return kref_get_unless_zero(&dev->ref);
}

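/*
 * Look up an existing pool entry matching @ib_dev by node GUID and take a
 * reference, or allocate a new entry with a fresh protection domain and add
 * it to the pool. Returns NULL on allocation or init failure.
 */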
struct rtrs_ib_dev *
rtrs_ib_dev_find_or_add(struct ib_device *ib_dev,
			struct rtrs_rdma_dev_pd *pool)
{
	struct rtrs_ib_dev *dev;

	mutex_lock(&pool->mutex);
	list_for_each_entry(dev, &pool->list, entry) {
		if (dev->ib_dev->node_guid == ib_dev->node_guid &&
		    rtrs_ib_dev_get(dev))
			goto out_unlock;
	}
	mutex_unlock(&pool->mutex);
	dev = kzalloc_obj(*dev);
	if (!dev)
		goto out_err;

	kref_init(&dev->ref);
	dev->pool = pool;
	dev->ib_dev = ib_dev;
	dev->ib_pd = ib_alloc_pd(ib_dev, pool->pd_flags);
	if (IS_ERR(dev->ib_pd))
		goto out_free_dev;

	if (pool->ops && pool->ops->init && pool->ops->init(dev))
		goto out_free_pd;

	mutex_lock(&pool->mutex);
	list_add(&dev->entry, &pool->list);
out_unlock:
	mutex_unlock(&pool->mutex);
	return dev;

out_free_pd:
	ib_dealloc_pd(dev->ib_pd);
out_free_dev:
	kfree(dev);
out_err:
	return NULL;
}
EXPORT_SYMBOL(rtrs_ib_dev_find_or_add);