// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Basic Transport Functions exploiting Infiniband API
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/reboot.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/smc.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_wr.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"
#include "smc_netlink.h"
#include "smc_stats.h"
#include "smc_tracepoint.h"

#define SMC_LGR_NUM_INCR		256
#define SMC_LGR_FREE_DELAY_SERV		(600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT		(SMC_LGR_FREE_DELAY_SERV + 10 * HZ)

struct smc_lgr_list smc_lgr_list = {	/* established link groups */
	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
	.list = LIST_HEAD_INIT(smc_lgr_list.list),
	.num = 0,
};

static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */
static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc);
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);

static void smc_link_down_work(struct work_struct *work);

/* return head of link group list and its lock for a given link group */
static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
						  spinlock_t **lgr_lock)
{
	if (lgr->is_smcd) {
		*lgr_lock = &lgr->smcd->lgr_lock;
		return &lgr->smcd->lgr_list;
	}

	*lgr_lock = &smc_lgr_list.lock;
	return &smc_lgr_list.list;
}

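/* adjust the per-port link counter of the RoCE device backing a link;
 * these counters apparently feed the per-device statistics reported via
 * the smc netlink interface
 */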
static void smc_ibdev_cnt_inc(struct smc_link *lnk)
{
	atomic_inc(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]);
}

static void smc_ibdev_cnt_dec(struct smc_link *lnk)
{
	atomic_dec(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]);
}

static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
	/* client link group creation always follows the server link group
	 * creation. For client use a somewhat higher removal delay time,
	 * otherwise there is a risk of out-of-sync link groups.
	 */
	if (!lgr->freeing) {
		mod_delayed_work(system_wq, &lgr->free_work,
				 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
				 SMC_LGR_FREE_DELAY_CLNT :
				 SMC_LGR_FREE_DELAY_SERV);
	}
}

/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
 * @conn	connection to register
 */
static void smc_lgr_add_alert_token(struct smc_connection *conn)
{
	struct rb_node **link, *parent = NULL;
	u32 token = conn->alert_token_local;

	link = &conn->lgr->conns_all.rb_node;
	while (*link) {
		struct smc_connection *cur = rb_entry(*link,
					struct smc_connection, alert_node);

		parent = *link;
		if (cur->alert_token_local > token)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	/* Put the new node there */
	rb_link_node(&conn->alert_node, parent, link);
	rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
}

/* assign an SMC-R link to the connection */
static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first)
{
	enum smc_link_state expected = first ? SMC_LNK_ACTIVATING :
				       SMC_LNK_ACTIVE;
	int i, j;

	/* do link balancing */
	conn->lnk = NULL;	/* reset conn->lnk first */
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &conn->lgr->lnk[i];

		if (lnk->state != expected || lnk->link_is_asym)
			continue;
		if (conn->lgr->role == SMC_CLNT) {
			conn->lnk = lnk; /* temporary, SMC server assigns link */
			break;
		}
		if (conn->lgr->conns_num % 2) {
			for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
				struct smc_link *lnk2;

				lnk2 = &conn->lgr->lnk[j];
				if (lnk2->state == expected &&
				    !lnk2->link_is_asym) {
					conn->lnk = lnk2;
					break;
				}
			}
		}
		if (!conn->lnk)
			conn->lnk = lnk;
		break;
	}
	if (!conn->lnk)
		return SMC_CLC_DECL_NOACTLINK;
	atomic_inc(&conn->lnk->conn_cnt);
	return 0;
}

/* Register connection in link group by assigning an alert token
 * registered in a search tree.
 * Requires @conns_lock
 * Note that '0' is a reserved value and not assigned.
 */
static int smc_lgr_register_conn(struct smc_connection *conn, bool first)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	static atomic_t nexttoken = ATOMIC_INIT(0);
	int rc;

	if (!conn->lgr->is_smcd) {
		rc = smcr_lgr_conn_assign_link(conn, first);
		if (rc) {
			conn->lgr = NULL;
			return rc;
		}
	}
	/* find a new alert_token_local value not yet used by some connection
	 * in this link group
	 */
	sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
	while (!conn->alert_token_local) {
		conn->alert_token_local = atomic_inc_return(&nexttoken);
		if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
			conn->alert_token_local = 0;
	}
	smc_lgr_add_alert_token(conn);
	conn->lgr->conns_num++;
	return 0;
}

/* Unregister connection and reset the alert token of the given connection
 */
static void __smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_link_group *lgr = conn->lgr;

	rb_erase(&conn->alert_node, &lgr->conns_all);
	if (conn->lnk)
		atomic_dec(&conn->lnk->conn_cnt);
	lgr->conns_num--;
	conn->alert_token_local = 0;
	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}

/* Unregister connection from lgr
 */
static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!smc_conn_lgr_valid(conn))
		return;
	write_lock_bh(&lgr->conns_lock);
	if (conn->alert_token_local) {
		__smc_lgr_unregister_conn(conn);
	}
	write_unlock_bh(&lgr->conns_lock);
}

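/* add a buffer to one of the lgr's buffer lists and account its size; for
 * SMC-D RMBs the reserved smcd_cdc_msg area is included in the accounting
 */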
static void smc_lgr_buf_list_add(struct smc_link_group *lgr,
				 bool is_rmb,
				 struct list_head *buf_list,
				 struct smc_buf_desc *buf_desc)
{
	list_add(&buf_desc->list, buf_list);
	if (is_rmb) {
		lgr->alloc_rmbs += buf_desc->len;
		lgr->alloc_rmbs +=
			lgr->is_smcd ? sizeof(struct smcd_cdc_msg) : 0;
	} else {
		lgr->alloc_sndbufs += buf_desc->len;
	}
}

static void smc_lgr_buf_list_del(struct smc_link_group *lgr,
				 bool is_rmb,
				 struct smc_buf_desc *buf_desc)
{
	list_del(&buf_desc->list);
	if (is_rmb) {
		lgr->alloc_rmbs -= buf_desc->len;
		lgr->alloc_rmbs -=
			lgr->is_smcd ? sizeof(struct smcd_cdc_msg) : 0;
	} else {
		lgr->alloc_sndbufs -= buf_desc->len;
	}
}

int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	char hostname[SMC_MAX_HOSTNAME_LEN + 1];
	char smc_seid[SMC_MAX_EID_LEN + 1];
	struct nlattr *attrs;
	u8 *seid = NULL;
	u8 *host = NULL;
	void *nlh;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_SYS_INFO);
	if (!nlh)
		goto errmsg;
	if (cb_ctx->pos[0])
		goto errout;
	attrs = nla_nest_start(skb, SMC_GEN_SYS_INFO);
	if (!attrs)
		goto errout;
	if (nla_put_u8(skb, SMC_NLA_SYS_VER, SMC_V2))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_SYS_REL, SMC_RELEASE))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_SYS_IS_ISM_V2, smc_ism_is_v2_capable()))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_SYS_IS_SMCR_V2, true))
		goto errattr;
	smc_clc_get_hostname(&host);
	if (host) {
		memcpy(hostname, host, SMC_MAX_HOSTNAME_LEN);
		hostname[SMC_MAX_HOSTNAME_LEN] = 0;
		if (nla_put_string(skb, SMC_NLA_SYS_LOCAL_HOST, hostname))
			goto errattr;
	}
	if (smc_ism_is_v2_capable()) {
		smc_ism_get_system_eid(&seid);
		memcpy(smc_seid, seid, SMC_MAX_EID_LEN);
		smc_seid[SMC_MAX_EID_LEN] = 0;
		if (nla_put_string(skb, SMC_NLA_SYS_SEID, smc_seid))
			goto errattr;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	cb_ctx->pos[0] = 1;
	return skb->len;

errattr:
	nla_nest_cancel(skb, attrs);
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return skb->len;
}

/* Fill SMC_NLA_LGR_D_V2_COMMON/SMC_NLA_LGR_R_V2_COMMON nested attributes */
static int smc_nl_fill_lgr_v2_common(struct smc_link_group *lgr,
				     struct sk_buff *skb,
				     struct netlink_callback *cb,
				     struct nlattr *v2_attrs)
{
	char smc_host[SMC_MAX_HOSTNAME_LEN + 1];
	char smc_eid[SMC_MAX_EID_LEN + 1];

	if (nla_put_u8(skb, SMC_NLA_LGR_V2_VER, lgr->smc_version))
		goto errv2attr;
	if (nla_put_u8(skb, SMC_NLA_LGR_V2_REL, lgr->peer_smc_release))
		goto errv2attr;
	if (nla_put_u8(skb, SMC_NLA_LGR_V2_OS, lgr->peer_os))
		goto errv2attr;
	memcpy(smc_host, lgr->peer_hostname, SMC_MAX_HOSTNAME_LEN);
	smc_host[SMC_MAX_HOSTNAME_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_V2_PEER_HOST, smc_host))
		goto errv2attr;
	memcpy(smc_eid, lgr->negotiated_eid, SMC_MAX_EID_LEN);
	smc_eid[SMC_MAX_EID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_V2_NEG_EID, smc_eid))
		goto errv2attr;

	nla_nest_end(skb, v2_attrs);
	return 0;

errv2attr:
	nla_nest_cancel(skb, v2_attrs);
	return -EMSGSIZE;
}

static int smc_nl_fill_smcr_lgr_v2(struct smc_link_group *lgr,
				   struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct nlattr *v2_attrs;

	v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_R_V2);
	if (!v2_attrs)
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_V2_DIRECT, !lgr->uses_gateway))
		goto errv2attr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_V2_MAX_CONNS, lgr->max_conns))
		goto errv2attr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_V2_MAX_LINKS, lgr->max_links))
		goto errv2attr;

	nla_nest_end(skb, v2_attrs);
	return 0;

errv2attr:
	nla_nest_cancel(skb, v2_attrs);
errattr:
	return -EMSGSIZE;
}

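/* fill the netlink attributes describing one SMC-R link group; V2 attributes
 * are only included for link groups of SMC version 2 or higher
 */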
static int smc_nl_fill_lgr(struct smc_link_group *lgr,
			   struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	char smc_target[SMC_MAX_PNETID_LEN + 1];
	struct nlattr *attrs, *v2_attrs;

	attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCR);
	if (!attrs)
		goto errout;

	if (nla_put_u32(skb, SMC_NLA_LGR_R_ID, *((u32 *)&lgr->id)))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LGR_R_CONNS_NUM, lgr->conns_num))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_ROLE, lgr->role))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_TYPE, lgr->type))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_BUF_TYPE, lgr->buf_type))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_VLAN_ID, lgr->vlan_id))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_LGR_R_NET_COOKIE,
			      lgr->net->net_cookie, SMC_NLA_LGR_R_PAD))
		goto errattr;
	memcpy(smc_target, lgr->pnet_id, SMC_MAX_PNETID_LEN);
	smc_target[SMC_MAX_PNETID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_R_PNETID, smc_target))
		goto errattr;
	if (nla_put_uint(skb, SMC_NLA_LGR_R_SNDBUF_ALLOC, lgr->alloc_sndbufs))
		goto errattr;
	if (nla_put_uint(skb, SMC_NLA_LGR_R_RMB_ALLOC, lgr->alloc_rmbs))
		goto errattr;
	if (lgr->smc_version > SMC_V1) {
		v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_R_V2_COMMON);
		if (!v2_attrs)
			goto errattr;
		if (smc_nl_fill_lgr_v2_common(lgr, skb, cb, v2_attrs))
			goto errattr;
		if (smc_nl_fill_smcr_lgr_v2(lgr, skb, cb))
			goto errattr;
	}

	nla_nest_end(skb, attrs);
	return 0;
errattr:
	nla_nest_cancel(skb, attrs);
errout:
	return -EMSGSIZE;
}

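/* emit one netlink message describing a single SMC-R link, including its
 * state, connection count and the local and peer GIDs in printable form
 */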
static int smc_nl_fill_lgr_link(struct smc_link_group *lgr,
				struct smc_link *link,
				struct sk_buff *skb,
				struct netlink_callback *cb)
{
	char smc_ibname[IB_DEVICE_NAME_MAX];
	u8 smc_gid_target[41];
	struct nlattr *attrs;
	u32 link_uid = 0;
	void *nlh;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_LINK_SMCR);
	if (!nlh)
		goto errmsg;

	attrs = nla_nest_start(skb, SMC_GEN_LINK_SMCR);
	if (!attrs)
		goto errout;

	if (nla_put_u8(skb, SMC_NLA_LINK_ID, link->link_id))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LINK_STATE, link->state))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LINK_CONN_CNT,
			atomic_read(&link->conn_cnt)))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LINK_IB_PORT, link->ibport))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LINK_NET_DEV, link->ndev_ifidx))
		goto errattr;
	snprintf(smc_ibname, sizeof(smc_ibname), "%s", link->ibname);
	if (nla_put_string(skb, SMC_NLA_LINK_IB_DEV, smc_ibname))
		goto errattr;
	memcpy(&link_uid, link->link_uid, sizeof(link_uid));
	if (nla_put_u32(skb, SMC_NLA_LINK_UID, link_uid))
		goto errattr;
	memcpy(&link_uid, link->peer_link_uid, sizeof(link_uid));
	if (nla_put_u32(skb, SMC_NLA_LINK_PEER_UID, link_uid))
		goto errattr;
	memset(smc_gid_target, 0, sizeof(smc_gid_target));
	smc_gid_be16_convert(smc_gid_target, link->gid);
	if (nla_put_string(skb, SMC_NLA_LINK_GID, smc_gid_target))
		goto errattr;
	memset(smc_gid_target, 0, sizeof(smc_gid_target));
	smc_gid_be16_convert(smc_gid_target, link->peer_gid);
	if (nla_put_string(skb, SMC_NLA_LINK_PEER_GID, smc_gid_target))
		goto errattr;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	return 0;
errattr:
	nla_nest_cancel(skb, attrs);
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

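/* dump one SMC-R link group and, if list_links is set, all its usable links */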
static int smc_nl_handle_lgr(struct smc_link_group *lgr,
			     struct sk_buff *skb,
			     struct netlink_callback *cb,
			     bool list_links)
{
	void *nlh;
	int i;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_LGR_SMCR);
	if (!nlh)
		goto errmsg;
	if (smc_nl_fill_lgr(lgr, skb, cb))
		goto errout;

	genlmsg_end(skb, nlh);
	if (!list_links)
		goto out;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_usable(&lgr->lnk[i]))
			continue;
		if (smc_nl_fill_lgr_link(lgr, &lgr->lnk[i], skb, cb))
			goto errout;
	}
out:
	return 0;

errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

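/* walk the SMC-R link group list under its lock and dump each link group,
 * resuming at the position stored in the netlink dump context
 */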
static void smc_nl_fill_lgr_list(struct smc_lgr_list *smc_lgr,
				 struct sk_buff *skb,
				 struct netlink_callback *cb,
				 bool list_links)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct smc_link_group *lgr;
	int snum = cb_ctx->pos[0];
	int num = 0;

	spin_lock_bh(&smc_lgr->lock);
	list_for_each_entry(lgr, &smc_lgr->list, list) {
		if (num < snum)
			goto next;
		if (smc_nl_handle_lgr(lgr, skb, cb, list_links))
			goto errout;
next:
		num++;
	}
errout:
	spin_unlock_bh(&smc_lgr->lock);
	cb_ctx->pos[0] = num;
}

static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr,
				struct sk_buff *skb,
				struct netlink_callback *cb)
{
	char smc_pnet[SMC_MAX_PNETID_LEN + 1];
	struct smcd_dev *smcd = lgr->smcd;
	struct smcd_gid smcd_gid;
	struct nlattr *attrs;
	void *nlh;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_LGR_SMCD);
	if (!nlh)
		goto errmsg;

	attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCD);
	if (!attrs)
		goto errout;

	if (nla_put_u32(skb, SMC_NLA_LGR_D_ID, *((u32 *)&lgr->id)))
		goto errattr;
	smcd->ops->get_local_gid(smcd, &smcd_gid);
	if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_GID,
			      smcd_gid.gid, SMC_NLA_LGR_D_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_EXT_GID,
			      smcd_gid.gid_ext, SMC_NLA_LGR_D_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_PEER_GID, lgr->peer_gid.gid,
			      SMC_NLA_LGR_D_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_PEER_EXT_GID,
			      lgr->peer_gid.gid_ext, SMC_NLA_LGR_D_PAD))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_D_VLAN_ID, lgr->vlan_id))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LGR_D_CONNS_NUM, lgr->conns_num))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LGR_D_CHID, smc_ism_get_chid(lgr->smcd)))
		goto errattr;
	if (nla_put_uint(skb, SMC_NLA_LGR_D_SNDBUF_ALLOC, lgr->alloc_sndbufs))
		goto errattr;
	if (nla_put_uint(skb, SMC_NLA_LGR_D_DMB_ALLOC, lgr->alloc_rmbs))
		goto errattr;
	memcpy(smc_pnet, lgr->smcd->pnetid, SMC_MAX_PNETID_LEN);
	smc_pnet[SMC_MAX_PNETID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_D_PNETID, smc_pnet))
		goto errattr;
	if (lgr->smc_version > SMC_V1) {
		struct nlattr *v2_attrs;

		v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_D_V2_COMMON);
		if (!v2_attrs)
			goto errattr;
		if (smc_nl_fill_lgr_v2_common(lgr, skb, cb, v2_attrs))
			goto errattr;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	return 0;

errattr:
	nla_nest_cancel(skb, attrs);
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

static int smc_nl_handle_smcd_lgr(struct smcd_dev *dev,
				  struct sk_buff *skb,
				  struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct smc_link_group *lgr;
	int snum = cb_ctx->pos[1];
	int rc = 0, num = 0;

	spin_lock_bh(&dev->lgr_lock);
	list_for_each_entry(lgr, &dev->lgr_list, list) {
		if (!lgr->is_smcd)
			continue;
		if (num < snum)
			goto next;
		rc = smc_nl_fill_smcd_lgr(lgr, skb, cb);
		if (rc)
			goto errout;
next:
		num++;
	}
errout:
	spin_unlock_bh(&dev->lgr_lock);
	cb_ctx->pos[1] = num;
	return rc;
}

static int smc_nl_fill_smcd_dev(struct smcd_dev_list *dev_list,
				struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct smcd_dev *smcd_dev;
	int snum = cb_ctx->pos[0];
	int rc = 0, num = 0;

	mutex_lock(&dev_list->mutex);
	list_for_each_entry(smcd_dev, &dev_list->list, list) {
		if (list_empty(&smcd_dev->lgr_list))
			continue;
		if (num < snum)
			goto next;
		rc = smc_nl_handle_smcd_lgr(smcd_dev, skb, cb);
		if (rc)
			goto errout;
next:
		num++;
	}
errout:
	mutex_unlock(&dev_list->mutex);
	cb_ctx->pos[0] = num;
	return rc;
}

int smcr_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
{
	bool list_links = false;

	smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links);
	return skb->len;
}

int smcr_nl_get_link(struct sk_buff *skb, struct netlink_callback *cb)
{
	bool list_links = true;

	smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links);
	return skb->len;
}

int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
{
	smc_nl_fill_smcd_dev(&smcd_dev_list, skb, cb);
	return skb->len;
}

void smc_lgr_cleanup_early(struct smc_link_group *lgr)
{
	spinlock_t *lgr_lock;

	if (!lgr)
		return;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	/* do not use this link group for new connections */
	if (!list_empty(&lgr->list))
		list_del_init(&lgr->list);
	spin_unlock_bh(lgr_lock);
	__smc_lgr_terminate(lgr, true);
}

static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
{
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &lgr->lnk[i];

		if (smc_link_sendable(lnk))
			lnk->state = SMC_LNK_INACTIVE;
	}
	wake_up_all(&lgr->llc_msg_waiter);
	wake_up_all(&lgr->llc_flow_waiter);
}

static void smc_lgr_free(struct smc_link_group *lgr);

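/* delayed work to free a link group that has no more connections; bails out
 * if connections showed up again or another instance is already freeing
 */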
static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
	spinlock_t *lgr_lock;
	bool conns;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;
	}
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
		spin_unlock_bh(lgr_lock);
		return;
	}
	list_del_init(&lgr->list); /* remove from smc_lgr_list */
	lgr->freeing = 1; /* this instance does the freeing, no new schedule */
	spin_unlock_bh(lgr_lock);
	cancel_delayed_work(&lgr->free_work);

	if (!lgr->is_smcd && !lgr->terminating)
		smc_llc_send_link_delete_all(lgr, true,
					     SMC_LLC_DEL_PROG_INIT_TERM);
	if (lgr->is_smcd && !lgr->terminating)
		smc_ism_signal_shutdown(lgr);
	if (!lgr->is_smcd)
		smcr_lgr_link_deactivate_all(lgr);
	smc_lgr_free(lgr);
}

static void smc_lgr_terminate_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  terminate_work);

	__smc_lgr_terminate(lgr, true);
}

/* return next unique link id for the lgr */
static u8 smcr_next_link_id(struct smc_link_group *lgr)
{
	u8 link_id;
	int i;

	while (1) {
again:
		link_id = ++lgr->next_link_id;
		if (!link_id)	/* skip zero as link_id */
			link_id = ++lgr->next_link_id;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (smc_link_usable(&lgr->lnk[i]) &&
			    lgr->lnk[i].link_id == link_id)
				goto again;
		}
		break;
	}
	return link_id;
}

static void smcr_copy_dev_info_to_link(struct smc_link *link)
{
	struct smc_ib_device *smcibdev = link->smcibdev;

	snprintf(link->ibname, sizeof(link->ibname), "%s",
		 smcibdev->ibdev->name);
	link->ndev_ifidx = smcibdev->ndev_ifidx[link->ibport - 1];
}

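/* initialize an SMC-R link: take references on the lgr and the RoCE device,
 * pick a link_id, determine the GID and set up the IB resources (protection
 * domain, queue pair, work request memory); on failure everything is torn
 * down in reverse order and the link is reset to SMC_LNK_UNUSED
 */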
int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
		   u8 link_idx, struct smc_init_info *ini)
{
	struct smc_ib_device *smcibdev;
	u8 rndvec[3];
	int rc;

	if (lgr->smc_version == SMC_V2) {
		lnk->smcibdev = ini->smcrv2.ib_dev_v2;
		lnk->ibport = ini->smcrv2.ib_port_v2;
	} else {
		lnk->smcibdev = ini->ib_dev;
		lnk->ibport = ini->ib_port;
	}
	get_device(&lnk->smcibdev->ibdev->dev);
	atomic_inc(&lnk->smcibdev->lnk_cnt);
	refcount_set(&lnk->refcnt, 1); /* link refcnt is set to 1 */
	lnk->clearing = 0;
	lnk->path_mtu = lnk->smcibdev->pattr[lnk->ibport - 1].active_mtu;
	lnk->link_id = smcr_next_link_id(lgr);
	lnk->lgr = lgr;
	smc_lgr_hold(lgr); /* lgr_put in smcr_link_clear() */
	lnk->link_idx = link_idx;
	lnk->wr_rx_id_compl = 0;
	smc_ibdev_cnt_inc(lnk);
	smcr_copy_dev_info_to_link(lnk);
	atomic_set(&lnk->conn_cnt, 0);
	smc_llc_link_set_uid(lnk);
	INIT_WORK(&lnk->link_down_wrk, smc_link_down_work);
	if (!lnk->smcibdev->initialized) {
		rc = smc_ib_setup_per_ibdev(lnk->smcibdev);
		if (rc)
			goto out;
	}
	get_random_bytes(rndvec, sizeof(rndvec));
	lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
		(rndvec[2] << 16);
	rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
				  ini->vlan_id, lnk->gid, &lnk->sgid_index,
				  lgr->smc_version == SMC_V2 ?
				  &ini->smcrv2 : NULL);
	if (rc)
		goto out;
	rc = smc_llc_link_init(lnk);
	if (rc)
		goto out;
	rc = smc_wr_alloc_link_mem(lnk);
	if (rc)
		goto clear_llc_lnk;
	rc = smc_ib_create_protection_domain(lnk);
	if (rc)
		goto free_link_mem;
	rc = smc_ib_create_queue_pair(lnk);
	if (rc)
		goto dealloc_pd;
	rc = smc_wr_create_link(lnk);
	if (rc)
		goto destroy_qp;
	lnk->state = SMC_LNK_ACTIVATING;
	return 0;

destroy_qp:
	smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
	smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
	smc_wr_free_link_mem(lnk);
clear_llc_lnk:
	smc_llc_link_clear(lnk, false);
out:
	smc_ibdev_cnt_dec(lnk);
	put_device(&lnk->smcibdev->ibdev->dev);
	smcibdev = lnk->smcibdev;
	memset(lnk, 0, sizeof(struct smc_link));
	lnk->state = SMC_LNK_UNUSED;
	if (!atomic_dec_return(&smcibdev->lnk_cnt))
		wake_up(&smcibdev->lnks_deleted);
	smc_lgr_put(lgr); /* lgr_hold above */
	return rc;
}

/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_link_group *lgr;
	struct list_head *lgr_list;
	struct smcd_dev *smcd;
	struct smc_link *lnk;
	spinlock_t *lgr_lock;
	u8 link_idx;
	int rc = 0;
	int i;

	if (ini->is_smcd && ini->vlan_id) {
		if (smc_ism_get_vlan(ini->ism_dev[ini->ism_selected],
				     ini->vlan_id)) {
			rc = SMC_CLC_DECL_ISMVLANERR;
			goto out;
		}
	}

	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
	if (!lgr) {
		rc = SMC_CLC_DECL_MEM;
		goto ism_put_vlan;
	}
	lgr->tx_wq = alloc_workqueue("smc_tx_wq-%*phN", 0, 0,
				     SMC_LGR_ID_SIZE, &lgr->id);
	if (!lgr->tx_wq) {
		rc = -ENOMEM;
		goto free_lgr;
	}
	lgr->is_smcd = ini->is_smcd;
	lgr->sync_err = 0;
	lgr->terminating = 0;
	lgr->freeing = 0;
	lgr->vlan_id = ini->vlan_id;
	refcount_set(&lgr->refcnt, 1); /* set lgr refcnt to 1 */
	init_rwsem(&lgr->sndbufs_lock);
	init_rwsem(&lgr->rmbs_lock);
	rwlock_init(&lgr->conns_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		INIT_LIST_HEAD(&lgr->sndbufs[i]);
		INIT_LIST_HEAD(&lgr->rmbs[i]);
	}
	lgr->next_link_id = 0;
	smc_lgr_list.num += SMC_LGR_NUM_INCR;
	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
	INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
	lgr->conns_all = RB_ROOT;
	if (ini->is_smcd) {
		/* SMC-D specific settings */
		smcd = ini->ism_dev[ini->ism_selected];
		get_device(smcd->ops->get_dev(smcd));
		lgr->peer_gid.gid =
			ini->ism_peer_gid[ini->ism_selected].gid;
		lgr->peer_gid.gid_ext =
			ini->ism_peer_gid[ini->ism_selected].gid_ext;
		lgr->smcd = ini->ism_dev[ini->ism_selected];
		lgr_list = &ini->ism_dev[ini->ism_selected]->lgr_list;
		lgr_lock = &lgr->smcd->lgr_lock;
		lgr->smc_version = ini->smcd_version;
		lgr->peer_shutdown = 0;
		atomic_inc(&ini->ism_dev[ini->ism_selected]->lgr_cnt);
	} else {
		/* SMC-R specific settings */
		struct smc_ib_device *ibdev;
		int ibport;

		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
		lgr->smc_version = ini->smcr_version;
		memcpy(lgr->peer_systemid, ini->peer_systemid,
		       SMC_SYSTEMID_LEN);
		if (lgr->smc_version == SMC_V2) {
			ibdev = ini->smcrv2.ib_dev_v2;
			ibport = ini->smcrv2.ib_port_v2;
			lgr->saddr = ini->smcrv2.saddr;
			lgr->uses_gateway = ini->smcrv2.uses_gateway;
			memcpy(lgr->nexthop_mac, ini->smcrv2.nexthop_mac,
			       ETH_ALEN);
			lgr->max_conns = ini->max_conns;
			lgr->max_links = ini->max_links;
		} else {
			ibdev = ini->ib_dev;
			ibport = ini->ib_port;
			lgr->max_conns = SMC_CONN_PER_LGR_MAX;
			lgr->max_links = SMC_LINKS_ADD_LNK_MAX;
		}
		memcpy(lgr->pnet_id, ibdev->pnetid[ibport - 1],
		       SMC_MAX_PNETID_LEN);
		rc = smc_wr_alloc_lgr_mem(lgr);
		if (rc)
			goto free_wq;
		smc_llc_lgr_init(lgr, smc);

		link_idx = SMC_SINGLE_LINK;
		lnk = &lgr->lnk[link_idx];
		rc = smcr_link_init(lgr, lnk, link_idx, ini);
		if (rc) {
			smc_wr_free_lgr_mem(lgr);
			goto free_wq;
		}
		lgr->net = smc_ib_net(lnk->smcibdev);
		lgr_list = &smc_lgr_list.list;
		lgr_lock = &smc_lgr_list.lock;
		lgr->buf_type = lgr->net->smc.sysctl_smcr_buf_type;
		atomic_inc(&lgr_cnt);
	}
	smc->conn.lgr = lgr;
	spin_lock_bh(lgr_lock);
	list_add_tail(&lgr->list, lgr_list);
	spin_unlock_bh(lgr_lock);
	return 0;

free_wq:
	destroy_workqueue(lgr->tx_wq);
free_lgr:
	kfree(lgr);
ism_put_vlan:
	if (ini->is_smcd && ini->vlan_id)
		smc_ism_put_vlan(ini->ism_dev[ini->ism_selected], ini->vlan_id);
out:
	if (rc < 0) {
		if (rc == -ENOMEM)
			rc = SMC_CLC_DECL_MEM;
		else
			rc = SMC_CLC_DECL_INTERR;
	}
	return rc;
}

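/* compute free space in the peer's receive buffer (RMBE): buffer length
 * minus the bytes between the peer's consumer cursor and our producer
 * cursor that are still in flight
 */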
static int smc_write_space(struct smc_connection *conn)
{
	int buffer_len = conn->peer_rmbe_size;
	union smc_host_cursor prod;
	union smc_host_cursor cons;
	int space;

	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);
	/* determine rx_buf space */
	space = buffer_len - smc_curs_diff(buffer_len, &cons, &prod);
	return space;
}

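/* after moving a connection to a new link, roll the producer cursor back to
 * the last state confirmed by the peer; if the peer's consumer cursor has
 * already advanced past that point, advance the tx cursors by the difference
 * so that only the still unconfirmed data is transmitted again
 */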
static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend,
			     struct smc_wr_buf *wr_buf)
{
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor cons, fin;
	int rc = 0;
	int diff;

	smc_curs_copy(&conn->tx_curs_sent, &conn->tx_curs_fin, conn);
	smc_curs_copy(&fin, &conn->local_tx_ctrl_fin, conn);
	/* set prod cursor to old state, enforce tx_rdma_writes() */
	smc_curs_copy(&conn->local_tx_ctrl.prod, &fin, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);

	if (smc_curs_comp(conn->peer_rmbe_size, &cons, &fin) < 0) {
		/* cons cursor advanced more than fin, and prod was set to
		 * fin above, so now prod is smaller than cons. Fix that.
		 */
		diff = smc_curs_diff(conn->peer_rmbe_size, &fin, &cons);
		smc_curs_add(conn->sndbuf_desc->len,
			     &conn->tx_curs_sent, diff);
		smc_curs_add(conn->sndbuf_desc->len,
			     &conn->tx_curs_fin, diff);

		smp_mb__before_atomic();
		atomic_add(diff, &conn->sndbuf_space);
		smp_mb__after_atomic();

		smc_curs_add(conn->peer_rmbe_size,
			     &conn->local_tx_ctrl.prod, diff);
		smc_curs_add(conn->peer_rmbe_size,
			     &conn->local_tx_ctrl_fin, diff);
	}
	/* recalculate, value is used by tx_rdma_writes() */
	atomic_set(&smc->conn.peer_rmbe_space, smc_write_space(conn));

	if (smc->sk.sk_state != SMC_INIT &&
	    smc->sk.sk_state != SMC_CLOSED) {
		rc = smcr_cdc_msg_send_validation(conn, pend, wr_buf);
		if (!rc) {
			queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work, 0);
			smc->sk.sk_data_ready(&smc->sk);
		}
	} else {
		smc_wr_tx_put_slot(conn->lnk,
				   (struct smc_wr_tx_pend_priv *)pend);
	}
	return rc;
}

void smc_switch_link_and_count(struct smc_connection *conn,
			       struct smc_link *to_lnk)
{
	atomic_dec(&conn->lnk->conn_cnt);
	/* link_hold in smc_conn_create() */
	smcr_link_put(conn->lnk);
	conn->lnk = to_lnk;
	atomic_inc(&conn->lnk->conn_cnt);
	/* link_put in smc_conn_free() */
	smcr_link_hold(conn->lnk);
}

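/* switch all connections of a link group away from a failing link to another
 * active link; if none is available the whole link group is terminated
 */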
struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
				  struct smc_link *from_lnk, bool is_dev_err)
{
	struct smc_link *to_lnk = NULL;
	struct smc_cdc_tx_pend *pend;
	struct smc_connection *conn;
	struct smc_wr_buf *wr_buf;
	struct smc_sock *smc;
	struct rb_node *node;
	int i, rc = 0;

	/* link is inactive, wake up tx waiters */
	smc_wr_wakeup_tx_wait(from_lnk);

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx)
			continue;
		if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev &&
		    from_lnk->ibport == lgr->lnk[i].ibport) {
			continue;
		}
		to_lnk = &lgr->lnk[i];
		break;
	}
	if (!to_lnk || !smc_wr_tx_link_hold(to_lnk)) {
		smc_lgr_terminate_sched(lgr);
		return NULL;
	}
again:
	read_lock_bh(&lgr->conns_lock);
	for (node = rb_first(&lgr->conns_all); node; node = rb_next(node)) {
		conn = rb_entry(node, struct smc_connection, alert_node);
		if (conn->lnk != from_lnk)
			continue;
		smc = container_of(conn, struct smc_sock, conn);
		/* conn->lnk not yet set in SMC_INIT state */
		if (smc->sk.sk_state == SMC_INIT)
			continue;
		if (smc->sk.sk_state == SMC_CLOSED ||
		    smc->sk.sk_state == SMC_PEERCLOSEWAIT1 ||
		    smc->sk.sk_state == SMC_PEERCLOSEWAIT2 ||
		    smc->sk.sk_state == SMC_APPFINCLOSEWAIT ||
		    smc->sk.sk_state == SMC_APPCLOSEWAIT1 ||
		    smc->sk.sk_state == SMC_APPCLOSEWAIT2 ||
		    smc->sk.sk_state == SMC_PEERFINCLOSEWAIT ||
		    smc->sk.sk_state == SMC_PEERABORTWAIT ||
		    smc->sk.sk_state == SMC_PROCESSABORT) {
			spin_lock_bh(&conn->send_lock);
			smc_switch_link_and_count(conn, to_lnk);
			spin_unlock_bh(&conn->send_lock);
			continue;
		}
		sock_hold(&smc->sk);
		read_unlock_bh(&lgr->conns_lock);
		/* pre-fetch buffer outside of send_lock, might sleep */
		rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend);
		if (rc)
			goto err_out;
		/* avoid race with smcr_tx_sndbuf_nonempty() */
		spin_lock_bh(&conn->send_lock);
		smc_switch_link_and_count(conn, to_lnk);
		rc = smc_switch_cursor(smc, pend, wr_buf);
		spin_unlock_bh(&conn->send_lock);
		sock_put(&smc->sk);
		if (rc)
			goto err_out;
		goto again;
	}
	read_unlock_bh(&lgr->conns_lock);
	smc_wr_tx_link_put(to_lnk);
	return to_lnk;

err_out:
	smcr_link_down_cond_sched(to_lnk);
	smc_wr_tx_link_put(to_lnk);
	return NULL;
}

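/* give an SMC-R buffer back for reuse; an RMB whose rkey was announced to
 * the peer is unregistered there first, and a buffer whose registration
 * failed is freed instead of being kept for reuse
 */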
static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
			   struct smc_link_group *lgr)
{
	struct rw_semaphore *lock;	/* lock buffer list */
	int rc;

	if (is_rmb && buf_desc->is_conf_rkey && !list_empty(&lgr->list)) {
		/* unregister rmb with peer */
		rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
		if (!rc) {
			/* protect against smc_llc_cli_rkey_exchange() */
			down_read(&lgr->llc_conf_mutex);
			smc_llc_do_delete_rkey(lgr, buf_desc);
			buf_desc->is_conf_rkey = false;
			up_read(&lgr->llc_conf_mutex);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
		}
	}

	if (buf_desc->is_reg_err) {
		/* buf registration failed, reuse not possible */
		lock = is_rmb ? &lgr->rmbs_lock :
				&lgr->sndbufs_lock;
		down_write(lock);
		smc_lgr_buf_list_del(lgr, is_rmb, buf_desc);
		up_write(lock);

		smc_buf_free(lgr, is_rmb, buf_desc);
	} else {
		/* memzero_explicit ensures the clearing is not optimized away */
		memzero_explicit(buf_desc->cpu_addr, buf_desc->len);
		WRITE_ONCE(buf_desc->used, 0);
	}
}

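/* detach the peer's DMB from this connection's send buffer descriptor
 * (used when the SMC-D device supports "no copy" DMBs) and free the
 * descriptor
 */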
static void smcd_buf_detach(struct smc_connection *conn)
{
	struct smcd_dev *smcd = conn->lgr->smcd;
	u64 peer_token = conn->peer_token;

	if (!conn->sndbuf_desc)
		return;

	smc_ism_detach_dmb(smcd, peer_token);

	kfree(conn->sndbuf_desc);
	conn->sndbuf_desc = NULL;
}

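/* return a connection's send and receive buffers to the link group: SMC-R
 * RMBs and virtually mapped sndbufs go through smcr_buf_unuse(), all other
 * buffers are cleared in place and marked unused
 */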
static void smc_buf_unuse(struct smc_connection *conn,
			  struct smc_link_group *lgr)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	bool is_smcd = lgr->is_smcd;
	int bufsize;

	if (conn->sndbuf_desc) {
		bufsize = conn->sndbuf_desc->len;
		if (!is_smcd && conn->sndbuf_desc->is_vm) {
			smcr_buf_unuse(conn->sndbuf_desc, false, lgr);
		} else {
			memzero_explicit(conn->sndbuf_desc->cpu_addr, bufsize);
			WRITE_ONCE(conn->sndbuf_desc->used, 0);
		}
		SMC_STAT_RMB_SIZE(smc, is_smcd, false, false, bufsize);
	}
	if (conn->rmb_desc) {
		bufsize = conn->rmb_desc->len;
		if (!is_smcd) {
			smcr_buf_unuse(conn->rmb_desc, true, lgr);
		} else {
			bufsize += sizeof(struct smcd_cdc_msg);
			memzero_explicit(conn->rmb_desc->cpu_addr, bufsize);
			WRITE_ONCE(conn->rmb_desc->used, 0);
		}
		SMC_STAT_RMB_SIZE(smc, is_smcd, true, false, bufsize);
	}
}

/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr || conn->freed)
		/* Connection has never been registered in a
		 * link group, or has already been freed.
		 */
		return;

	conn->freed = 1;
	if (!smc_conn_lgr_valid(conn))
		/* Connection has already unregistered from
		 * link group.
		 */
		goto lgr_put;

	if (lgr->is_smcd) {
		if (!list_empty(&lgr->list))
			smc_ism_unset_conn(conn);
		if (smc_ism_support_dmb_nocopy(lgr->smcd))
			smcd_buf_detach(conn);
		tasklet_kill(&conn->rx_tsklet);
	} else {
		smc_cdc_wait_pend_tx_wr(conn);
		if (current_work() != &conn->abort_work)
			cancel_work_sync(&conn->abort_work);
	}
	if (!list_empty(&lgr->list)) {
		smc_buf_unuse(conn, lgr); /* allow buffer reuse */
		smc_lgr_unregister_conn(conn);
	}

	if (!lgr->conns_num)
		smc_lgr_schedule_free_work(lgr);
lgr_put:
	if (!lgr->is_smcd)
		smcr_link_put(conn->lnk); /* link_hold in smc_conn_create() */
	smc_lgr_put(lgr); /* lgr_hold in smc_conn_create() */
}

/* unregister a link from a buf_desc */
static void smcr_buf_unmap_link(struct smc_buf_desc *buf_desc, bool is_rmb,
				struct smc_link *lnk)
{
	if (is_rmb || buf_desc->is_vm)
		buf_desc->is_reg_mr[lnk->link_idx] = false;
	if (!buf_desc->is_map_ib[lnk->link_idx])
		return;

	if ((is_rmb || buf_desc->is_vm) &&
	    buf_desc->mr[lnk->link_idx]) {
		smc_ib_put_memory_region(buf_desc->mr[lnk->link_idx]);
		buf_desc->mr[lnk->link_idx] = NULL;
	}
	if (is_rmb)
		smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE);
	else
		smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE);

	sg_free_table(&buf_desc->sgt[lnk->link_idx]);
	buf_desc->is_map_ib[lnk->link_idx] = false;
}

/* unmap all buffers of lgr for a deleted link */
static void smcr_buf_unmap_lgr(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_buf_desc *buf_desc, *bf;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		down_write(&lgr->rmbs_lock);
		list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list)
			smcr_buf_unmap_link(buf_desc, true, lnk);
		up_write(&lgr->rmbs_lock);

		down_write(&lgr->sndbufs_lock);
		list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i],
					 list)
			smcr_buf_unmap_link(buf_desc, false, lnk);
		up_write(&lgr->sndbufs_lock);
	}
}

static void smcr_rtoken_clear_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		lgr->rtokens[i][lnk->link_idx].rkey = 0;
		lgr->rtokens[i][lnk->link_idx].dma_addr = 0;
	}
}

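/* final link cleanup once the last reference is gone: release the work
 * request memory and the device reference, reset the link to SMC_LNK_UNUSED
 * and wake up anyone waiting for the device's link count to drop to zero
 */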
static void __smcr_link_clear(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_ib_device *smcibdev;

	smc_wr_free_link_mem(lnk);
	smc_ibdev_cnt_dec(lnk);
	put_device(&lnk->smcibdev->ibdev->dev);
	smcibdev = lnk->smcibdev;
	memset(lnk, 0, sizeof(struct smc_link));
	lnk->state = SMC_LNK_UNUSED;
	if (!atomic_dec_return(&smcibdev->lnk_cnt))
		wake_up(&smcibdev->lnks_deleted);
	smc_lgr_put(lgr); /* lgr_hold in smcr_link_init() */
}

/* must be called under lgr->llc_conf_mutex lock */
void smcr_link_clear(struct smc_link *lnk, bool log)
{
	if (!lnk->lgr || lnk->clearing ||
	    lnk->state == SMC_LNK_UNUSED)
		return;
	lnk->clearing = 1;
	lnk->peer_qpn = 0;
	smc_llc_link_clear(lnk, log);
	smcr_buf_unmap_lgr(lnk);
	smcr_rtoken_clear_link(lnk);
	smc_ib_modify_qp_error(lnk);
	smc_wr_free_link(lnk);
	smc_ib_destroy_queue_pair(lnk);
	smc_ib_dealloc_protection_domain(lnk);
	smcr_link_put(lnk); /* theoretically last link_put */
}

void smcr_link_hold(struct smc_link *lnk)
{
	refcount_inc(&lnk->refcnt);
}

void smcr_link_put(struct smc_link *lnk)
{
	if (refcount_dec_and_test(&lnk->refcnt))
		__smcr_link_clear(lnk);
}

static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
			  struct smc_buf_desc *buf_desc)
{
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
		smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]);

	if (!buf_desc->is_vm && buf_desc->pages)
		__free_pages(buf_desc->pages, buf_desc->order);
	else if (buf_desc->is_vm && buf_desc->cpu_addr)
		vfree(buf_desc->cpu_addr);
	kfree(buf_desc);
}

static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
			  struct smc_buf_desc *buf_desc)
{
	if (is_dmb) {
		/* restore original buf len */
		buf_desc->len += sizeof(struct smcd_cdc_msg);
		smc_ism_unregister_dmb(lgr->smcd, buf_desc);
	} else {
		kfree(buf_desc->cpu_addr);
	}
	kfree(buf_desc);
}

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc)
{
	if (lgr->is_smcd)
		smcd_buf_free(lgr, is_rmb, buf_desc);
	else
		smcr_buf_free(lgr, is_rmb, buf_desc);
}

static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
{
	struct smc_buf_desc *buf_desc, *bf_desc;
	struct list_head *buf_list;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		if (is_rmb)
			buf_list = &lgr->rmbs[i];
		else
			buf_list = &lgr->sndbufs[i];
		list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
					 list) {
			smc_lgr_buf_list_del(lgr, is_rmb, buf_desc);
			smc_buf_free(lgr, is_rmb, buf_desc);
		}
	}
}

static void smc_lgr_free_bufs(struct smc_link_group *lgr)
{
	/* free send buffers */
	__smc_lgr_free_bufs(lgr, false);
	/* free rmbs */
	__smc_lgr_free_bufs(lgr, true);
}

/* won't be freed until no one accesses the lgr anymore */
static void __smc_lgr_free(struct smc_link_group *lgr)
{
	smc_lgr_free_bufs(lgr);
	if (lgr->is_smcd) {
		if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
			wake_up(&lgr->smcd->lgrs_deleted);
	} else {
		smc_wr_free_lgr_mem(lgr);
		if (!atomic_dec_return(&lgr_cnt))
			wake_up(&lgrs_deleted);
	}
	kfree(lgr);
}

/* remove a link group */
static void smc_lgr_free(struct smc_link_group *lgr)
{
	int i;

	if (!lgr->is_smcd) {
		down_write(&lgr->llc_conf_mutex);
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (lgr->lnk[i].state != SMC_LNK_UNUSED)
				smcr_link_clear(&lgr->lnk[i], false);
		}
		up_write(&lgr->llc_conf_mutex);
		smc_llc_lgr_clear(lgr);
	}

	destroy_workqueue(lgr->tx_wq);
	if (lgr->is_smcd) {
		smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
		put_device(lgr->smcd->ops->get_dev(lgr->smcd));
	}
	smc_lgr_put(lgr); /* theoretically last lgr_put */
}

void smc_lgr_hold(struct smc_link_group *lgr)
{
	refcount_inc(&lgr->refcnt);
}

void smc_lgr_put(struct smc_link_group *lgr)
{
	if (refcount_dec_and_test(&lgr->refcnt))
		__smc_lgr_free(lgr);
}

static void smc_sk_wake_ups(struct smc_sock *smc)
{
	smc->sk.sk_write_space(&smc->sk);
	smc->sk.sk_data_ready(&smc->sk);
	smc->sk.sk_state_change(&smc->sk);
}

/* kill a connection */
static void smc_conn_kill(struct smc_connection *conn, bool soft)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

	if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	else
		smc_close_abort(conn);
	conn->killed = 1;
	smc->sk.sk_err = ECONNABORTED;
	smc_sk_wake_ups(smc);
	if (conn->lgr->is_smcd) {
		smc_ism_unset_conn(conn);
		if (smc_ism_support_dmb_nocopy(conn->lgr->smcd))
			smcd_buf_detach(conn);
		if (soft)
			tasklet_kill(&conn->rx_tsklet);
		else
			tasklet_unlock_wait(&conn->rx_tsklet);
	} else {
		smc_cdc_wait_pend_tx_wr(conn);
	}
	smc_lgr_unregister_conn(conn);
	smc_close_active_abort(smc);
}

static void smc_lgr_cleanup(struct smc_link_group *lgr)
{
	if (lgr->is_smcd) {
		smc_ism_signal_shutdown(lgr);
	} else {
		u32 rsn = lgr->llc_termination_rsn;

		if (!rsn)
			rsn = SMC_LLC_DEL_PROG_INIT_TERM;
		smc_llc_send_link_delete_all(lgr, false, rsn);
		smcr_lgr_link_deactivate_all(lgr);
	}
}

/* terminate link group
 * @soft: true if link group shutdown can take its time
 *	  false if immediate link group shutdown is required
 */
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
	struct smc_connection *conn;
	struct smc_sock *smc;
	struct rb_node *node;

	if (lgr->terminating)
		return;	/* lgr already terminating */
	/* cancel free_work sync, will terminate when lgr->freeing is set */
	cancel_delayed_work_sync(&lgr->free_work);
	lgr->terminating = 1;

	/* kill remaining link group connections */
	read_lock_bh(&lgr->conns_lock);
	node = rb_first(&lgr->conns_all);
	while (node) {
		read_unlock_bh(&lgr->conns_lock);
		conn = rb_entry(node, struct smc_connection, alert_node);
		smc = container_of(conn, struct smc_sock, conn);
		sock_hold(&smc->sk); /* sock_put below */
		lock_sock(&smc->sk);
		smc_conn_kill(conn, soft);
		release_sock(&smc->sk);
		sock_put(&smc->sk); /* sock_hold above */
		read_lock_bh(&lgr->conns_lock);
		node = rb_first(&lgr->conns_all);
	}
	read_unlock_bh(&lgr->conns_lock);
	smc_lgr_cleanup(lgr);
	smc_lgr_free(lgr);
}

/* unlink link group and schedule termination */
void smc_lgr_terminate_sched(struct smc_link_group *lgr)
{
	spinlock_t *lgr_lock;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;	/* lgr already terminating */
	}
	list_del_init(&lgr->list);
	lgr->freeing = 1;
	spin_unlock_bh(lgr_lock);
	schedule_work(&lgr->terminate_work);
}

/* Called when peer lgr shutdown (regularly or abnormally) is received */
void smc_smcd_terminate(struct smcd_dev *dev, struct smcd_gid *peer_gid,
			unsigned short vlan)
{
	struct smc_link_group *lgr, *l;
	LIST_HEAD(lgr_free_list);

	/* run common cleanup function and build free list */
	spin_lock_bh(&dev->lgr_lock);
	list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
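		/* note: '&&' binds tighter than '?:' below, so a matching GID
		 * on a non-emulated ISM device matches outright; in all other
		 * cases the extended GID must match as well
		 */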
1599 if ((!peer_gid->gid ||
1600 (lgr->peer_gid.gid == peer_gid->gid &&
1601 !smc_ism_is_emulated(dev) ? 1 :
1602 lgr->peer_gid.gid_ext == peer_gid->gid_ext)) &&
1603 (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
1604 if (peer_gid->gid) /* peer triggered termination */
1605 lgr->peer_shutdown = 1;
1606 list_move(&lgr->list, &lgr_free_list);
1607 lgr->freeing = 1;
1608 }
1609 }
1610 spin_unlock_bh(&dev->lgr_lock);
1611
1612 /* cancel the regular free workers and actually free lgrs */
1613 list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
1614 list_del_init(&lgr->list);
1615 schedule_work(&lgr->terminate_work);
1616 }
1617 }
1618
1619 /* Called when an SMCD device is removed or the smc module is unloaded */
smc_smcd_terminate_all(struct smcd_dev * smcd)1620 void smc_smcd_terminate_all(struct smcd_dev *smcd)
1621 {
1622 struct smc_link_group *lgr, *lg;
1623 LIST_HEAD(lgr_free_list);
1624
1625 spin_lock_bh(&smcd->lgr_lock);
1626 list_splice_init(&smcd->lgr_list, &lgr_free_list);
1627 list_for_each_entry(lgr, &lgr_free_list, list)
1628 lgr->freeing = 1;
1629 spin_unlock_bh(&smcd->lgr_lock);
1630
1631 list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
1632 list_del_init(&lgr->list);
1633 __smc_lgr_terminate(lgr, false);
1634 }
1635
1636 if (atomic_read(&smcd->lgr_cnt))
1637 wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
1638 }
1639
1640 /* Called when an SMCR device is removed or the smc module is unloaded.
1641 * If smcibdev is given, all SMCR link groups using this device are terminated.
1642 * If smcibdev is NULL, all SMCR link groups are terminated.
1643 */
smc_smcr_terminate_all(struct smc_ib_device * smcibdev)1644 void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
1645 {
1646 struct smc_link_group *lgr, *lg;
1647 LIST_HEAD(lgr_free_list);
1648 int i;
1649
1650 spin_lock_bh(&smc_lgr_list.lock);
1651 if (!smcibdev) {
1652 list_splice_init(&smc_lgr_list.list, &lgr_free_list);
1653 list_for_each_entry(lgr, &lgr_free_list, list)
1654 lgr->freeing = 1;
1655 } else {
1656 list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
1657 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1658 if (lgr->lnk[i].smcibdev == smcibdev)
1659 smcr_link_down_cond_sched(&lgr->lnk[i]);
1660 }
1661 }
1662 }
1663 spin_unlock_bh(&smc_lgr_list.lock);
1664
1665 list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
1666 list_del_init(&lgr->list);
1667 smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_OP_INIT_TERM);
1668 __smc_lgr_terminate(lgr, false);
1669 }
1670
1671 if (smcibdev) {
1672 if (atomic_read(&smcibdev->lnk_cnt))
1673 wait_event(smcibdev->lnks_deleted,
1674 !atomic_read(&smcibdev->lnk_cnt));
1675 } else {
1676 if (atomic_read(&lgr_cnt))
1677 wait_event(lgrs_deleted, !atomic_read(&lgr_cnt));
1678 }
1679 }
1680
1681 /* set new lgr type and clear all asymmetric link tagging */
smcr_lgr_set_type(struct smc_link_group * lgr,enum smc_lgr_type new_type)1682 void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type)
1683 {
1684 char *lgr_type = "";
1685 int i;
1686
1687 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
1688 if (smc_link_usable(&lgr->lnk[i]))
1689 lgr->lnk[i].link_is_asym = false;
1690 if (lgr->type == new_type)
1691 return;
1692 lgr->type = new_type;
1693
1694 switch (lgr->type) {
1695 case SMC_LGR_NONE:
1696 lgr_type = "NONE";
1697 break;
1698 case SMC_LGR_SINGLE:
1699 lgr_type = "SINGLE";
1700 break;
1701 case SMC_LGR_SYMMETRIC:
1702 lgr_type = "SYMMETRIC";
1703 break;
1704 case SMC_LGR_ASYMMETRIC_PEER:
1705 lgr_type = "ASYMMETRIC_PEER";
1706 break;
1707 case SMC_LGR_ASYMMETRIC_LOCAL:
1708 lgr_type = "ASYMMETRIC_LOCAL";
1709 break;
1710 }
1711 pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu state changed: "
1712 "%s, pnetid %.16s\n", SMC_LGR_ID_SIZE, &lgr->id,
1713 lgr->net->net_cookie, lgr_type, lgr->pnet_id);
1714 }
1715
1716 /* set new lgr type and tag a link as asymmetric */
smcr_lgr_set_type_asym(struct smc_link_group * lgr,enum smc_lgr_type new_type,int asym_lnk_idx)1717 void smcr_lgr_set_type_asym(struct smc_link_group *lgr,
1718 enum smc_lgr_type new_type, int asym_lnk_idx)
1719 {
1720 smcr_lgr_set_type(lgr, new_type);
1721 lgr->lnk[asym_lnk_idx].link_is_asym = true;
1722 }
1723
1724 /* abort connection, abort_work scheduled from tasklet context */
smc_conn_abort_work(struct work_struct * work)1725 static void smc_conn_abort_work(struct work_struct *work)
1726 {
1727 struct smc_connection *conn = container_of(work,
1728 struct smc_connection,
1729 abort_work);
1730 struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
1731
1732 lock_sock(&smc->sk);
1733 smc_conn_kill(conn, true);
1734 release_sock(&smc->sk);
1735 sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
1736 }

void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
{
        struct smc_link_group *lgr, *n;

        spin_lock_bh(&smc_lgr_list.lock);
        list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
                struct smc_link *link;

                if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
                            SMC_MAX_PNETID_LEN) ||
                    lgr->type == SMC_LGR_SYMMETRIC ||
                    lgr->type == SMC_LGR_ASYMMETRIC_PEER ||
                    !rdma_dev_access_netns(smcibdev->ibdev, lgr->net))
                        continue;

                if (lgr->type == SMC_LGR_SINGLE && lgr->max_links <= 1)
                        continue;

                /* trigger local add link processing */
                link = smc_llc_usable_link(lgr);
                if (link)
                        smc_llc_add_link_local(link);
        }
        spin_unlock_bh(&smc_lgr_list.lock);
}

/* link is down - switch connections to alternate link,
 * must be called under lgr->llc_conf_mutex lock
 */
static void smcr_link_down(struct smc_link *lnk)
{
        struct smc_link_group *lgr = lnk->lgr;
        struct smc_link *to_lnk;
        int del_link_id;

        if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
                return;

        to_lnk = smc_switch_conns(lgr, lnk, true);
        if (!to_lnk) { /* no backup link available */
                smcr_link_clear(lnk, true);
                return;
        }
        smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
        del_link_id = lnk->link_id;

        if (lgr->role == SMC_SERV) {
                /* trigger local delete link processing */
                smc_llc_srv_delete_link_local(to_lnk, del_link_id);
        } else {
                if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
                        /* another llc task is ongoing */
                        up_write(&lgr->llc_conf_mutex);
                        wait_event_timeout(lgr->llc_flow_waiter,
                                           (list_empty(&lgr->list) ||
                                            lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
                                           SMC_LLC_WAIT_TIME);
                        down_write(&lgr->llc_conf_mutex);
                }
                if (!list_empty(&lgr->list)) {
                        smc_llc_send_delete_link(to_lnk, del_link_id,
                                                 SMC_LLC_REQ, true,
                                                 SMC_LLC_DEL_LOST_PATH);
                        smcr_link_clear(lnk, true);
                }
                wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
        }
}

/* must be called under lgr->llc_conf_mutex lock */
void smcr_link_down_cond(struct smc_link *lnk)
{
        if (smc_link_downing(&lnk->state)) {
                trace_smcr_link_down(lnk, __builtin_return_address(0));
                smcr_link_down(lnk);
        }
}

/* will get the lgr->llc_conf_mutex lock */
void smcr_link_down_cond_sched(struct smc_link *lnk)
{
        if (smc_link_downing(&lnk->state)) {
                trace_smcr_link_down(lnk, __builtin_return_address(0));
                smcr_link_hold(lnk); /* smcr_link_put in link_down_wrk */
                if (!schedule_work(&lnk->link_down_wrk))
                        smcr_link_put(lnk);
        }
}
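
/* A minimal sketch of the hold/schedule/put idiom used above, for a caller
 * that must not sleep and therefore cannot take lgr->llc_conf_mutex itself:
 *
 *      if (smc_link_downing(&lnk->state)) {    // first to down the link
 *              smcr_link_hold(lnk);            // keep lnk alive for worker
 *              if (!schedule_work(&lnk->link_down_wrk))
 *                      smcr_link_put(lnk);     // already queued, drop ref
 *      }
 *
 * schedule_work() returns false when the work item is already pending; the
 * earlier scheduler's reference is then still outstanding and the extra one
 * taken here must be dropped immediately. Otherwise the reference is released
 * by smc_link_down_work() once the work has run.
 */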

void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport)
{
        struct smc_link_group *lgr, *n;
        int i;

        list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
                if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
                            SMC_MAX_PNETID_LEN))
                        continue; /* lgr is not affected */
                if (list_empty(&lgr->list))
                        continue;
                for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
                        struct smc_link *lnk = &lgr->lnk[i];

                        if (smc_link_usable(lnk) &&
                            lnk->smcibdev == smcibdev && lnk->ibport == ibport)
                                smcr_link_down_cond_sched(lnk);
                }
        }
}

static void smc_link_down_work(struct work_struct *work)
{
        struct smc_link *link = container_of(work, struct smc_link,
                                             link_down_wrk);
        struct smc_link_group *lgr = link->lgr;

        if (list_empty(&lgr->list))
                goto out;
        wake_up_all(&lgr->llc_msg_waiter);
        down_write(&lgr->llc_conf_mutex);
        smcr_link_down(link);
        up_write(&lgr->llc_conf_mutex);

out:
        smcr_link_put(link); /* smcr_link_hold by schedulers of link_down_work */
}

static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
                                  struct netdev_nested_priv *priv)
{
        unsigned short *vlan_id = (unsigned short *)priv->data;

        if (is_vlan_dev(lower_dev)) {
                *vlan_id = vlan_dev_vlan_id(lower_dev);
                return 1;
        }

        return 0;
}

/* Determine vlan of internal TCP socket. */
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
{
        struct dst_entry *dst = sk_dst_get(clcsock->sk);
        struct netdev_nested_priv priv;
        struct net_device *ndev;
        int rc = 0;

        ini->vlan_id = 0;
        if (!dst) {
                rc = -ENOTCONN;
                goto out;
        }
        if (!dst->dev) {
                rc = -ENODEV;
                goto out_rel;
        }

        ndev = dst->dev;
        if (is_vlan_dev(ndev)) {
                ini->vlan_id = vlan_dev_vlan_id(ndev);
                goto out_rel;
        }

        priv.data = (void *)&ini->vlan_id;
        rtnl_lock();
        netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv);
        rtnl_unlock();

out_rel:
        dst_release(dst);
out:
        return rc;
}
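
/* Usage sketch (hypothetical caller, error handling shortened): the CLC
 * handshake code fills the init info from the internal TCP socket before
 * device selection:
 *
 *      rc = smc_vlan_by_tcpsk(smc->clcsock, ini);
 *      if (rc)
 *              return rc;      // -ENOTCONN or -ENODEV
 *      // ini->vlan_id is now 0 (no VLAN) or the VLAN id of the egress device
 */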

static bool smcr_lgr_match(struct smc_link_group *lgr, u8 smcr_version,
                           u8 peer_systemid[],
                           u8 peer_gid[],
                           u8 peer_mac_v1[],
                           enum smc_lgr_role role, u32 clcqpn,
                           struct net *net)
{
        struct smc_link *lnk;
        int i;

        if (memcmp(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN) ||
            lgr->role != role)
                return false;

        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
                lnk = &lgr->lnk[i];

                if (!smc_link_active(lnk))
                        continue;
                /* use verbs API to check netns, instead of lgr->net */
                if (!rdma_dev_access_netns(lnk->smcibdev->ibdev, net))
                        return false;
                if ((lgr->role == SMC_SERV || lnk->peer_qpn == clcqpn) &&
                    !memcmp(lnk->peer_gid, peer_gid, SMC_GID_SIZE) &&
                    (smcr_version == SMC_V2 ||
                     !memcmp(lnk->peer_mac, peer_mac_v1, ETH_ALEN)))
                        return true;
        }
        return false;
}

static bool smcd_lgr_match(struct smc_link_group *lgr,
                           struct smcd_dev *smcismdev,
                           struct smcd_gid *peer_gid)
{
        if (lgr->peer_gid.gid != peer_gid->gid ||
            lgr->smcd != smcismdev)
                return false;

        if (smc_ism_is_emulated(smcismdev) &&
            lgr->peer_gid.gid_ext != peer_gid->gid_ext)
                return false;

        return true;
}

/* create a new SMC connection (and a new link group if necessary) */
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
{
        struct smc_connection *conn = &smc->conn;
        struct net *net = sock_net(&smc->sk);
        struct list_head *lgr_list;
        struct smc_link_group *lgr;
        enum smc_lgr_role role;
        spinlock_t *lgr_lock;
        int rc = 0;

        lgr_list = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_list :
                                  &smc_lgr_list.list;
        lgr_lock = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_lock :
                                  &smc_lgr_list.lock;
        ini->first_contact_local = 1;
        role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
        if (role == SMC_CLNT && ini->first_contact_peer)
                /* create new link group as well */
                goto create;

        /* determine if an existing link group can be reused */
        spin_lock_bh(lgr_lock);
        list_for_each_entry(lgr, lgr_list, list) {
                write_lock_bh(&lgr->conns_lock);
                if ((ini->is_smcd ?
                     smcd_lgr_match(lgr, ini->ism_dev[ini->ism_selected],
                                    &ini->ism_peer_gid[ini->ism_selected]) :
                     smcr_lgr_match(lgr, ini->smcr_version,
                                    ini->peer_systemid,
                                    ini->peer_gid, ini->peer_mac, role,
                                    ini->ib_clcqpn, net)) &&
                    !lgr->sync_err &&
                    (ini->smcd_version == SMC_V2 ||
                     lgr->vlan_id == ini->vlan_id) &&
                    (role == SMC_CLNT || ini->is_smcd ||
                     (lgr->conns_num < lgr->max_conns &&
                      !bitmap_full(lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX)))) {
                        /* link group found */
                        ini->first_contact_local = 0;
                        conn->lgr = lgr;
                        rc = smc_lgr_register_conn(conn, false);
                        write_unlock_bh(&lgr->conns_lock);
                        if (!rc && delayed_work_pending(&lgr->free_work))
                                cancel_delayed_work(&lgr->free_work);
                        break;
                }
                write_unlock_bh(&lgr->conns_lock);
        }
        spin_unlock_bh(lgr_lock);
        if (rc)
                return rc;

        if (role == SMC_CLNT && !ini->first_contact_peer &&
            ini->first_contact_local) {
                /* Server reuses a link group, but Client wants to start
                 * a new one; send out_of_sync decline, reason: synchronization
                 * error
                 */
                return SMC_CLC_DECL_SYNCERR;
        }

create:
        if (ini->first_contact_local) {
                rc = smc_lgr_create(smc, ini);
                if (rc)
                        goto out;
                lgr = conn->lgr;
                write_lock_bh(&lgr->conns_lock);
                rc = smc_lgr_register_conn(conn, true);
                write_unlock_bh(&lgr->conns_lock);
                if (rc) {
                        smc_lgr_cleanup_early(lgr);
                        goto out;
                }
        }
        smc_lgr_hold(conn->lgr); /* lgr_put in smc_conn_free() */
        if (!conn->lgr->is_smcd)
                smcr_link_hold(conn->lnk); /* link_put in smc_conn_free() */
        conn->freed = 0;
        conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
        conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
        conn->urg_state = SMC_URG_READ;
        init_waitqueue_head(&conn->cdc_pend_tx_wq);
        INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work);
        if (ini->is_smcd) {
                conn->rx_off = sizeof(struct smcd_cdc_msg);
                smcd_cdc_rx_init(conn); /* init tasklet for this conn */
        } else {
                conn->rx_off = 0;
        }
#ifndef KERNEL_HAS_ATOMIC64
        spin_lock_init(&conn->acurs_lock);
#endif

out:
        return rc;
}

#define SMCD_DMBE_SIZES         6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
#define SMCR_RMBE_SIZES         15 /* 0 -> 16KB, 1 -> 32KB, .. 15 -> 512MB */

/* convert the RMB size into the compressed notation (minimum 16K, see
 * SMCD/R_DMBE_SIZES).
 * In contrast to plain ilog2, this rounds towards the next power of 2,
 * so the socket application gets at least its desired sndbuf / rcvbuf size.
 */
static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb)
{
        u8 compressed;

        if (size <= SMC_BUF_MIN_SIZE)
                return 0;

        size = (size - 1) >> 14;  /* convert to 16K multiple */
        compressed = min_t(u8, ilog2(size) + 1,
                           is_smcd ? SMCD_DMBE_SIZES : SMCR_RMBE_SIZES);

#ifdef CONFIG_ARCH_NO_SG_CHAIN
        if (!is_smcd && is_rmb)
                /* RMBs are backed by & limited to max size of scatterlists */
                compressed = min_t(u8, compressed, ilog2((SG_MAX_SINGLE_ALLOC * PAGE_SIZE) >> 14));
#endif

        return compressed;
}

/* convert the RMB size from compressed notation into integer */
int smc_uncompress_bufsize(u8 compressed)
{
        u32 size;

        size = 0x00000001 << (((int)compressed) + 14);
        return (int)size;
}
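
/* Worked example of the compressed size notation, assuming SMC_BUF_MIN_SIZE
 * is 16KB (values follow directly from the two functions above):
 *
 *      smc_compress_bufsize(16384, ...)  -> 0  (the 16KB minimum)
 *      smc_compress_bufsize(16385, ...)  -> 1  (rounded up to 32KB)
 *      smc_compress_bufsize(131072, ...) -> 3  (exactly 128KB)
 *
 *      smc_uncompress_bufsize(0) -> 16384
 *      smc_uncompress_bufsize(3) -> 131072
 *
 * i.e. a compressed value c encodes 2^(c + 14) bytes, and sizes that are not
 * a power-of-2 multiple of 16KB are rounded up, never down.
 */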

/* try to reuse a sndbuf or rmb description slot for a certain
 * buffer size; if not available, return NULL
 */
static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
                                             struct rw_semaphore *lock,
                                             struct list_head *buf_list)
{
        struct smc_buf_desc *buf_slot;

        down_read(lock);
        list_for_each_entry(buf_slot, buf_list, list) {
                if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
                        up_read(lock);
                        return buf_slot;
                }
        }
        up_read(lock);
        return NULL;
}
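
/* The lockless claim above relies on cmpxchg() so that two connections racing
 * for the same free slot cannot both win it; a minimal sketch of the idiom:
 *
 *      if (cmpxchg(&buf_slot->used, 0, 1) == 0)
 *              // 0 -> 1 transition observed: the slot is exclusively ours
 *
 * The read semaphore only protects the list walk; releasing a slot for reuse
 * is simply storing 0 to ->used again (as the buffer-unuse path does).
 */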

/* one of the conditions for announcing a receiver's current window size is
 * that it "results in a minimum increase in the window size of 10% of the
 * receive buffer space" [RFC7609]
 */
static inline int smc_rmb_wnd_update_limit(int rmbe_size)
{
        return max_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
}
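
/* Worked example of the RFC 7609 rule above: for an RMB of 65536 bytes the
 * limit is max(65536 / 10, SOCK_MIN_SNDBUF / 2) = 6553 on common
 * configurations, so a window update is announced only after at least ~10%
 * of the receive buffer was freed; the SOCK_MIN_SNDBUF / 2 floor (on the
 * order of 2KB) only matters for very small RMBs.
 */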

/* map a buf to a link */
static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb,
                             struct smc_link *lnk)
{
        int rc, i, nents, offset, buf_size, size, access_flags;
        struct scatterlist *sg;
        void *buf;

        if (buf_desc->is_map_ib[lnk->link_idx])
                return 0;

        if (buf_desc->is_vm) {
                buf = buf_desc->cpu_addr;
                buf_size = buf_desc->len;
                offset = offset_in_page(buf_desc->cpu_addr);
                nents = PAGE_ALIGN(buf_size + offset) / PAGE_SIZE;
        } else {
                nents = 1;
        }

        rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], nents, GFP_KERNEL);
        if (rc)
                return rc;

        if (buf_desc->is_vm) {
                /* virtually contiguous buffer */
                for_each_sg(buf_desc->sgt[lnk->link_idx].sgl, sg, nents, i) {
                        size = min_t(int, PAGE_SIZE - offset, buf_size);
                        sg_set_page(sg, vmalloc_to_page(buf), size, offset);
                        buf += size / sizeof(*buf);
                        buf_size -= size;
                        offset = 0;
                }
        } else {
                /* physically contiguous buffer */
                sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl,
                           buf_desc->cpu_addr, buf_desc->len);
        }

        /* map sg table to DMA address */
        rc = smc_ib_buf_map_sg(lnk, buf_desc,
                               is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
        /* SMC protocol depends on mapping to one DMA address only */
        if (rc != nents) {
                rc = -EAGAIN;
                goto free_table;
        }

        buf_desc->is_dma_need_sync |=
                smc_ib_is_sg_need_sync(lnk, buf_desc) << lnk->link_idx;

        if (is_rmb || buf_desc->is_vm) {
                /* create a new memory region for the RMB or vzalloced sndbuf */
                access_flags = is_rmb ?
                               IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
                               IB_ACCESS_LOCAL_WRITE;

                rc = smc_ib_get_memory_region(lnk->roce_pd, access_flags,
                                              buf_desc, lnk->link_idx);
                if (rc)
                        goto buf_unmap;
                smc_ib_sync_sg_for_device(lnk, buf_desc,
                                          is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
        }
        buf_desc->is_map_ib[lnk->link_idx] = true;
        return 0;

buf_unmap:
        smc_ib_buf_unmap_sg(lnk, buf_desc,
                            is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
free_table:
        sg_free_table(&buf_desc->sgt[lnk->link_idx]);
        return rc;
}

/* register a new buf on IB device, rmb or vzalloced sndbuf
 * must be called under lgr->llc_conf_mutex lock
 */
int smcr_link_reg_buf(struct smc_link *link, struct smc_buf_desc *buf_desc)
{
        if (list_empty(&link->lgr->list))
                return -ENOLINK;
        if (!buf_desc->is_reg_mr[link->link_idx]) {
                /* register memory region for new buf */
                if (buf_desc->is_vm)
                        buf_desc->mr[link->link_idx]->iova =
                                (uintptr_t)buf_desc->cpu_addr;
                if (smc_wr_reg_send(link, buf_desc->mr[link->link_idx])) {
                        buf_desc->is_reg_err = true;
                        return -EFAULT;
                }
                buf_desc->is_reg_mr[link->link_idx] = true;
        }
        return 0;
}

static int _smcr_buf_map_lgr(struct smc_link *lnk, struct rw_semaphore *lock,
                             struct list_head *lst, bool is_rmb)
{
        struct smc_buf_desc *buf_desc, *bf;
        int rc = 0;

        down_write(lock);
        list_for_each_entry_safe(buf_desc, bf, lst, list) {
                if (!buf_desc->used)
                        continue;
                rc = smcr_buf_map_link(buf_desc, is_rmb, lnk);
                if (rc)
                        goto out;
        }
out:
        up_write(lock);
        return rc;
}

/* map all used buffers of lgr for a new link */
int smcr_buf_map_lgr(struct smc_link *lnk)
{
        struct smc_link_group *lgr = lnk->lgr;
        int i, rc = 0;

        for (i = 0; i < SMC_RMBE_SIZES; i++) {
                rc = _smcr_buf_map_lgr(lnk, &lgr->rmbs_lock,
                                       &lgr->rmbs[i], true);
                if (rc)
                        return rc;
                rc = _smcr_buf_map_lgr(lnk, &lgr->sndbufs_lock,
                                       &lgr->sndbufs[i], false);
                if (rc)
                        return rc;
        }
        return 0;
}

/* register all used buffers of lgr for a new link,
 * must be called under lgr->llc_conf_mutex lock
 */
int smcr_buf_reg_lgr(struct smc_link *lnk)
{
        struct smc_link_group *lgr = lnk->lgr;
        struct smc_buf_desc *buf_desc, *bf;
        int i, rc = 0;

        /* reg all RMBs for a new link */
        down_write(&lgr->rmbs_lock);
        for (i = 0; i < SMC_RMBE_SIZES; i++) {
                list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) {
                        if (!buf_desc->used)
                                continue;
                        rc = smcr_link_reg_buf(lnk, buf_desc);
                        if (rc) {
                                up_write(&lgr->rmbs_lock);
                                return rc;
                        }
                }
        }
        up_write(&lgr->rmbs_lock);

        if (lgr->buf_type == SMCR_PHYS_CONT_BUFS)
                return rc;

        /* reg all vzalloced sndbufs for a new link */
        down_write(&lgr->sndbufs_lock);
        for (i = 0; i < SMC_RMBE_SIZES; i++) {
                list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i], list) {
                        if (!buf_desc->used || !buf_desc->is_vm)
                                continue;
                        rc = smcr_link_reg_buf(lnk, buf_desc);
                        if (rc) {
                                up_write(&lgr->sndbufs_lock);
                                return rc;
                        }
                }
        }
        up_write(&lgr->sndbufs_lock);
        return rc;
}

static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
                                                int bufsize)
{
        struct smc_buf_desc *buf_desc;

        /* try to alloc a new buffer */
        buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
        if (!buf_desc)
                return ERR_PTR(-ENOMEM);

        switch (lgr->buf_type) {
        case SMCR_PHYS_CONT_BUFS:
        case SMCR_MIXED_BUFS:
                buf_desc->order = get_order(bufsize);
                buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
                                              __GFP_NOMEMALLOC | __GFP_COMP |
                                              __GFP_NORETRY | __GFP_ZERO,
                                              buf_desc->order);
                if (buf_desc->pages) {
                        buf_desc->cpu_addr =
                                (void *)page_address(buf_desc->pages);
                        buf_desc->len = bufsize;
                        buf_desc->is_vm = false;
                        break;
                }
                if (lgr->buf_type == SMCR_PHYS_CONT_BUFS)
                        goto out;
                fallthrough;    // try virtually contiguous buf
        case SMCR_VIRT_CONT_BUFS:
                buf_desc->order = get_order(bufsize);
                buf_desc->cpu_addr = vzalloc(PAGE_SIZE << buf_desc->order);
                if (!buf_desc->cpu_addr)
                        goto out;
                buf_desc->pages = NULL;
                buf_desc->len = bufsize;
                buf_desc->is_vm = true;
                break;
        }
        return buf_desc;

out:
        kfree(buf_desc);
        return ERR_PTR(-EAGAIN);
}
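
/* The fallback order implemented by the switch above, summarized:
 *
 *      SMCR_PHYS_CONT_BUFS:    alloc_pages() only; on failure return -EAGAIN
 *      SMCR_MIXED_BUFS:        alloc_pages() first, fall through to vzalloc()
 *      SMCR_VIRT_CONT_BUFS:    vzalloc() only
 *
 * -EAGAIN tells __smc_buf_create() below to retry with the next smaller
 * compressed buffer size instead of failing the connection outright.
 */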

/* map buf_desc on all usable links,
 * unused buffers stay mapped as long as the link is up
 */
static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
                                     struct smc_buf_desc *buf_desc, bool is_rmb)
{
        int i, rc = 0, cnt = 0;

        /* protect against parallel link reconfiguration */
        down_read(&lgr->llc_conf_mutex);
        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
                struct smc_link *lnk = &lgr->lnk[i];

                if (!smc_link_usable(lnk))
                        continue;
                if (smcr_buf_map_link(buf_desc, is_rmb, lnk)) {
                        rc = -ENOMEM;
                        goto out;
                }
                cnt++;
        }
out:
        up_read(&lgr->llc_conf_mutex);
        if (!rc && !cnt)
                rc = -EINVAL;
        return rc;
}

static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
                                                bool is_dmb, int bufsize)
{
        struct smc_buf_desc *buf_desc;
        int rc;

        /* try to alloc a new DMB */
        buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
        if (!buf_desc)
                return ERR_PTR(-ENOMEM);
        if (is_dmb) {
                rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
                if (rc) {
                        kfree(buf_desc);
                        if (rc == -ENOMEM)
                                return ERR_PTR(-EAGAIN);
                        if (rc == -ENOSPC)
                                return ERR_PTR(-ENOSPC);
                        return ERR_PTR(-EIO);
                }
                buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
                /* CDC header stored in buf. So, pretend it was smaller */
                buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
        } else {
                buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
                                             __GFP_NOWARN | __GFP_NORETRY |
                                             __GFP_NOMEMALLOC);
                if (!buf_desc->cpu_addr) {
                        kfree(buf_desc);
                        return ERR_PTR(-EAGAIN);
                }
                buf_desc->len = bufsize;
        }
        return buf_desc;
}

static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
{
        struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
        struct smc_connection *conn = &smc->conn;
        struct smc_link_group *lgr = conn->lgr;
        struct list_head *buf_list;
        int bufsize, bufsize_comp;
        struct rw_semaphore *lock;      /* lock buffer list */
        bool is_dgraded = false;

        if (is_rmb)
                /* use socket recv buffer size (w/o overhead) as start value */
                bufsize = smc->sk.sk_rcvbuf / 2;
        else
                /* use socket send buffer size (w/o overhead) as start value */
                bufsize = smc->sk.sk_sndbuf / 2;

        for (bufsize_comp = smc_compress_bufsize(bufsize, is_smcd, is_rmb);
             bufsize_comp >= 0; bufsize_comp--) {
                if (is_rmb) {
                        lock = &lgr->rmbs_lock;
                        buf_list = &lgr->rmbs[bufsize_comp];
                } else {
                        lock = &lgr->sndbufs_lock;
                        buf_list = &lgr->sndbufs[bufsize_comp];
                }
                bufsize = smc_uncompress_bufsize(bufsize_comp);

                /* check for reusable slot in the link group */
                buf_desc = smc_buf_get_slot(bufsize_comp, lock, buf_list);
                if (buf_desc) {
                        buf_desc->is_dma_need_sync = 0;
                        SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, true, bufsize);
                        SMC_STAT_BUF_REUSE(smc, is_smcd, is_rmb);
                        break; /* found reusable slot */
                }

                if (is_smcd)
                        buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
                else
                        buf_desc = smcr_new_buf_create(lgr, bufsize);

                if (PTR_ERR(buf_desc) == -ENOMEM)
                        break;
                if (IS_ERR(buf_desc)) {
                        if (!is_dgraded) {
                                is_dgraded = true;
                                SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rmb);
                        }
                        continue;
                }

                SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rmb);
                SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, true, bufsize);
                buf_desc->used = 1;
                down_write(lock);
                smc_lgr_buf_list_add(lgr, is_rmb, buf_list, buf_desc);
                up_write(lock);
                break; /* found */
        }

        if (IS_ERR(buf_desc))
                return PTR_ERR(buf_desc);

        if (!is_smcd) {
                if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) {
                        smcr_buf_unuse(buf_desc, is_rmb, lgr);
                        return -ENOMEM;
                }
        }

        if (is_rmb) {
                conn->rmb_desc = buf_desc;
                conn->rmbe_size_comp = bufsize_comp;
                smc->sk.sk_rcvbuf = bufsize * 2;
                atomic_set(&conn->bytes_to_rcv, 0);
                conn->rmbe_update_limit =
                        smc_rmb_wnd_update_limit(buf_desc->len);
                if (is_smcd)
                        smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
        } else {
                conn->sndbuf_desc = buf_desc;
                smc->sk.sk_sndbuf = bufsize * 2;
                atomic_set(&conn->sndbuf_space, bufsize);
        }
        return 0;
}
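
/* Worked example of the sizing loop above, assuming sk_rcvbuf is 262144:
 * the start value is 262144 / 2 = 131072, i.e. compressed size 3 (128KB).
 * If that allocation fails with -EAGAIN, the loop retries with compressed
 * size 2 (64KB), then 1 (32KB), down to 0 (16KB); the first success wins and
 * SMC_STAT_RMB_DOWNGRADED() is counted once. On success sk_rcvbuf is set to
 * 2 * bufsize, so the advertised socket buffer matches the RMB that was
 * actually allocated rather than the one originally requested.
 */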

void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
        if (!conn->sndbuf_desc->is_dma_need_sync)
                return;
        if (!smc_conn_lgr_valid(conn) || conn->lgr->is_smcd ||
            !smc_link_active(conn->lnk))
                return;
        smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
{
        int i;

        if (!conn->rmb_desc->is_dma_need_sync)
                return;
        if (!smc_conn_lgr_valid(conn) || conn->lgr->is_smcd)
                return;
        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
                if (!smc_link_active(&conn->lgr->lnk[i]))
                        continue;
                smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc,
                                       DMA_FROM_DEVICE);
        }
}

/* create the send and receive buffer for an SMC socket;
 * receive buffers are called RMBs.
 * (Even though the SMC protocol allows more than one RMB-element per RMB,
 * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
 * extra RMB for every connection in a link group.)
 */
int smc_buf_create(struct smc_sock *smc, bool is_smcd)
{
        int rc;

        /* create send buffer */
        if (is_smcd &&
            smc_ism_support_dmb_nocopy(smc->conn.lgr->smcd))
                goto create_rmb;

        rc = __smc_buf_create(smc, is_smcd, false);
        if (rc)
                return rc;

create_rmb:
        /* create rmb */
        rc = __smc_buf_create(smc, is_smcd, true);
        if (rc && smc->conn.sndbuf_desc) {
                down_write(&smc->conn.lgr->sndbufs_lock);
                smc_lgr_buf_list_del(smc->conn.lgr, false,
                                     smc->conn.sndbuf_desc);
                up_write(&smc->conn.lgr->sndbufs_lock);
                smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
                smc->conn.sndbuf_desc = NULL;
        }
        return rc;
}
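
/* Call-order sketch (illustrative; the real callers are the connection setup
 * paths in af_smc.c): buffers are created once per connection, after the
 * link group exists:
 *
 *      rc = smc_conn_create(smc, ini);
 *      if (!rc)
 *              rc = smc_buf_create(smc, ini->is_smcd);
 *
 * For SMC-D devices that support mapping the peer DMB directly
 * (smc_ism_support_dmb_nocopy()), no local sndbuf is allocated here;
 * smcd_buf_attach() below attaches the peer buffer instead.
 */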

int smcd_buf_attach(struct smc_sock *smc)
{
        struct smc_connection *conn = &smc->conn;
        struct smcd_dev *smcd = conn->lgr->smcd;
        u64 peer_token = conn->peer_token;
        struct smc_buf_desc *buf_desc;
        int rc;

        buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
        if (!buf_desc)
                return -ENOMEM;

        /* The ghost sndbuf_desc describes the same memory region as the
         * peer RMB. Its lifecycle is consistent with the connection's
         * and it will be freed with the connection instead of the
         * link group.
         */
        rc = smc_ism_attach_dmb(smcd, peer_token, buf_desc);
        if (rc)
                goto free;

        smc->sk.sk_sndbuf = buf_desc->len;
        buf_desc->cpu_addr =
                (u8 *)buf_desc->cpu_addr + sizeof(struct smcd_cdc_msg);
        buf_desc->len -= sizeof(struct smcd_cdc_msg);
        conn->sndbuf_desc = buf_desc;
        conn->sndbuf_desc->used = 1;
        atomic_set(&conn->sndbuf_space, conn->sndbuf_desc->len);
        return 0;

free:
        kfree(buf_desc);
        return rc;
}

static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
        int i;

        for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
                if (!test_and_set_bit(i, lgr->rtokens_used_mask))
                        return i;
        }
        return -ENOSPC;
}

static int smc_rtoken_find_by_link(struct smc_link_group *lgr, int lnk_idx,
                                   u32 rkey)
{
        int i;

        for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
                if (test_bit(i, lgr->rtokens_used_mask) &&
                    lgr->rtokens[i][lnk_idx].rkey == rkey)
                        return i;
        }
        return -ENOENT;
}

/* set rtoken for a new link to an existing rmb */
void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
                    __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey)
{
        int rtok_idx;

        rtok_idx = smc_rtoken_find_by_link(lgr, link_idx, ntohl(nw_rkey_known));
        if (rtok_idx == -ENOENT)
                return;
        lgr->rtokens[rtok_idx][link_idx_new].rkey = ntohl(nw_rkey);
        lgr->rtokens[rtok_idx][link_idx_new].dma_addr = be64_to_cpu(nw_vaddr);
}

/* set rtoken for a new link whose link_id is given */
void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
                     __be64 nw_vaddr, __be32 nw_rkey)
{
        u64 dma_addr = be64_to_cpu(nw_vaddr);
        u32 rkey = ntohl(nw_rkey);
        bool found = false;
        int link_idx;

        for (link_idx = 0; link_idx < SMC_LINKS_PER_LGR_MAX; link_idx++) {
                if (lgr->lnk[link_idx].link_id == link_id) {
                        found = true;
                        break;
                }
        }
        if (!found)
                return;
        lgr->rtokens[rtok_idx][link_idx].rkey = rkey;
        lgr->rtokens[rtok_idx][link_idx].dma_addr = dma_addr;
}

/* add a new rtoken from peer */
int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey)
{
        struct smc_link_group *lgr = smc_get_lgr(lnk);
        u64 dma_addr = be64_to_cpu(nw_vaddr);
        u32 rkey = ntohl(nw_rkey);
        int i;

        for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
                if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
                    lgr->rtokens[i][lnk->link_idx].dma_addr == dma_addr &&
                    test_bit(i, lgr->rtokens_used_mask)) {
                        /* already in list */
                        return i;
                }
        }
        i = smc_rmb_reserve_rtoken_idx(lgr);
        if (i < 0)
                return i;
        lgr->rtokens[i][lnk->link_idx].rkey = rkey;
        lgr->rtokens[i][lnk->link_idx].dma_addr = dma_addr;
        return i;
}
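
/* The rtokens array is a small table indexed by [rtoken_idx][link_idx]; a
 * sketch of its use when a peer CLC confirm carries vaddr/rkey in network
 * byte order (see smc_rmb_rtoken_handling() below):
 *
 *      conn->rtoken_idx = smc_rtoken_add(lnk, nw_vaddr, nw_rkey);
 *
 * Adding the same rkey/dma_addr pair twice returns the existing index, so
 * repeated confirms are harmless; a full table makes
 * smc_rmb_reserve_rtoken_idx() fail with -ENOSPC.
 */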

/* delete an rtoken from all links */
int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey)
{
        struct smc_link_group *lgr = smc_get_lgr(lnk);
        u32 rkey = ntohl(nw_rkey);
        int i, j;

        for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
                if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
                    test_bit(i, lgr->rtokens_used_mask)) {
                        for (j = 0; j < SMC_LINKS_PER_LGR_MAX; j++) {
                                lgr->rtokens[i][j].rkey = 0;
                                lgr->rtokens[i][j].dma_addr = 0;
                        }
                        clear_bit(i, lgr->rtokens_used_mask);
                        return 0;
                }
        }
        return -ENOENT;
}

/* save rkey and dma_addr received from peer during clc handshake */
int smc_rmb_rtoken_handling(struct smc_connection *conn,
                            struct smc_link *lnk,
                            struct smc_clc_msg_accept_confirm *clc)
{
        conn->rtoken_idx = smc_rtoken_add(lnk, clc->r0.rmb_dma_addr,
                                          clc->r0.rmb_rkey);
        if (conn->rtoken_idx < 0)
                return conn->rtoken_idx;
        return 0;
}

static void smc_core_going_away(void)
{
        struct smc_ib_device *smcibdev;
        struct smcd_dev *smcd;

        mutex_lock(&smc_ib_devices.mutex);
        list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
                int i;

                for (i = 0; i < SMC_MAX_PORTS; i++)
                        set_bit(i, smcibdev->ports_going_away);
        }
        mutex_unlock(&smc_ib_devices.mutex);

        mutex_lock(&smcd_dev_list.mutex);
        list_for_each_entry(smcd, &smcd_dev_list.list, list) {
                smcd->going_away = 1;
        }
        mutex_unlock(&smcd_dev_list.mutex);
}

/* Clean up all SMC link groups */
static void smc_lgrs_shutdown(void)
{
        struct smcd_dev *smcd;

        smc_core_going_away();

        smc_smcr_terminate_all(NULL);

        mutex_lock(&smcd_dev_list.mutex);
        list_for_each_entry(smcd, &smcd_dev_list.list, list)
                smc_smcd_terminate_all(smcd);
        mutex_unlock(&smcd_dev_list.mutex);
}

static int smc_core_reboot_event(struct notifier_block *this,
                                 unsigned long event, void *ptr)
{
        smc_lgrs_shutdown();
        smc_ib_unregister_client();
        smc_ism_exit();
        return 0;
}

static struct notifier_block smc_reboot_notifier = {
        .notifier_call  = smc_core_reboot_event,
};

int __init smc_core_init(void)
{
        return register_reboot_notifier(&smc_reboot_notifier);
}

/* Called (from smc_exit) when module is removed */
void smc_core_exit(void)
{
        unregister_reboot_notifier(&smc_reboot_notifier);
        smc_lgrs_shutdown();
}
2768