xref: /linux/net/smc/smc_llc.c (revision 6c363eafc4d637ac4bd83d4a7dd06dd3cfbe7c5f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Shared Memory Communications over RDMA (SMC-R) and RoCE
4  *
5  *  Link Layer Control (LLC)
6  *
7  *  Copyright IBM Corp. 2016
8  *
9  *  Author(s):  Klaus Wacker <Klaus.Wacker@de.ibm.com>
10  *              Ursula Braun <ubraun@linux.vnet.ibm.com>
11  */
12 
13 #include <net/tcp.h>
14 #include <rdma/ib_verbs.h>
15 
16 #include "smc.h"
17 #include "smc_core.h"
18 #include "smc_clc.h"
19 #include "smc_llc.h"
20 #include "smc_pnet.h"
21 
22 #define SMC_LLC_DATA_LEN		40
23 
24 struct smc_llc_hdr {
25 	struct smc_wr_rx_hdr common;
26 	u8 length;	/* 44 = total length of the LLC message */
27 #if defined(__BIG_ENDIAN_BITFIELD)
28 	u8 reserved:4,
29 	   add_link_rej_rsn:4;
30 #elif defined(__LITTLE_ENDIAN_BITFIELD)
31 	u8 add_link_rej_rsn:4,
32 	   reserved:4;
33 #endif
34 	u8 flags;
35 };
36 
37 #define SMC_LLC_FLAG_NO_RMBE_EYEC	0x03
38 
39 struct smc_llc_msg_confirm_link {	/* type 0x01 */
40 	struct smc_llc_hdr hd;
41 	u8 sender_mac[ETH_ALEN];
42 	u8 sender_gid[SMC_GID_SIZE];
43 	u8 sender_qp_num[3];
44 	u8 link_num;
45 	u8 link_uid[SMC_LGR_ID_SIZE];
46 	u8 max_links;
47 	u8 reserved[9];
48 };
49 
50 #define SMC_LLC_FLAG_ADD_LNK_REJ	0x40
51 #define SMC_LLC_REJ_RSN_NO_ALT_PATH	1
52 
53 #define SMC_LLC_ADD_LNK_MAX_LINKS	2
54 
55 struct smc_llc_msg_add_link {		/* type 0x02 */
56 	struct smc_llc_hdr hd;
57 	u8 sender_mac[ETH_ALEN];
58 	u8 reserved2[2];
59 	u8 sender_gid[SMC_GID_SIZE];
60 	u8 sender_qp_num[3];
61 	u8 link_num;
62 #if defined(__BIG_ENDIAN_BITFIELD)
63 	u8 reserved3 : 4,
64 	   qp_mtu   : 4;
65 #elif defined(__LITTLE_ENDIAN_BITFIELD)
66 	u8 qp_mtu   : 4,
67 	   reserved3 : 4;
68 #endif
69 	u8 initial_psn[3];
70 	u8 reserved[8];
71 };
72 
73 struct smc_llc_msg_add_link_cont_rt {
74 	__be32 rmb_key;
75 	__be32 rmb_key_new;
76 	__be64 rmb_vaddr_new;
77 };
78 
79 #define SMC_LLC_RKEYS_PER_CONT_MSG	2
80 
81 struct smc_llc_msg_add_link_cont {	/* type 0x03 */
82 	struct smc_llc_hdr hd;
83 	u8 link_num;
84 	u8 num_rkeys;
85 	u8 reserved2[2];
86 	struct smc_llc_msg_add_link_cont_rt rt[SMC_LLC_RKEYS_PER_CONT_MSG];
87 	u8 reserved[4];
88 } __packed;			/* format defined in RFC7609 */
89 
90 #define SMC_LLC_FLAG_DEL_LINK_ALL	0x40
91 #define SMC_LLC_FLAG_DEL_LINK_ORDERLY	0x20
92 
93 struct smc_llc_msg_del_link {		/* type 0x04 */
94 	struct smc_llc_hdr hd;
95 	u8 link_num;
96 	__be32 reason;
97 	u8 reserved[35];
98 } __packed;			/* format defined in RFC7609 */
99 
100 struct smc_llc_msg_test_link {		/* type 0x07 */
101 	struct smc_llc_hdr hd;
102 	u8 user_data[16];
103 	u8 reserved[24];
104 };
105 
106 struct smc_rmb_rtoken {
107 	union {
108 		u8 num_rkeys;	/* first rtoken byte of CONFIRM LINK msg */
109 				/* is actually the num of rtokens, first */
110 				/* rtoken is always for the current link */
111 		u8 link_id;	/* link id of the rtoken */
112 	};
113 	__be32 rmb_key;
114 	__be64 rmb_vaddr;
115 } __packed;			/* format defined in RFC7609 */
116 
117 #define SMC_LLC_RKEYS_PER_MSG	3
118 
119 struct smc_llc_msg_confirm_rkey {	/* type 0x06 */
120 	struct smc_llc_hdr hd;
121 	struct smc_rmb_rtoken rtoken[SMC_LLC_RKEYS_PER_MSG];
122 	u8 reserved;
123 };
124 
125 #define SMC_LLC_DEL_RKEY_MAX	8
126 #define SMC_LLC_FLAG_RKEY_RETRY	0x10
127 #define SMC_LLC_FLAG_RKEY_NEG	0x20
128 
129 struct smc_llc_msg_delete_rkey {	/* type 0x09 */
130 	struct smc_llc_hdr hd;
131 	u8 num_rkeys;
132 	u8 err_mask;
133 	u8 reserved[2];
134 	__be32 rkey[8];
135 	u8 reserved2[4];
136 };
137 
138 union smc_llc_msg {
139 	struct smc_llc_msg_confirm_link confirm_link;
140 	struct smc_llc_msg_add_link add_link;
141 	struct smc_llc_msg_add_link_cont add_link_cont;
142 	struct smc_llc_msg_del_link delete_link;
143 
144 	struct smc_llc_msg_confirm_rkey confirm_rkey;
145 	struct smc_llc_msg_delete_rkey delete_rkey;
146 
147 	struct smc_llc_msg_test_link test_link;
148 	struct {
149 		struct smc_llc_hdr hdr;
150 		u8 data[SMC_LLC_DATA_LEN];
151 	} raw;
152 };
153 
154 #define SMC_LLC_FLAG_RESP		0x80
155 
156 struct smc_llc_qentry {
157 	struct list_head list;
158 	struct smc_link *link;
159 	union smc_llc_msg msg;
160 };
161 
162 static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc);
163 
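/* return the current qentry of the flow and detach it from the flow */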
164 struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow)
165 {
166 	struct smc_llc_qentry *qentry = flow->qentry;
167 
168 	flow->qentry = NULL;
169 	return qentry;
170 }
171 
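/* detach and free the current qentry of the flow, if there is one */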
172 void smc_llc_flow_qentry_del(struct smc_llc_flow *flow)
173 {
174 	struct smc_llc_qentry *qentry;
175 
176 	if (flow->qentry) {
177 		qentry = flow->qentry;
178 		flow->qentry = NULL;
179 		kfree(qentry);
180 	}
181 }
182 
183 static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow,
184 					   struct smc_llc_qentry *qentry)
185 {
186 	flow->qentry = qentry;
187 }
188 
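/* handle an llc request received while another flow is already active:
 * an ADD_LINK or DELETE_LINK request may be saved as delayed event,
 * all other parallel requests are dropped
 */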
189 static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type,
190 				  struct smc_llc_qentry *qentry)
191 {
192 	u8 msg_type = qentry->msg.raw.hdr.common.type;
193 
194 	if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) &&
195 	    flow_type != msg_type && !lgr->delayed_event) {
196 		lgr->delayed_event = qentry;
197 		return;
198 	}
199 	/* drop parallel or already-in-progress llc requests */
200 	if (flow_type != msg_type)
201 		pr_warn_once("smc: SMC-R lg %*phN dropped parallel "
202 			     "LLC msg: msg %d flow %d role %d\n",
203 			     SMC_LGR_ID_SIZE, &lgr->id,
204 			     qentry->msg.raw.hdr.common.type,
205 			     flow_type, lgr->role);
206 	kfree(qentry);
207 }
208 
209 /* try to start a new llc flow, initiated by an incoming llc msg */
210 static bool smc_llc_flow_start(struct smc_llc_flow *flow,
211 			       struct smc_llc_qentry *qentry)
212 {
213 	struct smc_link_group *lgr = qentry->link->lgr;
214 
215 	spin_lock_bh(&lgr->llc_flow_lock);
216 	if (flow->type) {
217 		/* a flow is already active */
218 		smc_llc_flow_parallel(lgr, flow->type, qentry);
219 		spin_unlock_bh(&lgr->llc_flow_lock);
220 		return false;
221 	}
222 	switch (qentry->msg.raw.hdr.common.type) {
223 	case SMC_LLC_ADD_LINK:
224 		flow->type = SMC_LLC_FLOW_ADD_LINK;
225 		break;
226 	case SMC_LLC_DELETE_LINK:
227 		flow->type = SMC_LLC_FLOW_DEL_LINK;
228 		break;
229 	case SMC_LLC_CONFIRM_RKEY:
230 	case SMC_LLC_DELETE_RKEY:
231 		flow->type = SMC_LLC_FLOW_RKEY;
232 		break;
233 	default:
234 		flow->type = SMC_LLC_FLOW_NONE;
235 	}
236 	smc_llc_flow_qentry_set(flow, qentry);
237 	spin_unlock_bh(&lgr->llc_flow_lock);
238 	return true;
239 }
240 
241 /* start a new local llc flow, wait till the current flow has finished */
242 int smc_llc_flow_initiate(struct smc_link_group *lgr,
243 			  enum smc_llc_flowtype type)
244 {
245 	enum smc_llc_flowtype allowed_remote = SMC_LLC_FLOW_NONE;
246 	int rc;
247 
248 	/* all flows except confirm_rkey and delete_rkey are exclusive;
249 	 * confirm/delete rkey flows can run concurrently (local and remote)
250 	 */
251 	if (type == SMC_LLC_FLOW_RKEY)
252 		allowed_remote = SMC_LLC_FLOW_RKEY;
253 again:
254 	if (list_empty(&lgr->list))
255 		return -ENODEV;
256 	spin_lock_bh(&lgr->llc_flow_lock);
257 	if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
258 	    (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
259 	     lgr->llc_flow_rmt.type == allowed_remote)) {
260 		lgr->llc_flow_lcl.type = type;
261 		spin_unlock_bh(&lgr->llc_flow_lock);
262 		return 0;
263 	}
264 	spin_unlock_bh(&lgr->llc_flow_lock);
265 	rc = wait_event_timeout(lgr->llc_flow_waiter, (list_empty(&lgr->list) ||
266 				(lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
267 				 (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
268 				  lgr->llc_flow_rmt.type == allowed_remote))),
269 				SMC_LLC_WAIT_TIME * 10);
270 	if (!rc)
271 		return -ETIMEDOUT;
272 	goto again;
273 }
274 
275 /* finish the current llc flow */
276 void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow)
277 {
278 	spin_lock_bh(&lgr->llc_flow_lock);
279 	memset(flow, 0, sizeof(*flow));
280 	flow->type = SMC_LLC_FLOW_NONE;
281 	spin_unlock_bh(&lgr->llc_flow_lock);
282 	if (!list_empty(&lgr->list) && lgr->delayed_event &&
283 	    flow == &lgr->llc_flow_lcl)
284 		schedule_work(&lgr->llc_event_work);
285 	else
286 		wake_up(&lgr->llc_flow_waiter);
287 }
288 
289 /* lnk is optional and used for early wakeup when the link goes down, useful in
290  * cases where we wait for a response on the link after we sent a request
291  */
292 struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
293 				    struct smc_link *lnk,
294 				    int time_out, u8 exp_msg)
295 {
296 	struct smc_llc_flow *flow = &lgr->llc_flow_lcl;
297 	u8 rcv_msg;
298 
299 	wait_event_timeout(lgr->llc_msg_waiter,
300 			   (flow->qentry ||
301 			    (lnk && !smc_link_usable(lnk)) ||
302 			    list_empty(&lgr->list)),
303 			   time_out);
304 	if (!flow->qentry ||
305 	    (lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) {
306 		smc_llc_flow_qentry_del(flow);
307 		goto out;
308 	}
309 	rcv_msg = flow->qentry->msg.raw.hdr.common.type;
310 	if (exp_msg && rcv_msg != exp_msg) {
311 		if (exp_msg == SMC_LLC_ADD_LINK &&
312 		    rcv_msg == SMC_LLC_DELETE_LINK) {
313 			/* flow_start will delay the unexpected msg */
314 			smc_llc_flow_start(&lgr->llc_flow_lcl,
315 					   smc_llc_flow_qentry_clr(flow));
316 			return NULL;
317 		}
318 		pr_warn_once("smc: SMC-R lg %*phN dropped unexpected LLC msg: "
319 			     "msg %d exp %d flow %d role %d flags %x\n",
320 			     SMC_LGR_ID_SIZE, &lgr->id, rcv_msg, exp_msg,
321 			     flow->type, lgr->role,
322 			     flow->qentry->msg.raw.hdr.flags);
323 		smc_llc_flow_qentry_del(flow);
324 	}
325 out:
326 	return flow->qentry;
327 }
328 
329 /********************************** send *************************************/
330 
331 struct smc_llc_tx_pend {
332 };
333 
334 /* handler for send/transmission completion of an LLC msg */
335 static void smc_llc_tx_handler(struct smc_wr_tx_pend_priv *pend,
336 			       struct smc_link *link,
337 			       enum ib_wc_status wc_status)
338 {
339 	/* future work: handle wc_status error for recovery and failover */
340 }
341 
342 /**
343  * smc_llc_add_pending_send() - add LLC control message to pending WQE transmits
344  * @link: Pointer to SMC link used for sending LLC control message.
345  * @wr_buf: Out variable returning pointer to work request payload buffer.
346  * @pend: Out variable returning pointer to private pending WR tracking.
347  *	  It's the context the transmit complete handler will get.
348  *
349  * Reserves and pre-fills an entry for a pending work request send/tx.
350  * Used by the mid-level LLC send functions to prepare for the actual send/tx.
351  * Can sleep due to smc_wr_tx_get_free_slot() (if not in softirq context).
352  *
353  * Return: 0 on success, otherwise an error value.
354  */
355 static int smc_llc_add_pending_send(struct smc_link *link,
356 				    struct smc_wr_buf **wr_buf,
357 				    struct smc_wr_tx_pend_priv **pend)
358 {
359 	int rc;
360 
361 	rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL,
362 				     pend);
363 	if (rc < 0)
364 		return rc;
365 	BUILD_BUG_ON_MSG(
366 		sizeof(union smc_llc_msg) > SMC_WR_BUF_SIZE,
367 		"must increase SMC_WR_BUF_SIZE to at least sizeof(union smc_llc_msg)");
368 	BUILD_BUG_ON_MSG(
369 		sizeof(union smc_llc_msg) != SMC_WR_TX_SIZE,
370 		"must adapt SMC_WR_TX_SIZE to sizeof(union smc_llc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
371 	BUILD_BUG_ON_MSG(
372 		sizeof(struct smc_llc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
373 		"must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_llc_tx_pend)");
374 	return 0;
375 }
376 
377 /* high-level API to send LLC confirm link */
378 int smc_llc_send_confirm_link(struct smc_link *link,
379 			      enum smc_llc_reqresp reqresp)
380 {
381 	struct smc_llc_msg_confirm_link *confllc;
382 	struct smc_wr_tx_pend_priv *pend;
383 	struct smc_wr_buf *wr_buf;
384 	int rc;
385 
386 	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
387 	if (rc)
388 		return rc;
389 	confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
390 	memset(confllc, 0, sizeof(*confllc));
391 	confllc->hd.common.type = SMC_LLC_CONFIRM_LINK;
392 	confllc->hd.length = sizeof(struct smc_llc_msg_confirm_link);
393 	confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC;
394 	if (reqresp == SMC_LLC_RESP)
395 		confllc->hd.flags |= SMC_LLC_FLAG_RESP;
396 	memcpy(confllc->sender_mac, link->smcibdev->mac[link->ibport - 1],
397 	       ETH_ALEN);
398 	memcpy(confllc->sender_gid, link->gid, SMC_GID_SIZE);
399 	hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
400 	confllc->link_num = link->link_id;
401 	memcpy(confllc->link_uid, link->link_uid, SMC_LGR_ID_SIZE);
402 	confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS;
403 	/* send llc message */
404 	rc = smc_wr_tx_send(link, pend);
405 	return rc;
406 }
407 
408 /* send LLC confirm rkey request */
409 static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
410 				     struct smc_buf_desc *rmb_desc)
411 {
412 	struct smc_llc_msg_confirm_rkey *rkeyllc;
413 	struct smc_wr_tx_pend_priv *pend;
414 	struct smc_wr_buf *wr_buf;
415 	struct smc_link *link;
416 	int i, rc, rtok_ix;
417 
418 	rc = smc_llc_add_pending_send(send_link, &wr_buf, &pend);
419 	if (rc)
420 		return rc;
421 	rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf;
422 	memset(rkeyllc, 0, sizeof(*rkeyllc));
423 	rkeyllc->hd.common.type = SMC_LLC_CONFIRM_RKEY;
424 	rkeyllc->hd.length = sizeof(struct smc_llc_msg_confirm_rkey);
425 
426 	rtok_ix = 1;
427 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
428 		link = &send_link->lgr->lnk[i];
429 		if (smc_link_active(link) && link != send_link) {
430 			rkeyllc->rtoken[rtok_ix].link_id = link->link_id;
431 			rkeyllc->rtoken[rtok_ix].rmb_key =
432 				htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
433 			rkeyllc->rtoken[rtok_ix].rmb_vaddr = cpu_to_be64(
434 				(u64)sg_dma_address(
435 					rmb_desc->sgt[link->link_idx].sgl));
436 			rtok_ix++;
437 		}
438 	}
439 	/* rkey of send_link is in rtoken[0] */
440 	rkeyllc->rtoken[0].num_rkeys = rtok_ix - 1;
441 	rkeyllc->rtoken[0].rmb_key =
442 		htonl(rmb_desc->mr_rx[send_link->link_idx]->rkey);
443 	rkeyllc->rtoken[0].rmb_vaddr = cpu_to_be64(
444 		(u64)sg_dma_address(rmb_desc->sgt[send_link->link_idx].sgl));
445 	/* send llc message */
446 	rc = smc_wr_tx_send(send_link, pend);
447 	return rc;
448 }
449 
450 /* send LLC delete rkey request */
451 static int smc_llc_send_delete_rkey(struct smc_link *link,
452 				    struct smc_buf_desc *rmb_desc)
453 {
454 	struct smc_llc_msg_delete_rkey *rkeyllc;
455 	struct smc_wr_tx_pend_priv *pend;
456 	struct smc_wr_buf *wr_buf;
457 	int rc;
458 
459 	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
460 	if (rc)
461 		return rc;
462 	rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
463 	memset(rkeyllc, 0, sizeof(*rkeyllc));
464 	rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY;
465 	rkeyllc->hd.length = sizeof(struct smc_llc_msg_delete_rkey);
466 	rkeyllc->num_rkeys = 1;
467 	rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
468 	/* send llc message */
469 	rc = smc_wr_tx_send(link, pend);
470 	return rc;
471 }
472 
473 /* send ADD LINK request or response */
474 int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
475 			  struct smc_link *link_new,
476 			  enum smc_llc_reqresp reqresp)
477 {
478 	struct smc_llc_msg_add_link *addllc;
479 	struct smc_wr_tx_pend_priv *pend;
480 	struct smc_wr_buf *wr_buf;
481 	int rc;
482 
483 	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
484 	if (rc)
485 		return rc;
486 	addllc = (struct smc_llc_msg_add_link *)wr_buf;
487 
488 	memset(addllc, 0, sizeof(*addllc));
489 	addllc->hd.common.type = SMC_LLC_ADD_LINK;
490 	addllc->hd.length = sizeof(struct smc_llc_msg_add_link);
491 	if (reqresp == SMC_LLC_RESP)
492 		addllc->hd.flags |= SMC_LLC_FLAG_RESP;
493 	memcpy(addllc->sender_mac, mac, ETH_ALEN);
494 	memcpy(addllc->sender_gid, gid, SMC_GID_SIZE);
495 	if (link_new) {
496 		addllc->link_num = link_new->link_id;
497 		hton24(addllc->sender_qp_num, link_new->roce_qp->qp_num);
498 		hton24(addllc->initial_psn, link_new->psn_initial);
499 		if (reqresp == SMC_LLC_REQ)
500 			addllc->qp_mtu = link_new->path_mtu;
501 		else
502 			addllc->qp_mtu = min(link_new->path_mtu,
503 					     link_new->peer_mtu);
504 	}
505 	/* send llc message */
506 	rc = smc_wr_tx_send(link, pend);
507 	return rc;
508 }
509 
510 /* send DELETE LINK request or response */
511 int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
512 			     enum smc_llc_reqresp reqresp, bool orderly,
513 			     u32 reason)
514 {
515 	struct smc_llc_msg_del_link *delllc;
516 	struct smc_wr_tx_pend_priv *pend;
517 	struct smc_wr_buf *wr_buf;
518 	int rc;
519 
520 	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
521 	if (rc)
522 		return rc;
523 	delllc = (struct smc_llc_msg_del_link *)wr_buf;
524 
525 	memset(delllc, 0, sizeof(*delllc));
526 	delllc->hd.common.type = SMC_LLC_DELETE_LINK;
527 	delllc->hd.length = sizeof(struct smc_llc_msg_del_link);
528 	if (reqresp == SMC_LLC_RESP)
529 		delllc->hd.flags |= SMC_LLC_FLAG_RESP;
530 	if (orderly)
531 		delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
532 	if (link_del_id)
533 		delllc->link_num = link_del_id;
534 	else
535 		delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
536 	delllc->reason = htonl(reason);
537 	/* send llc message */
538 	rc = smc_wr_tx_send(link, pend);
539 	return rc;
540 }
541 
542 /* send LLC test link request */
543 static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
544 {
545 	struct smc_llc_msg_test_link *testllc;
546 	struct smc_wr_tx_pend_priv *pend;
547 	struct smc_wr_buf *wr_buf;
548 	int rc;
549 
550 	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
551 	if (rc)
552 		return rc;
553 	testllc = (struct smc_llc_msg_test_link *)wr_buf;
554 	memset(testllc, 0, sizeof(*testllc));
555 	testllc->hd.common.type = SMC_LLC_TEST_LINK;
556 	testllc->hd.length = sizeof(struct smc_llc_msg_test_link);
557 	memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
558 	/* send llc message */
559 	rc = smc_wr_tx_send(link, pend);
560 	return rc;
561 }
562 
563 /* schedule an llc send on link, may wait for buffers */
564 static int smc_llc_send_message(struct smc_link *link, void *llcbuf)
565 {
566 	struct smc_wr_tx_pend_priv *pend;
567 	struct smc_wr_buf *wr_buf;
568 	int rc;
569 
570 	if (!smc_link_usable(link))
571 		return -ENOLINK;
572 	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
573 	if (rc)
574 		return rc;
575 	memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
576 	return smc_wr_tx_send(link, pend);
577 }
578 
579 /* schedule an llc send on link, may wait for buffers,
580  * and wait for send completion notification.
581  * @return 0 on success
582  */
583 static int smc_llc_send_message_wait(struct smc_link *link, void *llcbuf)
584 {
585 	struct smc_wr_tx_pend_priv *pend;
586 	struct smc_wr_buf *wr_buf;
587 	int rc;
588 
589 	if (!smc_link_usable(link))
590 		return -ENOLINK;
591 	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
592 	if (rc)
593 		return rc;
594 	memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
595 	return smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
596 }
597 
598 /********************************* receive ***********************************/
599 
600 static int smc_llc_alloc_alt_link(struct smc_link_group *lgr,
601 				  enum smc_lgr_type lgr_new_t)
602 {
603 	int i;
604 
605 	if (lgr->type == SMC_LGR_SYMMETRIC ||
606 	    (lgr->type != SMC_LGR_SINGLE &&
607 	     (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
608 	      lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)))
609 		return -EMLINK;
610 
611 	if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
612 	    lgr_new_t == SMC_LGR_ASYMMETRIC_PEER) {
613 		for (i = SMC_LINKS_PER_LGR_MAX - 1; i >= 0; i--)
614 			if (lgr->lnk[i].state == SMC_LNK_UNUSED)
615 				return i;
616 	} else {
617 		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
618 			if (lgr->lnk[i].state == SMC_LNK_UNUSED)
619 				return i;
620 	}
621 	return -EMLINK;
622 }
623 
624 /* return the first buffer from any of the remaining buffer lists */
625 static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr,
626 						  int *buf_lst)
627 {
628 	struct smc_buf_desc *buf_pos;
629 
630 	while (*buf_lst < SMC_RMBE_SIZES) {
631 		buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst],
632 						   struct smc_buf_desc, list);
633 		if (buf_pos)
634 			return buf_pos;
635 		(*buf_lst)++;
636 	}
637 	return NULL;
638 }
639 
640 /* return next rmb from buffer lists */
641 static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr,
642 						 int *buf_lst,
643 						 struct smc_buf_desc *buf_pos)
644 {
645 	struct smc_buf_desc *buf_next;
646 
647 	if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
648 		(*buf_lst)++;
649 		return _smc_llc_get_next_rmb(lgr, buf_lst);
650 	}
651 	buf_next = list_next_entry(buf_pos, list);
652 	return buf_next;
653 }
654 
655 static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr,
656 						  int *buf_lst)
657 {
658 	*buf_lst = 0;
659 	return smc_llc_get_next_rmb(lgr, buf_lst, NULL);
660 }
661 
662 /* send one add_link_continue msg */
663 static int smc_llc_add_link_cont(struct smc_link *link,
664 				 struct smc_link *link_new, u8 *num_rkeys_todo,
665 				 int *buf_lst, struct smc_buf_desc **buf_pos)
666 {
667 	struct smc_llc_msg_add_link_cont *addc_llc;
668 	struct smc_link_group *lgr = link->lgr;
669 	int prim_lnk_idx, lnk_idx, i, rc;
670 	struct smc_wr_tx_pend_priv *pend;
671 	struct smc_wr_buf *wr_buf;
672 	struct smc_buf_desc *rmb;
673 	u8 n;
674 
675 	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
676 	if (rc)
677 		return rc;
678 	addc_llc = (struct smc_llc_msg_add_link_cont *)wr_buf;
679 	memset(addc_llc, 0, sizeof(*addc_llc));
680 
681 	prim_lnk_idx = link->link_idx;
682 	lnk_idx = link_new->link_idx;
683 	addc_llc->link_num = link_new->link_id;
684 	addc_llc->num_rkeys = *num_rkeys_todo;
685 	n = *num_rkeys_todo;
686 	for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) {
687 		if (!*buf_pos) {
688 			addc_llc->num_rkeys = addc_llc->num_rkeys -
689 					      *num_rkeys_todo;
690 			*num_rkeys_todo = 0;
691 			break;
692 		}
693 		rmb = *buf_pos;
694 
695 		addc_llc->rt[i].rmb_key = htonl(rmb->mr_rx[prim_lnk_idx]->rkey);
696 		addc_llc->rt[i].rmb_key_new = htonl(rmb->mr_rx[lnk_idx]->rkey);
697 		addc_llc->rt[i].rmb_vaddr_new =
698 			cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));
699 
700 		(*num_rkeys_todo)--;
701 		*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
702 		while (*buf_pos && !(*buf_pos)->used)
703 			*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
704 	}
705 	addc_llc->hd.common.type = SMC_LLC_ADD_LINK_CONT;
706 	addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
707 	if (lgr->role == SMC_CLNT)
708 		addc_llc->hd.flags |= SMC_LLC_FLAG_RESP;
709 	return smc_wr_tx_send(link, pend);
710 }
711 
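/* as an SMC client, exchange the rkeys of all used RMBs for the new link
 * with the server via ADD_LINK_CONT messages
 */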
712 static int smc_llc_cli_rkey_exchange(struct smc_link *link,
713 				     struct smc_link *link_new)
714 {
715 	struct smc_llc_msg_add_link_cont *addc_llc;
716 	struct smc_link_group *lgr = link->lgr;
717 	u8 max, num_rkeys_send, num_rkeys_recv;
718 	struct smc_llc_qentry *qentry;
719 	struct smc_buf_desc *buf_pos;
720 	int buf_lst;
721 	int rc = 0;
722 	int i;
723 
724 	mutex_lock(&lgr->rmbs_lock);
725 	num_rkeys_send = lgr->conns_num;
726 	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
727 	do {
728 		qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_TIME,
729 				      SMC_LLC_ADD_LINK_CONT);
730 		if (!qentry) {
731 			rc = -ETIMEDOUT;
732 			break;
733 		}
734 		addc_llc = &qentry->msg.add_link_cont;
735 		num_rkeys_recv = addc_llc->num_rkeys;
736 		max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
737 		for (i = 0; i < max; i++) {
738 			smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
739 				       addc_llc->rt[i].rmb_key,
740 				       addc_llc->rt[i].rmb_vaddr_new,
741 				       addc_llc->rt[i].rmb_key_new);
742 			num_rkeys_recv--;
743 		}
744 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
745 		rc = smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
746 					   &buf_lst, &buf_pos);
747 		if (rc)
748 			break;
749 	} while (num_rkeys_send || num_rkeys_recv);
750 
751 	mutex_unlock(&lgr->rmbs_lock);
752 	return rc;
753 }
754 
755 /* prepare and send an add link reject response */
756 static int smc_llc_cli_add_link_reject(struct smc_llc_qentry *qentry)
757 {
758 	qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
759 	qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_ADD_LNK_REJ;
760 	qentry->msg.raw.hdr.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH;
761 	return smc_llc_send_message(qentry->link, &qentry->msg);
762 }
763 
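/* as an SMC client, complete the setup of the new link: wait for the
 * CONFIRM_LINK request of the server, bring the QP to RTS, register the
 * buffers and send the CONFIRM_LINK response
 */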
764 static int smc_llc_cli_conf_link(struct smc_link *link,
765 				 struct smc_init_info *ini,
766 				 struct smc_link *link_new,
767 				 enum smc_lgr_type lgr_new_t)
768 {
769 	struct smc_link_group *lgr = link->lgr;
770 	struct smc_llc_qentry *qentry = NULL;
771 	int rc = 0;
772 
773 	/* receive CONFIRM LINK request over RoCE fabric */
774 	qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_FIRST_TIME, 0);
775 	if (!qentry) {
776 		rc = smc_llc_send_delete_link(link, link_new->link_id,
777 					      SMC_LLC_REQ, false,
778 					      SMC_LLC_DEL_LOST_PATH);
779 		return -ENOLINK;
780 	}
781 	if (qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) {
782 		/* received DELETE_LINK instead */
783 		qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
784 		smc_llc_send_message(link, &qentry->msg);
785 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
786 		return -ENOLINK;
787 	}
788 	smc_llc_save_peer_uid(qentry);
789 	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
790 
791 	rc = smc_ib_modify_qp_rts(link_new);
792 	if (rc) {
793 		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
794 					 false, SMC_LLC_DEL_LOST_PATH);
795 		return -ENOLINK;
796 	}
797 	smc_wr_remember_qp_attr(link_new);
798 
799 	rc = smcr_buf_reg_lgr(link_new);
800 	if (rc) {
801 		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
802 					 false, SMC_LLC_DEL_LOST_PATH);
803 		return -ENOLINK;
804 	}
805 
806 	/* send CONFIRM LINK response over RoCE fabric */
807 	rc = smc_llc_send_confirm_link(link_new, SMC_LLC_RESP);
808 	if (rc) {
809 		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
810 					 false, SMC_LLC_DEL_LOST_PATH);
811 		return -ENOLINK;
812 	}
813 	smc_llc_link_active(link_new);
814 	if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
815 	    lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)
816 		smcr_lgr_set_type_asym(lgr, lgr_new_t, link_new->link_idx);
817 	else
818 		smcr_lgr_set_type(lgr, lgr_new_t);
819 	return 0;
820 }
821 
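/* save peer QP number, GID, MAC, initial PSN and QP MTU from an ADD_LINK
 * message in the new link
 */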
822 static void smc_llc_save_add_link_info(struct smc_link *link,
823 				       struct smc_llc_msg_add_link *add_llc)
824 {
825 	link->peer_qpn = ntoh24(add_llc->sender_qp_num);
826 	memcpy(link->peer_gid, add_llc->sender_gid, SMC_GID_SIZE);
827 	memcpy(link->peer_mac, add_llc->sender_mac, ETH_ALEN);
828 	link->peer_psn = ntoh24(add_llc->initial_psn);
829 	link->peer_mtu = add_llc->qp_mtu;
830 }
831 
832 /* as an SMC client, process an add link request */
833 int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
834 {
835 	struct smc_llc_msg_add_link *llc = &qentry->msg.add_link;
836 	enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
837 	struct smc_link_group *lgr = smc_get_lgr(link);
838 	struct smc_link *lnk_new = NULL;
839 	struct smc_init_info ini;
840 	int lnk_idx, rc = 0;
841 
842 	if (!llc->qp_mtu)
843 		goto out_reject;
844 
845 	ini.vlan_id = lgr->vlan_id;
846 	smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
847 	if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
848 	    !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN)) {
849 		if (!ini.ib_dev)
850 			goto out_reject;
851 		lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
852 	}
853 	if (!ini.ib_dev) {
854 		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
855 		ini.ib_dev = link->smcibdev;
856 		ini.ib_port = link->ibport;
857 	}
858 	lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
859 	if (lnk_idx < 0)
860 		goto out_reject;
861 	lnk_new = &lgr->lnk[lnk_idx];
862 	rc = smcr_link_init(lgr, lnk_new, lnk_idx, &ini);
863 	if (rc)
864 		goto out_reject;
865 	smc_llc_save_add_link_info(lnk_new, llc);
866 	lnk_new->link_id = llc->link_num;	/* SMC server assigns link id */
867 	smc_llc_link_set_uid(lnk_new);
868 
869 	rc = smc_ib_ready_link(lnk_new);
870 	if (rc)
871 		goto out_clear_lnk;
872 
873 	rc = smcr_buf_map_lgr(lnk_new);
874 	if (rc)
875 		goto out_clear_lnk;
876 
877 	rc = smc_llc_send_add_link(link,
878 				   lnk_new->smcibdev->mac[ini.ib_port - 1],
879 				   lnk_new->gid, lnk_new, SMC_LLC_RESP);
880 	if (rc)
881 		goto out_clear_lnk;
882 	rc = smc_llc_cli_rkey_exchange(link, lnk_new);
883 	if (rc) {
884 		rc = 0;
885 		goto out_clear_lnk;
886 	}
887 	rc = smc_llc_cli_conf_link(link, &ini, lnk_new, lgr_new_t);
888 	if (!rc)
889 		goto out;
890 out_clear_lnk:
891 	smcr_link_clear(lnk_new, false);
892 out_reject:
893 	smc_llc_cli_add_link_reject(qentry);
894 out:
895 	kfree(qentry);
896 	return rc;
897 }
898 
899 /* as an SMC client, invite server to start the add_link processing */
900 static void smc_llc_cli_add_link_invite(struct smc_link *link,
901 					struct smc_llc_qentry *qentry)
902 {
903 	struct smc_link_group *lgr = smc_get_lgr(link);
904 	struct smc_init_info ini;
905 
906 	if (lgr->type == SMC_LGR_SYMMETRIC ||
907 	    lgr->type == SMC_LGR_ASYMMETRIC_PEER)
908 		goto out;
909 
910 	ini.vlan_id = lgr->vlan_id;
911 	smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
912 	if (!ini.ib_dev)
913 		goto out;
914 
915 	smc_llc_send_add_link(link, ini.ib_dev->mac[ini.ib_port - 1],
916 			      ini.ib_gid, NULL, SMC_LLC_REQ);
917 out:
918 	kfree(qentry);
919 }
920 
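/* check if the payload of an llc message consists of zeros only */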
921 static bool smc_llc_is_empty_llc_message(union smc_llc_msg *llc)
922 {
923 	int i;
924 
925 	for (i = 0; i < ARRAY_SIZE(llc->raw.data); i++)
926 		if (llc->raw.data[i])
927 			return false;
928 	return true;
929 }
930 
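/* an ADD_LINK request enqueued locally (see smc_llc_add_link_local()) has an
 * all-zero payload, an ADD_LINK request from the peer does not
 */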
931 static bool smc_llc_is_local_add_link(union smc_llc_msg *llc)
932 {
933 	if (llc->raw.hdr.common.type == SMC_LLC_ADD_LINK &&
934 	    smc_llc_is_empty_llc_message(llc))
935 		return true;
936 	return false;
937 }
938 
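/* as an SMC client, process the qentry of an add_link flow: either invite the
 * server to start the add_link processing or handle the server's request
 */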
939 static void smc_llc_process_cli_add_link(struct smc_link_group *lgr)
940 {
941 	struct smc_llc_qentry *qentry;
942 
943 	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
944 
945 	mutex_lock(&lgr->llc_conf_mutex);
946 	if (smc_llc_is_local_add_link(&qentry->msg))
947 		smc_llc_cli_add_link_invite(qentry->link, qentry);
948 	else
949 		smc_llc_cli_add_link(qentry->link, qentry);
950 	mutex_unlock(&lgr->llc_conf_mutex);
951 }
952 
953 static int smc_llc_active_link_count(struct smc_link_group *lgr)
954 {
955 	int i, link_count = 0;
956 
957 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
958 		if (!smc_link_active(&lgr->lnk[i]))
959 			continue;
960 		link_count++;
961 	}
962 	return link_count;
963 }
964 
965 /* find the asymmetric link when 3 links are established */
966 static struct smc_link *smc_llc_find_asym_link(struct smc_link_group *lgr)
967 {
968 	int asym_idx = -ENOENT;
969 	int i, j, k;
970 	bool found;
971 
972 	/* determine asymmetric link */
973 	found = false;
974 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
975 		for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
976 			if (!smc_link_usable(&lgr->lnk[i]) ||
977 			    !smc_link_usable(&lgr->lnk[j]))
978 				continue;
979 			if (!memcmp(lgr->lnk[i].gid, lgr->lnk[j].gid,
980 				    SMC_GID_SIZE)) {
981 				found = true;	/* asym_lnk is i or j */
982 				break;
983 			}
984 		}
985 		if (found)
986 			break;
987 	}
988 	if (!found)
989 		goto out; /* no asymmetric link */
990 	for (k = 0; k < SMC_LINKS_PER_LGR_MAX; k++) {
991 		if (!smc_link_usable(&lgr->lnk[k]))
992 			continue;
993 		if (k != i &&
994 		    !memcmp(lgr->lnk[i].peer_gid, lgr->lnk[k].peer_gid,
995 			    SMC_GID_SIZE)) {
996 			asym_idx = i;
997 			break;
998 		}
999 		if (k != j &&
1000 		    !memcmp(lgr->lnk[j].peer_gid, lgr->lnk[k].peer_gid,
1001 			    SMC_GID_SIZE)) {
1002 			asym_idx = j;
1003 			break;
1004 		}
1005 	}
1006 out:
1007 	return (asym_idx < 0) ? NULL : &lgr->lnk[asym_idx];
1008 }
1009 
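/* take the asymmetric link out of service: switch its connections to another
 * link, ask the peer to delete it and clear it
 */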
1010 static void smc_llc_delete_asym_link(struct smc_link_group *lgr)
1011 {
1012 	struct smc_link *lnk_new = NULL, *lnk_asym;
1013 	struct smc_llc_qentry *qentry;
1014 	int rc;
1015 
1016 	lnk_asym = smc_llc_find_asym_link(lgr);
1017 	if (!lnk_asym)
1018 		return; /* no asymmetric link */
1019 	if (!smc_link_downing(&lnk_asym->state))
1020 		return;
1021 	lnk_new = smc_switch_conns(lgr, lnk_asym, false);
1022 	smc_wr_tx_wait_no_pending_sends(lnk_asym);
1023 	if (!lnk_new)
1024 		goto out_free;
1025 	/* change flow type from ADD_LINK into DEL_LINK */
1026 	lgr->llc_flow_lcl.type = SMC_LLC_FLOW_DEL_LINK;
1027 	rc = smc_llc_send_delete_link(lnk_new, lnk_asym->link_id, SMC_LLC_REQ,
1028 				      true, SMC_LLC_DEL_NO_ASYM_NEEDED);
1029 	if (rc) {
1030 		smcr_link_down_cond(lnk_new);
1031 		goto out_free;
1032 	}
1033 	qentry = smc_llc_wait(lgr, lnk_new, SMC_LLC_WAIT_TIME,
1034 			      SMC_LLC_DELETE_LINK);
1035 	if (!qentry) {
1036 		smcr_link_down_cond(lnk_new);
1037 		goto out_free;
1038 	}
1039 	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1040 out_free:
1041 	smcr_link_clear(lnk_asym, true);
1042 }
1043 
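/* as the SMC server, exchange the rkeys of all used RMBs for the new link
 * with the client via ADD_LINK_CONT messages
 */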
1044 static int smc_llc_srv_rkey_exchange(struct smc_link *link,
1045 				     struct smc_link *link_new)
1046 {
1047 	struct smc_llc_msg_add_link_cont *addc_llc;
1048 	struct smc_link_group *lgr = link->lgr;
1049 	u8 max, num_rkeys_send, num_rkeys_recv;
1050 	struct smc_llc_qentry *qentry = NULL;
1051 	struct smc_buf_desc *buf_pos;
1052 	int buf_lst;
1053 	int rc = 0;
1054 	int i;
1055 
1056 	mutex_lock(&lgr->rmbs_lock);
1057 	num_rkeys_send = lgr->conns_num;
1058 	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
1059 	do {
1060 		smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
1061 				      &buf_lst, &buf_pos);
1062 		qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME,
1063 				      SMC_LLC_ADD_LINK_CONT);
1064 		if (!qentry) {
1065 			rc = -ETIMEDOUT;
1066 			goto out;
1067 		}
1068 		addc_llc = &qentry->msg.add_link_cont;
1069 		num_rkeys_recv = addc_llc->num_rkeys;
1070 		max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
1071 		for (i = 0; i < max; i++) {
1072 			smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
1073 				       addc_llc->rt[i].rmb_key,
1074 				       addc_llc->rt[i].rmb_vaddr_new,
1075 				       addc_llc->rt[i].rmb_key_new);
1076 			num_rkeys_recv--;
1077 		}
1078 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1079 	} while (num_rkeys_send || num_rkeys_recv);
1080 out:
1081 	mutex_unlock(&lgr->rmbs_lock);
1082 	return rc;
1083 }
1084 
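/* as the SMC server, confirm the new link: send the CONFIRM_LINK request,
 * wait for the client's response and activate the link
 */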
1085 static int smc_llc_srv_conf_link(struct smc_link *link,
1086 				 struct smc_link *link_new,
1087 				 enum smc_lgr_type lgr_new_t)
1088 {
1089 	struct smc_link_group *lgr = link->lgr;
1090 	struct smc_llc_qentry *qentry = NULL;
1091 	int rc;
1092 
1093 	/* send CONFIRM LINK request over the RoCE fabric */
1094 	rc = smc_llc_send_confirm_link(link_new, SMC_LLC_REQ);
1095 	if (rc)
1096 		return -ENOLINK;
1097 	/* receive CONFIRM LINK response over the RoCE fabric */
1098 	qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, 0);
1099 	if (!qentry ||
1100 	    qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) {
1101 		/* send DELETE LINK */
1102 		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
1103 					 false, SMC_LLC_DEL_LOST_PATH);
1104 		if (qentry)
1105 			smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1106 		return -ENOLINK;
1107 	}
1108 	smc_llc_save_peer_uid(qentry);
1109 	smc_llc_link_active(link_new);
1110 	if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
1111 	    lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)
1112 		smcr_lgr_set_type_asym(lgr, lgr_new_t, link_new->link_idx);
1113 	else
1114 		smcr_lgr_set_type(lgr, lgr_new_t);
1115 	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1116 	return 0;
1117 }
1118 
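/* as the SMC server, set up an additional link: initialize it, send the
 * ADD_LINK request, exchange rkeys and confirm the link with the client
 */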
1119 int smc_llc_srv_add_link(struct smc_link *link)
1120 {
1121 	enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
1122 	struct smc_link_group *lgr = link->lgr;
1123 	struct smc_llc_msg_add_link *add_llc;
1124 	struct smc_llc_qentry *qentry = NULL;
1125 	struct smc_link *link_new;
1126 	struct smc_init_info ini;
1127 	int lnk_idx, rc = 0;
1128 
1129 	/* ignore client add link recommendation, start new flow */
1130 	ini.vlan_id = lgr->vlan_id;
1131 	smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
1132 	if (!ini.ib_dev) {
1133 		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
1134 		ini.ib_dev = link->smcibdev;
1135 		ini.ib_port = link->ibport;
1136 	}
1137 	lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
1138 	if (lnk_idx < 0)
1139 		return 0;
1140 
1141 	rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, &ini);
1142 	if (rc)
1143 		return rc;
1144 	link_new = &lgr->lnk[lnk_idx];
1145 	rc = smc_llc_send_add_link(link,
1146 				   link_new->smcibdev->mac[ini.ib_port - 1],
1147 				   link_new->gid, link_new, SMC_LLC_REQ);
1148 	if (rc)
1149 		goto out_err;
1150 	/* receive ADD LINK response over the RoCE fabric */
1151 	qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME, SMC_LLC_ADD_LINK);
1152 	if (!qentry) {
1153 		rc = -ETIMEDOUT;
1154 		goto out_err;
1155 	}
1156 	add_llc = &qentry->msg.add_link;
1157 	if (add_llc->hd.flags & SMC_LLC_FLAG_ADD_LNK_REJ) {
1158 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1159 		rc = -ENOLINK;
1160 		goto out_err;
1161 	}
1162 	if (lgr->type == SMC_LGR_SINGLE &&
1163 	    (!memcmp(add_llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
1164 	     !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN))) {
1165 		lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
1166 	}
1167 	smc_llc_save_add_link_info(link_new, add_llc);
1168 	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1169 
1170 	rc = smc_ib_ready_link(link_new);
1171 	if (rc)
1172 		goto out_err;
1173 	rc = smcr_buf_map_lgr(link_new);
1174 	if (rc)
1175 		goto out_err;
1176 	rc = smcr_buf_reg_lgr(link_new);
1177 	if (rc)
1178 		goto out_err;
1179 	rc = smc_llc_srv_rkey_exchange(link, link_new);
1180 	if (rc)
1181 		goto out_err;
1182 	rc = smc_llc_srv_conf_link(link, link_new, lgr_new_t);
1183 	if (rc)
1184 		goto out_err;
1185 	return 0;
1186 out_err:
1187 	smcr_link_clear(link_new, false);
1188 	return rc;
1189 }
1190 
1191 static void smc_llc_process_srv_add_link(struct smc_link_group *lgr)
1192 {
1193 	struct smc_link *link = lgr->llc_flow_lcl.qentry->link;
1194 	int rc;
1195 
1196 	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1197 
1198 	mutex_lock(&lgr->llc_conf_mutex);
1199 	rc = smc_llc_srv_add_link(link);
1200 	if (!rc && lgr->type == SMC_LGR_SYMMETRIC) {
1201 		/* delete any asymmetric link */
1202 		smc_llc_delete_asym_link(lgr);
1203 	}
1204 	mutex_unlock(&lgr->llc_conf_mutex);
1205 }
1206 
1207 /* enqueue a local add_link req to trigger a new add_link flow */
1208 void smc_llc_add_link_local(struct smc_link *link)
1209 {
1210 	struct smc_llc_msg_add_link add_llc = {};
1211 
1212 	add_llc.hd.length = sizeof(add_llc);
1213 	add_llc.hd.common.type = SMC_LLC_ADD_LINK;
1214 	/* no dev and port needed */
1215 	smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc);
1216 }
1217 
1218 /* worker to process an add link message */
1219 static void smc_llc_add_link_work(struct work_struct *work)
1220 {
1221 	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
1222 						  llc_add_link_work);
1223 
1224 	if (list_empty(&lgr->list)) {
1225 		/* link group is terminating */
1226 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1227 		goto out;
1228 	}
1229 
1230 	if (lgr->role == SMC_CLNT)
1231 		smc_llc_process_cli_add_link(lgr);
1232 	else
1233 		smc_llc_process_srv_add_link(lgr);
1234 out:
1235 	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
1236 }
1237 
1238 /* enqueue a local del_link msg to trigger a new del_link flow,
1239  * called only for role SMC_SERV
1240  */
1241 void smc_llc_srv_delete_link_local(struct smc_link *link, u8 del_link_id)
1242 {
1243 	struct smc_llc_msg_del_link del_llc = {};
1244 
1245 	del_llc.hd.length = sizeof(del_llc);
1246 	del_llc.hd.common.type = SMC_LLC_DELETE_LINK;
1247 	del_llc.link_num = del_link_id;
1248 	del_llc.reason = htonl(SMC_LLC_DEL_LOST_PATH);
1249 	del_llc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
1250 	smc_llc_enqueue(link, (union smc_llc_msg *)&del_llc);
1251 }
1252 
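/* as an SMC client, process a DELETE_LINK request: terminate the whole link
 * group or remove the single link named in the request
 */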
1253 static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
1254 {
1255 	struct smc_link *lnk_del = NULL, *lnk_asym, *lnk;
1256 	struct smc_llc_msg_del_link *del_llc;
1257 	struct smc_llc_qentry *qentry;
1258 	int active_links;
1259 	int lnk_idx;
1260 
1261 	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
1262 	lnk = qentry->link;
1263 	del_llc = &qentry->msg.delete_link;
1264 
1265 	if (del_llc->hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) {
1266 		smc_lgr_terminate_sched(lgr);
1267 		goto out;
1268 	}
1269 	mutex_lock(&lgr->llc_conf_mutex);
1270 	/* delete single link */
1271 	for (lnk_idx = 0; lnk_idx < SMC_LINKS_PER_LGR_MAX; lnk_idx++) {
1272 		if (lgr->lnk[lnk_idx].link_id != del_llc->link_num)
1273 			continue;
1274 		lnk_del = &lgr->lnk[lnk_idx];
1275 		break;
1276 	}
1277 	del_llc->hd.flags |= SMC_LLC_FLAG_RESP;
1278 	if (!lnk_del) {
1279 		/* link was not found */
1280 		del_llc->reason = htonl(SMC_LLC_DEL_NOLNK);
1281 		smc_llc_send_message(lnk, &qentry->msg);
1282 		goto out_unlock;
1283 	}
1284 	lnk_asym = smc_llc_find_asym_link(lgr);
1285 
1286 	del_llc->reason = 0;
1287 	smc_llc_send_message(lnk, &qentry->msg); /* response */
1288 
1289 	if (smc_link_downing(&lnk_del->state)) {
1290 		if (smc_switch_conns(lgr, lnk_del, false))
1291 			smc_wr_tx_wait_no_pending_sends(lnk_del);
1292 	}
1293 	smcr_link_clear(lnk_del, true);
1294 
1295 	active_links = smc_llc_active_link_count(lgr);
1296 	if (lnk_del == lnk_asym) {
1297 		/* expected deletion of asym link, don't change lgr state */
1298 	} else if (active_links == 1) {
1299 		smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
1300 	} else if (!active_links) {
1301 		smcr_lgr_set_type(lgr, SMC_LGR_NONE);
1302 		smc_lgr_terminate_sched(lgr);
1303 	}
1304 out_unlock:
1305 	mutex_unlock(&lgr->llc_conf_mutex);
1306 out:
1307 	kfree(qentry);
1308 }
1309 
1310 /* try to send a DELETE LINK ALL request on any active link,
1311  * waiting for send completion
1312  */
1313 void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn)
1314 {
1315 	struct smc_llc_msg_del_link delllc = {};
1316 	int i;
1317 
1318 	delllc.hd.common.type = SMC_LLC_DELETE_LINK;
1319 	delllc.hd.length = sizeof(delllc);
1320 	if (ord)
1321 		delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
1322 	delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
1323 	delllc.reason = htonl(rsn);
1324 
1325 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1326 		if (!smc_link_usable(&lgr->lnk[i]))
1327 			continue;
1328 		if (!smc_llc_send_message_wait(&lgr->lnk[i], &delllc))
1329 			break;
1330 	}
1331 }
1332 
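/* as the SMC server, process a delete_link qentry (request from the peer or
 * enqueued locally): terminate the whole link group or delete a single link,
 * and trigger setup of a new link if only one link is left
 */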
1333 static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
1334 {
1335 	struct smc_llc_msg_del_link *del_llc;
1336 	struct smc_link *lnk, *lnk_del;
1337 	struct smc_llc_qentry *qentry;
1338 	int active_links;
1339 	int i;
1340 
1341 	mutex_lock(&lgr->llc_conf_mutex);
1342 	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
1343 	lnk = qentry->link;
1344 	del_llc = &qentry->msg.delete_link;
1345 
1346 	if (qentry->msg.delete_link.hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) {
1347 		/* delete entire lgr */
1348 		smc_llc_send_link_delete_all(lgr, true, ntohl(
1349 					      qentry->msg.delete_link.reason));
1350 		smc_lgr_terminate_sched(lgr);
1351 		goto out;
1352 	}
1353 	/* delete single link */
1354 	lnk_del = NULL;
1355 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1356 		if (lgr->lnk[i].link_id == del_llc->link_num) {
1357 			lnk_del = &lgr->lnk[i];
1358 			break;
1359 		}
1360 	}
1361 	if (!lnk_del)
1362 		goto out; /* asymmetric link already deleted */
1363 
1364 	if (smc_link_downing(&lnk_del->state)) {
1365 		if (smc_switch_conns(lgr, lnk_del, false))
1366 			smc_wr_tx_wait_no_pending_sends(lnk_del);
1367 	}
1368 	if (!list_empty(&lgr->list)) {
1369 		/* qentry is either a request from peer (send it back to
1370 		 * initiate the DELETE_LINK processing), or a locally
1371 		 * enqueued DELETE_LINK request (forward it)
1372 		 */
1373 		if (!smc_llc_send_message(lnk, &qentry->msg)) {
1374 			struct smc_llc_qentry *qentry2;
1375 
1376 			qentry2 = smc_llc_wait(lgr, lnk, SMC_LLC_WAIT_TIME,
1377 					       SMC_LLC_DELETE_LINK);
1378 			if (qentry2)
1379 				smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1380 		}
1381 	}
1382 	smcr_link_clear(lnk_del, true);
1383 
1384 	active_links = smc_llc_active_link_count(lgr);
1385 	if (active_links == 1) {
1386 		smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
1387 	} else if (!active_links) {
1388 		smcr_lgr_set_type(lgr, SMC_LGR_NONE);
1389 		smc_lgr_terminate_sched(lgr);
1390 	}
1391 
1392 	if (lgr->type == SMC_LGR_SINGLE && !list_empty(&lgr->list)) {
1393 		/* trigger setup of asymm alt link */
1394 		smc_llc_add_link_local(lnk);
1395 	}
1396 out:
1397 	mutex_unlock(&lgr->llc_conf_mutex);
1398 	kfree(qentry);
1399 }
1400 
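/* worker to process a delete link message */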
1401 static void smc_llc_delete_link_work(struct work_struct *work)
1402 {
1403 	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
1404 						  llc_del_link_work);
1405 
1406 	if (list_empty(&lgr->list)) {
1407 		/* link group is terminating */
1408 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1409 		goto out;
1410 	}
1411 
1412 	if (lgr->role == SMC_CLNT)
1413 		smc_llc_process_cli_delete_link(lgr);
1414 	else
1415 		smc_llc_process_srv_delete_link(lgr);
1416 out:
1417 	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
1418 }
1419 
1420 /* process a confirm_rkey request from peer, remote flow */
1421 static void smc_llc_rmt_conf_rkey(struct smc_link_group *lgr)
1422 {
1423 	struct smc_llc_msg_confirm_rkey *llc;
1424 	struct smc_llc_qentry *qentry;
1425 	struct smc_link *link;
1426 	int num_entries;
1427 	int rk_idx;
1428 	int i;
1429 
1430 	qentry = lgr->llc_flow_rmt.qentry;
1431 	llc = &qentry->msg.confirm_rkey;
1432 	link = qentry->link;
1433 
1434 	num_entries = llc->rtoken[0].num_rkeys;
1435 	/* first rkey entry is for receiving link */
1436 	rk_idx = smc_rtoken_add(link,
1437 				llc->rtoken[0].rmb_vaddr,
1438 				llc->rtoken[0].rmb_key);
1439 	if (rk_idx < 0)
1440 		goto out_err;
1441 
1442 	for (i = 1; i <= min_t(u8, num_entries, SMC_LLC_RKEYS_PER_MSG - 1); i++)
1443 		smc_rtoken_set2(lgr, rk_idx, llc->rtoken[i].link_id,
1444 				llc->rtoken[i].rmb_vaddr,
1445 				llc->rtoken[i].rmb_key);
1446 	/* max links is 3 so there is no need to support conf_rkey_cont msgs */
1447 	goto out;
1448 out_err:
1449 	llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
1450 	llc->hd.flags |= SMC_LLC_FLAG_RKEY_RETRY;
1451 out:
1452 	llc->hd.flags |= SMC_LLC_FLAG_RESP;
1453 	smc_llc_send_message(link, &qentry->msg);
1454 	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
1455 }
1456 
1457 /* process a delete_rkey request from peer, remote flow */
1458 static void smc_llc_rmt_delete_rkey(struct smc_link_group *lgr)
1459 {
1460 	struct smc_llc_msg_delete_rkey *llc;
1461 	struct smc_llc_qentry *qentry;
1462 	struct smc_link *link;
1463 	u8 err_mask = 0;
1464 	int i, max;
1465 
1466 	qentry = lgr->llc_flow_rmt.qentry;
1467 	llc = &qentry->msg.delete_rkey;
1468 	link = qentry->link;
1469 
1470 	max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX);
1471 	for (i = 0; i < max; i++) {
1472 		if (smc_rtoken_delete(link, llc->rkey[i]))
1473 			err_mask |= 1 << (SMC_LLC_DEL_RKEY_MAX - 1 - i);
1474 	}
1475 	if (err_mask) {
1476 		llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
1477 		llc->err_mask = err_mask;
1478 	}
1479 	llc->hd.flags |= SMC_LLC_FLAG_RESP;
1480 	smc_llc_send_message(link, &qentry->msg);
1481 	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
1482 }
1483 
1484 static void smc_llc_protocol_violation(struct smc_link_group *lgr, u8 type)
1485 {
1486 	pr_warn_ratelimited("smc: SMC-R lg %*phN LLC protocol violation: "
1487 			    "llc_type %d\n", SMC_LGR_ID_SIZE, &lgr->id, type);
1488 	smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_PROT_VIOL);
1489 	smc_lgr_terminate_sched(lgr);
1490 }
1491 
1492 /* flush the llc event queue */
1493 static void smc_llc_event_flush(struct smc_link_group *lgr)
1494 {
1495 	struct smc_llc_qentry *qentry, *q;
1496 
1497 	spin_lock_bh(&lgr->llc_event_q_lock);
1498 	list_for_each_entry_safe(qentry, q, &lgr->llc_event_q, list) {
1499 		list_del_init(&qentry->list);
1500 		kfree(qentry);
1501 	}
1502 	spin_unlock_bh(&lgr->llc_event_q_lock);
1503 }
1504 
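/* process one llc request from the event queue: answer it directly, start a
 * new flow for it, or hand it over to a flow waiting for this message
 */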
1505 static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
1506 {
1507 	union smc_llc_msg *llc = &qentry->msg;
1508 	struct smc_link *link = qentry->link;
1509 	struct smc_link_group *lgr = link->lgr;
1510 
1511 	if (!smc_link_usable(link))
1512 		goto out;
1513 
1514 	switch (llc->raw.hdr.common.type) {
1515 	case SMC_LLC_TEST_LINK:
1516 		llc->test_link.hd.flags |= SMC_LLC_FLAG_RESP;
1517 		smc_llc_send_message(link, llc);
1518 		break;
1519 	case SMC_LLC_ADD_LINK:
1520 		if (list_empty(&lgr->list))
1521 			goto out;	/* lgr is terminating */
1522 		if (lgr->role == SMC_CLNT) {
1523 			if (smc_llc_is_local_add_link(llc)) {
1524 				if (lgr->llc_flow_lcl.type ==
1525 				    SMC_LLC_FLOW_ADD_LINK)
1526 					break;	/* add_link in progress */
1527 				if (smc_llc_flow_start(&lgr->llc_flow_lcl,
1528 						       qentry)) {
1529 					schedule_work(&lgr->llc_add_link_work);
1530 				}
1531 				return;
1532 			}
1533 			if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
1534 			    !lgr->llc_flow_lcl.qentry) {
1535 				/* a flow is waiting for this message */
1536 				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
1537 							qentry);
1538 				wake_up(&lgr->llc_msg_waiter);
1539 			} else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
1540 						      qentry)) {
1541 				schedule_work(&lgr->llc_add_link_work);
1542 			}
1543 		} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
1544 			/* as smc server, handle client suggestion */
1545 			schedule_work(&lgr->llc_add_link_work);
1546 		}
1547 		return;
1548 	case SMC_LLC_CONFIRM_LINK:
1549 	case SMC_LLC_ADD_LINK_CONT:
1550 		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
1551 			/* a flow is waiting for this message */
1552 			smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
1553 			wake_up(&lgr->llc_msg_waiter);
1554 			return;
1555 		}
1556 		break;
1557 	case SMC_LLC_DELETE_LINK:
1558 		if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
1559 		    !lgr->llc_flow_lcl.qentry) {
1560 			/* DEL LINK REQ during ADD LINK SEQ */
1561 			smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
1562 			wake_up(&lgr->llc_msg_waiter);
1563 		} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
1564 			schedule_work(&lgr->llc_del_link_work);
1565 		}
1566 		return;
1567 	case SMC_LLC_CONFIRM_RKEY:
1568 		/* new request from remote, assign to remote flow */
1569 		if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
1570 			/* process here, does not wait for more llc msgs */
1571 			smc_llc_rmt_conf_rkey(lgr);
1572 			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
1573 		}
1574 		return;
1575 	case SMC_LLC_CONFIRM_RKEY_CONT:
1576 		/* not used because max links is 3, and 3 rkeys fit into
1577 		 * one CONFIRM_RKEY message
1578 		 */
1579 		break;
1580 	case SMC_LLC_DELETE_RKEY:
1581 		/* new request from remote, assign to remote flow */
1582 		if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
1583 			/* process here, does not wait for more llc msgs */
1584 			smc_llc_rmt_delete_rkey(lgr);
1585 			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
1586 		}
1587 		return;
1588 	default:
1589 		smc_llc_protocol_violation(lgr, llc->raw.hdr.common.type);
1590 		break;
1591 	}
1592 out:
1593 	kfree(qentry);
1594 }
1595 
1596 /* worker to process llc messages on the event queue */
1597 static void smc_llc_event_work(struct work_struct *work)
1598 {
1599 	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
1600 						  llc_event_work);
1601 	struct smc_llc_qentry *qentry;
1602 
1603 	if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
1604 		qentry = lgr->delayed_event;
1605 		lgr->delayed_event = NULL;
1606 		if (smc_link_usable(qentry->link))
1607 			smc_llc_event_handler(qentry);
1608 		else
1609 			kfree(qentry);
1610 	}
1611 
1612 again:
1613 	spin_lock_bh(&lgr->llc_event_q_lock);
1614 	if (!list_empty(&lgr->llc_event_q)) {
1615 		qentry = list_first_entry(&lgr->llc_event_q,
1616 					  struct smc_llc_qentry, list);
1617 		list_del_init(&qentry->list);
1618 		spin_unlock_bh(&lgr->llc_event_q_lock);
1619 		smc_llc_event_handler(qentry);
1620 		goto again;
1621 	}
1622 	spin_unlock_bh(&lgr->llc_event_q_lock);
1623 }
1624 
1625 /* process llc responses in tasklet context */
1626 static void smc_llc_rx_response(struct smc_link *link,
1627 				struct smc_llc_qentry *qentry)
1628 {
1629 	enum smc_llc_flowtype flowtype = link->lgr->llc_flow_lcl.type;
1630 	struct smc_llc_flow *flow = &link->lgr->llc_flow_lcl;
1631 	u8 llc_type = qentry->msg.raw.hdr.common.type;
1632 
1633 	switch (llc_type) {
1634 	case SMC_LLC_TEST_LINK:
1635 		if (smc_link_active(link))
1636 			complete(&link->llc_testlink_resp);
1637 		break;
1638 	case SMC_LLC_ADD_LINK:
1639 	case SMC_LLC_ADD_LINK_CONT:
1640 	case SMC_LLC_CONFIRM_LINK:
1641 		if (flowtype != SMC_LLC_FLOW_ADD_LINK || flow->qentry)
1642 			break;	/* drop out-of-flow response */
1643 		goto assign;
1644 	case SMC_LLC_DELETE_LINK:
1645 		if (flowtype != SMC_LLC_FLOW_DEL_LINK || flow->qentry)
1646 			break;	/* drop out-of-flow response */
1647 		goto assign;
1648 	case SMC_LLC_CONFIRM_RKEY:
1649 	case SMC_LLC_DELETE_RKEY:
1650 		if (flowtype != SMC_LLC_FLOW_RKEY || flow->qentry)
1651 			break;	/* drop out-of-flow response */
1652 		goto assign;
1653 	case SMC_LLC_CONFIRM_RKEY_CONT:
1654 		/* not used because max links is 3 */
1655 		break;
1656 	default:
1657 		smc_llc_protocol_violation(link->lgr, llc_type);
1658 		break;
1659 	}
1660 	kfree(qentry);
1661 	return;
1662 assign:
1663 	/* assign responses to the local flow, we requested them */
1664 	smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
1665 	wake_up(&link->lgr->llc_msg_waiter);
1666 }
1667 
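/* handle a received llc message: responses are processed immediately,
 * requests are queued and handled by the llc event worker
 */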
1668 static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
1669 {
1670 	struct smc_link_group *lgr = link->lgr;
1671 	struct smc_llc_qentry *qentry;
1672 	unsigned long flags;
1673 
1674 	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
1675 	if (!qentry)
1676 		return;
1677 	qentry->link = link;
1678 	INIT_LIST_HEAD(&qentry->list);
1679 	memcpy(&qentry->msg, llc, sizeof(union smc_llc_msg));
1680 
1681 	/* process responses immediately */
1682 	if (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) {
1683 		smc_llc_rx_response(link, qentry);
1684 		return;
1685 	}
1686 
1687 	/* add requests to event queue */
1688 	spin_lock_irqsave(&lgr->llc_event_q_lock, flags);
1689 	list_add_tail(&qentry->list, &lgr->llc_event_q);
1690 	spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags);
1691 	queue_work(system_highpri_wq, &lgr->llc_event_work);
1692 }
1693 
1694 /* copy received msg and add it to the event queue */
1695 static void smc_llc_rx_handler(struct ib_wc *wc, void *buf)
1696 {
1697 	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
1698 	union smc_llc_msg *llc = buf;
1699 
1700 	if (wc->byte_len < sizeof(*llc))
1701 		return; /* short message */
1702 	if (llc->raw.hdr.length != sizeof(*llc))
1703 		return; /* invalid message */
1704 
1705 	smc_llc_enqueue(link, llc);
1706 }
1707 
1708 /***************************** worker, utils *********************************/
1709 
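/* worker that periodically sends a TEST_LINK request and schedules taking the
 * link down if the peer does not respond in time
 */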
1710 static void smc_llc_testlink_work(struct work_struct *work)
1711 {
1712 	struct smc_link *link = container_of(to_delayed_work(work),
1713 					     struct smc_link, llc_testlink_wrk);
1714 	unsigned long next_interval;
1715 	unsigned long expire_time;
1716 	u8 user_data[16] = { 0 };
1717 	int rc;
1718 
1719 	if (!smc_link_active(link))
1720 		return;		/* don't reschedule worker */
1721 	expire_time = link->wr_rx_tstamp + link->llc_testlink_time;
1722 	if (time_is_after_jiffies(expire_time)) {
1723 		next_interval = expire_time - jiffies;
1724 		goto out;
1725 	}
1726 	reinit_completion(&link->llc_testlink_resp);
1727 	smc_llc_send_test_link(link, user_data);
1728 	/* receive TEST LINK response over RoCE fabric */
1729 	rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
1730 						       SMC_LLC_WAIT_TIME);
1731 	if (!smc_link_active(link))
1732 		return;		/* link state changed */
1733 	if (rc <= 0) {
1734 		smcr_link_down_cond_sched(link);
1735 		return;
1736 	}
1737 	next_interval = link->llc_testlink_time;
1738 out:
1739 	schedule_delayed_work(&link->llc_testlink_wrk, next_interval);
1740 }
1741 
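/* initialize the llc event queue, flow handling and workers of a link group */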
1742 void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
1743 {
1744 	struct net *net = sock_net(smc->clcsock->sk);
1745 
1746 	INIT_WORK(&lgr->llc_event_work, smc_llc_event_work);
1747 	INIT_WORK(&lgr->llc_add_link_work, smc_llc_add_link_work);
1748 	INIT_WORK(&lgr->llc_del_link_work, smc_llc_delete_link_work);
1749 	INIT_LIST_HEAD(&lgr->llc_event_q);
1750 	spin_lock_init(&lgr->llc_event_q_lock);
1751 	spin_lock_init(&lgr->llc_flow_lock);
1752 	init_waitqueue_head(&lgr->llc_flow_waiter);
1753 	init_waitqueue_head(&lgr->llc_msg_waiter);
1754 	mutex_init(&lgr->llc_conf_mutex);
1755 	lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time;
1756 }
1757 
1758 /* called after lgr was removed from lgr_list */
1759 void smc_llc_lgr_clear(struct smc_link_group *lgr)
1760 {
1761 	smc_llc_event_flush(lgr);
1762 	wake_up_all(&lgr->llc_flow_waiter);
1763 	wake_up_all(&lgr->llc_msg_waiter);
1764 	cancel_work_sync(&lgr->llc_event_work);
1765 	cancel_work_sync(&lgr->llc_add_link_work);
1766 	cancel_work_sync(&lgr->llc_del_link_work);
1767 	kfree(lgr->delayed_event);
1768 	lgr->delayed_event = NULL;
1771 }
1772 
1773 int smc_llc_link_init(struct smc_link *link)
1774 {
1775 	init_completion(&link->llc_testlink_resp);
1776 	INIT_DELAYED_WORK(&link->llc_testlink_wrk, smc_llc_testlink_work);
1777 	return 0;
1778 }
1779 
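/* mark the link as active, log it and start the periodic TEST_LINK worker
 * if a testlink time is configured
 */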
1780 void smc_llc_link_active(struct smc_link *link)
1781 {
1782 	pr_warn_ratelimited("smc: SMC-R lg %*phN link added: id %*phN, "
1783 			    "peerid %*phN, ibdev %s, ibport %d\n",
1784 			    SMC_LGR_ID_SIZE, &link->lgr->id,
1785 			    SMC_LGR_ID_SIZE, &link->link_uid,
1786 			    SMC_LGR_ID_SIZE, &link->peer_link_uid,
1787 			    link->smcibdev->ibdev->name, link->ibport);
1788 	link->state = SMC_LNK_ACTIVE;
1789 	if (link->lgr->llc_testlink_time) {
1790 		link->llc_testlink_time = link->lgr->llc_testlink_time * HZ;
1791 		schedule_delayed_work(&link->llc_testlink_wrk,
1792 				      link->llc_testlink_time);
1793 	}
1794 }
1795 
1796 /* called in worker context */
1797 void smc_llc_link_clear(struct smc_link *link, bool log)
1798 {
1799 	if (log)
1800 		pr_warn_ratelimited("smc: SMC-R lg %*phN link removed: id %*phN"
1801 				    ", peerid %*phN, ibdev %s, ibport %d\n",
1802 				    SMC_LGR_ID_SIZE, &link->lgr->id,
1803 				    SMC_LGR_ID_SIZE, &link->link_uid,
1804 				    SMC_LGR_ID_SIZE, &link->peer_link_uid,
1805 				    link->smcibdev->ibdev->name, link->ibport);
1806 	complete(&link->llc_testlink_resp);
1807 	cancel_delayed_work_sync(&link->llc_testlink_wrk);
1808 	smc_wr_wakeup_reg_wait(link);
1809 	smc_wr_wakeup_tx_wait(link);
1810 }
1811 
1812 /* register a new rtoken at the remote peer (for all links) */
1813 int smc_llc_do_confirm_rkey(struct smc_link *send_link,
1814 			    struct smc_buf_desc *rmb_desc)
1815 {
1816 	struct smc_link_group *lgr = send_link->lgr;
1817 	struct smc_llc_qentry *qentry = NULL;
1818 	int rc = 0;
1819 
1820 	rc = smc_llc_send_confirm_rkey(send_link, rmb_desc);
1821 	if (rc)
1822 		goto out;
1823 	/* receive CONFIRM RKEY response from server over RoCE fabric */
1824 	qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
1825 			      SMC_LLC_CONFIRM_RKEY);
1826 	if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
1827 		rc = -EFAULT;
1828 out:
1829 	if (qentry)
1830 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1831 	return rc;
1832 }
1833 
1834 /* unregister an rtoken at the remote peer */
1835 int smc_llc_do_delete_rkey(struct smc_link_group *lgr,
1836 			   struct smc_buf_desc *rmb_desc)
1837 {
1838 	struct smc_llc_qentry *qentry = NULL;
1839 	struct smc_link *send_link;
1840 	int rc = 0;
1841 
1842 	send_link = smc_llc_usable_link(lgr);
1843 	if (!send_link)
1844 		return -ENOLINK;
1845 
1846 	/* protected by llc_flow control */
1847 	rc = smc_llc_send_delete_rkey(send_link, rmb_desc);
1848 	if (rc)
1849 		goto out;
1850 	/* receive DELETE RKEY response from server over RoCE fabric */
1851 	qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
1852 			      SMC_LLC_DELETE_RKEY);
1853 	if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
1854 		rc = -EFAULT;
1855 out:
1856 	if (qentry)
1857 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1858 	return rc;
1859 }
1860 
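/* derive the local link user id from the link group id and the link id */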
1861 void smc_llc_link_set_uid(struct smc_link *link)
1862 {
1863 	__be32 link_uid;
1864 
1865 	link_uid = htonl(*((u32 *)link->lgr->id) + link->link_id);
1866 	memcpy(link->link_uid, &link_uid, SMC_LGR_ID_SIZE);
1867 }
1868 
1869 /* save the peer's link user id, used for debug purposes */
1870 void smc_llc_save_peer_uid(struct smc_llc_qentry *qentry)
1871 {
1872 	memcpy(qentry->link->peer_link_uid, qentry->msg.confirm_link.link_uid,
1873 	       SMC_LGR_ID_SIZE);
1874 }
1875 
1876 /* evaluate confirm link request or response */
1877 int smc_llc_eval_conf_link(struct smc_llc_qentry *qentry,
1878 			   enum smc_llc_reqresp type)
1879 {
1880 	if (type == SMC_LLC_REQ) {	/* SMC server assigns link_id */
1881 		qentry->link->link_id = qentry->msg.confirm_link.link_num;
1882 		smc_llc_link_set_uid(qentry->link);
1883 	}
1884 	if (!(qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_NO_RMBE_EYEC))
1885 		return -ENOTSUPP;
1886 	return 0;
1887 }
1888 
1889 /***************************** init, exit, misc ******************************/
1890 
1891 static struct smc_wr_rx_handler smc_llc_rx_handlers[] = {
1892 	{
1893 		.handler	= smc_llc_rx_handler,
1894 		.type		= SMC_LLC_CONFIRM_LINK
1895 	},
1896 	{
1897 		.handler	= smc_llc_rx_handler,
1898 		.type		= SMC_LLC_TEST_LINK
1899 	},
1900 	{
1901 		.handler	= smc_llc_rx_handler,
1902 		.type		= SMC_LLC_ADD_LINK
1903 	},
1904 	{
1905 		.handler	= smc_llc_rx_handler,
1906 		.type		= SMC_LLC_ADD_LINK_CONT
1907 	},
1908 	{
1909 		.handler	= smc_llc_rx_handler,
1910 		.type		= SMC_LLC_DELETE_LINK
1911 	},
1912 	{
1913 		.handler	= smc_llc_rx_handler,
1914 		.type		= SMC_LLC_CONFIRM_RKEY
1915 	},
1916 	{
1917 		.handler	= smc_llc_rx_handler,
1918 		.type		= SMC_LLC_CONFIRM_RKEY_CONT
1919 	},
1920 	{
1921 		.handler	= smc_llc_rx_handler,
1922 		.type		= SMC_LLC_DELETE_RKEY
1923 	},
1924 	{
1925 		.handler	= NULL,
1926 	}
1927 };
1928 
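/* register an rx handler for each LLC message type with the smc_wr layer */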
1929 int __init smc_llc_init(void)
1930 {
1931 	struct smc_wr_rx_handler *handler;
1932 	int rc = 0;
1933 
1934 	for (handler = smc_llc_rx_handlers; handler->handler; handler++) {
1935 		INIT_HLIST_NODE(&handler->list);
1936 		rc = smc_wr_rx_register_handler(handler);
1937 		if (rc)
1938 			break;
1939 	}
1940 	return rc;
1941 }
1942