xref: /linux/net/smc/smc_llc.c (revision 9406b485dea5e25bed7c81cd822747d494cc8bde)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Shared Memory Communications over RDMA (SMC-R) and RoCE
4  *
5  *  Link Layer Control (LLC)
6  *
7  *  Copyright IBM Corp. 2016
8  *
9  *  Author(s):  Klaus Wacker <Klaus.Wacker@de.ibm.com>
10  *              Ursula Braun <ubraun@linux.vnet.ibm.com>
11  */
12 
13 #include <net/tcp.h>
14 #include <rdma/ib_verbs.h>
15 
16 #include "smc.h"
17 #include "smc_core.h"
18 #include "smc_clc.h"
19 #include "smc_llc.h"
20 #include "smc_pnet.h"
21 
22 #define SMC_LLC_DATA_LEN		40
23 
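/* each LLC message has a fixed length of 44 bytes: a 4 byte header
 * (struct smc_llc_hdr) followed by SMC_LLC_DATA_LEN bytes of message data
 */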
24 struct smc_llc_hdr {
25 	struct smc_wr_rx_hdr common;
26 	u8 length;	/* 44 */
27 #if defined(__BIG_ENDIAN_BITFIELD)
28 	u8 reserved:4,
29 	   add_link_rej_rsn:4;
30 #elif defined(__LITTLE_ENDIAN_BITFIELD)
31 	u8 add_link_rej_rsn:4,
32 	   reserved:4;
33 #endif
34 	u8 flags;
35 };
36 
37 #define SMC_LLC_FLAG_NO_RMBE_EYEC	0x03
38 
39 struct smc_llc_msg_confirm_link {	/* type 0x01 */
40 	struct smc_llc_hdr hd;
41 	u8 sender_mac[ETH_ALEN];
42 	u8 sender_gid[SMC_GID_SIZE];
43 	u8 sender_qp_num[3];
44 	u8 link_num;
45 	u8 link_uid[SMC_LGR_ID_SIZE];
46 	u8 max_links;
47 	u8 reserved[9];
48 };
49 
50 #define SMC_LLC_FLAG_ADD_LNK_REJ	0x40
51 #define SMC_LLC_REJ_RSN_NO_ALT_PATH	1
52 
53 #define SMC_LLC_ADD_LNK_MAX_LINKS	2
54 
55 struct smc_llc_msg_add_link {		/* type 0x02 */
56 	struct smc_llc_hdr hd;
57 	u8 sender_mac[ETH_ALEN];
58 	u8 reserved2[2];
59 	u8 sender_gid[SMC_GID_SIZE];
60 	u8 sender_qp_num[3];
61 	u8 link_num;
62 #if defined(__BIG_ENDIAN_BITFIELD)
63 	u8 reserved3 : 4,
64 	   qp_mtu   : 4;
65 #elif defined(__LITTLE_ENDIAN_BITFIELD)
66 	u8 qp_mtu   : 4,
67 	   reserved3 : 4;
68 #endif
69 	u8 initial_psn[3];
70 	u8 reserved[8];
71 };
72 
73 struct smc_llc_msg_add_link_cont_rt {
74 	__be32 rmb_key;
75 	__be32 rmb_key_new;
76 	__be64 rmb_vaddr_new;
77 };
78 
79 #define SMC_LLC_RKEYS_PER_CONT_MSG	2
80 
81 struct smc_llc_msg_add_link_cont {	/* type 0x03 */
82 	struct smc_llc_hdr hd;
83 	u8 link_num;
84 	u8 num_rkeys;
85 	u8 reserved2[2];
86 	struct smc_llc_msg_add_link_cont_rt rt[SMC_LLC_RKEYS_PER_CONT_MSG];
87 	u8 reserved[4];
88 } __packed;			/* format defined in RFC7609 */
89 
90 #define SMC_LLC_FLAG_DEL_LINK_ALL	0x40
91 #define SMC_LLC_FLAG_DEL_LINK_ORDERLY	0x20
92 
93 struct smc_llc_msg_del_link {		/* type 0x04 */
94 	struct smc_llc_hdr hd;
95 	u8 link_num;
96 	__be32 reason;
97 	u8 reserved[35];
98 } __packed;			/* format defined in RFC7609 */
99 
100 struct smc_llc_msg_test_link {		/* type 0x07 */
101 	struct smc_llc_hdr hd;
102 	u8 user_data[16];
103 	u8 reserved[24];
104 };
105 
106 struct smc_rmb_rtoken {
107 	union {
108 		u8 num_rkeys;	/* first rtoken byte of CONFIRM LINK msg */
109 				/* is actually the num of rtokens, first */
110 				/* rtoken is always for the current link */
111 		u8 link_id;	/* link id of the rtoken */
112 	};
113 	__be32 rmb_key;
114 	__be64 rmb_vaddr;
115 } __packed;			/* format defined in RFC7609 */
116 
117 #define SMC_LLC_RKEYS_PER_MSG	3
118 
119 struct smc_llc_msg_confirm_rkey {	/* type 0x06 */
120 	struct smc_llc_hdr hd;
121 	struct smc_rmb_rtoken rtoken[SMC_LLC_RKEYS_PER_MSG];
122 	u8 reserved;
123 };
124 
125 #define SMC_LLC_DEL_RKEY_MAX	8
126 #define SMC_LLC_FLAG_RKEY_RETRY	0x10
127 #define SMC_LLC_FLAG_RKEY_NEG	0x20
128 
129 struct smc_llc_msg_delete_rkey {	/* type 0x09 */
130 	struct smc_llc_hdr hd;
131 	u8 num_rkeys;
132 	u8 err_mask;
133 	u8 reserved[2];
134 	__be32 rkey[8];
135 	u8 reserved2[4];
136 };
137 
138 union smc_llc_msg {
139 	struct smc_llc_msg_confirm_link confirm_link;
140 	struct smc_llc_msg_add_link add_link;
141 	struct smc_llc_msg_add_link_cont add_link_cont;
142 	struct smc_llc_msg_del_link delete_link;
143 
144 	struct smc_llc_msg_confirm_rkey confirm_rkey;
145 	struct smc_llc_msg_delete_rkey delete_rkey;
146 
147 	struct smc_llc_msg_test_link test_link;
148 	struct {
149 		struct smc_llc_hdr hdr;
150 		u8 data[SMC_LLC_DATA_LEN];
151 	} raw;
152 };
153 
154 #define SMC_LLC_FLAG_RESP		0x80
155 
156 struct smc_llc_qentry {
157 	struct list_head list;
158 	struct smc_link *link;
159 	union smc_llc_msg msg;
160 };
161 
162 static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc);
163 
164 struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow)
165 {
166 	struct smc_llc_qentry *qentry = flow->qentry;
167 
168 	flow->qentry = NULL;
169 	return qentry;
170 }
171 
172 void smc_llc_flow_qentry_del(struct smc_llc_flow *flow)
173 {
174 	struct smc_llc_qentry *qentry;
175 
176 	if (flow->qentry) {
177 		qentry = flow->qentry;
178 		flow->qentry = NULL;
179 		kfree(qentry);
180 	}
181 }
182 
183 static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow,
184 					   struct smc_llc_qentry *qentry)
185 {
186 	flow->qentry = qentry;
187 }
188 
189 /* try to start a new llc flow, initiated by an incoming llc msg */
190 static bool smc_llc_flow_start(struct smc_llc_flow *flow,
191 			       struct smc_llc_qentry *qentry)
192 {
193 	struct smc_link_group *lgr = qentry->link->lgr;
194 
195 	spin_lock_bh(&lgr->llc_flow_lock);
196 	if (flow->type) {
197 		/* a flow is already active */
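		/* a single ADD_LINK or DELETE_LINK request is parked in
		 * delayed_event and handled by smc_llc_event_work() once
		 * the running flow has stopped; any other request that
		 * arrives while a flow is active is dropped
		 */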
198 		if ((qentry->msg.raw.hdr.common.type == SMC_LLC_ADD_LINK ||
199 		     qentry->msg.raw.hdr.common.type == SMC_LLC_DELETE_LINK) &&
200 		    !lgr->delayed_event) {
201 			lgr->delayed_event = qentry;
202 		} else {
203 			/* forget this llc request */
204 			kfree(qentry);
205 		}
206 		spin_unlock_bh(&lgr->llc_flow_lock);
207 		return false;
208 	}
209 	switch (qentry->msg.raw.hdr.common.type) {
210 	case SMC_LLC_ADD_LINK:
211 		flow->type = SMC_LLC_FLOW_ADD_LINK;
212 		break;
213 	case SMC_LLC_DELETE_LINK:
214 		flow->type = SMC_LLC_FLOW_DEL_LINK;
215 		break;
216 	case SMC_LLC_CONFIRM_RKEY:
217 	case SMC_LLC_DELETE_RKEY:
218 		flow->type = SMC_LLC_FLOW_RKEY;
219 		break;
220 	default:
221 		flow->type = SMC_LLC_FLOW_NONE;
222 	}
223 	if (qentry == lgr->delayed_event)
224 		lgr->delayed_event = NULL;
225 	spin_unlock_bh(&lgr->llc_flow_lock);
226 	smc_llc_flow_qentry_set(flow, qentry);
227 	return true;
228 }
229 
230 /* start a new local llc flow, wait till current flow finished */
231 int smc_llc_flow_initiate(struct smc_link_group *lgr,
232 			  enum smc_llc_flowtype type)
233 {
234 	enum smc_llc_flowtype allowed_remote = SMC_LLC_FLOW_NONE;
235 	int rc;
236 
237 	/* all flows except confirm_rkey and delete_rkey are exclusive,
238 	 * confirm/delete rkey flows can run concurrently (local and remote)
239 	 */
240 	if (type == SMC_LLC_FLOW_RKEY)
241 		allowed_remote = SMC_LLC_FLOW_RKEY;
242 again:
243 	if (list_empty(&lgr->list))
244 		return -ENODEV;
245 	spin_lock_bh(&lgr->llc_flow_lock);
246 	if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
247 	    (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
248 	     lgr->llc_flow_rmt.type == allowed_remote)) {
249 		lgr->llc_flow_lcl.type = type;
250 		spin_unlock_bh(&lgr->llc_flow_lock);
251 		return 0;
252 	}
253 	spin_unlock_bh(&lgr->llc_flow_lock);
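	/* another flow is running; wait (bounded by SMC_LLC_WAIT_TIME) for
	 * it to finish and retry; a return value of 0 means the wait timed
	 * out
	 */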
254 	rc = wait_event_interruptible_timeout(lgr->llc_waiter,
255 			(lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
256 			 (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
257 			  lgr->llc_flow_rmt.type == allowed_remote)),
258 			SMC_LLC_WAIT_TIME);
259 	if (!rc)
260 		return -ETIMEDOUT;
261 	goto again;
262 }
263 
264 /* finish the current llc flow */
265 void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow)
266 {
267 	spin_lock_bh(&lgr->llc_flow_lock);
268 	memset(flow, 0, sizeof(*flow));
269 	flow->type = SMC_LLC_FLOW_NONE;
270 	spin_unlock_bh(&lgr->llc_flow_lock);
271 	if (!list_empty(&lgr->list) && lgr->delayed_event &&
272 	    flow == &lgr->llc_flow_lcl)
273 		schedule_work(&lgr->llc_event_work);
274 	else
275 		wake_up_interruptible(&lgr->llc_waiter);
276 }
277 
278 /* lnk is optional and used for early wakeup when link goes down, useful in
279  * cases where we wait for a response on the link after we sent a request
280  */
281 struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
282 				    struct smc_link *lnk,
283 				    int time_out, u8 exp_msg)
284 {
285 	struct smc_llc_flow *flow = &lgr->llc_flow_lcl;
286 
287 	wait_event_interruptible_timeout(lgr->llc_waiter,
288 					 (flow->qentry ||
289 					  (lnk && !smc_link_usable(lnk)) ||
290 					  list_empty(&lgr->list)),
291 					 time_out);
292 	if (!flow->qentry ||
293 	    (lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) {
294 		smc_llc_flow_qentry_del(flow);
295 		goto out;
296 	}
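	/* an exp_msg of zero means any message type is accepted */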
297 	if (exp_msg && flow->qentry->msg.raw.hdr.common.type != exp_msg) {
298 		if (exp_msg == SMC_LLC_ADD_LINK &&
299 		    flow->qentry->msg.raw.hdr.common.type ==
300 		    SMC_LLC_DELETE_LINK) {
301 			/* flow_start will delay the unexpected msg */
302 			smc_llc_flow_start(&lgr->llc_flow_lcl,
303 					   smc_llc_flow_qentry_clr(flow));
304 			return NULL;
305 		}
306 		smc_llc_flow_qentry_del(flow);
307 	}
308 out:
309 	return flow->qentry;
310 }
311 
312 /********************************** send *************************************/
313 
314 struct smc_llc_tx_pend {
315 };
316 
317 /* handler for send/transmission completion of an LLC msg */
318 static void smc_llc_tx_handler(struct smc_wr_tx_pend_priv *pend,
319 			       struct smc_link *link,
320 			       enum ib_wc_status wc_status)
321 {
322 	/* future work: handle wc_status error for recovery and failover */
323 }
324 
325 /**
326  * smc_llc_add_pending_send() - add LLC control message to pending WQE transmits
327  * @link: Pointer to SMC link used for sending LLC control message.
328  * @wr_buf: Out variable returning pointer to work request payload buffer.
329  * @pend: Out variable returning pointer to private pending WR tracking.
330  *	  It's the context the transmit complete handler will get.
331  *
332  * Reserves and pre-fills an entry for a pending work request send/tx.
333  * Used by the mid-level smc_llc_send_* functions to prepare a later send/tx.
334  * Can sleep in smc_wr_tx_get_free_slot() (if not in softirq context).
335  *
336  * Return: 0 on success, otherwise an error value.
337  */
338 static int smc_llc_add_pending_send(struct smc_link *link,
339 				    struct smc_wr_buf **wr_buf,
340 				    struct smc_wr_tx_pend_priv **pend)
341 {
342 	int rc;
343 
344 	rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL,
345 				     pend);
346 	if (rc < 0)
347 		return rc;
348 	BUILD_BUG_ON_MSG(
349 		sizeof(union smc_llc_msg) > SMC_WR_BUF_SIZE,
350 		"must increase SMC_WR_BUF_SIZE to at least sizeof(union smc_llc_msg)");
351 	BUILD_BUG_ON_MSG(
352 		sizeof(union smc_llc_msg) != SMC_WR_TX_SIZE,
353 		"must adapt SMC_WR_TX_SIZE to sizeof(union smc_llc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
354 	BUILD_BUG_ON_MSG(
355 		sizeof(struct smc_llc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
356 		"must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_llc_tx_pend)");
357 	return 0;
358 }
359 
360 /* high-level API to send LLC confirm link */
361 int smc_llc_send_confirm_link(struct smc_link *link,
362 			      enum smc_llc_reqresp reqresp)
363 {
364 	struct smc_link_group *lgr = smc_get_lgr(link);
365 	struct smc_llc_msg_confirm_link *confllc;
366 	struct smc_wr_tx_pend_priv *pend;
367 	struct smc_wr_buf *wr_buf;
368 	int rc;
369 
370 	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
371 	if (rc)
372 		return rc;
373 	confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
374 	memset(confllc, 0, sizeof(*confllc));
375 	confllc->hd.common.type = SMC_LLC_CONFIRM_LINK;
376 	confllc->hd.length = sizeof(struct smc_llc_msg_confirm_link);
377 	confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC;
378 	if (reqresp == SMC_LLC_RESP)
379 		confllc->hd.flags |= SMC_LLC_FLAG_RESP;
380 	memcpy(confllc->sender_mac, link->smcibdev->mac[link->ibport - 1],
381 	       ETH_ALEN);
382 	memcpy(confllc->sender_gid, link->gid, SMC_GID_SIZE);
383 	hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
384 	confllc->link_num = link->link_id;
385 	memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE);
386 	confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS;
387 	/* send llc message */
388 	rc = smc_wr_tx_send(link, pend);
389 	return rc;
390 }
391 
392 /* send LLC confirm rkey request */
393 static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
394 				     struct smc_buf_desc *rmb_desc)
395 {
396 	struct smc_llc_msg_confirm_rkey *rkeyllc;
397 	struct smc_wr_tx_pend_priv *pend;
398 	struct smc_wr_buf *wr_buf;
399 	struct smc_link *link;
400 	int i, rc, rtok_ix;
401 
402 	rc = smc_llc_add_pending_send(send_link, &wr_buf, &pend);
403 	if (rc)
404 		return rc;
405 	rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf;
406 	memset(rkeyllc, 0, sizeof(*rkeyllc));
407 	rkeyllc->hd.common.type = SMC_LLC_CONFIRM_RKEY;
408 	rkeyllc->hd.length = sizeof(struct smc_llc_msg_confirm_rkey);
409 
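	/* rtoken[0] describes the RMB on send_link itself and carries the
	 * number of additional rtokens; rtoken[1..] describe the same RMB
	 * on all other active links
	 */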
410 	rtok_ix = 1;
411 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
412 		link = &send_link->lgr->lnk[i];
413 		if (link->state == SMC_LNK_ACTIVE && link != send_link) {
414 			rkeyllc->rtoken[rtok_ix].link_id = link->link_id;
415 			rkeyllc->rtoken[rtok_ix].rmb_key =
416 				htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
417 			rkeyllc->rtoken[rtok_ix].rmb_vaddr = cpu_to_be64(
418 				(u64)sg_dma_address(
419 					rmb_desc->sgt[link->link_idx].sgl));
420 			rtok_ix++;
421 		}
422 	}
423 	/* rkey of send_link is in rtoken[0] */
424 	rkeyllc->rtoken[0].num_rkeys = rtok_ix - 1;
425 	rkeyllc->rtoken[0].rmb_key =
426 		htonl(rmb_desc->mr_rx[send_link->link_idx]->rkey);
427 	rkeyllc->rtoken[0].rmb_vaddr = cpu_to_be64(
428 		(u64)sg_dma_address(rmb_desc->sgt[send_link->link_idx].sgl));
429 	/* send llc message */
430 	rc = smc_wr_tx_send(send_link, pend);
431 	return rc;
432 }
433 
434 /* send LLC delete rkey request */
435 static int smc_llc_send_delete_rkey(struct smc_link *link,
436 				    struct smc_buf_desc *rmb_desc)
437 {
438 	struct smc_llc_msg_delete_rkey *rkeyllc;
439 	struct smc_wr_tx_pend_priv *pend;
440 	struct smc_wr_buf *wr_buf;
441 	int rc;
442 
443 	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
444 	if (rc)
445 		return rc;
446 	rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
447 	memset(rkeyllc, 0, sizeof(*rkeyllc));
448 	rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY;
449 	rkeyllc->hd.length = sizeof(struct smc_llc_msg_delete_rkey);
450 	rkeyllc->num_rkeys = 1;
451 	rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
452 	/* send llc message */
453 	rc = smc_wr_tx_send(link, pend);
454 	return rc;
455 }
456 
457 /* send ADD LINK request or response */
458 int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
459 			  struct smc_link *link_new,
460 			  enum smc_llc_reqresp reqresp)
461 {
462 	struct smc_llc_msg_add_link *addllc;
463 	struct smc_wr_tx_pend_priv *pend;
464 	struct smc_wr_buf *wr_buf;
465 	int rc;
466 
467 	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
468 	if (rc)
469 		return rc;
470 	addllc = (struct smc_llc_msg_add_link *)wr_buf;
471 
472 	memset(addllc, 0, sizeof(*addllc));
473 	addllc->hd.common.type = SMC_LLC_ADD_LINK;
474 	addllc->hd.length = sizeof(struct smc_llc_msg_add_link);
475 	if (reqresp == SMC_LLC_RESP)
476 		addllc->hd.flags |= SMC_LLC_FLAG_RESP;
477 	memcpy(addllc->sender_mac, mac, ETH_ALEN);
478 	memcpy(addllc->sender_gid, gid, SMC_GID_SIZE);
479 	if (link_new) {
480 		addllc->link_num = link_new->link_id;
481 		hton24(addllc->sender_qp_num, link_new->roce_qp->qp_num);
482 		hton24(addllc->initial_psn, link_new->psn_initial);
483 		if (reqresp == SMC_LLC_REQ)
484 			addllc->qp_mtu = link_new->path_mtu;
485 		else
486 			addllc->qp_mtu = min(link_new->path_mtu,
487 					     link_new->peer_mtu);
488 	}
489 	/* send llc message */
490 	rc = smc_wr_tx_send(link, pend);
491 	return rc;
492 }
493 
494 /* send DELETE LINK request or response */
495 int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
496 			     enum smc_llc_reqresp reqresp, bool orderly,
497 			     u32 reason)
498 {
499 	struct smc_llc_msg_del_link *delllc;
500 	struct smc_wr_tx_pend_priv *pend;
501 	struct smc_wr_buf *wr_buf;
502 	int rc;
503 
504 	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
505 	if (rc)
506 		return rc;
507 	delllc = (struct smc_llc_msg_del_link *)wr_buf;
508 
509 	memset(delllc, 0, sizeof(*delllc));
510 	delllc->hd.common.type = SMC_LLC_DELETE_LINK;
511 	delllc->hd.length = sizeof(struct smc_llc_msg_del_link);
512 	if (reqresp == SMC_LLC_RESP)
513 		delllc->hd.flags |= SMC_LLC_FLAG_RESP;
514 	if (orderly)
515 		delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
516 	if (link_del_id)
517 		delllc->link_num = link_del_id;
518 	else
519 		delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
520 	delllc->reason = htonl(reason);
521 	/* send llc message */
522 	rc = smc_wr_tx_send(link, pend);
523 	return rc;
524 }
525 
526 /* send LLC test link request */
527 static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
528 {
529 	struct smc_llc_msg_test_link *testllc;
530 	struct smc_wr_tx_pend_priv *pend;
531 	struct smc_wr_buf *wr_buf;
532 	int rc;
533 
534 	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
535 	if (rc)
536 		return rc;
537 	testllc = (struct smc_llc_msg_test_link *)wr_buf;
538 	memset(testllc, 0, sizeof(*testllc));
539 	testllc->hd.common.type = SMC_LLC_TEST_LINK;
540 	testllc->hd.length = sizeof(struct smc_llc_msg_test_link);
541 	memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
542 	/* send llc message */
543 	rc = smc_wr_tx_send(link, pend);
544 	return rc;
545 }
546 
547 /* schedule an llc send on link, may wait for buffers */
548 static int smc_llc_send_message(struct smc_link *link, void *llcbuf)
549 {
550 	struct smc_wr_tx_pend_priv *pend;
551 	struct smc_wr_buf *wr_buf;
552 	int rc;
553 
554 	if (!smc_link_usable(link))
555 		return -ENOLINK;
556 	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
557 	if (rc)
558 		return rc;
559 	memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
560 	return smc_wr_tx_send(link, pend);
561 }
562 
563 /********************************* receive ***********************************/
564 
565 static int smc_llc_alloc_alt_link(struct smc_link_group *lgr,
566 				  enum smc_lgr_type lgr_new_t)
567 {
568 	int i;
569 
570 	if (lgr->type == SMC_LGR_SYMMETRIC ||
571 	    (lgr->type != SMC_LGR_SINGLE &&
572 	     (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
573 	      lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)))
574 		return -EMLINK;
575 
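	/* an asymmetric link gets the highest unused link index, any other
	 * new link the lowest one
	 */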
576 	if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
577 	    lgr_new_t == SMC_LGR_ASYMMETRIC_PEER) {
578 		for (i = SMC_LINKS_PER_LGR_MAX - 1; i >= 0; i--)
579 			if (lgr->lnk[i].state == SMC_LNK_UNUSED)
580 				return i;
581 	} else {
582 		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
583 			if (lgr->lnk[i].state == SMC_LNK_UNUSED)
584 				return i;
585 	}
586 	return -EMLINK;
587 }
588 
589 /* return first buffer from any of the next buf lists */
590 static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr,
591 						  int *buf_lst)
592 {
593 	struct smc_buf_desc *buf_pos;
594 
595 	while (*buf_lst < SMC_RMBE_SIZES) {
596 		buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst],
597 						   struct smc_buf_desc, list);
598 		if (buf_pos)
599 			return buf_pos;
600 		(*buf_lst)++;
601 	}
602 	return NULL;
603 }
604 
605 /* return next rmb from buffer lists */
606 static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr,
607 						 int *buf_lst,
608 						 struct smc_buf_desc *buf_pos)
609 {
610 	struct smc_buf_desc *buf_next;
611 
612 	if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
613 		(*buf_lst)++;
614 		return _smc_llc_get_next_rmb(lgr, buf_lst);
615 	}
616 	buf_next = list_next_entry(buf_pos, list);
617 	return buf_next;
618 }
619 
620 static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr,
621 						  int *buf_lst)
622 {
623 	*buf_lst = 0;
624 	return smc_llc_get_next_rmb(lgr, buf_lst, NULL);
625 }
626 
627 /* send one add_link_continue msg */
628 static int smc_llc_add_link_cont(struct smc_link *link,
629 				 struct smc_link *link_new, u8 *num_rkeys_todo,
630 				 int *buf_lst, struct smc_buf_desc **buf_pos)
631 {
632 	struct smc_llc_msg_add_link_cont *addc_llc;
633 	struct smc_link_group *lgr = link->lgr;
634 	int prim_lnk_idx, lnk_idx, i, rc;
635 	struct smc_wr_tx_pend_priv *pend;
636 	struct smc_wr_buf *wr_buf;
637 	struct smc_buf_desc *rmb;
638 	u8 n;
639 
640 	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
641 	if (rc)
642 		return rc;
643 	addc_llc = (struct smc_llc_msg_add_link_cont *)wr_buf;
644 	memset(addc_llc, 0, sizeof(*addc_llc));
645 
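	/* one ADD_LINK_CONT message carries at most
	 * SMC_LLC_RKEYS_PER_CONT_MSG rkey pairs (rkey on this link, new
	 * rkey and vaddr on the new link); num_rkeys holds the number of
	 * rkeys still to be sent, including the ones in this message
	 */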
646 	prim_lnk_idx = link->link_idx;
647 	lnk_idx = link_new->link_idx;
648 	addc_llc->link_num = link_new->link_id;
649 	addc_llc->num_rkeys = *num_rkeys_todo;
650 	n = *num_rkeys_todo;
651 	for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) {
652 		if (!*buf_pos) {
653 			addc_llc->num_rkeys = addc_llc->num_rkeys -
654 					      *num_rkeys_todo;
655 			*num_rkeys_todo = 0;
656 			break;
657 		}
658 		rmb = *buf_pos;
659 
660 		addc_llc->rt[i].rmb_key = htonl(rmb->mr_rx[prim_lnk_idx]->rkey);
661 		addc_llc->rt[i].rmb_key_new = htonl(rmb->mr_rx[lnk_idx]->rkey);
662 		addc_llc->rt[i].rmb_vaddr_new =
663 			cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));
664 
665 		(*num_rkeys_todo)--;
666 		*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
667 		while (*buf_pos && !(*buf_pos)->used)
668 			*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
669 	}
670 	addc_llc->hd.common.type = SMC_LLC_ADD_LINK_CONT;
671 	addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
672 	if (lgr->role == SMC_CLNT)
673 		addc_llc->hd.flags |= SMC_LLC_FLAG_RESP;
674 	return smc_wr_tx_send(link, pend);
675 }
676 
677 static int smc_llc_cli_rkey_exchange(struct smc_link *link,
678 				     struct smc_link *link_new)
679 {
680 	struct smc_llc_msg_add_link_cont *addc_llc;
681 	struct smc_link_group *lgr = link->lgr;
682 	u8 max, num_rkeys_send, num_rkeys_recv;
683 	struct smc_llc_qentry *qentry;
684 	struct smc_buf_desc *buf_pos;
685 	int buf_lst;
686 	int rc = 0;
687 	int i;
688 
689 	mutex_lock(&lgr->rmbs_lock);
690 	num_rkeys_send = lgr->conns_num;
691 	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
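	/* as client: wait for the server's ADD_LINK_CONT, store the
	 * received rtokens, then answer with an own ADD_LINK_CONT, until
	 * both sides have exchanged all rkeys
	 */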
692 	do {
693 		qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_TIME,
694 				      SMC_LLC_ADD_LINK_CONT);
695 		if (!qentry) {
696 			rc = -ETIMEDOUT;
697 			break;
698 		}
699 		addc_llc = &qentry->msg.add_link_cont;
700 		num_rkeys_recv = addc_llc->num_rkeys;
701 		max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
702 		for (i = 0; i < max; i++) {
703 			smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
704 				       addc_llc->rt[i].rmb_key,
705 				       addc_llc->rt[i].rmb_vaddr_new,
706 				       addc_llc->rt[i].rmb_key_new);
707 			num_rkeys_recv--;
708 		}
709 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
710 		rc = smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
711 					   &buf_lst, &buf_pos);
712 		if (rc)
713 			break;
714 	} while (num_rkeys_send || num_rkeys_recv);
715 
716 	mutex_unlock(&lgr->rmbs_lock);
717 	return rc;
718 }
719 
720 /* prepare and send an add link reject response */
721 static int smc_llc_cli_add_link_reject(struct smc_llc_qentry *qentry)
722 {
723 	qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
724 	qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_ADD_LNK_REJ;
725 	qentry->msg.raw.hdr.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH;
726 	return smc_llc_send_message(qentry->link, &qentry->msg);
727 }
728 
729 static int smc_llc_cli_conf_link(struct smc_link *link,
730 				 struct smc_init_info *ini,
731 				 struct smc_link *link_new,
732 				 enum smc_lgr_type lgr_new_t)
733 {
734 	struct smc_link_group *lgr = link->lgr;
735 	struct smc_llc_msg_del_link *del_llc;
736 	struct smc_llc_qentry *qentry = NULL;
737 	int rc = 0;
738 
739 	/* receive CONFIRM LINK request over RoCE fabric */
740 	qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_FIRST_TIME, 0);
741 	if (!qentry) {
742 		rc = smc_llc_send_delete_link(link, link_new->link_id,
743 					      SMC_LLC_REQ, false,
744 					      SMC_LLC_DEL_LOST_PATH);
745 		return -ENOLINK;
746 	}
747 	if (qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) {
748 		/* received DELETE_LINK instead */
749 		del_llc = &qentry->msg.delete_link;
750 		qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
751 		smc_llc_send_message(link, &qentry->msg);
752 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
753 		return -ENOLINK;
754 	}
755 	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
756 
757 	rc = smc_ib_modify_qp_rts(link_new);
758 	if (rc) {
759 		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
760 					 false, SMC_LLC_DEL_LOST_PATH);
761 		return -ENOLINK;
762 	}
763 	smc_wr_remember_qp_attr(link_new);
764 
765 	rc = smcr_buf_reg_lgr(link_new);
766 	if (rc) {
767 		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
768 					 false, SMC_LLC_DEL_LOST_PATH);
769 		return -ENOLINK;
770 	}
771 
772 	/* send CONFIRM LINK response over RoCE fabric */
773 	rc = smc_llc_send_confirm_link(link_new, SMC_LLC_RESP);
774 	if (rc) {
775 		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
776 					 false, SMC_LLC_DEL_LOST_PATH);
777 		return -ENOLINK;
778 	}
779 	smc_llc_link_active(link_new);
780 	lgr->type = lgr_new_t;
781 	return 0;
782 }
783 
784 static void smc_llc_save_add_link_info(struct smc_link *link,
785 				       struct smc_llc_msg_add_link *add_llc)
786 {
787 	link->peer_qpn = ntoh24(add_llc->sender_qp_num);
788 	memcpy(link->peer_gid, add_llc->sender_gid, SMC_GID_SIZE);
789 	memcpy(link->peer_mac, add_llc->sender_mac, ETH_ALEN);
790 	link->peer_psn = ntoh24(add_llc->initial_psn);
791 	link->peer_mtu = add_llc->qp_mtu;
792 }
793 
794 /* as an SMC client, process an add link request */
795 int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
796 {
797 	struct smc_llc_msg_add_link *llc = &qentry->msg.add_link;
798 	enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
799 	struct smc_link_group *lgr = smc_get_lgr(link);
800 	struct smc_link *lnk_new = NULL;
801 	struct smc_init_info ini;
802 	int lnk_idx, rc = 0;
803 
804 	ini.vlan_id = lgr->vlan_id;
805 	smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
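	/* determine the new link group type: if the peer announces the same
	 * GID and MAC again, the group becomes asymmetric on the peer side;
	 * if no alternate local device is found, the current device is
	 * reused and the group becomes asymmetric on the local side; if
	 * both apply, there is no alternate path and the request is rejected
	 */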
806 	if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
807 	    !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN)) {
808 		if (!ini.ib_dev)
809 			goto out_reject;
810 		lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
811 	}
812 	if (!ini.ib_dev) {
813 		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
814 		ini.ib_dev = link->smcibdev;
815 		ini.ib_port = link->ibport;
816 	}
817 	lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
818 	if (lnk_idx < 0)
819 		goto out_reject;
820 	lnk_new = &lgr->lnk[lnk_idx];
821 	rc = smcr_link_init(lgr, lnk_new, lnk_idx, &ini);
822 	if (rc)
823 		goto out_reject;
824 	smc_llc_save_add_link_info(lnk_new, llc);
825 	lnk_new->link_id = llc->link_num;
826 
827 	rc = smc_ib_ready_link(lnk_new);
828 	if (rc)
829 		goto out_clear_lnk;
830 
831 	rc = smcr_buf_map_lgr(lnk_new);
832 	if (rc)
833 		goto out_clear_lnk;
834 
835 	rc = smc_llc_send_add_link(link,
836 				   lnk_new->smcibdev->mac[ini.ib_port - 1],
837 				   lnk_new->gid, lnk_new, SMC_LLC_RESP);
838 	if (rc)
839 		goto out_clear_lnk;
840 	rc = smc_llc_cli_rkey_exchange(link, lnk_new);
841 	if (rc) {
842 		rc = 0;
843 		goto out_clear_lnk;
844 	}
845 	rc = smc_llc_cli_conf_link(link, &ini, lnk_new, lgr_new_t);
846 	if (!rc)
847 		goto out;
848 out_clear_lnk:
849 	smcr_link_clear(lnk_new);
850 out_reject:
851 	smc_llc_cli_add_link_reject(qentry);
852 out:
853 	kfree(qentry);
854 	return rc;
855 }
856 
857 static void smc_llc_process_cli_add_link(struct smc_link_group *lgr)
858 {
859 	struct smc_llc_qentry *qentry;
860 
861 	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
862 
863 	mutex_lock(&lgr->llc_conf_mutex);
864 	smc_llc_cli_add_link(qentry->link, qentry);
865 	mutex_unlock(&lgr->llc_conf_mutex);
866 }
867 
868 static int smc_llc_active_link_count(struct smc_link_group *lgr)
869 {
870 	int i, link_count = 0;
871 
872 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
873 		if (!smc_link_usable(&lgr->lnk[i]))
874 			continue;
875 		link_count++;
876 	}
877 	return link_count;
878 }
879 
880 /* find the asymmetric link when 3 links are established */
881 static struct smc_link *smc_llc_find_asym_link(struct smc_link_group *lgr)
882 {
883 	int asym_idx = -ENOENT;
884 	int i, j, k;
885 	bool found;
886 
887 	/* determine asymmetric link */
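	/* step 1: find two usable links that share the same local GID;
	 * step 2: the one of them whose peer GID is also used by a third
	 * link is the asymmetric link
	 */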
888 	found = false;
889 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
890 		for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
891 			if (!smc_link_usable(&lgr->lnk[i]) ||
892 			    !smc_link_usable(&lgr->lnk[j]))
893 				continue;
894 			if (!memcmp(lgr->lnk[i].gid, lgr->lnk[j].gid,
895 				    SMC_GID_SIZE)) {
896 				found = true;	/* asym_lnk is i or j */
897 				break;
898 			}
899 		}
900 		if (found)
901 			break;
902 	}
903 	if (!found)
904 		goto out; /* no asymmetric link */
905 	for (k = 0; k < SMC_LINKS_PER_LGR_MAX; k++) {
906 		if (!smc_link_usable(&lgr->lnk[k]))
907 			continue;
908 		if (k != i &&
909 		    !memcmp(lgr->lnk[i].peer_gid, lgr->lnk[k].peer_gid,
910 			    SMC_GID_SIZE)) {
911 			asym_idx = i;
912 			break;
913 		}
914 		if (k != j &&
915 		    !memcmp(lgr->lnk[j].peer_gid, lgr->lnk[k].peer_gid,
916 			    SMC_GID_SIZE)) {
917 			asym_idx = j;
918 			break;
919 		}
920 	}
921 out:
922 	return (asym_idx < 0) ? NULL : &lgr->lnk[asym_idx];
923 }
924 
925 static void smc_llc_delete_asym_link(struct smc_link_group *lgr)
926 {
927 	struct smc_link *lnk_new = NULL, *lnk_asym;
928 	struct smc_llc_qentry *qentry;
929 	int rc;
930 
931 	lnk_asym = smc_llc_find_asym_link(lgr);
932 	if (!lnk_asym)
933 		return; /* no asymmetric link */
934 	if (!smc_link_downing(&lnk_asym->state))
935 		return;
936 	/* tbd: lnk_new = smc_switch_conns(lgr, lnk_asym, false); */
937 	smc_wr_tx_wait_no_pending_sends(lnk_asym);
938 	if (!lnk_new)
939 		goto out_free;
940 	/* change flow type from ADD_LINK into DEL_LINK */
941 	lgr->llc_flow_lcl.type = SMC_LLC_FLOW_DEL_LINK;
942 	rc = smc_llc_send_delete_link(lnk_new, lnk_asym->link_id, SMC_LLC_REQ,
943 				      true, SMC_LLC_DEL_NO_ASYM_NEEDED);
944 	if (rc) {
945 		smcr_link_down_cond(lnk_new);
946 		goto out_free;
947 	}
948 	qentry = smc_llc_wait(lgr, lnk_new, SMC_LLC_WAIT_TIME,
949 			      SMC_LLC_DELETE_LINK);
950 	if (!qentry) {
951 		smcr_link_down_cond(lnk_new);
952 		goto out_free;
953 	}
954 	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
955 out_free:
956 	smcr_link_clear(lnk_asym);
957 }
958 
959 static int smc_llc_srv_rkey_exchange(struct smc_link *link,
960 				     struct smc_link *link_new)
961 {
962 	struct smc_llc_msg_add_link_cont *addc_llc;
963 	struct smc_link_group *lgr = link->lgr;
964 	u8 max, num_rkeys_send, num_rkeys_recv;
965 	struct smc_llc_qentry *qentry = NULL;
966 	struct smc_buf_desc *buf_pos;
967 	int buf_lst;
968 	int rc = 0;
969 	int i;
970 
971 	mutex_lock(&lgr->rmbs_lock);
972 	num_rkeys_send = lgr->conns_num;
973 	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
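	/* as server: send an ADD_LINK_CONT with our rkeys first, then wait
	 * for the client's ADD_LINK_CONT and store the received rtokens,
	 * until both sides have exchanged all rkeys
	 */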
974 	do {
975 		smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
976 				      &buf_lst, &buf_pos);
977 		qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME,
978 				      SMC_LLC_ADD_LINK_CONT);
979 		if (!qentry) {
980 			rc = -ETIMEDOUT;
981 			goto out;
982 		}
983 		addc_llc = &qentry->msg.add_link_cont;
984 		num_rkeys_recv = addc_llc->num_rkeys;
985 		max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
986 		for (i = 0; i < max; i++) {
987 			smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
988 				       addc_llc->rt[i].rmb_key,
989 				       addc_llc->rt[i].rmb_vaddr_new,
990 				       addc_llc->rt[i].rmb_key_new);
991 			num_rkeys_recv--;
992 		}
993 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
994 	} while (num_rkeys_send || num_rkeys_recv);
995 out:
996 	mutex_unlock(&lgr->rmbs_lock);
997 	return rc;
998 }
999 
1000 static int smc_llc_srv_conf_link(struct smc_link *link,
1001 				 struct smc_link *link_new,
1002 				 enum smc_lgr_type lgr_new_t)
1003 {
1004 	struct smc_link_group *lgr = link->lgr;
1005 	struct smc_llc_qentry *qentry = NULL;
1006 	int rc;
1007 
1008 	/* send CONFIRM LINK request over the RoCE fabric */
1009 	rc = smc_llc_send_confirm_link(link_new, SMC_LLC_REQ);
1010 	if (rc)
1011 		return -ENOLINK;
1012 	/* receive CONFIRM LINK response over the RoCE fabric */
1013 	qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME,
1014 			      SMC_LLC_CONFIRM_LINK);
1015 	if (!qentry) {
1016 		/* send DELETE LINK */
1017 		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
1018 					 false, SMC_LLC_DEL_LOST_PATH);
1019 		return -ENOLINK;
1020 	}
1021 	smc_llc_link_active(link_new);
1022 	lgr->type = lgr_new_t;
1023 	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1024 	return 0;
1025 }
1026 
1027 int smc_llc_srv_add_link(struct smc_link *link)
1028 {
1029 	enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
1030 	struct smc_link_group *lgr = link->lgr;
1031 	struct smc_llc_msg_add_link *add_llc;
1032 	struct smc_llc_qentry *qentry = NULL;
1033 	struct smc_link *link_new;
1034 	struct smc_init_info ini;
1035 	int lnk_idx, rc = 0;
1036 
1037 	/* ignore client add link recommendation, start new flow */
1038 	ini.vlan_id = lgr->vlan_id;
1039 	smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
1040 	if (!ini.ib_dev) {
1041 		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
1042 		ini.ib_dev = link->smcibdev;
1043 		ini.ib_port = link->ibport;
1044 	}
1045 	lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
1046 	if (lnk_idx < 0)
1047 		return 0;
1048 
1049 	rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, &ini);
1050 	if (rc)
1051 		return rc;
1052 	link_new = &lgr->lnk[lnk_idx];
1053 	rc = smc_llc_send_add_link(link,
1054 				   link_new->smcibdev->mac[ini.ib_port - 1],
1055 				   link_new->gid, link_new, SMC_LLC_REQ);
1056 	if (rc)
1057 		goto out_err;
1058 	/* receive ADD LINK response over the RoCE fabric */
1059 	qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME, SMC_LLC_ADD_LINK);
1060 	if (!qentry) {
1061 		rc = -ETIMEDOUT;
1062 		goto out_err;
1063 	}
1064 	add_llc = &qentry->msg.add_link;
1065 	if (add_llc->hd.flags & SMC_LLC_FLAG_ADD_LNK_REJ) {
1066 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1067 		rc = -ENOLINK;
1068 		goto out_err;
1069 	}
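	/* the peer announces the same GID and MAC it already uses on the
	 * existing link, so the new link group is asymmetric on the peer
	 * side
	 */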
1070 	if (lgr->type == SMC_LGR_SINGLE &&
1071 	    (!memcmp(add_llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
1072 	     !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN))) {
1073 		lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
1074 	}
1075 	smc_llc_save_add_link_info(link_new, add_llc);
1076 	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1077 
1078 	rc = smc_ib_ready_link(link_new);
1079 	if (rc)
1080 		goto out_err;
1081 	rc = smcr_buf_map_lgr(link_new);
1082 	if (rc)
1083 		goto out_err;
1084 	rc = smcr_buf_reg_lgr(link_new);
1085 	if (rc)
1086 		goto out_err;
1087 	rc = smc_llc_srv_rkey_exchange(link, link_new);
1088 	if (rc)
1089 		goto out_err;
1090 	rc = smc_llc_srv_conf_link(link, link_new, lgr_new_t);
1091 	if (rc)
1092 		goto out_err;
1093 	return 0;
1094 out_err:
1095 	smcr_link_clear(link_new);
1096 	return rc;
1097 }
1098 
1099 static void smc_llc_process_srv_add_link(struct smc_link_group *lgr)
1100 {
1101 	struct smc_link *link = lgr->llc_flow_lcl.qentry->link;
1102 	int rc;
1103 
1104 	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1105 
1106 	mutex_lock(&lgr->llc_conf_mutex);
1107 	rc = smc_llc_srv_add_link(link);
1108 	if (!rc && lgr->type == SMC_LGR_SYMMETRIC) {
1109 		/* delete any asymmetric link */
1110 		smc_llc_delete_asym_link(lgr);
1111 	}
1112 	mutex_unlock(&lgr->llc_conf_mutex);
1113 }
1114 
1115 /* enqueue a local add_link req to trigger a new add_link flow, only as SERV */
1116 void smc_llc_srv_add_link_local(struct smc_link *link)
1117 {
1118 	struct smc_llc_msg_add_link add_llc = {0};
1119 
1120 	add_llc.hd.length = sizeof(add_llc);
1121 	add_llc.hd.common.type = SMC_LLC_ADD_LINK;
1122 	/* no dev and port needed, we as server ignore client data anyway */
1123 	smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc);
1124 }
1125 
1126 /* worker to process an add link message */
1127 static void smc_llc_add_link_work(struct work_struct *work)
1128 {
1129 	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
1130 						  llc_add_link_work);
1131 
1132 	if (list_empty(&lgr->list)) {
1133 		/* link group is terminating */
1134 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1135 		goto out;
1136 	}
1137 
1138 	if (lgr->role == SMC_CLNT)
1139 		smc_llc_process_cli_add_link(lgr);
1140 	else
1141 		smc_llc_process_srv_add_link(lgr);
1142 out:
1143 	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
1144 }
1145 
1146 /* enqueue a local del_link msg to trigger a new del_link flow,
1147  * called only for role SMC_SERV
1148  */
1149 void smc_llc_srv_delete_link_local(struct smc_link *link, u8 del_link_id)
1150 {
1151 	struct smc_llc_msg_del_link del_llc = {0};
1152 
1153 	del_llc.hd.length = sizeof(del_llc);
1154 	del_llc.hd.common.type = SMC_LLC_DELETE_LINK;
1155 	del_llc.link_num = del_link_id;
1156 	del_llc.reason = htonl(SMC_LLC_DEL_LOST_PATH);
1157 	del_llc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
1158 	smc_llc_enqueue(link, (union smc_llc_msg *)&del_llc);
1159 }
1160 
1161 static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
1162 {
1163 	struct smc_link *lnk_del = NULL, *lnk_asym, *lnk;
1164 	struct smc_llc_msg_del_link *del_llc;
1165 	struct smc_llc_qentry *qentry;
1166 	int active_links;
1167 	int lnk_idx;
1168 
1169 	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
1170 	lnk = qentry->link;
1171 	del_llc = &qentry->msg.delete_link;
1172 
1173 	if (del_llc->hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) {
1174 		smc_lgr_terminate_sched(lgr);
1175 		goto out;
1176 	}
1177 	mutex_lock(&lgr->llc_conf_mutex);
1178 	/* delete single link */
1179 	for (lnk_idx = 0; lnk_idx < SMC_LINKS_PER_LGR_MAX; lnk_idx++) {
1180 		if (lgr->lnk[lnk_idx].link_id != del_llc->link_num)
1181 			continue;
1182 		lnk_del = &lgr->lnk[lnk_idx];
1183 		break;
1184 	}
1185 	del_llc->hd.flags |= SMC_LLC_FLAG_RESP;
1186 	if (!lnk_del) {
1187 		/* link was not found */
1188 		del_llc->reason = htonl(SMC_LLC_DEL_NOLNK);
1189 		smc_llc_send_message(lnk, &qentry->msg);
1190 		goto out_unlock;
1191 	}
1192 	lnk_asym = smc_llc_find_asym_link(lgr);
1193 
1194 	del_llc->reason = 0;
1195 	smc_llc_send_message(lnk, &qentry->msg); /* response */
1196 
1197 	if (smc_link_downing(&lnk_del->state)) {
1198 		/* tbd: call smc_switch_conns(lgr, lnk_del, false); */
1199 		smc_wr_tx_wait_no_pending_sends(lnk_del);
1200 	}
1201 	smcr_link_clear(lnk_del);
1202 
1203 	active_links = smc_llc_active_link_count(lgr);
1204 	if (lnk_del == lnk_asym) {
1205 		/* expected deletion of asym link, don't change lgr state */
1206 	} else if (active_links == 1) {
1207 		lgr->type = SMC_LGR_SINGLE;
1208 	} else if (!active_links) {
1209 		lgr->type = SMC_LGR_NONE;
1210 		smc_lgr_terminate_sched(lgr);
1211 	}
1212 out_unlock:
1213 	mutex_unlock(&lgr->llc_conf_mutex);
1214 out:
1215 	kfree(qentry);
1216 }
1217 
1218 static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
1219 {
1220 	struct smc_llc_msg_del_link *del_llc;
1221 	struct smc_link *lnk, *lnk_del;
1222 	struct smc_llc_qentry *qentry;
1223 	int active_links;
1224 	int i;
1225 
1226 	mutex_lock(&lgr->llc_conf_mutex);
1227 	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
1228 	lnk = qentry->link;
1229 	del_llc = &qentry->msg.delete_link;
1230 
1231 	if (qentry->msg.delete_link.hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) {
1232 		/* delete entire lgr */
1233 		smc_lgr_terminate_sched(lgr);
1234 		goto out;
1235 	}
1236 	/* delete single link */
1237 	lnk_del = NULL;
1238 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1239 		if (lgr->lnk[i].link_id == del_llc->link_num) {
1240 			lnk_del = &lgr->lnk[i];
1241 			break;
1242 		}
1243 	}
1244 	if (!lnk_del)
1245 		goto out; /* asymmetric link already deleted */
1246 
1247 	if (smc_link_downing(&lnk_del->state)) {
1248 		/* tbd: call smc_switch_conns(lgr, lnk_del, false); */
1249 		smc_wr_tx_wait_no_pending_sends(lnk_del);
1250 	}
1251 	if (!list_empty(&lgr->list)) {
1252 		/* qentry is either a request from peer (send it back to
1253 		 * initiate the DELETE_LINK processing), or a locally
1254 		 * enqueued DELETE_LINK request (forward it)
1255 		 */
1256 		if (!smc_llc_send_message(lnk, &qentry->msg)) {
1257 			struct smc_llc_msg_del_link *del_llc_resp;
1258 			struct smc_llc_qentry *qentry2;
1259 
1260 			qentry2 = smc_llc_wait(lgr, lnk, SMC_LLC_WAIT_TIME,
1261 					       SMC_LLC_DELETE_LINK);
1262 			if (!qentry2) {
1263 			} else {
1264 				del_llc_resp = &qentry2->msg.delete_link;
1265 				smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1266 			}
1267 		}
1268 	}
1269 	smcr_link_clear(lnk_del);
1270 
1271 	active_links = smc_llc_active_link_count(lgr);
1272 	if (active_links == 1) {
1273 		lgr->type = SMC_LGR_SINGLE;
1274 	} else if (!active_links) {
1275 		lgr->type = SMC_LGR_NONE;
1276 		smc_lgr_terminate_sched(lgr);
1277 	}
1278 
1279 	if (lgr->type == SMC_LGR_SINGLE && !list_empty(&lgr->list)) {
1280 		/* trigger setup of asymm alt link */
1281 		smc_llc_srv_add_link_local(lnk);
1282 	}
1283 out:
1284 	mutex_unlock(&lgr->llc_conf_mutex);
1285 	kfree(qentry);
1286 }
1287 
1288 static void smc_llc_delete_link_work(struct work_struct *work)
1289 {
1290 	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
1291 						  llc_del_link_work);
1292 
1293 	if (list_empty(&lgr->list)) {
1294 		/* link group is terminating */
1295 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1296 		goto out;
1297 	}
1298 
1299 	if (lgr->role == SMC_CLNT)
1300 		smc_llc_process_cli_delete_link(lgr);
1301 	else
1302 		smc_llc_process_srv_delete_link(lgr);
1303 out:
1304 	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
1305 }
1306 
1307 /* process a confirm_rkey request from peer, remote flow */
1308 static void smc_llc_rmt_conf_rkey(struct smc_link_group *lgr)
1309 {
1310 	struct smc_llc_msg_confirm_rkey *llc;
1311 	struct smc_llc_qentry *qentry;
1312 	struct smc_link *link;
1313 	int num_entries;
1314 	int rk_idx;
1315 	int i;
1316 
1317 	qentry = lgr->llc_flow_rmt.qentry;
1318 	llc = &qentry->msg.confirm_rkey;
1319 	link = qentry->link;
1320 
1321 	num_entries = llc->rtoken[0].num_rkeys;
1322 	/* first rkey entry is for receiving link */
1323 	rk_idx = smc_rtoken_add(link,
1324 				llc->rtoken[0].rmb_vaddr,
1325 				llc->rtoken[0].rmb_key);
1326 	if (rk_idx < 0)
1327 		goto out_err;
1328 
1329 	for (i = 1; i <= min_t(u8, num_entries, SMC_LLC_RKEYS_PER_MSG - 1); i++)
1330 		smc_rtoken_set2(lgr, rk_idx, llc->rtoken[i].link_id,
1331 				llc->rtoken[i].rmb_vaddr,
1332 				llc->rtoken[i].rmb_key);
1333 	/* max links is 3 so there is no need to support conf_rkey_cont msgs */
1334 	goto out;
1335 out_err:
1336 	llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
1337 	llc->hd.flags |= SMC_LLC_FLAG_RKEY_RETRY;
1338 out:
1339 	llc->hd.flags |= SMC_LLC_FLAG_RESP;
1340 	smc_llc_send_message(link, &qentry->msg);
1341 	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
1342 }
1343 
1344 /* process a delete_rkey request from peer, remote flow */
1345 static void smc_llc_rmt_delete_rkey(struct smc_link_group *lgr)
1346 {
1347 	struct smc_llc_msg_delete_rkey *llc;
1348 	struct smc_llc_qentry *qentry;
1349 	struct smc_link *link;
1350 	u8 err_mask = 0;
1351 	int i, max;
1352 
1353 	qentry = lgr->llc_flow_rmt.qentry;
1354 	llc = &qentry->msg.delete_rkey;
1355 	link = qentry->link;
1356 
1357 	max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX);
1358 	for (i = 0; i < max; i++) {
1359 		if (smc_rtoken_delete(link, llc->rkey[i]))
1360 			err_mask |= 1 << (SMC_LLC_DEL_RKEY_MAX - 1 - i);
1361 	}
1362 	if (err_mask) {
1363 		llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
1364 		llc->err_mask = err_mask;
1365 	}
1366 	llc->hd.flags |= SMC_LLC_FLAG_RESP;
1367 	smc_llc_send_message(link, &qentry->msg);
1368 	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
1369 }
1370 
1371 /* flush the llc event queue */
1372 static void smc_llc_event_flush(struct smc_link_group *lgr)
1373 {
1374 	struct smc_llc_qentry *qentry, *q;
1375 
1376 	spin_lock_bh(&lgr->llc_event_q_lock);
1377 	list_for_each_entry_safe(qentry, q, &lgr->llc_event_q, list) {
1378 		list_del_init(&qentry->list);
1379 		kfree(qentry);
1380 	}
1381 	spin_unlock_bh(&lgr->llc_event_q_lock);
1382 }
1383 
1384 static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
1385 {
1386 	union smc_llc_msg *llc = &qentry->msg;
1387 	struct smc_link *link = qentry->link;
1388 	struct smc_link_group *lgr = link->lgr;
1389 
1390 	if (!smc_link_usable(link))
1391 		goto out;
1392 
1393 	switch (llc->raw.hdr.common.type) {
1394 	case SMC_LLC_TEST_LINK:
1395 		llc->test_link.hd.flags |= SMC_LLC_FLAG_RESP;
1396 		smc_llc_send_message(link, llc);
1397 		break;
1398 	case SMC_LLC_ADD_LINK:
1399 		if (list_empty(&lgr->list))
1400 			goto out;	/* lgr is terminating */
1401 		if (lgr->role == SMC_CLNT) {
1402 			if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK) {
1403 				/* a flow is waiting for this message */
1404 				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
1405 							qentry);
1406 				wake_up_interruptible(&lgr->llc_waiter);
1407 			} else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
1408 						      qentry)) {
1409 				schedule_work(&lgr->llc_add_link_work);
1410 			}
1411 		} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
1412 			/* as smc server, handle client suggestion */
1413 			schedule_work(&lgr->llc_add_link_work);
1414 		}
1415 		return;
1416 	case SMC_LLC_CONFIRM_LINK:
1417 	case SMC_LLC_ADD_LINK_CONT:
1418 		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
1419 			/* a flow is waiting for this message */
1420 			smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
1421 			wake_up_interruptible(&lgr->llc_waiter);
1422 			return;
1423 		}
1424 		break;
1425 	case SMC_LLC_DELETE_LINK:
1426 		if (lgr->role == SMC_CLNT) {
1427 			/* server requests to delete this link, send response */
1428 			if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
1429 				/* DEL LINK REQ during ADD LINK SEQ */
1430 				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
1431 							qentry);
1432 				wake_up_interruptible(&lgr->llc_waiter);
1433 			} else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
1434 						      qentry)) {
1435 				schedule_work(&lgr->llc_del_link_work);
1436 			}
1437 		} else {
1438 			if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
1439 			    !lgr->llc_flow_lcl.qentry) {
1440 				/* DEL LINK REQ during ADD LINK SEQ */
1441 				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
1442 							qentry);
1443 				wake_up_interruptible(&lgr->llc_waiter);
1444 			} else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
1445 						      qentry)) {
1446 				schedule_work(&lgr->llc_del_link_work);
1447 			}
1448 		}
1449 		return;
1450 	case SMC_LLC_CONFIRM_RKEY:
1451 		/* new request from remote, assign to remote flow */
1452 		if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
1453 			/* process here, does not wait for more llc msgs */
1454 			smc_llc_rmt_conf_rkey(lgr);
1455 			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
1456 		}
1457 		return;
1458 	case SMC_LLC_CONFIRM_RKEY_CONT:
1459 		/* not used because max links is 3, and 3 rkeys fit into
1460 		 * one CONFIRM_RKEY message
1461 		 */
1462 		break;
1463 	case SMC_LLC_DELETE_RKEY:
1464 		/* new request from remote, assign to remote flow */
1465 		if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
1466 			/* process here, does not wait for more llc msgs */
1467 			smc_llc_rmt_delete_rkey(lgr);
1468 			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
1469 		}
1470 		return;
1471 	}
1472 out:
1473 	kfree(qentry);
1474 }
1475 
1476 /* worker to process llc messages on the event queue */
1477 static void smc_llc_event_work(struct work_struct *work)
1478 {
1479 	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
1480 						  llc_event_work);
1481 	struct smc_llc_qentry *qentry;
1482 
1483 	if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
1484 		if (smc_link_usable(lgr->delayed_event->link)) {
1485 			smc_llc_event_handler(lgr->delayed_event);
1486 		} else {
1487 			qentry = lgr->delayed_event;
1488 			lgr->delayed_event = NULL;
1489 			kfree(qentry);
1490 		}
1491 	}
1492 
1493 again:
1494 	spin_lock_bh(&lgr->llc_event_q_lock);
1495 	if (!list_empty(&lgr->llc_event_q)) {
1496 		qentry = list_first_entry(&lgr->llc_event_q,
1497 					  struct smc_llc_qentry, list);
1498 		list_del_init(&qentry->list);
1499 		spin_unlock_bh(&lgr->llc_event_q_lock);
1500 		smc_llc_event_handler(qentry);
1501 		goto again;
1502 	}
1503 	spin_unlock_bh(&lgr->llc_event_q_lock);
1504 }
1505 
1506 /* process llc responses in tasklet context */
1507 static void smc_llc_rx_response(struct smc_link *link,
1508 				struct smc_llc_qentry *qentry)
1509 {
1510 	u8 llc_type = qentry->msg.raw.hdr.common.type;
1511 
1512 	switch (llc_type) {
1513 	case SMC_LLC_TEST_LINK:
1514 		if (link->state == SMC_LNK_ACTIVE)
1515 			complete(&link->llc_testlink_resp);
1516 		break;
1517 	case SMC_LLC_ADD_LINK:
1518 	case SMC_LLC_DELETE_LINK:
1519 	case SMC_LLC_CONFIRM_LINK:
1520 	case SMC_LLC_ADD_LINK_CONT:
1521 	case SMC_LLC_CONFIRM_RKEY:
1522 	case SMC_LLC_DELETE_RKEY:
1523 		/* assign responses to the local flow, we requested them */
1524 		smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
1525 		wake_up_interruptible(&link->lgr->llc_waiter);
1526 		return;
1527 	case SMC_LLC_CONFIRM_RKEY_CONT:
1528 		/* not used because max links is 3 */
1529 		break;
1530 	}
1531 	kfree(qentry);
1532 }
1533 
1534 static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
1535 {
1536 	struct smc_link_group *lgr = link->lgr;
1537 	struct smc_llc_qentry *qentry;
1538 	unsigned long flags;
1539 
1540 	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
1541 	if (!qentry)
1542 		return;
1543 	qentry->link = link;
1544 	INIT_LIST_HEAD(&qentry->list);
1545 	memcpy(&qentry->msg, llc, sizeof(union smc_llc_msg));
1546 
1547 	/* process responses immediately */
1548 	if (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) {
1549 		smc_llc_rx_response(link, qentry);
1550 		return;
1551 	}
1552 
1553 	/* add requests to event queue */
1554 	spin_lock_irqsave(&lgr->llc_event_q_lock, flags);
1555 	list_add_tail(&qentry->list, &lgr->llc_event_q);
1556 	spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags);
1557 	schedule_work(&link->lgr->llc_event_work);
1558 }
1559 
1560 /* copy received msg and add it to the event queue */
1561 static void smc_llc_rx_handler(struct ib_wc *wc, void *buf)
1562 {
1563 	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
1564 	union smc_llc_msg *llc = buf;
1565 
1566 	if (wc->byte_len < sizeof(*llc))
1567 		return; /* short message */
1568 	if (llc->raw.hdr.length != sizeof(*llc))
1569 		return; /* invalid message */
1570 
1571 	smc_llc_enqueue(link, llc);
1572 }
1573 
1574 /***************************** worker, utils *********************************/
1575 
1576 static void smc_llc_testlink_work(struct work_struct *work)
1577 {
1578 	struct smc_link *link = container_of(to_delayed_work(work),
1579 					     struct smc_link, llc_testlink_wrk);
1580 	unsigned long next_interval;
1581 	unsigned long expire_time;
1582 	u8 user_data[16] = { 0 };
1583 	int rc;
1584 
1585 	if (link->state != SMC_LNK_ACTIVE)
1586 		return;		/* don't reschedule worker */
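	/* send a TEST_LINK probe only if nothing was received on the link
	 * within llc_testlink_time; otherwise just re-arm the worker for
	 * the remaining time
	 */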
1587 	expire_time = link->wr_rx_tstamp + link->llc_testlink_time;
1588 	if (time_is_after_jiffies(expire_time)) {
1589 		next_interval = expire_time - jiffies;
1590 		goto out;
1591 	}
1592 	reinit_completion(&link->llc_testlink_resp);
1593 	smc_llc_send_test_link(link, user_data);
1594 	/* receive TEST LINK response over RoCE fabric */
1595 	rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
1596 						       SMC_LLC_WAIT_TIME);
1597 	if (link->state != SMC_LNK_ACTIVE)
1598 		return;		/* link state changed */
1599 	if (rc <= 0) {
1600 		smcr_link_down_cond_sched(link);
1601 		return;
1602 	}
1603 	next_interval = link->llc_testlink_time;
1604 out:
1605 	schedule_delayed_work(&link->llc_testlink_wrk, next_interval);
1606 }
1607 
1608 void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
1609 {
1610 	struct net *net = sock_net(smc->clcsock->sk);
1611 
1612 	INIT_WORK(&lgr->llc_event_work, smc_llc_event_work);
1613 	INIT_WORK(&lgr->llc_add_link_work, smc_llc_add_link_work);
1614 	INIT_WORK(&lgr->llc_del_link_work, smc_llc_delete_link_work);
1615 	INIT_LIST_HEAD(&lgr->llc_event_q);
1616 	spin_lock_init(&lgr->llc_event_q_lock);
1617 	spin_lock_init(&lgr->llc_flow_lock);
1618 	init_waitqueue_head(&lgr->llc_waiter);
1619 	mutex_init(&lgr->llc_conf_mutex);
1620 	lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time;
1621 }
1622 
1623 /* called after lgr was removed from lgr_list */
1624 void smc_llc_lgr_clear(struct smc_link_group *lgr)
1625 {
1626 	smc_llc_event_flush(lgr);
1627 	wake_up_interruptible_all(&lgr->llc_waiter);
1628 	cancel_work_sync(&lgr->llc_event_work);
1629 	cancel_work_sync(&lgr->llc_add_link_work);
1630 	cancel_work_sync(&lgr->llc_del_link_work);
1631 	if (lgr->delayed_event) {
1632 		kfree(lgr->delayed_event);
1633 		lgr->delayed_event = NULL;
1634 	}
1635 }
1636 
1637 int smc_llc_link_init(struct smc_link *link)
1638 {
1639 	init_completion(&link->llc_testlink_resp);
1640 	INIT_DELAYED_WORK(&link->llc_testlink_wrk, smc_llc_testlink_work);
1641 	return 0;
1642 }
1643 
1644 void smc_llc_link_active(struct smc_link *link)
1645 {
1646 	link->state = SMC_LNK_ACTIVE;
1647 	if (link->lgr->llc_testlink_time) {
1648 		link->llc_testlink_time = link->lgr->llc_testlink_time * HZ;
1649 		schedule_delayed_work(&link->llc_testlink_wrk,
1650 				      link->llc_testlink_time);
1651 	}
1652 }
1653 
1654 /* called in worker context */
1655 void smc_llc_link_clear(struct smc_link *link)
1656 {
1657 	complete(&link->llc_testlink_resp);
1658 	cancel_delayed_work_sync(&link->llc_testlink_wrk);
1659 	smc_wr_wakeup_reg_wait(link);
1660 	smc_wr_wakeup_tx_wait(link);
1661 }
1662 
1663 /* register a new rtoken at the remote peer (for all links) */
1664 int smc_llc_do_confirm_rkey(struct smc_link *send_link,
1665 			    struct smc_buf_desc *rmb_desc)
1666 {
1667 	struct smc_link_group *lgr = send_link->lgr;
1668 	struct smc_llc_qentry *qentry = NULL;
1669 	int rc = 0;
1670 
1671 	rc = smc_llc_send_confirm_rkey(send_link, rmb_desc);
1672 	if (rc)
1673 		goto out;
1674 	/* receive CONFIRM RKEY response from server over RoCE fabric */
1675 	qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
1676 			      SMC_LLC_CONFIRM_RKEY);
1677 	if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
1678 		rc = -EFAULT;
1679 out:
1680 	if (qentry)
1681 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1682 	return rc;
1683 }
1684 
1685 /* unregister an rtoken at the remote peer */
1686 int smc_llc_do_delete_rkey(struct smc_link_group *lgr,
1687 			   struct smc_buf_desc *rmb_desc)
1688 {
1689 	struct smc_llc_qentry *qentry = NULL;
1690 	struct smc_link *send_link;
1691 	int rc = 0;
1692 
1693 	send_link = smc_llc_usable_link(lgr);
1694 	if (!send_link)
1695 		return -ENOLINK;
1696 
1697 	/* protected by llc_flow control */
1698 	rc = smc_llc_send_delete_rkey(send_link, rmb_desc);
1699 	if (rc)
1700 		goto out;
1701 	/* receive DELETE RKEY response from server over RoCE fabric */
1702 	qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
1703 			      SMC_LLC_DELETE_RKEY);
1704 	if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
1705 		rc = -EFAULT;
1706 out:
1707 	if (qentry)
1708 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1709 	return rc;
1710 }
1711 
1712 /* evaluate confirm link request or response */
1713 int smc_llc_eval_conf_link(struct smc_llc_qentry *qentry,
1714 			   enum smc_llc_reqresp type)
1715 {
1716 	if (type == SMC_LLC_REQ)	/* SMC server assigns link_id */
1717 		qentry->link->link_id = qentry->msg.confirm_link.link_num;
1718 	if (!(qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_NO_RMBE_EYEC))
1719 		return -ENOTSUPP;
1720 	return 0;
1721 }
1722 
1723 /***************************** init, exit, misc ******************************/
1724 
1725 static struct smc_wr_rx_handler smc_llc_rx_handlers[] = {
1726 	{
1727 		.handler	= smc_llc_rx_handler,
1728 		.type		= SMC_LLC_CONFIRM_LINK
1729 	},
1730 	{
1731 		.handler	= smc_llc_rx_handler,
1732 		.type		= SMC_LLC_TEST_LINK
1733 	},
1734 	{
1735 		.handler	= smc_llc_rx_handler,
1736 		.type		= SMC_LLC_ADD_LINK
1737 	},
1738 	{
1739 		.handler	= smc_llc_rx_handler,
1740 		.type		= SMC_LLC_ADD_LINK_CONT
1741 	},
1742 	{
1743 		.handler	= smc_llc_rx_handler,
1744 		.type		= SMC_LLC_DELETE_LINK
1745 	},
1746 	{
1747 		.handler	= smc_llc_rx_handler,
1748 		.type		= SMC_LLC_CONFIRM_RKEY
1749 	},
1750 	{
1751 		.handler	= smc_llc_rx_handler,
1752 		.type		= SMC_LLC_CONFIRM_RKEY_CONT
1753 	},
1754 	{
1755 		.handler	= smc_llc_rx_handler,
1756 		.type		= SMC_LLC_DELETE_RKEY
1757 	},
1758 	{
1759 		.handler	= NULL,
1760 	}
1761 };
1762 
1763 int __init smc_llc_init(void)
1764 {
1765 	struct smc_wr_rx_handler *handler;
1766 	int rc = 0;
1767 
1768 	for (handler = smc_llc_rx_handlers; handler->handler; handler++) {
1769 		INIT_HLIST_NODE(&handler->list);
1770 		rc = smc_wr_rx_register_handler(handler);
1771 		if (rc)
1772 			break;
1773 	}
1774 	return rc;
1775 }
1776