xref: /linux/net/smc/smc_llc.c (revision ff40b5769a50fab654a70575ff0f49853b799b0e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Shared Memory Communications over RDMA (SMC-R) and RoCE
4  *
5  *  Link Layer Control (LLC)
6  *
7  *  Copyright IBM Corp. 2016
8  *
9  *  Author(s):  Klaus Wacker <Klaus.Wacker@de.ibm.com>
10  *              Ursula Braun <ubraun@linux.vnet.ibm.com>
11  */
12 
13 #include <net/tcp.h>
14 #include <rdma/ib_verbs.h>
15 
16 #include "smc.h"
17 #include "smc_core.h"
18 #include "smc_clc.h"
19 #include "smc_llc.h"
20 #include "smc_pnet.h"
21 
#define SMC_LLC_DATA_LEN		40

/* Common header of every LLC message. The length union overlays the
 * one-byte v1 length (always 44) with the two-byte v2 length; which one
 * is valid depends on common.llc_version (see smc_llc_init_msg_hdr()).
 * Wire format - do not reorder or resize fields.
 */
struct smc_llc_hdr {
	struct smc_wr_rx_hdr common;
	union {
		struct {
			u8 length;	/* 44 */
	#if defined(__BIG_ENDIAN_BITFIELD)
			u8 reserved:4,
			   add_link_rej_rsn:4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
			u8 add_link_rej_rsn:4,
			   reserved:4;
#endif
		};
		u16 length_v2;	/* 44 - 8192*/
	};
	u8 flags;
} __packed;		/* format defined in
			 * IBM Shared Memory Communications Version 2
			 * (https://www.ibm.com/support/pages/node/6326337)
			 */

#define SMC_LLC_FLAG_NO_RMBE_EYEC	0x03

/* CONFIRM LINK message, exchanged to confirm a new link */
struct smc_llc_msg_confirm_link {	/* type 0x01 */
	struct smc_llc_hdr hd;
	u8 sender_mac[ETH_ALEN];
	u8 sender_gid[SMC_GID_SIZE];
	u8 sender_qp_num[3];	/* 24-bit QP number, filled via hton24() */
	u8 link_num;
	u8 link_uid[SMC_LGR_ID_SIZE];
	u8 max_links;
	u8 reserved[9];
};
57 
#define SMC_LLC_FLAG_ADD_LNK_REJ	0x40
#define SMC_LLC_REJ_RSN_NO_ALT_PATH	1

#define SMC_LLC_ADD_LNK_MAX_LINKS	2

/* ADD LINK request/response; wire format - do not reorder fields */
struct smc_llc_msg_add_link {		/* type 0x02 */
	struct smc_llc_hdr hd;
	u8 sender_mac[ETH_ALEN];
	u8 reserved2[2];
	u8 sender_gid[SMC_GID_SIZE];
	u8 sender_qp_num[3];	/* 24-bit QP number, filled via hton24() */
	u8 link_num;
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 reserved3 : 4,
	   qp_mtu   : 4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8 qp_mtu   : 4,
	   reserved3 : 4;
#endif
	u8 initial_psn[3];	/* 24-bit PSN, filled via hton24() */
	u8 reserved[8];
};

/* one rkey triple as carried in ADD LINK (CONT) messages:
 * existing key on the old link, new key and vaddr on the new link
 */
struct smc_llc_msg_add_link_cont_rt {
	__be32 rmb_key;
	__be32 rmb_key_new;
	__be64 rmb_vaddr_new;
};

/* SMC-Rv2 extension appended to the ADD LINK message; rt[] is a flexible
 * array of num_rkeys rkey triples
 */
struct smc_llc_msg_add_link_v2_ext {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 v2_direct : 1,
	   reserved  : 7;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8 reserved  : 7,
	   v2_direct : 1;
#endif
	u8 reserved2;
	u8 client_target_gid[SMC_GID_SIZE];
	u8 reserved3[8];
	u16 num_rkeys;
	struct smc_llc_msg_add_link_cont_rt rt[];
} __packed;		/* format defined in
			 * IBM Shared Memory Communications Version 2
			 * (https://www.ibm.com/support/pages/node/6326337)
			 */

/* SMC-Rv2 REQUEST ADD LINK with a flexible list of gid_cnt GIDs */
struct smc_llc_msg_req_add_link_v2 {
	struct smc_llc_hdr hd;
	u8 reserved[20];
	u8 gid_cnt;
	u8 reserved2[3];
	u8 gid[][SMC_GID_SIZE];
};
112 
#define SMC_LLC_RKEYS_PER_CONT_MSG	2

/* ADD LINK CONTINUE: carries up to two rkey triples per message */
struct smc_llc_msg_add_link_cont {	/* type 0x03 */
	struct smc_llc_hdr hd;
	u8 link_num;
	u8 num_rkeys;
	u8 reserved2[2];
	struct smc_llc_msg_add_link_cont_rt rt[SMC_LLC_RKEYS_PER_CONT_MSG];
	u8 reserved[4];
} __packed;			/* format defined in RFC7609 */

#define SMC_LLC_FLAG_DEL_LINK_ALL	0x40
#define SMC_LLC_FLAG_DEL_LINK_ORDERLY	0x20

/* DELETE LINK request/response */
struct smc_llc_msg_del_link {		/* type 0x04 */
	struct smc_llc_hdr hd;
	u8 link_num;
	__be32 reason;
	u8 reserved[35];
} __packed;			/* format defined in RFC7609 */

/* TEST LINK keepalive probe; user_data is echoed back in the response */
struct smc_llc_msg_test_link {		/* type 0x07 */
	struct smc_llc_hdr hd;
	u8 user_data[16];
	u8 reserved[24];
};
139 
/* one rtoken as carried in CONFIRM RKEY; the first entry of a message
 * reuses its leading byte as the rtoken count instead of a link id
 */
struct smc_rmb_rtoken {
	union {
		u8 num_rkeys;	/* first rtoken byte of CONFIRM LINK msg */
				/* is actually the num of rtokens, first */
				/* rtoken is always for the current link */
		u8 link_id;	/* link id of the rtoken */
	};
	__be32 rmb_key;
	__be64 rmb_vaddr;
} __packed;			/* format defined in RFC7609 */

#define SMC_LLC_RKEYS_PER_MSG		3
#define SMC_LLC_RKEYS_PER_MSG_V2	255

/* CONFIRM RKEY: announce an RMB's rkeys/vaddrs for all links */
struct smc_llc_msg_confirm_rkey {	/* type 0x06 */
	struct smc_llc_hdr hd;
	struct smc_rmb_rtoken rtoken[SMC_LLC_RKEYS_PER_MSG];
	u8 reserved;
};

#define SMC_LLC_DEL_RKEY_MAX	8
#define SMC_LLC_FLAG_RKEY_RETRY	0x10
#define SMC_LLC_FLAG_RKEY_NEG	0x20

/* DELETE RKEY: revoke up to SMC_LLC_DEL_RKEY_MAX rkeys; err_mask flags
 * per-rkey failures in the response
 */
struct smc_llc_msg_delete_rkey {	/* type 0x09 */
	struct smc_llc_hdr hd;
	u8 num_rkeys;
	u8 err_mask;
	u8 reserved[2];
	__be32 rkey[8];
	u8 reserved2[4];
};

/* SMC-Rv2 DELETE RKEY with a flexible rkey list */
struct smc_llc_msg_delete_rkey_v2 {	/* type 0x29 */
	struct smc_llc_hdr hd;
	u8 num_rkeys;
	u8 num_inval_rkeys;
	u8 reserved[2];
	__be32 rkey[];
};
180 
/* any LLC message; raw gives untyped access to header plus payload */
union smc_llc_msg {
	struct smc_llc_msg_confirm_link confirm_link;
	struct smc_llc_msg_add_link add_link;
	struct smc_llc_msg_req_add_link_v2 req_add_link;
	struct smc_llc_msg_add_link_cont add_link_cont;
	struct smc_llc_msg_del_link delete_link;

	struct smc_llc_msg_confirm_rkey confirm_rkey;
	struct smc_llc_msg_delete_rkey delete_rkey;

	struct smc_llc_msg_test_link test_link;
	struct {
		struct smc_llc_hdr hdr;
		u8 data[SMC_LLC_DATA_LEN];
	} raw;
};

#define SMC_LLC_FLAG_RESP		0x80

/* queue entry for a received LLC message, kfree()d after processing */
struct smc_llc_qentry {
	struct list_head list;
	struct smc_link *link;	/* link the message arrived on */
	union smc_llc_msg msg;	/* copy of the received message */
};
205 
206 static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc);
207 
208 struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow)
209 {
210 	struct smc_llc_qentry *qentry = flow->qentry;
211 
212 	flow->qentry = NULL;
213 	return qentry;
214 }
215 
216 void smc_llc_flow_qentry_del(struct smc_llc_flow *flow)
217 {
218 	struct smc_llc_qentry *qentry;
219 
220 	if (flow->qentry) {
221 		qentry = flow->qentry;
222 		flow->qentry = NULL;
223 		kfree(qentry);
224 	}
225 }
226 
/* attach a received qentry to the flow for later processing */
static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow,
					   struct smc_llc_qentry *qentry)
{
	flow->qentry = qentry;
}
232 
/* an LLC request arrived while another flow is active: either park an
 * ADD_LINK/DELETE_LINK request as the lgr's single delayed event, or drop
 * the message (freeing the qentry), warning once about the collision
 */
static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type,
				  struct smc_llc_qentry *qentry)
{
	u8 msg_type = qentry->msg.raw.hdr.common.llc_type;

	/* park an add/delete link request that does not match the running
	 * flow; it is picked up again when the flow stops
	 */
	if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) &&
	    flow_type != msg_type && !lgr->delayed_event) {
		lgr->delayed_event = qentry;
		return;
	}
	/* drop parallel or already-in-progress llc requests */
	if (flow_type != msg_type)
		pr_warn_once("smc: SMC-R lg %*phN net %llu dropped parallel "
			     "LLC msg: msg %d flow %d role %d\n",
			     SMC_LGR_ID_SIZE, &lgr->id,
			     lgr->net->net_cookie,
			     qentry->msg.raw.hdr.common.type,
			     flow_type, lgr->role);
	kfree(qentry);
}
253 
/* try to start a new llc flow, initiated by an incoming llc msg.
 * Returns true and stores qentry in the flow on success; returns false
 * if another flow is already active (qentry is then parked or freed by
 * smc_llc_flow_parallel()). Runs under lgr->llc_flow_lock.
 */
static bool smc_llc_flow_start(struct smc_llc_flow *flow,
			       struct smc_llc_qentry *qentry)
{
	struct smc_link_group *lgr = qentry->link->lgr;

	spin_lock_bh(&lgr->llc_flow_lock);
	if (flow->type) {
		/* a flow is already active */
		smc_llc_flow_parallel(lgr, flow->type, qentry);
		spin_unlock_bh(&lgr->llc_flow_lock);
		return false;
	}
	/* derive the flow type from the triggering message type */
	switch (qentry->msg.raw.hdr.common.llc_type) {
	case SMC_LLC_ADD_LINK:
		flow->type = SMC_LLC_FLOW_ADD_LINK;
		break;
	case SMC_LLC_DELETE_LINK:
		flow->type = SMC_LLC_FLOW_DEL_LINK;
		break;
	case SMC_LLC_CONFIRM_RKEY:
	case SMC_LLC_DELETE_RKEY:
		flow->type = SMC_LLC_FLOW_RKEY;
		break;
	default:
		flow->type = SMC_LLC_FLOW_NONE;
	}
	smc_llc_flow_qentry_set(flow, qentry);
	spin_unlock_bh(&lgr->llc_flow_lock);
	return true;
}
285 
/* start a new local llc flow, wait till current flow finished.
 * Returns 0 once the local flow is claimed, -ENODEV if the link group
 * is gone, -ETIMEDOUT if no flow slot became free in time.
 */
int smc_llc_flow_initiate(struct smc_link_group *lgr,
			  enum smc_llc_flowtype type)
{
	enum smc_llc_flowtype allowed_remote = SMC_LLC_FLOW_NONE;
	int rc;

	/* all flows except confirm_rkey and delete_rkey are exclusive,
	 * confirm/delete rkey flows can run concurrently (local and remote)
	 */
	if (type == SMC_LLC_FLOW_RKEY)
		allowed_remote = SMC_LLC_FLOW_RKEY;
again:
	if (list_empty(&lgr->list))
		return -ENODEV;
	spin_lock_bh(&lgr->llc_flow_lock);
	if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
	    (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
	     lgr->llc_flow_rmt.type == allowed_remote)) {
		lgr->llc_flow_lcl.type = type;
		spin_unlock_bh(&lgr->llc_flow_lock);
		return 0;
	}
	spin_unlock_bh(&lgr->llc_flow_lock);
	/* slot busy: sleep until a flow finishes, then recheck under lock */
	rc = wait_event_timeout(lgr->llc_flow_waiter, (list_empty(&lgr->list) ||
				(lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
				 (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
				  lgr->llc_flow_rmt.type == allowed_remote))),
				SMC_LLC_WAIT_TIME * 10);
	if (!rc)
		return -ETIMEDOUT;
	goto again;
}
319 
/* finish the current llc flow: clear the flow under the flow lock, then
 * either kick the event worker (a delayed event is pending for the local
 * flow) or wake waiters blocked in smc_llc_flow_initiate()
 */
void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow)
{
	spin_lock_bh(&lgr->llc_flow_lock);
	memset(flow, 0, sizeof(*flow));
	flow->type = SMC_LLC_FLOW_NONE;
	spin_unlock_bh(&lgr->llc_flow_lock);
	if (!list_empty(&lgr->list) && lgr->delayed_event &&
	    flow == &lgr->llc_flow_lcl)
		schedule_work(&lgr->llc_event_work);
	else
		wake_up(&lgr->llc_flow_waiter);
}
333 
/* lnk is optional and used for early wakeup when link goes down, useful in
 * cases where we wait for a response on the link after we sent a request.
 * Returns the received qentry (still owned by the flow), or NULL on
 * timeout, unusable link, dead link group, or dropped unexpected message.
 * exp_msg == 0 accepts any message type.
 */
struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
				    struct smc_link *lnk,
				    int time_out, u8 exp_msg)
{
	struct smc_llc_flow *flow = &lgr->llc_flow_lcl;
	u8 rcv_msg;

	wait_event_timeout(lgr->llc_msg_waiter,
			   (flow->qentry ||
			    (lnk && !smc_link_usable(lnk)) ||
			    list_empty(&lgr->list)),
			   time_out);
	if (!flow->qentry ||
	    (lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) {
		smc_llc_flow_qentry_del(flow);
		goto out;
	}
	rcv_msg = flow->qentry->msg.raw.hdr.common.llc_type;
	if (exp_msg && rcv_msg != exp_msg) {
		if (exp_msg == SMC_LLC_ADD_LINK &&
		    rcv_msg == SMC_LLC_DELETE_LINK) {
			/* flow_start will delay the unexpected msg */
			smc_llc_flow_start(&lgr->llc_flow_lcl,
					   smc_llc_flow_qentry_clr(flow));
			return NULL;
		}
		pr_warn_once("smc: SMC-R lg %*phN net %llu dropped unexpected LLC msg: "
			     "msg %d exp %d flow %d role %d flags %x\n",
			     SMC_LGR_ID_SIZE, &lgr->id, lgr->net->net_cookie,
			     rcv_msg, exp_msg,
			     flow->type, lgr->role,
			     flow->qentry->msg.raw.hdr.flags);
		smc_llc_flow_qentry_del(flow);
	}
out:
	return flow->qentry;
}
374 
375 /********************************** send *************************************/
376 
/* LLC keeps no private per-send state; empty placeholder checked against
 * SMC_WR_TX_PEND_PRIV_SIZE in smc_llc_add_pending_send()
 */
struct smc_llc_tx_pend {
};
379 
/* handler for send/transmission completion of an LLC msg */
static void smc_llc_tx_handler(struct smc_wr_tx_pend_priv *pend,
			       struct smc_link *link,
			       enum ib_wc_status wc_status)
{
	/* future work: handle wc_status error for recovery and failover */
}
387 
388 /**
389  * smc_llc_add_pending_send() - add LLC control message to pending WQE transmits
390  * @link: Pointer to SMC link used for sending LLC control message.
391  * @wr_buf: Out variable returning pointer to work request payload buffer.
392  * @pend: Out variable returning pointer to private pending WR tracking.
393  *	  It's the context the transmit complete handler will get.
394  *
395  * Reserves and pre-fills an entry for a pending work request send/tx.
396  * Used by mid-level smc_llc_send_msg() to prepare for later actual send/tx.
397  * Can sleep due to smc_get_ctrl_buf (if not in softirq context).
398  *
399  * Return: 0 on success, otherwise an error value.
400  */
401 static int smc_llc_add_pending_send(struct smc_link *link,
402 				    struct smc_wr_buf **wr_buf,
403 				    struct smc_wr_tx_pend_priv **pend)
404 {
405 	int rc;
406 
407 	rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL,
408 				     pend);
409 	if (rc < 0)
410 		return rc;
411 	BUILD_BUG_ON_MSG(
412 		sizeof(union smc_llc_msg) > SMC_WR_BUF_SIZE,
413 		"must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_llc_msg)");
414 	BUILD_BUG_ON_MSG(
415 		sizeof(union smc_llc_msg) != SMC_WR_TX_SIZE,
416 		"must adapt SMC_WR_TX_SIZE to sizeof(struct smc_llc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
417 	BUILD_BUG_ON_MSG(
418 		sizeof(struct smc_llc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
419 		"must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_llc_tx_pend)");
420 	return 0;
421 }
422 
423 static int smc_llc_add_pending_send_v2(struct smc_link *link,
424 				       struct smc_wr_v2_buf **wr_buf,
425 				       struct smc_wr_tx_pend_priv **pend)
426 {
427 	int rc;
428 
429 	rc = smc_wr_tx_get_v2_slot(link, smc_llc_tx_handler, wr_buf, pend);
430 	if (rc < 0)
431 		return rc;
432 	return 0;
433 }
434 
435 static void smc_llc_init_msg_hdr(struct smc_llc_hdr *hdr,
436 				 struct smc_link_group *lgr, size_t len)
437 {
438 	if (lgr->smc_version == SMC_V2) {
439 		hdr->common.llc_version = SMC_V2;
440 		hdr->length_v2 = len;
441 	} else {
442 		hdr->common.llc_version = 0;
443 		hdr->length = len;
444 	}
445 }
446 
/* high-level API to send LLC confirm link.
 * Builds the message from the link's own MAC/GID/QP data and sends it;
 * returns 0 or a negative error (-ENOLINK if the link cannot be held).
 */
int smc_llc_send_confirm_link(struct smc_link *link,
			      enum smc_llc_reqresp reqresp)
{
	struct smc_llc_msg_confirm_link *confllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
	memset(confllc, 0, sizeof(*confllc));
	confllc->hd.common.llc_type = SMC_LLC_CONFIRM_LINK;
	smc_llc_init_msg_hdr(&confllc->hd, link->lgr, sizeof(*confllc));
	confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC;
	if (reqresp == SMC_LLC_RESP)
		confllc->hd.flags |= SMC_LLC_FLAG_RESP;
	memcpy(confllc->sender_mac, link->smcibdev->mac[link->ibport - 1],
	       ETH_ALEN);
	memcpy(confllc->sender_gid, link->gid, SMC_GID_SIZE);
	hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
	confllc->link_num = link->link_id;
	memcpy(confllc->link_uid, link->link_uid, SMC_LGR_ID_SIZE);
	confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS;
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
481 
/* send LLC confirm rkey request.
 * rtoken[0] describes rmb_desc on send_link (its leading byte holds the
 * count of additional rtokens); rtoken[1..] cover the other active links.
 */
static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
				     struct smc_buf_desc *rmb_desc)
{
	struct smc_llc_msg_confirm_rkey *rkeyllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	struct smc_link *link;
	int i, rc, rtok_ix;

	if (!smc_wr_tx_link_hold(send_link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(send_link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf;
	memset(rkeyllc, 0, sizeof(*rkeyllc));
	rkeyllc->hd.common.llc_type = SMC_LLC_CONFIRM_RKEY;
	smc_llc_init_msg_hdr(&rkeyllc->hd, send_link->lgr, sizeof(*rkeyllc));

	/* collect rtokens for all other active links of the lgr */
	rtok_ix = 1;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		link = &send_link->lgr->lnk[i];
		if (smc_link_active(link) && link != send_link) {
			rkeyllc->rtoken[rtok_ix].link_id = link->link_id;
			rkeyllc->rtoken[rtok_ix].rmb_key =
				htonl(rmb_desc->mr[link->link_idx]->rkey);
			/* virtual memory buffers use the cpu address,
			 * others the DMA address of the first sg entry
			 */
			rkeyllc->rtoken[rtok_ix].rmb_vaddr = rmb_desc->is_vm ?
				cpu_to_be64((uintptr_t)rmb_desc->cpu_addr) :
				cpu_to_be64((u64)sg_dma_address
					    (rmb_desc->sgt[link->link_idx].sgl));
			rtok_ix++;
		}
	}
	/* rkey of send_link is in rtoken[0] */
	rkeyllc->rtoken[0].num_rkeys = rtok_ix - 1;
	rkeyllc->rtoken[0].rmb_key =
		htonl(rmb_desc->mr[send_link->link_idx]->rkey);
	rkeyllc->rtoken[0].rmb_vaddr = rmb_desc->is_vm ?
		cpu_to_be64((uintptr_t)rmb_desc->cpu_addr) :
		cpu_to_be64((u64)sg_dma_address
			    (rmb_desc->sgt[send_link->link_idx].sgl));
	/* send llc message */
	rc = smc_wr_tx_send(send_link, pend);
put_out:
	smc_wr_tx_link_put(send_link);
	return rc;
}
530 
/* send LLC delete rkey request for the rkey of rmb_desc on this link */
static int smc_llc_send_delete_rkey(struct smc_link *link,
				    struct smc_buf_desc *rmb_desc)
{
	struct smc_llc_msg_delete_rkey *rkeyllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
	memset(rkeyllc, 0, sizeof(*rkeyllc));
	rkeyllc->hd.common.llc_type = SMC_LLC_DELETE_RKEY;
	smc_llc_init_msg_hdr(&rkeyllc->hd, link->lgr, sizeof(*rkeyllc));
	rkeyllc->num_rkeys = 1;
	rkeyllc->rkey[0] = htonl(rmb_desc->mr[link->link_idx]->rkey);
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
557 
558 /* return first buffer from any of the next buf lists */
559 static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr,
560 						  int *buf_lst)
561 {
562 	struct smc_buf_desc *buf_pos;
563 
564 	while (*buf_lst < SMC_RMBE_SIZES) {
565 		buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst],
566 						   struct smc_buf_desc, list);
567 		if (buf_pos)
568 			return buf_pos;
569 		(*buf_lst)++;
570 	}
571 	return NULL;
572 }
573 
574 /* return next rmb from buffer lists */
575 static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr,
576 						 int *buf_lst,
577 						 struct smc_buf_desc *buf_pos)
578 {
579 	struct smc_buf_desc *buf_next;
580 
581 	if (!buf_pos)
582 		return _smc_llc_get_next_rmb(lgr, buf_lst);
583 
584 	if (list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
585 		(*buf_lst)++;
586 		return _smc_llc_get_next_rmb(lgr, buf_lst);
587 	}
588 	buf_next = list_next_entry(buf_pos, list);
589 	return buf_next;
590 }
591 
/* reset the list cursor and return the very first rmb of the lgr */
static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr,
						  int *buf_lst)
{
	*buf_lst = 0;
	return smc_llc_get_next_rmb(lgr, buf_lst, NULL);
}
598 
/* fill the SMC-Rv2 ADD LINK extension with one rkey triple per in-use rmb
 * (old-link rkey, new-link rkey, new-link vaddr); holds rmbs_lock while
 * walking the buffer lists. Returns the total extension length in bytes.
 */
static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
			       struct smc_link *link, struct smc_link *link_new)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_buf_desc *buf_pos;
	int prim_lnk_idx, lnk_idx, i;
	struct smc_buf_desc *rmb;
	int len = sizeof(*ext);
	int buf_lst;

	ext->v2_direct = !lgr->uses_gateway;
	memcpy(ext->client_target_gid, link_new->gid, SMC_GID_SIZE);

	prim_lnk_idx = link->link_idx;
	lnk_idx = link_new->link_idx;
	down_write(&lgr->rmbs_lock);
	ext->num_rkeys = lgr->conns_num;
	if (!ext->num_rkeys)
		goto out;
	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
	for (i = 0; i < ext->num_rkeys; i++) {
		/* skip buffers that are currently unused */
		while (buf_pos && !(buf_pos)->used)
			buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
		if (!buf_pos)
			break;
		rmb = buf_pos;
		ext->rt[i].rmb_key = htonl(rmb->mr[prim_lnk_idx]->rkey);
		ext->rt[i].rmb_key_new = htonl(rmb->mr[lnk_idx]->rkey);
		ext->rt[i].rmb_vaddr_new = rmb->is_vm ?
			cpu_to_be64((uintptr_t)rmb->cpu_addr) :
			cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));
		buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
	}
	/* only account for the rkey triples actually filled in */
	len += i * sizeof(ext->rt[0]);
out:
	up_write(&lgr->rmbs_lock);
	return len;
}
637 
/* send ADD LINK request or response.
 * For SMC_V2 link groups the message goes out in a large v2 buffer with
 * the v2 extension appended directly behind the base message; otherwise
 * a regular send buffer is used. link_new is optional (request without
 * an alternate link yet omits QP data and the v2 extension).
 */
int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
			  struct smc_link *link_new,
			  enum smc_llc_reqresp reqresp)
{
	struct smc_llc_msg_add_link_v2_ext *ext = NULL;
	struct smc_llc_msg_add_link *addllc;
	struct smc_wr_tx_pend_priv *pend;
	int len = sizeof(*addllc);
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	if (link->lgr->smc_version == SMC_V2) {
		struct smc_wr_v2_buf *wr_buf;

		rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend);
		if (rc)
			goto put_out;
		addllc = (struct smc_llc_msg_add_link *)wr_buf;
		/* extension starts right behind the base message */
		ext = (struct smc_llc_msg_add_link_v2_ext *)
						&wr_buf->raw[sizeof(*addllc)];
		memset(ext, 0, SMC_WR_TX_SIZE);
	} else {
		struct smc_wr_buf *wr_buf;

		rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
		if (rc)
			goto put_out;
		addllc = (struct smc_llc_msg_add_link *)wr_buf;
	}

	memset(addllc, 0, sizeof(*addllc));
	addllc->hd.common.llc_type = SMC_LLC_ADD_LINK;
	if (reqresp == SMC_LLC_RESP)
		addllc->hd.flags |= SMC_LLC_FLAG_RESP;
	memcpy(addllc->sender_mac, mac, ETH_ALEN);
	memcpy(addllc->sender_gid, gid, SMC_GID_SIZE);
	if (link_new) {
		addllc->link_num = link_new->link_id;
		hton24(addllc->sender_qp_num, link_new->roce_qp->qp_num);
		hton24(addllc->initial_psn, link_new->psn_initial);
		if (reqresp == SMC_LLC_REQ)
			addllc->qp_mtu = link_new->path_mtu;
		else
			/* response advertises the negotiated minimum MTU */
			addllc->qp_mtu = min(link_new->path_mtu,
					     link_new->peer_mtu);
	}
	if (ext && link_new)
		len += smc_llc_fill_ext_v2(ext, link, link_new);
	smc_llc_init_msg_hdr(&addllc->hd, link->lgr, len);
	/* send llc message */
	if (link->lgr->smc_version == SMC_V2)
		rc = smc_wr_tx_v2_send(link, pend, len);
	else
		rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
698 
/* send DELETE LINK request or response.
 * link_del_id == 0 requests deletion of all links (DEL_LINK_ALL flag);
 * reason is carried big-endian in the message.
 */
int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
			     enum smc_llc_reqresp reqresp, bool orderly,
			     u32 reason)
{
	struct smc_llc_msg_del_link *delllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	delllc = (struct smc_llc_msg_del_link *)wr_buf;

	memset(delllc, 0, sizeof(*delllc));
	delllc->hd.common.llc_type = SMC_LLC_DELETE_LINK;
	smc_llc_init_msg_hdr(&delllc->hd, link->lgr, sizeof(*delllc));
	if (reqresp == SMC_LLC_RESP)
		delllc->hd.flags |= SMC_LLC_FLAG_RESP;
	if (orderly)
		delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
	if (link_del_id)
		delllc->link_num = link_del_id;
	else
		delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
	delllc->reason = htonl(reason);
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
734 
/* send LLC test link request carrying 16 bytes of caller user_data */
static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
{
	struct smc_llc_msg_test_link *testllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	testllc = (struct smc_llc_msg_test_link *)wr_buf;
	memset(testllc, 0, sizeof(*testllc));
	testllc->hd.common.llc_type = SMC_LLC_TEST_LINK;
	smc_llc_init_msg_hdr(&testllc->hd, link->lgr, sizeof(*testllc));
	memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
759 
/* schedule an llc send on link, may wait for buffers.
 * Copies the prebuilt llcbuf into a reserved send buffer and transmits it.
 */
static int smc_llc_send_message(struct smc_link *link, void *llcbuf)
{
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
778 
/* schedule an llc send on link, may wait for buffers,
 * and wait for send completion notification.
 * @return 0 on success
 */
static int smc_llc_send_message_wait(struct smc_link *link, void *llcbuf)
{
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
	/* blocks up to SMC_LLC_WAIT_TIME for the completion */
	rc = smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
800 
801 /********************************* receive ***********************************/
802 
803 static int smc_llc_alloc_alt_link(struct smc_link_group *lgr,
804 				  enum smc_lgr_type lgr_new_t)
805 {
806 	int i;
807 
808 	if (lgr->type == SMC_LGR_SYMMETRIC ||
809 	    (lgr->type != SMC_LGR_SINGLE &&
810 	     (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
811 	      lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)))
812 		return -EMLINK;
813 
814 	if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
815 	    lgr_new_t == SMC_LGR_ASYMMETRIC_PEER) {
816 		for (i = SMC_LINKS_PER_LGR_MAX - 1; i >= 0; i--)
817 			if (lgr->lnk[i].state == SMC_LNK_UNUSED)
818 				return i;
819 	} else {
820 		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
821 			if (lgr->lnk[i].state == SMC_LNK_UNUSED)
822 				return i;
823 	}
824 	return -EMLINK;
825 }
826 
/* send one add_link_continue msg.
 * Packs up to SMC_LLC_RKEYS_PER_CONT_MSG rkey triples starting at the
 * caller's cursor (*buf_pos / *buf_lst) and decrements *num_rkeys_todo;
 * the cursor state persists across calls so the caller can loop.
 */
static int smc_llc_add_link_cont(struct smc_link *link,
				 struct smc_link *link_new, u8 *num_rkeys_todo,
				 int *buf_lst, struct smc_buf_desc **buf_pos)
{
	struct smc_llc_msg_add_link_cont *addc_llc;
	struct smc_link_group *lgr = link->lgr;
	int prim_lnk_idx, lnk_idx, i, rc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	struct smc_buf_desc *rmb;
	u8 n;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	addc_llc = (struct smc_llc_msg_add_link_cont *)wr_buf;
	memset(addc_llc, 0, sizeof(*addc_llc));

	prim_lnk_idx = link->link_idx;
	lnk_idx = link_new->link_idx;
	addc_llc->link_num = link_new->link_id;
	addc_llc->num_rkeys = *num_rkeys_todo;
	n = *num_rkeys_todo;
	for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) {
		if (!*buf_pos) {
			/* ran out of buffers: shrink the announced count */
			addc_llc->num_rkeys = addc_llc->num_rkeys -
					      *num_rkeys_todo;
			*num_rkeys_todo = 0;
			break;
		}
		rmb = *buf_pos;

		addc_llc->rt[i].rmb_key = htonl(rmb->mr[prim_lnk_idx]->rkey);
		addc_llc->rt[i].rmb_key_new = htonl(rmb->mr[lnk_idx]->rkey);
		addc_llc->rt[i].rmb_vaddr_new = rmb->is_vm ?
			cpu_to_be64((uintptr_t)rmb->cpu_addr) :
			cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));

		(*num_rkeys_todo)--;
		*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
		/* skip buffers that are currently unused */
		while (*buf_pos && !(*buf_pos)->used)
			*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
	}
	addc_llc->hd.common.llc_type = SMC_LLC_ADD_LINK_CONT;
	addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
	if (lgr->role == SMC_CLNT)
		addc_llc->hd.flags |= SMC_LLC_FLAG_RESP;
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
882 
/* client side of the ADD LINK CONTINUE rkey exchange: alternately receive
 * the peer's rtokens (storing them via smc_rtoken_set()) and send our own,
 * until both directions are exhausted. Holds rmbs_lock for the duration.
 */
static int smc_llc_cli_rkey_exchange(struct smc_link *link,
				     struct smc_link *link_new)
{
	struct smc_llc_msg_add_link_cont *addc_llc;
	struct smc_link_group *lgr = link->lgr;
	u8 max, num_rkeys_send, num_rkeys_recv;
	struct smc_llc_qentry *qentry;
	struct smc_buf_desc *buf_pos;
	int buf_lst;
	int rc = 0;
	int i;

	down_write(&lgr->rmbs_lock);
	num_rkeys_send = lgr->conns_num;
	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
	do {
		qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_TIME,
				      SMC_LLC_ADD_LINK_CONT);
		if (!qentry) {
			rc = -ETIMEDOUT;
			break;
		}
		addc_llc = &qentry->msg.add_link_cont;
		num_rkeys_recv = addc_llc->num_rkeys;
		max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
		for (i = 0; i < max; i++) {
			smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
				       addc_llc->rt[i].rmb_key,
				       addc_llc->rt[i].rmb_vaddr_new,
				       addc_llc->rt[i].rmb_key_new);
			num_rkeys_recv--;
		}
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		rc = smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
					   &buf_lst, &buf_pos);
		if (rc)
			break;
	} while (num_rkeys_send || num_rkeys_recv);

	up_write(&lgr->rmbs_lock);
	return rc;
}
925 
/* prepare and send an add link reject response: reuse the received
 * message, setting the response and reject flags plus the reject reason
 */
static int smc_llc_cli_add_link_reject(struct smc_llc_qentry *qentry)
{
	qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
	qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_ADD_LNK_REJ;
	qentry->msg.raw.hdr.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH;
	smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr,
			     sizeof(qentry->msg));
	return smc_llc_send_message(qentry->link, &qentry->msg);
}
936 
/* client side of confirming a new link: wait for the server's CONFIRM
 * LINK request, bring the new QP to RTS, register the buffers, send the
 * CONFIRM LINK response and activate the link. Any failure triggers a
 * DELETE LINK request and returns -ENOLINK.
 */
static int smc_llc_cli_conf_link(struct smc_link *link,
				 struct smc_init_info *ini,
				 struct smc_link *link_new,
				 enum smc_lgr_type lgr_new_t)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_qentry *qentry = NULL;
	int rc = 0;

	/* receive CONFIRM LINK request over RoCE fabric */
	qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_FIRST_TIME, 0);
	if (!qentry) {
		rc = smc_llc_send_delete_link(link, link_new->link_id,
					      SMC_LLC_REQ, false,
					      SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}
	if (qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) {
		/* received DELETE_LINK instead */
		qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
		smc_llc_send_message(link, &qentry->msg);
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		return -ENOLINK;
	}
	smc_llc_save_peer_uid(qentry);
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);

	rc = smc_ib_modify_qp_rts(link_new);
	if (rc) {
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}
	smc_wr_remember_qp_attr(link_new);

	rc = smcr_buf_reg_lgr(link_new);
	if (rc) {
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link_new, SMC_LLC_RESP);
	if (rc) {
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}
	smc_llc_link_active(link_new);
	/* record the resulting link group topology */
	if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
	    lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)
		smcr_lgr_set_type_asym(lgr, lgr_new_t, link_new->link_idx);
	else
		smcr_lgr_set_type(lgr, lgr_new_t);
	return 0;
}
994 
995 static void smc_llc_save_add_link_rkeys(struct smc_link *link,
996 					struct smc_link *link_new)
997 {
998 	struct smc_llc_msg_add_link_v2_ext *ext;
999 	struct smc_link_group *lgr = link->lgr;
1000 	int max, i;
1001 
1002 	ext = (struct smc_llc_msg_add_link_v2_ext *)((u8 *)lgr->wr_rx_buf_v2 +
1003 						     SMC_WR_TX_SIZE);
1004 	max = min_t(u8, ext->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
1005 	down_write(&lgr->rmbs_lock);
1006 	for (i = 0; i < max; i++) {
1007 		smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
1008 			       ext->rt[i].rmb_key,
1009 			       ext->rt[i].rmb_vaddr_new,
1010 			       ext->rt[i].rmb_key_new);
1011 	}
1012 	up_write(&lgr->rmbs_lock);
1013 }
1014 
1015 static void smc_llc_save_add_link_info(struct smc_link *link,
1016 				       struct smc_llc_msg_add_link *add_llc)
1017 {
1018 	link->peer_qpn = ntoh24(add_llc->sender_qp_num);
1019 	memcpy(link->peer_gid, add_llc->sender_gid, SMC_GID_SIZE);
1020 	memcpy(link->peer_mac, add_llc->sender_mac, ETH_ALEN);
1021 	link->peer_psn = ntoh24(add_llc->initial_psn);
1022 	link->peer_mtu = add_llc->qp_mtu;
1023 }
1024 
/* as an SMC client, process an add link request: allocate and initialize
 * an alternate link, answer with an ADD_LINK response, take over the
 * rkeys for all buffers and complete the CONFIRM_LINK handshake; on
 * failure the request is rejected and a partially set up link is cleared
 */
int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
{
	struct smc_llc_msg_add_link *llc = &qentry->msg.add_link;
	enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_init_info *ini = NULL;
	struct smc_link *lnk_new = NULL;
	int lnk_idx, rc = 0;

	/* a zero qp_mtu in the request is invalid */
	if (!llc->qp_mtu)
		goto out_reject;

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini) {
		rc = -ENOMEM;
		goto out_reject;
	}

	ini->vlan_id = lgr->vlan_id;
	if (lgr->smc_version == SMC_V2) {
		ini->check_smcrv2 = true;
		ini->smcrv2.saddr = lgr->saddr;
		ini->smcrv2.daddr = smc_ib_gid_to_ipv4(llc->sender_gid);
	}
	/* look for an alternate local RoCE device for the new link */
	smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
	/* peer sent the same GID (and MAC for SMC-R V1) as on the existing
	 * link, i.e. the peer has no alternate path
	 */
	if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
	    (lgr->smc_version == SMC_V2 ||
	     !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN))) {
		if (!ini->ib_dev && !ini->smcrv2.ib_dev_v2)
			goto out_reject;
		lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
	}
	/* no local alternate device found - reuse the existing one */
	if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->smcrv2.ib_dev_v2 = link->smcibdev;
		ini->smcrv2.ib_port_v2 = link->ibport;
	} else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->ib_dev = link->smcibdev;
		ini->ib_port = link->ibport;
	}
	lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
	if (lnk_idx < 0)
		goto out_reject;
	lnk_new = &lgr->lnk[lnk_idx];
	rc = smcr_link_init(lgr, lnk_new, lnk_idx, ini);
	if (rc)
		goto out_reject;
	smc_llc_save_add_link_info(lnk_new, llc);
	lnk_new->link_id = llc->link_num;	/* SMC server assigns link id */
	smc_llc_link_set_uid(lnk_new);

	rc = smc_ib_ready_link(lnk_new);
	if (rc)
		goto out_clear_lnk;

	rc = smcr_buf_map_lgr(lnk_new);
	if (rc)
		goto out_clear_lnk;

	/* send ADD_LINK response carrying the new link's address data */
	rc = smc_llc_send_add_link(link,
				   lnk_new->smcibdev->mac[lnk_new->ibport - 1],
				   lnk_new->gid, lnk_new, SMC_LLC_RESP);
	if (rc)
		goto out_clear_lnk;
	if (lgr->smc_version == SMC_V2) {
		/* SMC-R V2 carries all rkeys in the add_link V2 extension */
		smc_llc_save_add_link_rkeys(link, lnk_new);
	} else {
		rc = smc_llc_cli_rkey_exchange(link, lnk_new);
		if (rc) {
			rc = 0;
			goto out_clear_lnk;
		}
	}
	rc = smc_llc_cli_conf_link(link, ini, lnk_new, lgr_new_t);
	if (!rc)
		goto out;
out_clear_lnk:
	lnk_new->state = SMC_LNK_INACTIVE;
	smcr_link_clear(lnk_new, false);
out_reject:
	smc_llc_cli_add_link_reject(qentry);
out:
	kfree(ini);
	kfree(qentry);
	return rc;
}
1113 
/* send a V2 REQ_ADD_LINK message carrying the list of additional local
 * GIDs, inviting the server to start an add_link flow; only sent when
 * this side actually has an alternate GID to offer
 */
static void smc_llc_send_request_add_link(struct smc_link *link)
{
	struct smc_llc_msg_req_add_link_v2 *llc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_v2_buf *wr_buf;
	struct smc_gidlist gidlist;
	int rc, len, i;

	if (!smc_wr_tx_link_hold(link))
		return;
	/* nothing to do if the link group is already fully redundant */
	if (link->lgr->type == SMC_LGR_SYMMETRIC ||
	    link->lgr->type == SMC_LGR_ASYMMETRIC_PEER)
		goto put_out;

	smc_fill_gid_list(link->lgr, &gidlist, link->smcibdev, link->gid);
	if (gidlist.len <= 1)
		goto put_out;	/* no alternate gid to offer */

	rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	llc = (struct smc_llc_msg_req_add_link_v2 *)wr_buf;
	memset(llc, 0, SMC_WR_TX_SIZE);

	llc->hd.common.llc_type = SMC_LLC_REQ_ADD_LINK;
	for (i = 0; i < gidlist.len; i++)
		memcpy(llc->gid[i], gidlist.list[i], sizeof(gidlist.list[0]));
	llc->gid_cnt = gidlist.len;
	/* message length includes the variable-sized gid array */
	len = sizeof(*llc) + (gidlist.len * sizeof(gidlist.list[0]));
	smc_llc_init_msg_hdr(&llc->hd, link->lgr, len);
	rc = smc_wr_tx_v2_send(link, pend, len);
	if (!rc)
		/* set REQ_ADD_LINK flow and wait for response from peer */
		link->lgr->llc_flow_lcl.type = SMC_LLC_FLOW_REQ_ADD_LINK;
put_out:
	smc_wr_tx_link_put(link);
}
1151 
/* as an SMC client, invite server to start the add_link processing;
 * for SMC-R V1 this means sending an ADD_LINK request with an alternate
 * local device, for V2 a REQ_ADD_LINK message is used instead;
 * @qentry is consumed in any case
 */
static void smc_llc_cli_add_link_invite(struct smc_link *link,
					struct smc_llc_qentry *qentry)
{
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_init_info *ini = NULL;

	if (lgr->smc_version == SMC_V2) {
		smc_llc_send_request_add_link(link);
		goto out;
	}

	/* nothing to do if the link group is already fully redundant */
	if (lgr->type == SMC_LGR_SYMMETRIC ||
	    lgr->type == SMC_LGR_ASYMMETRIC_PEER)
		goto out;

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini)
		goto out;

	ini->vlan_id = lgr->vlan_id;
	smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
	if (!ini->ib_dev)
		goto out;	/* no alternate device - do not invite */

	smc_llc_send_add_link(link, ini->ib_dev->mac[ini->ib_port - 1],
			      ini->ib_gid, NULL, SMC_LLC_REQ);
out:
	kfree(ini);
	kfree(qentry);
}
1183 
1184 static bool smc_llc_is_empty_llc_message(union smc_llc_msg *llc)
1185 {
1186 	int i;
1187 
1188 	for (i = 0; i < ARRAY_SIZE(llc->raw.data); i++)
1189 		if (llc->raw.data[i])
1190 			return false;
1191 	return true;
1192 }
1193 
1194 static bool smc_llc_is_local_add_link(union smc_llc_msg *llc)
1195 {
1196 	if (llc->raw.hdr.common.llc_type == SMC_LLC_ADD_LINK &&
1197 	    smc_llc_is_empty_llc_message(llc))
1198 		return true;
1199 	return false;
1200 }
1201 
1202 static void smc_llc_process_cli_add_link(struct smc_link_group *lgr)
1203 {
1204 	struct smc_llc_qentry *qentry;
1205 
1206 	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
1207 
1208 	down_write(&lgr->llc_conf_mutex);
1209 	if (smc_llc_is_local_add_link(&qentry->msg))
1210 		smc_llc_cli_add_link_invite(qentry->link, qentry);
1211 	else
1212 		smc_llc_cli_add_link(qentry->link, qentry);
1213 	up_write(&lgr->llc_conf_mutex);
1214 }
1215 
1216 static int smc_llc_active_link_count(struct smc_link_group *lgr)
1217 {
1218 	int i, link_count = 0;
1219 
1220 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1221 		if (!smc_link_active(&lgr->lnk[i]))
1222 			continue;
1223 		link_count++;
1224 	}
1225 	return link_count;
1226 }
1227 
/* find the asymmetric link when 3 links are established  */
static struct smc_link *smc_llc_find_asym_link(struct smc_link_group *lgr)
{
	int asym_idx = -ENOENT;
	int i, j, k;
	bool found;

	/* determine asymmetric link */
	found = false;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
			if (!smc_link_usable(&lgr->lnk[i]) ||
			    !smc_link_usable(&lgr->lnk[j]))
				continue;
			/* two usable links sharing one local gid mean one
			 * of them is the asymmetric one
			 */
			if (!memcmp(lgr->lnk[i].gid, lgr->lnk[j].gid,
				    SMC_GID_SIZE)) {
				found = true;	/* asym_lnk is i or j */
				break;
			}
		}
		if (found)
			break;
	}
	if (!found)
		goto out; /* no asymmetric link */
	/* of i and j, the asymmetric one is the link whose peer gid also
	 * shows up on another usable link (i and j still index the pair
	 * found above)
	 */
	for (k = 0; k < SMC_LINKS_PER_LGR_MAX; k++) {
		if (!smc_link_usable(&lgr->lnk[k]))
			continue;
		if (k != i &&
		    !memcmp(lgr->lnk[i].peer_gid, lgr->lnk[k].peer_gid,
			    SMC_GID_SIZE)) {
			asym_idx = i;
			break;
		}
		if (k != j &&
		    !memcmp(lgr->lnk[j].peer_gid, lgr->lnk[k].peer_gid,
			    SMC_GID_SIZE)) {
			asym_idx = j;
			break;
		}
	}
out:
	return (asym_idx < 0) ? NULL : &lgr->lnk[asym_idx];
}
1272 
/* tear down the asymmetric link of the link group, if there is one:
 * move its connections to another link, run the DELETE_LINK handshake
 * with the peer and finally clear the link
 */
static void smc_llc_delete_asym_link(struct smc_link_group *lgr)
{
	struct smc_link *lnk_new = NULL, *lnk_asym;
	struct smc_llc_qentry *qentry;
	int rc;

	lnk_asym = smc_llc_find_asym_link(lgr);
	if (!lnk_asym)
		return; /* no asymmetric link */
	if (!smc_link_downing(&lnk_asym->state))
		return;	/* link is already going down elsewhere */
	/* move the connections off the asymmetric link */
	lnk_new = smc_switch_conns(lgr, lnk_asym, false);
	smc_wr_tx_wait_no_pending_sends(lnk_asym);
	if (!lnk_new)
		goto out_free;
	/* change flow type from ADD_LINK into DEL_LINK */
	lgr->llc_flow_lcl.type = SMC_LLC_FLOW_DEL_LINK;
	rc = smc_llc_send_delete_link(lnk_new, lnk_asym->link_id, SMC_LLC_REQ,
				      true, SMC_LLC_DEL_NO_ASYM_NEEDED);
	if (rc) {
		smcr_link_down_cond(lnk_new);
		goto out_free;
	}
	/* wait for the peer's DELETE_LINK response */
	qentry = smc_llc_wait(lgr, lnk_new, SMC_LLC_WAIT_TIME,
			      SMC_LLC_DELETE_LINK);
	if (!qentry) {
		smcr_link_down_cond(lnk_new);
		goto out_free;
	}
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
out_free:
	smcr_link_clear(lnk_asym, true);
}
1306 
/* as an SMC server, exchange the rkeys of all buffers with the client
 * via ADD_LINK_CONT messages, storing the client's rkeys for the new
 * link; the loop continues until both sides have sent all their rkeys
 * Returns 0 on success, -ETIMEDOUT if a client reply is missing.
 */
static int smc_llc_srv_rkey_exchange(struct smc_link *link,
				     struct smc_link *link_new)
{
	struct smc_llc_msg_add_link_cont *addc_llc;
	struct smc_link_group *lgr = link->lgr;
	u8 max, num_rkeys_send, num_rkeys_recv;
	struct smc_llc_qentry *qentry = NULL;
	struct smc_buf_desc *buf_pos;
	int buf_lst;
	int rc = 0;
	int i;

	down_write(&lgr->rmbs_lock);
	num_rkeys_send = lgr->conns_num;
	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
	do {
		/* send the next portion of our rkeys */
		smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
				      &buf_lst, &buf_pos);
		/* wait for the client's ADD_LINK_CONT reply */
		qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME,
				      SMC_LLC_ADD_LINK_CONT);
		if (!qentry) {
			rc = -ETIMEDOUT;
			goto out;
		}
		addc_llc = &qentry->msg.add_link_cont;
		num_rkeys_recv = addc_llc->num_rkeys;
		max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
		for (i = 0; i < max; i++) {
			smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
				       addc_llc->rt[i].rmb_key,
				       addc_llc->rt[i].rmb_vaddr_new,
				       addc_llc->rt[i].rmb_key_new);
			num_rkeys_recv--;
		}
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	} while (num_rkeys_send || num_rkeys_recv);
out:
	up_write(&lgr->rmbs_lock);
	return rc;
}
1347 
/* as an SMC server, run the CONFIRM_LINK handshake for a freshly added
 * link: send the CONFIRM_LINK request on the new link and wait for the
 * client's response; on success the new link becomes active and the link
 * group type is updated
 * Returns 0 on success, -ENOLINK on failure.
 */
static int smc_llc_srv_conf_link(struct smc_link *link,
				 struct smc_link *link_new,
				 enum smc_lgr_type lgr_new_t)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_qentry *qentry = NULL;
	int rc;

	/* send CONFIRM LINK request over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link_new, SMC_LLC_REQ);
	if (rc)
		return -ENOLINK;
	/* receive CONFIRM LINK response over the RoCE fabric */
	qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, 0);
	if (!qentry ||
	    qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) {
		/* send DELETE LINK */
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		if (qentry)
			smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		return -ENOLINK;
	}
	smc_llc_save_peer_uid(qentry);
	smc_llc_link_active(link_new);
	/* adjust the link group type to the new topology */
	if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
	    lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)
		smcr_lgr_set_type_asym(lgr, lgr_new_t, link_new->link_idx);
	else
		smcr_lgr_set_type(lgr, lgr_new_t);
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	return 0;
}
1381 
1382 static void smc_llc_send_req_add_link_response(struct smc_llc_qentry *qentry)
1383 {
1384 	qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
1385 	smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr,
1386 			     sizeof(qentry->msg));
1387 	memset(&qentry->msg.raw.data, 0, sizeof(qentry->msg.raw.data));
1388 	smc_llc_send_message(qentry->link, &qentry->msg);
1389 }
1390 
/* as an SMC server, run a complete add_link flow: allocate and
 * initialize an alternate link, send an ADD_LINK request, process the
 * client's response, exchange the rkeys and complete the CONFIRM_LINK
 * handshake; a pending client REQ_ADD_LINK is answered if the flow fails
 * before an ADD_LINK request was sent
 * Returns 0 on success, a negative error code otherwise.
 */
int smc_llc_srv_add_link(struct smc_link *link,
			 struct smc_llc_qentry *req_qentry)
{
	enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_msg_add_link *add_llc;
	struct smc_llc_qentry *qentry = NULL;
	bool send_req_add_link_resp = false;
	struct smc_link *link_new = NULL;
	struct smc_init_info *ini = NULL;
	int lnk_idx, rc = 0;

	/* remember whether the flow was triggered by a client REQ_ADD_LINK */
	if (req_qentry &&
	    req_qentry->msg.raw.hdr.common.llc_type == SMC_LLC_REQ_ADD_LINK)
		send_req_add_link_resp = true;

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini) {
		rc = -ENOMEM;
		goto out;
	}

	/* ignore client add link recommendation, start new flow */
	ini->vlan_id = lgr->vlan_id;
	if (lgr->smc_version == SMC_V2) {
		ini->check_smcrv2 = true;
		ini->smcrv2.saddr = lgr->saddr;
		if (send_req_add_link_resp) {
			struct smc_llc_msg_req_add_link_v2 *req_add =
				&req_qentry->msg.req_add_link;

			/* use the first gid offered by the client as the
			 * destination address
			 */
			ini->smcrv2.daddr = smc_ib_gid_to_ipv4(req_add->gid[0]);
		}
	}
	smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
	/* no local alternate device found - reuse the existing one */
	if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->smcrv2.ib_dev_v2 = link->smcibdev;
		ini->smcrv2.ib_port_v2 = link->ibport;
	} else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->ib_dev = link->smcibdev;
		ini->ib_port = link->ibport;
	}
	lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
	if (lnk_idx < 0) {
		rc = 0;	/* no free link slot is not treated as an error */
		goto out;
	}

	rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, ini);
	if (rc)
		goto out;
	link_new = &lgr->lnk[lnk_idx];

	rc = smcr_buf_map_lgr(link_new);
	if (rc)
		goto out_err;

	rc = smc_llc_send_add_link(link,
				   link_new->smcibdev->mac[link_new->ibport-1],
				   link_new->gid, link_new, SMC_LLC_REQ);
	if (rc)
		goto out_err;
	/* the sent ADD_LINK request implicitly answers the REQ_ADD_LINK */
	send_req_add_link_resp = false;
	/* receive ADD LINK response over the RoCE fabric */
	qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME, SMC_LLC_ADD_LINK);
	if (!qentry) {
		rc = -ETIMEDOUT;
		goto out_err;
	}
	add_llc = &qentry->msg.add_link;
	if (add_llc->hd.flags & SMC_LLC_FLAG_ADD_LNK_REJ) {
		/* client rejected the new link */
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		rc = -ENOLINK;
		goto out_err;
	}
	/* client answered with the same GID (and MAC for SMC-R V1) as on
	 * the existing link, i.e. it has no alternate path
	 */
	if (lgr->type == SMC_LGR_SINGLE &&
	    (!memcmp(add_llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
	     (lgr->smc_version == SMC_V2 ||
	      !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN)))) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
	}
	smc_llc_save_add_link_info(link_new, add_llc);
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);

	rc = smc_ib_ready_link(link_new);
	if (rc)
		goto out_err;
	rc = smcr_buf_reg_lgr(link_new);
	if (rc)
		goto out_err;
	if (lgr->smc_version == SMC_V2) {
		/* SMC-R V2 carries all rkeys in the add_link V2 extension */
		smc_llc_save_add_link_rkeys(link, link_new);
	} else {
		rc = smc_llc_srv_rkey_exchange(link, link_new);
		if (rc)
			goto out_err;
	}
	rc = smc_llc_srv_conf_link(link, link_new, lgr_new_t);
	if (rc)
		goto out_err;
	kfree(ini);
	return 0;
out_err:
	/* undo the partial link setup */
	if (link_new) {
		link_new->state = SMC_LNK_INACTIVE;
		smcr_link_clear(link_new, false);
	}
out:
	kfree(ini);
	if (send_req_add_link_resp)
		smc_llc_send_req_add_link_response(req_qentry);
	return rc;
}
1506 
/* as an SMC server, process a pending add_link event under the
 * llc_conf_mutex and remove a then-superfluous asymmetric link afterwards
 */
static void smc_llc_process_srv_add_link(struct smc_link_group *lgr)
{
	struct smc_link *link = lgr->llc_flow_lcl.qentry->link;
	struct smc_llc_qentry *qentry;
	int rc;

	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);

	down_write(&lgr->llc_conf_mutex);
	rc = smc_llc_srv_add_link(link, qentry);
	if (!rc && lgr->type == SMC_LGR_SYMMETRIC) {
		/* delete any asymmetric link */
		smc_llc_delete_asym_link(lgr);
	}
	up_write(&lgr->llc_conf_mutex);
	kfree(qentry);
}
1524 
1525 /* enqueue a local add_link req to trigger a new add_link flow */
1526 void smc_llc_add_link_local(struct smc_link *link)
1527 {
1528 	struct smc_llc_msg_add_link add_llc = {};
1529 
1530 	add_llc.hd.common.llc_type = SMC_LLC_ADD_LINK;
1531 	smc_llc_init_msg_hdr(&add_llc.hd, link->lgr, sizeof(add_llc));
1532 	/* no dev and port needed */
1533 	smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc);
1534 }
1535 
/* worker to process an add link message */
static void smc_llc_add_link_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  llc_add_link_work);

	if (list_empty(&lgr->list)) {
		/* link group is terminating */
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		goto out;
	}

	if (lgr->role == SMC_CLNT)
		smc_llc_process_cli_add_link(lgr);
	else
		smc_llc_process_srv_add_link(lgr);
out:
	/* a REQ_ADD_LINK flow stays active until the peer's response
	 * arrives; all other flow types are finished here
	 */
	if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_REQ_ADD_LINK)
		smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
}
1556 
1557 /* enqueue a local del_link msg to trigger a new del_link flow,
1558  * called only for role SMC_SERV
1559  */
1560 void smc_llc_srv_delete_link_local(struct smc_link *link, u8 del_link_id)
1561 {
1562 	struct smc_llc_msg_del_link del_llc = {};
1563 
1564 	del_llc.hd.common.llc_type = SMC_LLC_DELETE_LINK;
1565 	smc_llc_init_msg_hdr(&del_llc.hd, link->lgr, sizeof(del_llc));
1566 	del_llc.link_num = del_link_id;
1567 	del_llc.reason = htonl(SMC_LLC_DEL_LOST_PATH);
1568 	del_llc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
1569 	smc_llc_enqueue(link, (union smc_llc_msg *)&del_llc);
1570 }
1571 
/* as an SMC client, process a DELETE_LINK request from the server: send
 * the response, take the named link down and adjust the link group state
 */
static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
{
	struct smc_link *lnk_del = NULL, *lnk_asym, *lnk;
	struct smc_llc_msg_del_link *del_llc;
	struct smc_llc_qentry *qentry;
	int active_links;
	int lnk_idx;

	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
	lnk = qentry->link;
	del_llc = &qentry->msg.delete_link;

	if (del_llc->hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) {
		/* peer requested deletion of the whole link group */
		smc_lgr_terminate_sched(lgr);
		goto out;
	}
	down_write(&lgr->llc_conf_mutex);
	/* delete single link */
	for (lnk_idx = 0; lnk_idx < SMC_LINKS_PER_LGR_MAX; lnk_idx++) {
		if (lgr->lnk[lnk_idx].link_id != del_llc->link_num)
			continue;
		lnk_del = &lgr->lnk[lnk_idx];
		break;
	}
	del_llc->hd.flags |= SMC_LLC_FLAG_RESP;
	if (!lnk_del) {
		/* link was not found */
		del_llc->reason = htonl(SMC_LLC_DEL_NOLNK);
		smc_llc_send_message(lnk, &qentry->msg);
		goto out_unlock;
	}
	lnk_asym = smc_llc_find_asym_link(lgr);

	del_llc->reason = 0;
	smc_llc_send_message(lnk, &qentry->msg); /* response */

	/* move the connections off the deleted link, then clear it */
	if (smc_link_downing(&lnk_del->state))
		smc_switch_conns(lgr, lnk_del, false);
	smcr_link_clear(lnk_del, true);

	active_links = smc_llc_active_link_count(lgr);
	if (lnk_del == lnk_asym) {
		/* expected deletion of asym link, don't change lgr state */
	} else if (active_links == 1) {
		smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
	} else if (!active_links) {
		/* no usable link left - terminate the link group */
		smcr_lgr_set_type(lgr, SMC_LGR_NONE);
		smc_lgr_terminate_sched(lgr);
	}
out_unlock:
	up_write(&lgr->llc_conf_mutex);
out:
	kfree(qentry);
}
1626 
1627 /* try to send a DELETE LINK ALL request on any active link,
1628  * waiting for send completion
1629  */
1630 void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn)
1631 {
1632 	struct smc_llc_msg_del_link delllc = {};
1633 	int i;
1634 
1635 	delllc.hd.common.llc_type = SMC_LLC_DELETE_LINK;
1636 	smc_llc_init_msg_hdr(&delllc.hd, lgr, sizeof(delllc));
1637 	if (ord)
1638 		delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
1639 	delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
1640 	delllc.reason = htonl(rsn);
1641 
1642 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1643 		if (!smc_link_sendable(&lgr->lnk[i]))
1644 			continue;
1645 		if (!smc_llc_send_message_wait(&lgr->lnk[i], &delllc))
1646 			break;
1647 	}
1648 }
1649 
/* as an SMC server, process a DELETE_LINK event: either terminate the
 * whole link group, or take down the named link, run the DELETE_LINK
 * handshake with the client and adjust the link group state; if only a
 * single link remains, trigger setup of a new alternate link
 */
static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
{
	struct smc_llc_msg_del_link *del_llc;
	struct smc_link *lnk, *lnk_del;
	struct smc_llc_qentry *qentry;
	int active_links;
	int i;

	down_write(&lgr->llc_conf_mutex);
	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
	lnk = qentry->link;
	del_llc = &qentry->msg.delete_link;

	if (qentry->msg.delete_link.hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) {
		/* delete entire lgr */
		smc_llc_send_link_delete_all(lgr, true, ntohl(
					      qentry->msg.delete_link.reason));
		smc_lgr_terminate_sched(lgr);
		goto out;
	}
	/* delete single link */
	lnk_del = NULL;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (lgr->lnk[i].link_id == del_llc->link_num) {
			lnk_del = &lgr->lnk[i];
			break;
		}
	}
	if (!lnk_del)
		goto out; /* asymmetric link already deleted */

	/* move the connections off the link and drain its send queue */
	if (smc_link_downing(&lnk_del->state)) {
		if (smc_switch_conns(lgr, lnk_del, false))
			smc_wr_tx_wait_no_pending_sends(lnk_del);
	}
	if (!list_empty(&lgr->list)) {
		/* qentry is either a request from peer (send it back to
		 * initiate the DELETE_LINK processing), or a locally
		 * enqueued DELETE_LINK request (forward it)
		 */
		if (!smc_llc_send_message(lnk, &qentry->msg)) {
			struct smc_llc_qentry *qentry2;

			/* wait for the client's DELETE_LINK response */
			qentry2 = smc_llc_wait(lgr, lnk, SMC_LLC_WAIT_TIME,
					       SMC_LLC_DELETE_LINK);
			if (qentry2)
				smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		}
	}
	smcr_link_clear(lnk_del, true);

	active_links = smc_llc_active_link_count(lgr);
	if (active_links == 1) {
		smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
	} else if (!active_links) {
		/* no usable link left - terminate the link group */
		smcr_lgr_set_type(lgr, SMC_LGR_NONE);
		smc_lgr_terminate_sched(lgr);
	}

	if (lgr->type == SMC_LGR_SINGLE && !list_empty(&lgr->list)) {
		/* trigger setup of asymm alt link */
		smc_llc_add_link_local(lnk);
	}
out:
	up_write(&lgr->llc_conf_mutex);
	kfree(qentry);
}
1717 
1718 static void smc_llc_delete_link_work(struct work_struct *work)
1719 {
1720 	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
1721 						  llc_del_link_work);
1722 
1723 	if (list_empty(&lgr->list)) {
1724 		/* link group is terminating */
1725 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1726 		goto out;
1727 	}
1728 
1729 	if (lgr->role == SMC_CLNT)
1730 		smc_llc_process_cli_delete_link(lgr);
1731 	else
1732 		smc_llc_process_srv_delete_link(lgr);
1733 out:
1734 	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
1735 }
1736 
/* process a confirm_rkey request from peer, remote flow */
static void smc_llc_rmt_conf_rkey(struct smc_link_group *lgr)
{
	struct smc_llc_msg_confirm_rkey *llc;
	struct smc_llc_qentry *qentry;
	struct smc_link *link;
	int num_entries;
	int rk_idx;
	int i;

	qentry = lgr->llc_flow_rmt.qentry;
	llc = &qentry->msg.confirm_rkey;
	link = qentry->link;

	num_entries = llc->rtoken[0].num_rkeys;
	if (num_entries > SMC_LLC_RKEYS_PER_MSG)
		goto out_err;	/* more rkeys than fit into one message */
	/* first rkey entry is for receiving link */
	rk_idx = smc_rtoken_add(link,
				llc->rtoken[0].rmb_vaddr,
				llc->rtoken[0].rmb_key);
	if (rk_idx < 0)
		goto out_err;

	/* store the other links' rkeys under the same rtoken index */
	for (i = 1; i <= min_t(u8, num_entries, SMC_LLC_RKEYS_PER_MSG - 1); i++)
		smc_rtoken_set2(lgr, rk_idx, llc->rtoken[i].link_id,
				llc->rtoken[i].rmb_vaddr,
				llc->rtoken[i].rmb_key);
	/* max links is 3 so there is no need to support conf_rkey_cont msgs */
	goto out;
out_err:
	/* report failure, peer may retry */
	llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
	llc->hd.flags |= SMC_LLC_FLAG_RKEY_RETRY;
out:
	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc));
	smc_llc_send_message(link, &qentry->msg);
	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
}
1776 
/* process a delete_rkey request from peer, remote flow */
static void smc_llc_rmt_delete_rkey(struct smc_link_group *lgr)
{
	struct smc_llc_msg_delete_rkey *llc;
	struct smc_llc_qentry *qentry;
	struct smc_link *link;
	u8 err_mask = 0;
	int i, max;

	qentry = lgr->llc_flow_rmt.qentry;
	llc = &qentry->msg.delete_rkey;
	link = qentry->link;

	if (lgr->smc_version == SMC_V2) {
		struct smc_llc_msg_delete_rkey_v2 *llcv2;

		/* reinterpret the message in its V2 layout via the V2
		 * receive buffer
		 */
		memcpy(lgr->wr_rx_buf_v2, llc, sizeof(*llc));
		llcv2 = (struct smc_llc_msg_delete_rkey_v2 *)lgr->wr_rx_buf_v2;
		llcv2->num_inval_rkeys = 0;

		max = min_t(u8, llcv2->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
		for (i = 0; i < max; i++) {
			if (smc_rtoken_delete(link, llcv2->rkey[i]))
				llcv2->num_inval_rkeys++;
		}
		/* clear the V1 payload of the response message */
		memset(&llc->rkey[0], 0, sizeof(llc->rkey));
		memset(&llc->reserved2, 0, sizeof(llc->reserved2));
		smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc));
		if (llcv2->num_inval_rkeys) {
			/* report the number of rkeys that could not be
			 * deleted
			 */
			llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
			llc->err_mask = llcv2->num_inval_rkeys;
		}
		goto finish;
	}

	max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX);
	for (i = 0; i < max; i++) {
		/* V1 reports failures as a bit mask, highest bit first */
		if (smc_rtoken_delete(link, llc->rkey[i]))
			err_mask |= 1 << (SMC_LLC_DEL_RKEY_MAX - 1 - i);
	}
	if (err_mask) {
		llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
		llc->err_mask = err_mask;
	}
finish:
	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_send_message(link, &qentry->msg);
	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
}
1826 
/* log an LLC protocol violation and schedule termination of the
 * link group
 */
static void smc_llc_protocol_violation(struct smc_link_group *lgr, u8 type)
{
	pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu LLC protocol violation: "
			    "llc_type %d\n", SMC_LGR_ID_SIZE, &lgr->id,
			    lgr->net->net_cookie, type);
	smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_PROT_VIOL);
	smc_lgr_terminate_sched(lgr);
}
1835 
1836 /* flush the llc event queue */
1837 static void smc_llc_event_flush(struct smc_link_group *lgr)
1838 {
1839 	struct smc_llc_qentry *qentry, *q;
1840 
1841 	spin_lock_bh(&lgr->llc_event_q_lock);
1842 	list_for_each_entry_safe(qentry, q, &lgr->llc_event_q, list) {
1843 		list_del_init(&qentry->list);
1844 		kfree(qentry);
1845 	}
1846 	spin_unlock_bh(&lgr->llc_event_q_lock);
1847 }
1848 
/* dispatch one llc event: either answer it directly, hand it to a
 * waiting flow, or start a new flow and schedule the matching worker;
 * @qentry ownership passes to the flow on every early return, otherwise
 * it is freed here
 */
static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
{
	union smc_llc_msg *llc = &qentry->msg;
	struct smc_link *link = qentry->link;
	struct smc_link_group *lgr = link->lgr;

	if (!smc_link_usable(link))
		goto out;

	switch (llc->raw.hdr.common.llc_type) {
	case SMC_LLC_TEST_LINK:
		/* answer keepalive probes immediately */
		llc->test_link.hd.flags |= SMC_LLC_FLAG_RESP;
		smc_llc_send_message(link, llc);
		break;
	case SMC_LLC_ADD_LINK:
		if (list_empty(&lgr->list))
			goto out;	/* lgr is terminating */
		if (lgr->role == SMC_CLNT) {
			if (smc_llc_is_local_add_link(llc)) {
				/* locally enqueued trigger */
				if (lgr->llc_flow_lcl.type ==
				    SMC_LLC_FLOW_ADD_LINK)
					break;	/* add_link in progress */
				if (smc_llc_flow_start(&lgr->llc_flow_lcl,
						       qentry)) {
					schedule_work(&lgr->llc_add_link_work);
				}
				return;
			}
			if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
			    !lgr->llc_flow_lcl.qentry) {
				/* a flow is waiting for this message */
				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
							qentry);
				wake_up(&lgr->llc_msg_waiter);
				return;
			}
			if (lgr->llc_flow_lcl.type ==
					SMC_LLC_FLOW_REQ_ADD_LINK) {
				/* server started add_link processing */
				lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK;
				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
							qentry);
				schedule_work(&lgr->llc_add_link_work);
				return;
			}
			if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
				schedule_work(&lgr->llc_add_link_work);
			}
		} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
			/* as smc server, handle client suggestion */
			schedule_work(&lgr->llc_add_link_work);
		}
		return;
	case SMC_LLC_CONFIRM_LINK:
	case SMC_LLC_ADD_LINK_CONT:
		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
			/* a flow is waiting for this message */
			smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
			wake_up(&lgr->llc_msg_waiter);
			return;
		}
		break;
	case SMC_LLC_DELETE_LINK:
		if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
		    !lgr->llc_flow_lcl.qentry) {
			/* DEL LINK REQ during ADD LINK SEQ */
			smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
			wake_up(&lgr->llc_msg_waiter);
		} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
			schedule_work(&lgr->llc_del_link_work);
		}
		return;
	case SMC_LLC_CONFIRM_RKEY:
		/* new request from remote, assign to remote flow */
		if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
			/* process here, does not wait for more llc msgs */
			smc_llc_rmt_conf_rkey(lgr);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
		}
		return;
	case SMC_LLC_CONFIRM_RKEY_CONT:
		/* not used because max links is 3, and 3 rkeys fit into
		 * one CONFIRM_RKEY message
		 */
		break;
	case SMC_LLC_DELETE_RKEY:
		/* new request from remote, assign to remote flow */
		if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
			/* process here, does not wait for more llc msgs */
			smc_llc_rmt_delete_rkey(lgr);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
		}
		return;
	case SMC_LLC_REQ_ADD_LINK:
		/* handle response here, smc_llc_flow_stop() cannot be called
		 * in tasklet context
		 */
		if (lgr->role == SMC_CLNT &&
		    lgr->llc_flow_lcl.type == SMC_LLC_FLOW_REQ_ADD_LINK &&
		    (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP)) {
			smc_llc_flow_stop(link->lgr, &lgr->llc_flow_lcl);
		} else if (lgr->role == SMC_SERV) {
			if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
				/* as smc server, handle client suggestion */
				lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK;
				schedule_work(&lgr->llc_add_link_work);
			}
			return;
		}
		break;
	default:
		smc_llc_protocol_violation(lgr, llc->raw.hdr.common.type);
		break;
	}
out:
	kfree(qentry);
}
1966 
1967 /* worker to process llc messages on the event queue */
1968 static void smc_llc_event_work(struct work_struct *work)
1969 {
1970 	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
1971 						  llc_event_work);
1972 	struct smc_llc_qentry *qentry;
1973 
1974 	if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
1975 		qentry = lgr->delayed_event;
1976 		lgr->delayed_event = NULL;
1977 		if (smc_link_usable(qentry->link))
1978 			smc_llc_event_handler(qentry);
1979 		else
1980 			kfree(qentry);
1981 	}
1982 
1983 again:
1984 	spin_lock_bh(&lgr->llc_event_q_lock);
1985 	if (!list_empty(&lgr->llc_event_q)) {
1986 		qentry = list_first_entry(&lgr->llc_event_q,
1987 					  struct smc_llc_qentry, list);
1988 		list_del_init(&qentry->list);
1989 		spin_unlock_bh(&lgr->llc_event_q_lock);
1990 		smc_llc_event_handler(qentry);
1991 		goto again;
1992 	}
1993 	spin_unlock_bh(&lgr->llc_event_q_lock);
1994 }
1995 
1996 /* process llc responses in tasklet context */
1997 static void smc_llc_rx_response(struct smc_link *link,
1998 				struct smc_llc_qentry *qentry)
1999 {
2000 	enum smc_llc_flowtype flowtype = link->lgr->llc_flow_lcl.type;
2001 	struct smc_llc_flow *flow = &link->lgr->llc_flow_lcl;
2002 	u8 llc_type = qentry->msg.raw.hdr.common.llc_type;
2003 
2004 	switch (llc_type) {
2005 	case SMC_LLC_TEST_LINK:
2006 		if (smc_link_active(link))
2007 			complete(&link->llc_testlink_resp);
2008 		break;
2009 	case SMC_LLC_ADD_LINK:
2010 	case SMC_LLC_ADD_LINK_CONT:
2011 	case SMC_LLC_CONFIRM_LINK:
2012 		if (flowtype != SMC_LLC_FLOW_ADD_LINK || flow->qentry)
2013 			break;	/* drop out-of-flow response */
2014 		goto assign;
2015 	case SMC_LLC_DELETE_LINK:
2016 		if (flowtype != SMC_LLC_FLOW_DEL_LINK || flow->qentry)
2017 			break;	/* drop out-of-flow response */
2018 		goto assign;
2019 	case SMC_LLC_CONFIRM_RKEY:
2020 	case SMC_LLC_DELETE_RKEY:
2021 		if (flowtype != SMC_LLC_FLOW_RKEY || flow->qentry)
2022 			break;	/* drop out-of-flow response */
2023 		goto assign;
2024 	case SMC_LLC_CONFIRM_RKEY_CONT:
2025 		/* not used because max links is 3 */
2026 		break;
2027 	default:
2028 		smc_llc_protocol_violation(link->lgr,
2029 					   qentry->msg.raw.hdr.common.type);
2030 		break;
2031 	}
2032 	kfree(qentry);
2033 	return;
2034 assign:
2035 	/* assign responses to the local flow, we requested them */
2036 	smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
2037 	wake_up(&link->lgr->llc_msg_waiter);
2038 }
2039 
2040 static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
2041 {
2042 	struct smc_link_group *lgr = link->lgr;
2043 	struct smc_llc_qentry *qentry;
2044 	unsigned long flags;
2045 
2046 	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
2047 	if (!qentry)
2048 		return;
2049 	qentry->link = link;
2050 	INIT_LIST_HEAD(&qentry->list);
2051 	memcpy(&qentry->msg, llc, sizeof(union smc_llc_msg));
2052 
2053 	/* process responses immediately */
2054 	if ((llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) &&
2055 	    llc->raw.hdr.common.llc_type != SMC_LLC_REQ_ADD_LINK) {
2056 		smc_llc_rx_response(link, qentry);
2057 		return;
2058 	}
2059 
2060 	/* add requests to event queue */
2061 	spin_lock_irqsave(&lgr->llc_event_q_lock, flags);
2062 	list_add_tail(&qentry->list, &lgr->llc_event_q);
2063 	spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags);
2064 	queue_work(system_highpri_wq, &lgr->llc_event_work);
2065 }
2066 
2067 /* copy received msg and add it to the event queue */
2068 static void smc_llc_rx_handler(struct ib_wc *wc, void *buf)
2069 {
2070 	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
2071 	union smc_llc_msg *llc = buf;
2072 
2073 	if (wc->byte_len < sizeof(*llc))
2074 		return; /* short message */
2075 	if (!llc->raw.hdr.common.llc_version) {
2076 		if (llc->raw.hdr.length != sizeof(*llc))
2077 			return; /* invalid message */
2078 	} else {
2079 		if (llc->raw.hdr.length_v2 < sizeof(*llc))
2080 			return; /* invalid message */
2081 	}
2082 
2083 	smc_llc_enqueue(link, llc);
2084 }
2085 
2086 /***************************** worker, utils *********************************/
2087 
2088 static void smc_llc_testlink_work(struct work_struct *work)
2089 {
2090 	struct smc_link *link = container_of(to_delayed_work(work),
2091 					     struct smc_link, llc_testlink_wrk);
2092 	unsigned long next_interval;
2093 	unsigned long expire_time;
2094 	u8 user_data[16] = { 0 };
2095 	int rc;
2096 
2097 	if (!smc_link_active(link))
2098 		return;		/* don't reschedule worker */
2099 	expire_time = link->wr_rx_tstamp + link->llc_testlink_time;
2100 	if (time_is_after_jiffies(expire_time)) {
2101 		next_interval = expire_time - jiffies;
2102 		goto out;
2103 	}
2104 	reinit_completion(&link->llc_testlink_resp);
2105 	smc_llc_send_test_link(link, user_data);
2106 	/* receive TEST LINK response over RoCE fabric */
2107 	rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
2108 						       SMC_LLC_WAIT_TIME);
2109 	if (!smc_link_active(link))
2110 		return;		/* link state changed */
2111 	if (rc <= 0) {
2112 		smcr_link_down_cond_sched(link);
2113 		return;
2114 	}
2115 	next_interval = link->llc_testlink_time;
2116 out:
2117 	schedule_delayed_work(&link->llc_testlink_wrk, next_interval);
2118 }
2119 
/* initialize the LLC part of a new link group: workers, event queue,
 * flow synchronization objects, and the testlink interval taken from the
 * per-netns sysctl
 */
void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);

	INIT_WORK(&lgr->llc_event_work, smc_llc_event_work);
	INIT_WORK(&lgr->llc_add_link_work, smc_llc_add_link_work);
	INIT_WORK(&lgr->llc_del_link_work, smc_llc_delete_link_work);
	INIT_LIST_HEAD(&lgr->llc_event_q);
	spin_lock_init(&lgr->llc_event_q_lock);
	spin_lock_init(&lgr->llc_flow_lock);
	init_waitqueue_head(&lgr->llc_flow_waiter);
	init_waitqueue_head(&lgr->llc_msg_waiter);
	init_rwsem(&lgr->llc_conf_mutex);
	/* READ_ONCE pairs with concurrent sysctl updates of the interval */
	lgr->llc_testlink_time = READ_ONCE(net->smc.sysctl_smcr_testlink_time);
}
2135 
2136 /* called after lgr was removed from lgr_list */
2137 void smc_llc_lgr_clear(struct smc_link_group *lgr)
2138 {
2139 	smc_llc_event_flush(lgr);
2140 	wake_up_all(&lgr->llc_flow_waiter);
2141 	wake_up_all(&lgr->llc_msg_waiter);
2142 	cancel_work_sync(&lgr->llc_event_work);
2143 	cancel_work_sync(&lgr->llc_add_link_work);
2144 	cancel_work_sync(&lgr->llc_del_link_work);
2145 	if (lgr->delayed_event) {
2146 		kfree(lgr->delayed_event);
2147 		lgr->delayed_event = NULL;
2148 	}
2149 }
2150 
/* initialize the LLC part of a new link; counterpart of smc_llc_link_clear()
 * Returns 0 (cannot fail; the int return keeps the init-callback shape).
 */
int smc_llc_link_init(struct smc_link *link)
{
	init_completion(&link->llc_testlink_resp);
	INIT_DELAYED_WORK(&link->llc_testlink_wrk, smc_llc_testlink_work);
	return 0;
}
2157 
/* log the new link, mark it active, and start the periodic TEST LINK
 * worker (unless the testlink interval is configured as 0)
 */
void smc_llc_link_active(struct smc_link *link)
{
	pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu link added: id %*phN, "
			    "peerid %*phN, ibdev %s, ibport %d\n",
			    SMC_LGR_ID_SIZE, &link->lgr->id,
			    link->lgr->net->net_cookie,
			    SMC_LGR_ID_SIZE, &link->link_uid,
			    SMC_LGR_ID_SIZE, &link->peer_link_uid,
			    link->smcibdev->ibdev->name, link->ibport);
	link->state = SMC_LNK_ACTIVE;
	/* a testlink time of 0 disables the keepalive probing */
	if (link->lgr->llc_testlink_time) {
		link->llc_testlink_time = link->lgr->llc_testlink_time;
		schedule_delayed_work(&link->llc_testlink_wrk,
				      link->llc_testlink_time);
	}
}
2174 
/* called in worker context */
/* tear down the LLC part of a link: optionally log the removal, then stop
 * the testlink worker
 */
void smc_llc_link_clear(struct smc_link *link, bool log)
{
	if (log)
		pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu link removed: id %*phN"
				    ", peerid %*phN, ibdev %s, ibport %d\n",
				    SMC_LGR_ID_SIZE, &link->lgr->id,
				    link->lgr->net->net_cookie,
				    SMC_LGR_ID_SIZE, &link->link_uid,
				    SMC_LGR_ID_SIZE, &link->peer_link_uid,
				    link->smcibdev->ibdev->name, link->ibport);
	/* complete first so a testlink worker blocked in wait_for_completion
	 * can finish before the synchronous cancel below
	 */
	complete(&link->llc_testlink_resp);
	cancel_delayed_work_sync(&link->llc_testlink_wrk);
}
2189 
2190 /* register a new rtoken at the remote peer (for all links) */
2191 int smc_llc_do_confirm_rkey(struct smc_link *send_link,
2192 			    struct smc_buf_desc *rmb_desc)
2193 {
2194 	struct smc_link_group *lgr = send_link->lgr;
2195 	struct smc_llc_qentry *qentry = NULL;
2196 	int rc = 0;
2197 
2198 	rc = smc_llc_send_confirm_rkey(send_link, rmb_desc);
2199 	if (rc)
2200 		goto out;
2201 	/* receive CONFIRM RKEY response from server over RoCE fabric */
2202 	qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
2203 			      SMC_LLC_CONFIRM_RKEY);
2204 	if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
2205 		rc = -EFAULT;
2206 out:
2207 	if (qentry)
2208 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
2209 	return rc;
2210 }
2211 
2212 /* unregister an rtoken at the remote peer */
2213 int smc_llc_do_delete_rkey(struct smc_link_group *lgr,
2214 			   struct smc_buf_desc *rmb_desc)
2215 {
2216 	struct smc_llc_qentry *qentry = NULL;
2217 	struct smc_link *send_link;
2218 	int rc = 0;
2219 
2220 	send_link = smc_llc_usable_link(lgr);
2221 	if (!send_link)
2222 		return -ENOLINK;
2223 
2224 	/* protected by llc_flow control */
2225 	rc = smc_llc_send_delete_rkey(send_link, rmb_desc);
2226 	if (rc)
2227 		goto out;
2228 	/* receive DELETE RKEY response from server over RoCE fabric */
2229 	qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
2230 			      SMC_LLC_DELETE_RKEY);
2231 	if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
2232 		rc = -EFAULT;
2233 out:
2234 	if (qentry)
2235 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
2236 	return rc;
2237 }
2238 
/* derive the link user id: link group id plus link id, network byte order */
void smc_llc_link_set_uid(struct smc_link *link)
{
	__be32 link_uid;

	/* NOTE(review): reads the 4-byte lgr->id array through a u32 cast —
	 * assumes suitable alignment of the id field; confirm if reused
	 */
	link_uid = htonl(*((u32 *)link->lgr->id) + link->link_id);
	memcpy(link->link_uid, &link_uid, SMC_GR_ID_SIZE == 0 ? 0 : SMC_LGR_ID_SIZE);
}
2246 
/* save peers link user id, used for debug purposes */
/* copies the link_uid from a received CONFIRM LINK message */
void smc_llc_save_peer_uid(struct smc_llc_qentry *qentry)
{
	memcpy(qentry->link->peer_link_uid, qentry->msg.confirm_link.link_uid,
	       SMC_LGR_ID_SIZE);
}
2253 
2254 /* evaluate confirm link request or response */
2255 int smc_llc_eval_conf_link(struct smc_llc_qentry *qentry,
2256 			   enum smc_llc_reqresp type)
2257 {
2258 	if (type == SMC_LLC_REQ) {	/* SMC server assigns link_id */
2259 		qentry->link->link_id = qentry->msg.confirm_link.link_num;
2260 		smc_llc_link_set_uid(qentry->link);
2261 	}
2262 	if (!(qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_NO_RMBE_EYEC))
2263 		return -ENOTSUPP;
2264 	return 0;
2265 }
2266 
2267 /***************************** init, exit, misc ******************************/
2268 
/* every LLC message type (v1 and v2) is funneled through the same rx
 * handler, which validates and enqueues the message; the table is
 * terminated by a NULL-handler sentinel entry
 */
static struct smc_wr_rx_handler smc_llc_rx_handlers[] = {
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_TEST_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_ADD_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_ADD_LINK_CONT
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_RKEY
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_RKEY_CONT
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_RKEY
	},
	/* V2 types */
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_LINK_V2
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_TEST_LINK_V2
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_ADD_LINK_V2
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_LINK_V2
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_REQ_ADD_LINK_V2
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_RKEY_V2
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_RKEY_V2
	},
	{
		.handler	= NULL,		/* sentinel, keep last */
	}
};
2335 
2336 int __init smc_llc_init(void)
2337 {
2338 	struct smc_wr_rx_handler *handler;
2339 	int rc = 0;
2340 
2341 	for (handler = smc_llc_rx_handlers; handler->handler; handler++) {
2342 		INIT_HLIST_NODE(&handler->list);
2343 		rc = smc_wr_rx_register_handler(handler);
2344 		if (rc)
2345 			break;
2346 	}
2347 	return rc;
2348 }
2349