xref: /linux/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c (revision 1a9239bb4253f9076b5b4b2a1a4e8d7defd77a95)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell IPSEC offload driver
3  *
4  * Copyright (C) 2024 Marvell.
5  */
6 
7 #include <net/xfrm.h>
8 #include <linux/netdevice.h>
9 #include <linux/bitfield.h>
10 #include <crypto/aead.h>
11 #include <crypto/gcm.h>
12 
13 #include "otx2_common.h"
14 #include "otx2_struct.h"
15 #include "cn10k_ipsec.h"
16 
17 static bool is_dev_support_ipsec_offload(struct pci_dev *pdev)
18 {
19 	return is_dev_cn10ka_b0(pdev) || is_dev_cn10kb(pdev);
20 }
21 
22 static bool cn10k_cpt_device_set_inuse(struct otx2_nic *pf)
23 {
24 	enum cn10k_cpt_hw_state_e state;
25 
26 	while (true) {
27 		state = atomic_cmpxchg(&pf->ipsec.cpt_state,
28 				       CN10K_CPT_HW_AVAILABLE,
29 				       CN10K_CPT_HW_IN_USE);
30 		if (state == CN10K_CPT_HW_AVAILABLE)
31 			return true;
32 		if (state == CN10K_CPT_HW_UNAVAILABLE)
33 			return false;
34 
35 		mdelay(1);
36 	}
37 }
38 
39 static void cn10k_cpt_device_set_available(struct otx2_nic *pf)
40 {
41 	atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_AVAILABLE);
42 }
43 
44 static void cn10k_cpt_device_set_unavailable(struct otx2_nic *pf)
45 {
46 	atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_UNAVAILABLE);
47 }
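/*
 * Illustrative usage of the tri-state helpers above (a sketch, not part of
 * the driver's call flow):
 *
 *	if (!cn10k_cpt_device_set_inuse(pf))
 *		return -ENODEV;		(CPT LF unavailable or torn down)
 *	... submit work to the CPT LF ...
 *	cn10k_cpt_device_set_available(pf);
 *
 * cn10k_cpt_device_set_inuse() retries every 1 ms while another caller holds
 * the device IN_USE and fails only once the state becomes UNAVAILABLE.
 */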
48 
49 static int cn10k_outb_cptlf_attach(struct otx2_nic *pf)
50 {
51 	struct rsrc_attach *attach;
52 	int ret = -ENOMEM;
53 
54 	mutex_lock(&pf->mbox.lock);
55 	/* Get memory to put this msg */
56 	attach = otx2_mbox_alloc_msg_attach_resources(&pf->mbox);
57 	if (!attach)
58 		goto unlock;
59 
60 	attach->cptlfs = true;
61 	attach->modify = true;
62 
63 	/* Send attach request to AF */
64 	ret = otx2_sync_mbox_msg(&pf->mbox);
65 
66 unlock:
67 	mutex_unlock(&pf->mbox.lock);
68 	return ret;
69 }
70 
71 static int cn10k_outb_cptlf_detach(struct otx2_nic *pf)
72 {
73 	struct rsrc_detach *detach;
74 	int ret = -ENOMEM;
75 
76 	mutex_lock(&pf->mbox.lock);
77 	detach = otx2_mbox_alloc_msg_detach_resources(&pf->mbox);
78 	if (!detach)
79 		goto unlock;
80 
81 	detach->partial = true;
82 	detach->cptlfs = true;
83 
84 	/* Send detach request to AF */
85 	ret = otx2_sync_mbox_msg(&pf->mbox);
86 
87 unlock:
88 	mutex_unlock(&pf->mbox.lock);
89 	return ret;
90 }
91 
92 static int cn10k_outb_cptlf_alloc(struct otx2_nic *pf)
93 {
94 	struct cpt_lf_alloc_req_msg *req;
95 	int ret = -ENOMEM;
96 
97 	mutex_lock(&pf->mbox.lock);
98 	req = otx2_mbox_alloc_msg_cpt_lf_alloc(&pf->mbox);
99 	if (!req)
100 		goto unlock;
101 
102 	/* NIX PF function handle */
103 	req->nix_pf_func = pf->pcifunc;
104 	/* Enable SE-IE Engine Group */
105 	req->eng_grpmsk = 1 << CN10K_DEF_CPT_IPSEC_EGRP;
106 
107 	ret = otx2_sync_mbox_msg(&pf->mbox);
108 
109 unlock:
110 	mutex_unlock(&pf->mbox.lock);
111 	return ret;
112 }
113 
114 static void cn10k_outb_cptlf_free(struct otx2_nic *pf)
115 {
116 	mutex_lock(&pf->mbox.lock);
117 	otx2_mbox_alloc_msg_cpt_lf_free(&pf->mbox);
118 	otx2_sync_mbox_msg(&pf->mbox);
119 	mutex_unlock(&pf->mbox.lock);
120 }
121 
122 static int cn10k_outb_cptlf_config(struct otx2_nic *pf)
123 {
124 	struct cpt_inline_ipsec_cfg_msg *req;
125 	int ret = -ENOMEM;
126 
127 	mutex_lock(&pf->mbox.lock);
128 	req = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(&pf->mbox);
129 	if (!req)
130 		goto unlock;
131 
132 	req->dir = CPT_INLINE_OUTBOUND;
133 	req->enable = 1;
134 	req->nix_pf_func = pf->pcifunc;
135 	ret = otx2_sync_mbox_msg(&pf->mbox);
136 unlock:
137 	mutex_unlock(&pf->mbox.lock);
138 	return ret;
139 }
140 
141 static void cn10k_outb_cptlf_iq_enable(struct otx2_nic *pf)
142 {
143 	u64 reg_val;
144 
145 	/* Set Execution Enable of instruction queue */
146 	reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
147 	reg_val |= BIT_ULL(16);
148 	otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
149 
150 	/* Enable instruction enqueuing on the queue */
151 	reg_val = otx2_read64(pf, CN10K_CPT_LF_CTL);
152 	reg_val |= BIT_ULL(0);
153 	otx2_write64(pf, CN10K_CPT_LF_CTL, reg_val);
154 }
155 
156 static void cn10k_outb_cptlf_iq_disable(struct otx2_nic *pf)
157 {
158 	u32 inflight, grb_cnt, gwb_cnt;
159 	u32 nq_ptr, dq_ptr;
160 	int timeout = 20;
161 	u64 reg_val;
162 	int cnt;
163 
164 	/* Disable instructions enqueuing */
165 	otx2_write64(pf, CN10K_CPT_LF_CTL, 0ull);
166 
167 	/* Wait for the instruction queue to drain, i.e. until
168 	 * CPT_LF_INPROG.INFLIGHT reads zero.
169 	 */
170 	do {
171 		reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
172 		inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
173 		if (!inflight)
174 			break;
175 
176 		usleep_range(10000, 20000);
177 		if (timeout-- < 0) {
178 			netdev_err(pf->netdev, "Timeout waiting for CPT IQ to drain\n");
179 			break;
180 		}
181 	} while (1);
182 
183 	/* Disable executions in the LF's queue;
184 	 * the queue should be empty at this point.
185 	 */
186 	reg_val &= ~BIT_ULL(16);
187 	otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
188 
189 	/* Wait for the enqueue (NQ) and dequeue (DQ) pointers to converge */
190 	cnt = 0;
191 	do {
192 		reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
193 		if (reg_val & BIT_ULL(31))
194 			cnt = 0;
195 		else
196 			cnt++;
197 		reg_val = otx2_read64(pf, CN10K_CPT_LF_Q_GRP_PTR);
198 		nq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_NQ_PTR, reg_val); /* NQ, not DQ */
199 		dq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val);
200 	} while ((cnt < 10) && (nq_ptr != dq_ptr));
201 
202 	cnt = 0;
203 	do {
204 		reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
205 		inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
206 		grb_cnt = FIELD_GET(CPT_LF_INPROG_GRB_CNT, reg_val);
207 		gwb_cnt = FIELD_GET(CPT_LF_INPROG_GWB_CNT, reg_val);
208 		if (inflight == 0 && gwb_cnt < 40 &&
209 		    (grb_cnt == 0 || grb_cnt == 40))
210 			cnt++;
211 		else
212 			cnt = 0;
213 	} while (cnt < 10);
214 }
215 
216 /* Allocate memory for CPT outbound Instruction queue.
217  * Instruction queue memory format is:
218  *      -----------------------------
219  *     | Instruction Group memory    |
220  *     |  (CPT_LF_Q_SIZE[SIZE_DIV40] |
221  *     |   x 16 Bytes)               |
222  *     |                             |
223  *      ----------------------------- <-- CPT_LF_Q_BASE[ADDR]
224  *     | Flow Control (128 Bytes)    |
225  *     |                             |
226  *      -----------------------------
227  *     |  Instruction Memory         |
228  *     |  (CPT_LF_Q_SIZE[SIZE_DIV40] |
229  *     |   x 40 x 64 Bytes)          |
230  *     |                             |
231  *      -----------------------------
232  */
233 static int cn10k_outb_cptlf_iq_alloc(struct otx2_nic *pf)
234 {
235 	struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
236 
237 	iq->size = CN10K_CPT_INST_QLEN_BYTES + CN10K_CPT_Q_FC_LEN +
238 		    CN10K_CPT_INST_GRP_QLEN_BYTES + OTX2_ALIGN;
239 
240 	iq->real_vaddr = dma_alloc_coherent(pf->dev, iq->size,
241 					    &iq->real_dma_addr, GFP_KERNEL);
242 	if (!iq->real_vaddr)
243 		return -ENOMEM;
244 
245 	/* iq->vaddr/dma_addr point to the Flow Control location */
246 	iq->vaddr = iq->real_vaddr + CN10K_CPT_INST_GRP_QLEN_BYTES;
247 	iq->dma_addr = iq->real_dma_addr + CN10K_CPT_INST_GRP_QLEN_BYTES;
248 
249 	/* Align pointers */
250 	iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_ALIGN);
251 	iq->dma_addr = PTR_ALIGN(iq->dma_addr, OTX2_ALIGN);
252 	return 0;
253 }
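/*
 * Worked example for the alignment above (addresses are hypothetical): with
 * OTX2_ALIGN = 128, PTR_ALIGN(0x1050, 128) = 0x1080. The extra OTX2_ALIGN
 * bytes added to iq->size keep the rounded-up pointer inside the buffer,
 * while real_vaddr/real_dma_addr retain the original values needed later by
 * dma_free_coherent().
 */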
254 
255 static void cn10k_outb_cptlf_iq_free(struct otx2_nic *pf)
256 {
257 	struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
258 
259 	if (iq->real_vaddr)
260 		dma_free_coherent(pf->dev, iq->size, iq->real_vaddr,
261 				  iq->real_dma_addr);
262 
263 	iq->real_vaddr = NULL;
264 	iq->vaddr = NULL;
265 }
266 
267 static int cn10k_outb_cptlf_iq_init(struct otx2_nic *pf)
268 {
269 	u64 reg_val;
270 	int ret;
271 
272 	/* Allocate Memory for CPT IQ */
273 	ret = cn10k_outb_cptlf_iq_alloc(pf);
274 	if (ret)
275 		return ret;
276 
277 	/* Disable IQ */
278 	cn10k_outb_cptlf_iq_disable(pf);
279 
280 	/* Set IQ base address */
281 	otx2_write64(pf, CN10K_CPT_LF_Q_BASE, pf->ipsec.iq.dma_addr);
282 
283 	/* Set IQ size */
284 	reg_val = FIELD_PREP(CPT_LF_Q_SIZE_DIV40, CN10K_CPT_SIZE_DIV40 +
285 			     CN10K_CPT_EXTRA_SIZE_DIV40);
286 	otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, reg_val);
287 
288 	return 0;
289 }
290 
291 static int cn10k_outb_cptlf_init(struct otx2_nic *pf)
292 {
293 	int ret;
294 
295 	/* Initialize CPTLF Instruction Queue (IQ) */
296 	ret = cn10k_outb_cptlf_iq_init(pf);
297 	if (ret)
298 		return ret;
299 
300 	/* Configure CPTLF for outbound ipsec offload */
301 	ret = cn10k_outb_cptlf_config(pf);
302 	if (ret)
303 		goto iq_clean;
304 
305 	/* Enable CPTLF IQ */
306 	cn10k_outb_cptlf_iq_enable(pf);
307 	return 0;
308 iq_clean:
309 	cn10k_outb_cptlf_iq_free(pf);
310 	return ret;
311 }
312 
313 static int cn10k_outb_cpt_init(struct net_device *netdev)
314 {
315 	struct otx2_nic *pf = netdev_priv(netdev);
316 	int ret;
317 
318 	/* Attach a CPT LF for outbound ipsec offload */
319 	ret = cn10k_outb_cptlf_attach(pf);
320 	if (ret)
321 		return ret;
322 
323 	/* Allocate a CPT LF for outbound ipsec offload */
324 	ret = cn10k_outb_cptlf_alloc(pf);
325 	if (ret)
326 		goto detach;
327 
328 	/* Initialize the CPTLF for outbound ipsec offload */
329 	ret = cn10k_outb_cptlf_init(pf);
330 	if (ret)
331 		goto lf_free;
332 
333 	pf->ipsec.io_addr = (__force u64)otx2_get_regaddr(pf,
334 						CN10K_CPT_LF_NQX(0));
335 
336 	/* Set ipsec offload enabled for this device */
337 	pf->flags |= OTX2_FLAG_IPSEC_OFFLOAD_ENABLED;
338 
339 	cn10k_cpt_device_set_available(pf);
340 	return 0;
341 
342 lf_free:
343 	cn10k_outb_cptlf_free(pf);
344 detach:
345 	cn10k_outb_cptlf_detach(pf);
346 	return ret;
347 }
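/*
 * Summary of the outbound CPT bring-up implemented above:
 *   1. attach a CPT LF to this PF via the AF mailbox
 *   2. allocate the LF and bind it to the SE-IE engine group
 *   3. set up the instruction queue and enable inline outbound IPsec
 *   4. record the LF's CN10K_CPT_LF_NQX(0) doorbell address for LMTST use
 * cn10k_outb_cpt_clean() below broadly undoes these steps in reverse.
 */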
348 
349 static int cn10k_outb_cpt_clean(struct otx2_nic *pf)
350 {
351 	int ret;
352 
353 	if (!cn10k_cpt_device_set_inuse(pf)) {
354 		netdev_err(pf->netdev, "CPT LF device unavailable\n");
355 		return -ENODEV;
356 	}
357 
358 	/* Set ipsec offload disabled for this device */
359 	pf->flags &= ~OTX2_FLAG_IPSEC_OFFLOAD_ENABLED;
360 
361 	/* Disable CPTLF Instruction Queue (IQ) */
362 	cn10k_outb_cptlf_iq_disable(pf);
363 
364 	/* Set IQ base address and size to 0 */
365 	otx2_write64(pf, CN10K_CPT_LF_Q_BASE, 0);
366 	otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, 0);
367 
368 	/* Free CPTLF IQ */
369 	cn10k_outb_cptlf_iq_free(pf);
370 
371 	/* Free and detach CPT LF */
372 	cn10k_outb_cptlf_free(pf);
373 	ret = cn10k_outb_cptlf_detach(pf);
374 	if (ret)
375 		netdev_err(pf->netdev, "Failed to detach CPT LF\n");
376 
377 	cn10k_cpt_device_set_unavailable(pf);
378 	return ret;
379 }
380 
381 static void cn10k_cpt_inst_flush(struct otx2_nic *pf, struct cpt_inst_s *inst,
382 				 u64 size)
383 {
384 	struct otx2_lmt_info *lmt_info;
385 	u64 val = 0, tar_addr = 0;
386 
387 	lmt_info = per_cpu_ptr(pf->hw.lmt_info, smp_processor_id());
388 	/* FIXME: val[0:10] LMT_ID.
389 	 * [12:15] no of LMTST - 1 in the burst.
390 	 * [19:63] data size of each LMTST in the burst except first.
391 	 */
392 	val = (lmt_info->lmt_id & 0x7FF);
393 	/* Target address for LMTST flush tells HW how many 128bit
394 	 * words are present.
395 	 * tar_addr[6:4] size of first LMTST - 1 in units of 128b.
396 	 */
397 	tar_addr |= pf->ipsec.io_addr | (((size / 16) - 1) & 0x7) << 4;
398 	dma_wmb();
399 	memcpy((u64 *)lmt_info->lmt_addr, inst, size);
400 	cn10k_lmt_flush(val, tar_addr);
401 }
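/*
 * Example of the encoding above: a struct cpt_inst_s is 64 bytes, so
 * (size / 16) - 1 = 3 and tar_addr[6:4] is set to 3, i.e. four 128-bit
 * words follow. val carries only the per-CPU LMT_ID, since a single-LMTST
 * burst leaves the remaining burst fields at zero.
 */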
402 
403 static int cn10k_wait_for_cpt_respose(struct otx2_nic *pf,
404 				      struct cpt_res_s *res)
405 {
406 	unsigned long timeout = jiffies + msecs_to_jiffies(100);
407 	u64 *completion_ptr = (u64 *)res;
408 
409 	do {
410 		if (time_after(jiffies, timeout)) {
411 			netdev_err(pf->netdev, "CPT response timeout\n");
412 			return -EBUSY;
413 		}
414 	} while ((READ_ONCE(*completion_ptr) & CN10K_CPT_COMP_E_MASK) ==
415 		 CN10K_CPT_COMP_E_NOTDONE);
416 
417 	if (!(res->compcode == CN10K_CPT_COMP_E_GOOD ||
418 	      res->compcode == CN10K_CPT_COMP_E_WARN) || res->uc_compcode) {
419 		netdev_err(pf->netdev, "compcode=%x doneint=%x\n",
420 			   res->compcode, res->doneint);
421 		netdev_err(pf->netdev, "uc_compcode=%x uc_info=%llx esn=%llx\n",
422 			   res->uc_compcode, (u64)res->uc_info, res->esn);
423 	}
424 	return 0;
425 }
426 
427 static int cn10k_outb_write_sa(struct otx2_nic *pf, struct qmem *sa_info)
428 {
429 	dma_addr_t res_iova, dptr_iova, sa_iova;
430 	struct cn10k_tx_sa_s *sa_dptr;
431 	struct cpt_inst_s inst = {};
432 	struct cpt_res_s *res;
433 	u32 sa_size, off;
434 	u64 *sptr, *dptr;
435 	u64 reg_val;
436 	int ret;
437 
438 	sa_iova = sa_info->iova;
439 	if (!sa_iova)
440 		return -EINVAL;
441 
442 	res = dma_alloc_coherent(pf->dev, sizeof(struct cpt_res_s),
443 				 &res_iova, GFP_ATOMIC);
444 	if (!res)
445 		return -ENOMEM;
446 
447 	sa_size = sizeof(struct cn10k_tx_sa_s);
448 	sa_dptr = dma_alloc_coherent(pf->dev, sa_size, &dptr_iova, GFP_ATOMIC);
449 	if (!sa_dptr) {
450 		dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res,
451 				  res_iova);
452 		return -ENOMEM;
453 	}
454 
455 	sptr = (__force u64 *)sa_info->base;
456 	dptr =  (__force u64 *)sa_dptr;
457 	for (off = 0; off < (sa_size / 8); off++)
458 		*(dptr + off) = (__force u64)cpu_to_be64(*(sptr + off));
459 
460 	res->compcode = CN10K_CPT_COMP_E_NOTDONE;
461 	inst.res_addr = res_iova;
462 	inst.dptr = (u64)dptr_iova;
463 	inst.param2 = sa_size >> 3;
464 	inst.dlen = sa_size;
465 	inst.opcode_major = CN10K_IPSEC_MAJOR_OP_WRITE_SA;
466 	inst.opcode_minor = CN10K_IPSEC_MINOR_OP_WRITE_SA;
467 	inst.cptr = sa_iova;
468 	inst.ctx_val = 1;
469 	inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP;
470 
471 	/* Check if CPT-LF available */
472 	if (!cn10k_cpt_device_set_inuse(pf)) {
473 		ret = -ENODEV;
474 		goto free_mem;
475 	}
476 
477 	cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
478 	dma_wmb();
479 	ret = cn10k_wait_for_cpt_respose(pf, res);
480 	if (ret)
481 		goto set_available;
482 
483 	/* Trigger CTX flush to write dirty data back to DRAM */
484 	reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH, sa_iova >> 7);
485 	otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val);
486 
487 set_available:
488 	cn10k_cpt_device_set_available(pf);
489 free_mem:
490 	dma_free_coherent(pf->dev, sa_size, sa_dptr, dptr_iova);
491 	dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, res_iova);
492 	return ret;
493 }
494 
495 static int cn10k_ipsec_get_hw_ctx_offset(void)
496 {
497 	/* Offset of the hardware context within the SA, in 8-byte words */
498 	return (offsetof(struct cn10k_tx_sa_s, hw_ctx) / sizeof(u64)) & 0x7F;
499 }
500 
501 static int cn10k_ipsec_get_ctx_push_size(void)
502 {
503 	/* Context push size, rounded up to a multiple of 8 bytes */
504 	return (roundup(offsetof(struct cn10k_tx_sa_s, hw_ctx), 8) / 8) & 0x7F;
505 }
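/*
 * Worked example for the two helpers above (the hw_ctx offset is
 * hypothetical): if hw_ctx started at byte 32 of struct cn10k_tx_sa_s, the
 * hardware-context offset would be 32 / 8 = 4 words and the push size
 * roundup(32, 8) / 8 = 4; an unaligned offset of 28 would still push
 * roundup(28, 8) / 8 = 4 words.
 */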
506 
507 static int cn10k_ipsec_get_aes_key_len(int key_len)
508 {
509 	/* key_len is aes key length in bytes */
510 	switch (key_len) {
511 	case 16:
512 		return CN10K_IPSEC_SA_AES_KEY_LEN_128;
513 	case 24:
514 		return CN10K_IPSEC_SA_AES_KEY_LEN_192;
515 	default:
516 		return CN10K_IPSEC_SA_AES_KEY_LEN_256;
517 	}
518 }
519 
520 static void cn10k_outb_prepare_sa(struct xfrm_state *x,
521 				  struct cn10k_tx_sa_s *sa_entry)
522 {
523 	int key_len = (x->aead->alg_key_len + 7) / 8;
524 	struct net_device *netdev = x->xso.dev;
525 	u8 *key = x->aead->alg_key;
526 	struct otx2_nic *pf;
527 	u32 *tmp_salt;
528 	u64 *tmp_key;
529 	int idx;
530 
531 	memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s));
532 
533 	/* SA context size in units of 128 bytes */
534 	pf = netdev_priv(netdev);
535 	sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN)  & 0xF;
536 	sa_entry->hw_ctx_off = cn10k_ipsec_get_hw_ctx_offset();
537 	sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size();
538 
539 	/* Ucode to skip two words of CPT_CTX_HW_S */
540 	sa_entry->ctx_hdr_size = 1;
541 
542 	/* Allow Atomic operation (AOP) */
543 	sa_entry->aop_valid = 1;
544 
545 	/* Outbound ESP in transport/tunnel mode with AES-GCM encryption */
546 	sa_entry->sa_dir = CN10K_IPSEC_SA_DIR_OUTB;
547 	sa_entry->ipsec_protocol = CN10K_IPSEC_SA_IPSEC_PROTO_ESP;
548 	sa_entry->enc_type = CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM;
549 	sa_entry->iv_src = CN10K_IPSEC_SA_IV_SRC_PACKET;
550 	if (x->props.mode == XFRM_MODE_TUNNEL)
551 		sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL;
552 	else
553 		sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT;
554 
555 	/* Last 4 bytes are salt */
556 	key_len -= 4;
557 	sa_entry->aes_key_len = cn10k_ipsec_get_aes_key_len(key_len);
558 	memcpy(sa_entry->cipher_key, key, key_len);
559 	tmp_key = (u64 *)sa_entry->cipher_key;
560 
561 	for (idx = 0; idx < key_len / 8; idx++)
562 		tmp_key[idx] = (__force u64)cpu_to_be64(tmp_key[idx]);
563 
564 	memcpy(&sa_entry->iv_gcm_salt, key + key_len, 4);
565 	tmp_salt = (u32 *)&sa_entry->iv_gcm_salt;
566 	*tmp_salt = (__force u32)cpu_to_be32(*tmp_salt);
567 
568 	/* Write SA context data to memory before enabling */
569 	wmb();
570 
571 	/* Enable SA */
572 	sa_entry->sa_valid = 1;
573 }
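/*
 * Example of the key layout handled above: an AES-GCM (rfc4106) xfrm key of
 * alg_key_len = 160 bits is 20 bytes, i.e. a 16-byte AES-128 cipher key
 * followed by a 4-byte salt; 288 bits gives a 32-byte AES-256 key plus the
 * 4-byte salt. Key words and salt are converted to big-endian before being
 * written into the SA.
 */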
574 
575 static int cn10k_ipsec_validate_state(struct xfrm_state *x,
576 				      struct netlink_ext_ack *extack)
577 {
578 	if (x->props.aalgo != SADB_AALG_NONE) {
579 		NL_SET_ERR_MSG_MOD(extack,
580 				   "Cannot offload authenticated xfrm states");
581 		return -EINVAL;
582 	}
583 	if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
584 		NL_SET_ERR_MSG_MOD(extack,
585 				   "Only AES-GCM-ICV16 xfrm state may be offloaded");
586 		return -EINVAL;
587 	}
588 	if (x->props.calgo != SADB_X_CALG_NONE) {
589 		NL_SET_ERR_MSG_MOD(extack,
590 				   "Cannot offload compressed xfrm states");
591 		return -EINVAL;
592 	}
593 	if (x->props.flags & XFRM_STATE_ESN) {
594 		NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
595 		return -EINVAL;
596 	}
597 	if (x->props.family != AF_INET && x->props.family != AF_INET6) {
598 		NL_SET_ERR_MSG_MOD(extack,
599 				   "Only IPv4/v6 xfrm states may be offloaded");
600 		return -EINVAL;
601 	}
602 	if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
603 		NL_SET_ERR_MSG_MOD(extack,
604 				   "Cannot offload other than crypto-mode");
605 		return -EINVAL;
606 	}
607 	if (x->props.mode != XFRM_MODE_TRANSPORT &&
608 	    x->props.mode != XFRM_MODE_TUNNEL) {
609 		NL_SET_ERR_MSG_MOD(extack,
610 				   "Only tunnel/transport xfrm states may be offloaded");
611 		return -EINVAL;
612 	}
613 	if (x->id.proto != IPPROTO_ESP) {
614 		NL_SET_ERR_MSG_MOD(extack,
615 				   "Only ESP xfrm state may be offloaded");
616 		return -EINVAL;
617 	}
618 	if (x->encap) {
619 		NL_SET_ERR_MSG_MOD(extack,
620 				   "Encapsulated xfrm state may not be offloaded");
621 		return -EINVAL;
622 	}
623 	if (!x->aead) {
624 		NL_SET_ERR_MSG_MOD(extack,
625 				   "Cannot offload xfrm states without aead");
626 		return -EINVAL;
627 	}
628 
629 	if (x->aead->alg_icv_len != 128) {
630 		NL_SET_ERR_MSG_MOD(extack,
631 				   "Cannot offload xfrm states with AEAD ICV length other than 128bit");
632 		return -EINVAL;
633 	}
634 	if (x->aead->alg_key_len != 128 + 32 &&
635 	    x->aead->alg_key_len != 192 + 32 &&
636 	    x->aead->alg_key_len != 256 + 32) {
637 		NL_SET_ERR_MSG_MOD(extack,
638 				   "Cannot offload xfrm states with AEAD key length other than 128/192/256bit");
639 		return -EINVAL;
640 	}
641 	if (x->tfcpad) {
642 		NL_SET_ERR_MSG_MOD(extack,
643 				   "Cannot offload xfrm states with tfc padding");
644 		return -EINVAL;
645 	}
646 	if (!x->geniv) {
647 		NL_SET_ERR_MSG_MOD(extack,
648 				   "Cannot offload xfrm states without geniv");
649 		return -EINVAL;
650 	}
651 	if (strcmp(x->geniv, "seqiv")) {
652 		NL_SET_ERR_MSG_MOD(extack,
653 				   "Cannot offload xfrm states with geniv other than seqiv");
654 		return -EINVAL;
655 	}
656 	return 0;
657 }
658 
659 static int cn10k_ipsec_inb_add_state(struct xfrm_state *x,
660 				     struct netlink_ext_ack *extack)
661 {
662 	NL_SET_ERR_MSG_MOD(extack, "xfrm inbound offload not supported");
663 	return -EOPNOTSUPP;
664 }
665 
666 static int cn10k_ipsec_outb_add_state(struct xfrm_state *x,
667 				      struct netlink_ext_ack *extack)
668 {
669 	struct net_device *netdev = x->xso.dev;
670 	struct cn10k_tx_sa_s *sa_entry;
671 	struct qmem *sa_info;
672 	struct otx2_nic *pf;
673 	int err;
674 
675 	err = cn10k_ipsec_validate_state(x, extack);
676 	if (err)
677 		return err;
678 
679 	pf = netdev_priv(netdev);
680 
681 	err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN);
682 	if (err)
683 		return err;
684 
685 	sa_entry = (struct cn10k_tx_sa_s *)sa_info->base;
686 	cn10k_outb_prepare_sa(x, sa_entry);
687 
688 	err = cn10k_outb_write_sa(pf, sa_info);
689 	if (err) {
690 		NL_SET_ERR_MSG_MOD(extack, "Error writing outbound SA");
691 		qmem_free(pf->dev, sa_info);
692 		return err;
693 	}
694 
695 	x->xso.offload_handle = (unsigned long)sa_info;
696 	/* Enable the static branch when the first SA is set up */
697 	if (!pf->ipsec.outb_sa_count)
698 		static_branch_enable(&cn10k_ipsec_sa_enabled);
699 	pf->ipsec.outb_sa_count++;
700 	return 0;
701 }
702 
703 static int cn10k_ipsec_add_state(struct xfrm_state *x,
704 				 struct netlink_ext_ack *extack)
705 {
706 	if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
707 		return cn10k_ipsec_inb_add_state(x, extack);
708 	else
709 		return cn10k_ipsec_outb_add_state(x, extack);
710 }
711 
712 static void cn10k_ipsec_del_state(struct xfrm_state *x)
713 {
714 	struct net_device *netdev = x->xso.dev;
715 	struct cn10k_tx_sa_s *sa_entry;
716 	struct qmem *sa_info;
717 	struct otx2_nic *pf;
718 	int err;
719 
720 	if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
721 		return;
722 
723 	pf = netdev_priv(netdev);
724 
725 	sa_info = (struct qmem *)x->xso.offload_handle;
726 	sa_entry = (struct cn10k_tx_sa_s *)sa_info->base;
727 	memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s));
728 	/* Disable SA in CPT h/w */
729 	sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size();
730 	sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN)  & 0xF;
731 	sa_entry->aop_valid = 1;
732 
733 	err = cn10k_outb_write_sa(pf, sa_info);
734 	if (err)
735 		netdev_err(netdev, "Error (%d) deleting SA\n", err);
736 
737 	x->xso.offload_handle = 0;
738 	qmem_free(pf->dev, sa_info);
739 
740 	/* If no SAs remain, update netdev features to reflect a potential
741 	 * change in NETIF_F_HW_ESP.
742 	 */
743 	if (!--pf->ipsec.outb_sa_count)
744 		queue_work(pf->ipsec.sa_workq, &pf->ipsec.sa_work);
745 }
746 
747 static const struct xfrmdev_ops cn10k_ipsec_xfrmdev_ops = {
748 	.xdo_dev_state_add	= cn10k_ipsec_add_state,
749 	.xdo_dev_state_delete	= cn10k_ipsec_del_state,
750 };
751 
752 static void cn10k_ipsec_sa_wq_handler(struct work_struct *work)
753 {
754 	struct cn10k_ipsec *ipsec = container_of(work, struct cn10k_ipsec,
755 						 sa_work);
756 	struct otx2_nic *pf = container_of(ipsec, struct otx2_nic, ipsec);
757 
758 	/* Disable the static branch when no SAs remain enabled */
759 	static_branch_disable(&cn10k_ipsec_sa_enabled);
760 	rtnl_lock();
761 	netdev_update_features(pf->netdev);
762 	rtnl_unlock();
763 }
764 
765 int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
766 {
767 	struct otx2_nic *pf = netdev_priv(netdev);
768 
769 	/* IPsec offload is supported only on select cn10k silicon */
770 	if (!is_dev_support_ipsec_offload(pf->pdev))
771 		return -EOPNOTSUPP;
772 
773 	/* Initialize CPT for outbound ipsec offload */
774 	if (enable)
775 		return cn10k_outb_cpt_init(netdev);
776 
777 	/* Don't clean up the CPT while SAs are still installed */
778 	if (pf->ipsec.outb_sa_count) {
779 		netdev_err(pf->netdev, "SA installed on this device\n");
780 		return -EBUSY;
781 	}
782 
783 	return cn10k_outb_cpt_clean(pf);
784 }
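/*
 * This hook is reached when ESP hardware offload is toggled from user space,
 * e.g. (illustrative command, feature name assumed):
 *	ethtool -K <netdev> esp-hw-offload on|off
 */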
785 
786 int cn10k_ipsec_init(struct net_device *netdev)
787 {
788 	struct otx2_nic *pf = netdev_priv(netdev);
789 	u32 sa_size;
790 
791 	if (!is_dev_support_ipsec_offload(pf->pdev))
792 		return 0;
793 
794 	/* Each SA entry is rounded up to a multiple of 128 bytes */
795 	sa_size = sizeof(struct cn10k_tx_sa_s) % OTX2_ALIGN ?
796 			 (sizeof(struct cn10k_tx_sa_s) / OTX2_ALIGN + 1) *
797 			 OTX2_ALIGN : sizeof(struct cn10k_tx_sa_s);
798 	pf->ipsec.sa_size = sa_size;
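	/* For example, a hypothetical 200-byte struct cn10k_tx_sa_s would be
	 * padded to sa_size = 256, while a struct that is already a multiple
	 * of 128 bytes is kept as-is; this matches the 128-byte ctx_size
	 * units used when preparing the SA.
	 */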
799 
800 	INIT_WORK(&pf->ipsec.sa_work, cn10k_ipsec_sa_wq_handler);
801 	pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq", 0, 0);
802 	if (!pf->ipsec.sa_workq) {
803 		netdev_err(pf->netdev, "SA alloc workqueue failed\n");
804 		return -ENOMEM;
805 	}
806 
807 	/* Set xfrm device ops */
808 	netdev->xfrmdev_ops = &cn10k_ipsec_xfrmdev_ops;
809 	netdev->hw_features |= NETIF_F_HW_ESP;
810 	netdev->hw_enc_features |= NETIF_F_HW_ESP;
811 
812 	cn10k_cpt_device_set_unavailable(pf);
813 	return 0;
814 }
815 EXPORT_SYMBOL(cn10k_ipsec_init);
816 
817 void cn10k_ipsec_clean(struct otx2_nic *pf)
818 {
819 	if (!is_dev_support_ipsec_offload(pf->pdev))
820 		return;
821 
822 	if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED))
823 		return;
824 
825 	if (pf->ipsec.sa_workq) {
826 		destroy_workqueue(pf->ipsec.sa_workq);
827 		pf->ipsec.sa_workq = NULL;
828 	}
829 
830 	cn10k_outb_cpt_clean(pf);
831 }
832 EXPORT_SYMBOL(cn10k_ipsec_clean);
833 
834 static u16 cn10k_ipsec_get_ip_data_len(struct xfrm_state *x,
835 				       struct sk_buff *skb)
836 {
837 	struct ipv6hdr *ipv6h;
838 	struct iphdr *iph;
839 	u8 *src;
840 
841 	src = (u8 *)skb->data + ETH_HLEN;
842 
843 	if (x->props.family == AF_INET) {
844 		iph = (struct iphdr *)src;
845 		return ntohs(iph->tot_len);
846 	}
847 
848 	ipv6h = (struct ipv6hdr *)src;
849 	return ntohs(ipv6h->payload_len) + sizeof(struct ipv6hdr);
850 }
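/*
 * Note on the helper above: IPv4 tot_len already includes the IP header,
 * while IPv6 payload_len excludes it, hence the extra sizeof(struct ipv6hdr)
 * (40 bytes). For example, a 1400-byte IPv6 payload yields 1400 + 40 = 1440
 * bytes of IP data.
 */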
851 
852 /* Prepare CPT and NIX SQE scatter/gather subdescriptor structure.
853  * SG of NIX and CPT are same in size.
854  * Layout of a NIX SQE and CPT SG entry:
855  *      -----------------------------
856  *     |     CPT Scatter Gather      |
857  *     |       (SQE SIZE)            |
858  *     |                             |
859  *      -----------------------------
860  *     |       NIX SQE               |
861  *     |       (SQE SIZE)            |
862  *     |                             |
863  *      -----------------------------
864  */
865 bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
866 			   struct sk_buff *skb, int num_segs, int *offset)
867 {
868 	struct cpt_sg_s *cpt_sg = NULL;
869 	struct nix_sqe_sg_s *sg = NULL;
870 	u64 dma_addr, *iova = NULL;
871 	u64 *cpt_iova = NULL;
872 	u16 *sg_lens = NULL;
873 	int seg, len;
874 
875 	sq->sg[sq->head].num_segs = 0;
876 	cpt_sg = (struct cpt_sg_s *)(sq->sqe_base - sq->sqe_size);
877 
878 	for (seg = 0; seg < num_segs; seg++) {
879 		if ((seg % MAX_SEGS_PER_SG) == 0) {
880 			sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
881 			sg->ld_type = NIX_SEND_LDTYPE_LDD;
882 			sg->subdc = NIX_SUBDC_SG;
883 			sg->segs = 0;
884 			sg_lens = (void *)sg;
885 			iova = (void *)sg + sizeof(*sg);
886 			/* The next subdc always starts at a 16-byte boundary, so
887 			 * whether sg->segs is 2 or 3, advance *offset by 16 bytes.
888 			 */
889 			if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
890 				*offset += sizeof(*sg) + (3 * sizeof(u64));
891 			else
892 				*offset += sizeof(*sg) + sizeof(u64);
893 
894 			cpt_sg += (seg / MAX_SEGS_PER_SG) * 4;
895 			cpt_iova = (void *)cpt_sg + sizeof(*cpt_sg);
896 		}
897 		dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
898 		if (dma_mapping_error(pfvf->dev, dma_addr))
899 			return false;
900 
901 		sg_lens[seg % MAX_SEGS_PER_SG] = len;
902 		sg->segs++;
903 		*iova++ = dma_addr;
904 		*cpt_iova++ = dma_addr;
905 
906 		/* Save DMA mapping info for later unmapping */
907 		sq->sg[sq->head].dma_addr[seg] = dma_addr;
908 		sq->sg[sq->head].size[seg] = len;
909 		sq->sg[sq->head].num_segs++;
910 
911 		*cpt_sg = *(struct cpt_sg_s *)sg;
912 		cpt_sg->rsvd_63_50 = 0;
913 	}
914 
915 	sq->sg[sq->head].skb = (u64)skb;
916 	return true;
917 }
918 
919 static u16 cn10k_ipsec_get_param1(u8 iv_offset)
920 {
921 	u16 param1_val;
922 
923 	/* Set Crypto mode, disable L3/L4 checksum */
924 	param1_val = CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM |
925 		      CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM;
926 	param1_val |= (u16)iv_offset << CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT;
927 	return param1_val;
928 }
929 
930 bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
931 			  struct otx2_snd_queue *sq, struct sk_buff *skb,
932 			  int num_segs, int size)
933 {
934 	struct cpt_inst_s inst;
935 	struct cpt_res_s *res;
936 	struct xfrm_state *x;
937 	struct qmem *sa_info;
938 	dma_addr_t dptr_iova;
939 	struct sec_path *sp;
940 	u8 encap_offset;
941 	u8 auth_offset;
942 	u8 gthr_size;
943 	u8 iv_offset;
944 	u16 dlen;
945 
946 	/* Check for IPSEC offload enabled */
947 	if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED))
948 		goto drop;
949 
950 	sp = skb_sec_path(skb);
951 	if (unlikely(!sp->len))
952 		goto drop;
953 
954 	x = xfrm_input_state(skb);
955 	if (unlikely(!x))
956 		goto drop;
957 
958 	if (x->props.mode != XFRM_MODE_TRANSPORT &&
959 	    x->props.mode != XFRM_MODE_TUNNEL)
960 		goto drop;
961 
962 	dlen = cn10k_ipsec_get_ip_data_len(x, skb);
963 	if (dlen == 0 && netif_msg_tx_err(pf)) {
964 		netdev_err(pf->netdev, "Invalid IP header, ip-length zero\n");
965 		goto drop;
966 	}
967 
968 	/* Check for valid SA context */
969 	sa_info = (struct qmem *)x->xso.offload_handle;
970 	if (!sa_info)
971 		goto drop;
972 
973 	memset(&inst, 0, sizeof(struct cpt_inst_s));
974 
975 	/* Get authentication offset */
976 	if (x->props.family == AF_INET)
977 		auth_offset = sizeof(struct iphdr);
978 	else
979 		auth_offset = sizeof(struct ipv6hdr);
980 
981 	/* IV offset is after ESP header */
982 	iv_offset = auth_offset + sizeof(struct ip_esp_hdr);
983 	/* Encap will start after IV */
984 	encap_offset = iv_offset + GCM_RFC4106_IV_SIZE;
985 
986 	/* CPT Instruction word-1 */
987 	res = (struct cpt_res_s *)(sq->cpt_resp->base + (64 * sq->head));
988 	res->compcode = 0;
989 	inst.res_addr = sq->cpt_resp->iova + (64 * sq->head);
990 
991 	/* CPT Instruction word-2 */
992 	inst.rvu_pf_func = pf->pcifunc;
993 
994 	/* CPT Instruction word-3:
995 	 * Set QORD to force CPT_RES_S write completion
996 	 */
997 	inst.qord = 1;
998 
999 	/* CPT Instruction word-4 */
1000 	/* inst.dlen should not include ICV length */
1001 	inst.dlen = dlen + ETH_HLEN - (x->aead->alg_icv_len / 8);
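	/* e.g. for a 1000-byte IP datagram with AES-GCM-ICV16 (128-bit ICV):
	 * dlen = 1000 + 14 (ETH_HLEN) - 16 = 998 bytes handed to CPT.
	 */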
1002 	inst.opcode_major = CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC;
1003 	inst.param1 = cn10k_ipsec_get_param1(iv_offset);
1004 
1005 	inst.param2 = encap_offset <<
1006 		       CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT;
1007 	inst.param2 |= (u16)auth_offset <<
1008 			CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT;
1009 
1010 	/* CPT Instruction word-5 */
1011 	gthr_size = num_segs / MAX_SEGS_PER_SG;
1012 	gthr_size = (num_segs % MAX_SEGS_PER_SG) ? gthr_size + 1 : gthr_size;
1013 
1014 	gthr_size &= 0xF;
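	/* With up to MAX_SEGS_PER_SG (3 in this driver) segments per gather
	 * entry, e.g. a 5-segment skb needs ceil(5 / 3) = 2 gather entries;
	 * gthr_size is encoded into dptr[63:60] below.
	 */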
1015 	dptr_iova = (sq->sqe_ring->iova + (sq->head * (sq->sqe_size * 2)));
1016 	inst.dptr = dptr_iova | ((u64)gthr_size << 60);
1017 
1018 	/* CPT Instruction word-6 */
1019 	inst.rptr = inst.dptr;
1020 
1021 	/* CPT Instruction word-7 */
1022 	inst.cptr = sa_info->iova;
1023 	inst.ctx_val = 1;
1024 	inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP;
1025 
1026 	/* CPT Instruction word-0 */
1027 	inst.nixtxl = (size / 16) - 1;
1028 	inst.dat_offset = ETH_HLEN;
1029 	inst.nixtx_offset = sq->sqe_size;
1030 
1031 	netdev_tx_sent_queue(txq, skb->len);
1032 
1033 	/* Finally Flush the CPT instruction */
1034 	sq->head++;
1035 	sq->head &= (sq->sqe_cnt - 1);
1036 	cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
1037 	return true;
1038 drop:
1039 	dev_kfree_skb_any(skb);
1040 	return false;
1041 }
1042