xref: /linux/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c (revision 95298d63c67673c654c08952672d016212b26054)
// SPDX-License-Identifier: GPL-2.0
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <crypto/internal/skcipher.h>

#include "nitrox_dev.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
/* PKT_IN_HDR + SLC_STORE_INFO */
#define FDATA_SIZE 32
/* Base destination port for the solicited requests */
#define SOLICIT_BASE_DPORT 256

#define REQ_NOT_POSTED 1
#define REQ_BACKLOG    2
#define REQ_POSTED     3

/*
 * Response codes from SE microcode
 * 0x00 - Success
 *   Completion with no error
 * 0x43 - ERR_GC_DATA_LEN_INVALID
 *   Invalid Data length if Encryption Data length is
 *   less than 16 bytes for AES-XTS and AES-CTS.
 * 0x45 - ERR_GC_CTX_LEN_INVALID
 *   Invalid context length: CTXL != 23 words.
 * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID
 *   DOCSIS support is enabled with other than
 *   AES/DES-CBC mode encryption.
 * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID
 *   Authentication offset is other than 0 with
 *   Encryption IV source = 0.
 *   Authentication offset is other than 8 (DES)/16 (AES)
 *   with Encryption IV source = 1.
 * 0x51 - ERR_GC_CRC32_INVALID_SELECTION
 *   CRC32 is enabled for other than DOCSIS encryption.
 * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID
 *   Invalid flag options in AES-CCM IV.
 */

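/*
 * incr_index - advance a ring index with wrap-around; for example,
 * incr_index(max - 1, 1, max) == 0. Callers in this file only pass
 * count == 1.
 */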
static inline int incr_index(int index, int count, int max)
{
	if ((index + count) >= max)
		index = index + count - max;
	else
		index += count;

	return index;
}

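/* Unmap input/output scatterlists and free their SG component buffers. */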
static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
{
	struct nitrox_device *ndev = sr->ndev;
	struct device *dev = DEV(ndev);

	dma_unmap_sg(dev, sr->in.sg, sr->in.sgmap_cnt, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len,
			 DMA_TO_DEVICE);
	kfree(sr->in.sgcomp);
	sr->in.sg = NULL;
	sr->in.sgmap_cnt = 0;

	dma_unmap_sg(dev, sr->out.sg, sr->out.sgmap_cnt,
		     DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len,
			 DMA_TO_DEVICE);
	kfree(sr->out.sgcomp);
	sr->out.sg = NULL;
	sr->out.sgmap_cnt = 0;
}

static void softreq_destroy(struct nitrox_softreq *sr)
{
	softreq_unmap_sgbufs(sr);
	kfree(sr);
}

/**
 * create_sg_component - create SG components for N5 device.
 * @sr: Request structure
 * @sgtbl: SG table
 * @map_nents: number of DMA-mapped entries
 *
 * Component structure
 *
 *   63     48 47     32 31    16 15      0
 *   --------------------------------------
 *   |   LEN0  |  LEN1  |  LEN2  |  LEN3  |
 *   --------------------------------------
 *   |               PTR0                 |
 *   --------------------------------------
 *   |               PTR1                 |
 *   --------------------------------------
 *   |               PTR2                 |
 *   --------------------------------------
 *   |               PTR3                 |
 *   --------------------------------------
 *
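 *   For example, map_nents = 6 gives nr_sgcomp = 2: the first
 *   component holds entries 0-3, the second holds entries 4-5, and
 *   the unused LEN/PTR slots stay zero (the buffer is kzalloc'ed).
 *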
 *   Returns 0 on success, or a negative errno code on error.
 */
static int create_sg_component(struct nitrox_softreq *sr,
			       struct nitrox_sgtable *sgtbl, int map_nents)
{
	struct nitrox_device *ndev = sr->ndev;
	struct nitrox_sgcomp *sgcomp;
	struct scatterlist *sg;
	dma_addr_t dma;
	size_t sz_comp;
	int i, j, nr_sgcomp;

	nr_sgcomp = roundup(map_nents, 4) / 4;

	/* each component holds 4 dma pointers */
	sz_comp = nr_sgcomp * sizeof(*sgcomp);
	sgcomp = kzalloc(sz_comp, sr->gfp);
	if (!sgcomp)
		return -ENOMEM;

	sgtbl->sgcomp = sgcomp;

	sg = sgtbl->sg;
	/* populate device sg component */
	for (i = 0; i < nr_sgcomp; i++) {
		for (j = 0; j < 4 && sg; j++) {
			sgcomp[i].len[j] = cpu_to_be16(sg_dma_len(sg));
			sgcomp[i].dma[j] = cpu_to_be64(sg_dma_address(sg));
			sg = sg_next(sg);
		}
	}
	/* map the device sg component */
	dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE);
	if (dma_mapping_error(DEV(ndev), dma)) {
		kfree(sgtbl->sgcomp);
		sgtbl->sgcomp = NULL;
		return -ENOMEM;
	}

	sgtbl->sgcomp_dma = dma;
	sgtbl->sgcomp_len = sz_comp;

	return 0;
}

/**
 * dma_map_inbufs - DMA map the input sglist and create the sglist
 *                  component for N5 device.
 * @sr: Request structure
 * @req: Crypto request structure
 *
 * Returns 0 on success, or a negative errno code on error.
 */
static int dma_map_inbufs(struct nitrox_softreq *sr,
			  struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	struct scatterlist *sg = req->src;
	int i, nents, ret = 0;

	nents = dma_map_sg(dev, req->src, sg_nents(req->src),
			   DMA_BIDIRECTIONAL);
	if (!nents)
		return -EINVAL;

	for_each_sg(req->src, sg, nents, i)
		sr->in.total_bytes += sg_dma_len(sg);

	sr->in.sg = req->src;
	sr->in.sgmap_cnt = nents;
	ret = create_sg_component(sr, &sr->in, sr->in.sgmap_cnt);
	if (ret)
		goto incomp_err;

	return 0;

incomp_err:
	dma_unmap_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
	sr->in.sgmap_cnt = 0;
	/* reset the sg pointer too, for symmetry with dma_map_outbufs() */
	sr->in.sg = NULL;
	return ret;
}

static int dma_map_outbufs(struct nitrox_softreq *sr,
			   struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	int nents, ret = 0;

	nents = dma_map_sg(dev, req->dst, sg_nents(req->dst),
			   DMA_BIDIRECTIONAL);
	if (!nents)
		return -EINVAL;

	sr->out.sg = req->dst;
	sr->out.sgmap_cnt = nents;
	ret = create_sg_component(sr, &sr->out, sr->out.sgmap_cnt);
	if (ret)
		goto outcomp_map_err;

	return 0;

outcomp_map_err:
	dma_unmap_sg(dev, req->dst, nents, DMA_BIDIRECTIONAL);
	sr->out.sgmap_cnt = 0;
	sr->out.sg = NULL;
	return ret;
}

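/*
 * Map both source and destination scatterlists; if the output
 * mapping fails, the already-mapped input side is unwound here.
 */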
static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
				    struct se_crypto_request *creq)
{
	int ret;

	ret = dma_map_inbufs(sr, creq);
	if (ret)
		return ret;

	ret = dma_map_outbufs(sr, creq);
	if (ret)
		softreq_unmap_sgbufs(sr);

	return ret;
}

static inline void backlog_list_add(struct nitrox_softreq *sr,
				    struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->backlog);

	spin_lock_bh(&cmdq->backlog_qlock);
	list_add_tail(&sr->backlog, &cmdq->backlog_head);
	atomic_inc(&cmdq->backlog_count);
	atomic_set(&sr->status, REQ_BACKLOG);
	spin_unlock_bh(&cmdq->backlog_qlock);
}

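/*
 * Requests posted to the ring stay on the per-queue response list,
 * in posting order, until the device writes back their completion.
 */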
static inline void response_list_add(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->response);

	spin_lock_bh(&cmdq->resp_qlock);
	list_add_tail(&sr->response, &cmdq->response_head);
	spin_unlock_bh(&cmdq->resp_qlock);
}

static inline void response_list_del(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	spin_lock_bh(&cmdq->resp_qlock);
	list_del(&sr->response);
	spin_unlock_bh(&cmdq->resp_qlock);
}

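/* Peek at the oldest posted request, or NULL if the list is empty. */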
static struct nitrox_softreq *
get_first_response_entry(struct nitrox_cmdq *cmdq)
{
	return list_first_entry_or_null(&cmdq->response_head,
					struct nitrox_softreq, response);
}

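/*
 * cmdq_full - speculatively reserve a ring slot by bumping
 * pending_count; on a full ring the count is restored. A successful
 * reservation is paired with the atomic_dec() in
 * process_response_list().
 */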
static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
{
	if (atomic_inc_return(&cmdq->pending_count) > qlen) {
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		return true;
	}
	/* sync with other cpus */
	smp_mb__after_atomic();
	return false;
}

/**
 * post_se_instr - Post SE instruction to Packet Input ring
 * @sr: Request structure
 * @cmdq: Command queue structure
 *
 * Copies the instruction into the ring and rings the doorbell; the
 * caller must have reserved a slot via cmdq_full() beforehand.
 */
static void post_se_instr(struct nitrox_softreq *sr,
			  struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = sr->ndev;
	int idx;
	u8 *ent;

	spin_lock_bh(&cmdq->cmd_qlock);

	idx = cmdq->write_idx;
	/* copy the instruction */
	ent = cmdq->base + (idx * cmdq->instr_size);
	memcpy(ent, &sr->instr, cmdq->instr_size);

	atomic_set(&sr->status, REQ_POSTED);
	response_list_add(sr, cmdq);
	sr->tstamp = jiffies;
	/* flush the command queue updates */
	dma_wmb();

	/* Ring doorbell with count 1 */
	writeq(1, cmdq->dbell_csr_addr);

	cmdq->write_idx = incr_index(idx, 1, ndev->qlen);

	spin_unlock_bh(&cmdq->cmd_qlock);

	/* increment the posted command count */
	atomic64_inc(&ndev->stats.posted);
}

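/* Drain the backlog into the ring until it empties or the ring fills. */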
static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr, *tmp;
	int ret = 0;

	if (!atomic_read(&cmdq->backlog_count))
		return 0;

	spin_lock_bh(&cmdq->backlog_qlock);

	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
		/* submit until space available */
		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
			ret = -ENOSPC;
			break;
		}
		/* delete from backlog list */
		list_del(&sr->backlog);
		atomic_dec(&cmdq->backlog_count);
		/* sync with other cpus */
		smp_mb__after_atomic();

		/* post the command */
		post_se_instr(sr, cmdq);
	}
	spin_unlock_bh(&cmdq->backlog_qlock);

	return ret;
}

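/*
 * nitrox_enqueue_request - post the request, or backlog it when the
 * ring is full and CRYPTO_TFM_REQ_MAY_BACKLOG is set; otherwise the
 * request is dropped with -ENOSPC.
 */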
static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
	struct nitrox_cmdq *cmdq = sr->cmdq;
	struct nitrox_device *ndev = sr->ndev;

	/* try to post backlog requests */
	post_backlog_cmds(cmdq);

	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			/* increment drop count */
			atomic64_inc(&ndev->stats.dropped);
			return -ENOSPC;
		}
		/* add to backlog list */
		backlog_list_add(sr, cmdq);
		return -EINPROGRESS;
	}
	post_se_instr(sr, cmdq);

	return -EINPROGRESS;
}

/**
 * nitrox_process_se_request - Send request to SE core
 * @ndev: NITROX device
 * @req: Crypto request
 * @callback: Completion callback
 * @cb_arg: Completion callback argument
 *
 * Returns -EINPROGRESS if the request is queued, or a negative
 * errno code on error.
 */
int nitrox_process_se_request(struct nitrox_device *ndev,
			      struct se_crypto_request *req,
			      completion_t callback,
			      void *cb_arg)
{
	struct nitrox_softreq *sr;
	dma_addr_t ctx_handle = 0;
	int qno, ret = 0;

	if (!nitrox_ready(ndev))
		return -ENODEV;

	sr = kzalloc(sizeof(*sr), req->gfp);
	if (!sr)
		return -ENOMEM;

	sr->ndev = ndev;
	sr->flags = req->flags;
	sr->gfp = req->gfp;
	sr->callback = callback;
	sr->cb_arg = cb_arg;

	atomic_set(&sr->status, REQ_NOT_POSTED);

	sr->resp.orh = req->orh;
	sr->resp.completion = req->comp;

	ret = softreq_map_iobuf(sr, req);
	if (ret) {
		kfree(sr);
		return ret;
	}

	/* get the context handle */
	if (req->ctx_handle) {
		struct ctx_hdr *hdr;
		u8 *ctx_ptr;

		ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
		hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr));
		ctx_handle = hdr->ctx_dma;
	}

	/* select the queue */
	qno = smp_processor_id() % ndev->nr_queues;

	sr->cmdq = &ndev->pkt_inq[qno];

	/*
	 * 64-Byte Instruction Format
	 *
	 *  ----------------------
	 *  |      DPTR0         | 8 bytes
	 *  ----------------------
	 *  |  PKT_IN_INSTR_HDR  | 8 bytes
	 *  ----------------------
	 *  |    PKT_IN_HDR      | 16 bytes
	 *  ----------------------
	 *  |    SLC_INFO        | 16 bytes
	 *  ----------------------
	 *  |   Front data       | 16 bytes
	 *  ----------------------
	 */

	/* fill the packet instruction */
	/* word 0 */
	sr->instr.dptr0 = cpu_to_be64(sr->in.sgcomp_dma);

	/* word 1 */
	sr->instr.ih.value = 0;
	sr->instr.ih.s.g = 1;
	sr->instr.ih.s.gsz = sr->in.sgmap_cnt;
	sr->instr.ih.s.ssz = sr->out.sgmap_cnt;
	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
	sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value);

	/* word 2 */
	sr->instr.irh.value[0] = 0;
	sr->instr.irh.s.uddl = MIN_UDD_LEN;
	/* context length in 64-bit words */
	sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
	/* offset from solicit base port 256 */
	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
	sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
	sr->instr.irh.s.arg = req->ctrl.s.arg;
	sr->instr.irh.s.opcode = req->opcode;
	sr->instr.irh.value[0] = cpu_to_be64(sr->instr.irh.value[0]);

	/* word 3 */
	sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);

	/* word 4 */
	sr->instr.slc.value[0] = 0;
	sr->instr.slc.s.ssz = sr->out.sgmap_cnt;
	sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]);

	/* word 5 */
	sr->instr.slc.s.rptr = cpu_to_be64(sr->out.sgcomp_dma);

	/*
	 * No conversion needed for the front data; it goes untouched
	 * into the payload. Put the GP header in front data.
	 */
	sr->instr.fdata[0] = *((u64 *)&req->gph);
	sr->instr.fdata[1] = 0;

	ret = nitrox_enqueue_request(sr);
	if (ret == -ENOSPC)
		goto send_fail;

	return ret;

send_fail:
	softreq_destroy(sr);
	return ret;
}
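
/*
 * Illustrative call pattern (a sketch only; the real callers are the
 * skcipher and AEAD glue elsewhere in this driver, and my_done_cb is
 * a hypothetical completion handler):
 *
 *	ret = nitrox_process_se_request(ndev, &creq, my_done_cb, arg);
 *	if (ret != -EINPROGRESS)
 *		goto err;	// mapping failure or queue-full drop
 *	// my_done_cb(arg, err) runs later from the response tasklet
 */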

static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout)
{
	return time_after_eq(jiffies, (tstamp + timeout));
}

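/*
 * Work handler, scheduled from the response tasklet, that retries
 * backlogged commands in process context.
 */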
void backlog_qflush_work(struct work_struct *work)
{
	struct nitrox_cmdq *cmdq;

	cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
	post_backlog_cmds(cmdq);
}

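/*
 * sr_completed - true once the device has written back the response.
 * An ORH carrying a nonzero error code completes the request
 * immediately; otherwise the completion word is polled for up to ~1ms.
 */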
static bool sr_completed(struct nitrox_softreq *sr)
{
	u64 orh = READ_ONCE(*sr->resp.orh);
	unsigned long timeout = jiffies + msecs_to_jiffies(1);

	if ((orh != PENDING_SIG) && (orh & 0xff))
		return true;

	while (READ_ONCE(*sr->resp.completion) == PENDING_SIG) {
		if (time_after(jiffies, timeout)) {
			pr_err("comp not done\n");
			return false;
		}
	}

	return true;
}

/**
 * process_response_list - process completed requests
 * @cmdq: Command queue structure
 *
 * Walks the response list in posting order and completes every
 * request whose ORH/completion words the device has written back.
 */
static void process_response_list(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr;
	int req_completed = 0, err = 0, budget;
	completion_t callback;
	void *cb_arg;

	/* check all pending requests */
	budget = atomic_read(&cmdq->pending_count);

	while (req_completed < budget) {
		sr = get_first_response_entry(cmdq);
		if (!sr)
			break;

		if (atomic_read(&sr->status) != REQ_POSTED)
			break;

		/* check orh and completion bytes updates */
		if (!sr_completed(sr)) {
			/* request not completed, check for timeout */
			if (!cmd_timeout(sr->tstamp, ndev->timeout))
				break;
			dev_err_ratelimited(DEV(ndev),
					    "Request timeout, orh 0x%016llx\n",
					    READ_ONCE(*sr->resp.orh));
		}
		atomic_dec(&cmdq->pending_count);
		atomic64_inc(&ndev->stats.completed);
		/* sync with other cpus */
		smp_mb__after_atomic();
		/* remove from response list */
		response_list_del(sr, cmdq);
		/* ORH error code */
		err = READ_ONCE(*sr->resp.orh) & 0xff;
		callback = sr->callback;
		cb_arg = sr->cb_arg;
		softreq_destroy(sr);
		if (callback)
			callback(cb_arg, err);

		req_completed++;
	}
}

/**
 * pkt_slc_resp_tasklet - post processing of SE responses
 * @data: tasklet argument, the nitrox_q_vector of the queue
 */
void pkt_slc_resp_tasklet(unsigned long data)
{
	struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
	struct nitrox_cmdq *cmdq = qvec->cmdq;
	union nps_pkt_slc_cnts slc_cnts;

	/* read completion count */
	slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
	/* resend the interrupt if more work to do */
	slc_cnts.s.resend = 1;

	process_response_list(cmdq);

	/*
	 * clear the interrupt with resend bit enabled,
	 * MSI-X interrupt generates if Completion count > Threshold
	 */
	writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);

	if (atomic_read(&cmdq->backlog_count))
		schedule_work(&cmdq->backlog_qflush);
}