xref: /linux/drivers/crypto/cavium/cpt/cptvf_reqmanager.c (revision b50ecc5aca4d18f1f0c4942f5c797bc85edef144)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Cavium, Inc.
 */

#include "cptvf.h"
#include "cptvf_algs.h"
#include "request_manager.h"

/**
 * get_free_pending_entry - get free entry from pending queue
 * @q: pending queue
 * @qlen: queue length
 */
static struct pending_entry *get_free_pending_entry(struct pending_queue *q,
						    int qlen)
{
	struct pending_entry *ent = NULL;

	ent = &q->head[q->rear];
	if (unlikely(ent->busy)) {
		ent = NULL;
		goto no_free_entry;
	}

	q->rear++;
	if (unlikely(q->rear == qlen))
		q->rear = 0;

no_free_entry:
	return ent;
}

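/**
 * pending_queue_inc_front - advance the front index of a pending queue
 * @pqinfo: pending queue info
 * @qno: queue number
 *
 * Wraps the front index back to zero once it reaches the queue length.
 */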
static inline void pending_queue_inc_front(struct pending_qinfo *pqinfo,
					   int qno)
{
	struct pending_queue *queue = &pqinfo->queue[qno];

	queue->front++;
	if (unlikely(queue->front == pqinfo->qlen))
		queue->front = 0;
}

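/**
 * setup_sgio_components - DMA-map buffers and pack hardware SG components
 * @cptvf: CPT VF device
 * @list: array of buffer pointers to map
 * @buf_count: number of entries in @list
 * @buffer: destination for the packed sglist_component entries
 *
 * Maps each buffer for DMA and packs the lengths and addresses, four per
 * component, in the big-endian layout expected by the CPT engine.
 */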
static int setup_sgio_components(struct cpt_vf *cptvf, struct buf_ptr *list,
				 int buf_count, u8 *buffer)
{
	int ret = 0, i, j;
	int components;
	struct sglist_component *sg_ptr = NULL;
	struct pci_dev *pdev = cptvf->pdev;

	if (unlikely(!list)) {
		dev_err(&pdev->dev, "Input List pointer is NULL\n");
		return -EFAULT;
	}

	for (i = 0; i < buf_count; i++) {
		if (likely(list[i].vptr)) {
			list[i].dma_addr = dma_map_single(&pdev->dev,
							  list[i].vptr,
							  list[i].size,
							  DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(&pdev->dev,
						       list[i].dma_addr))) {
				dev_err(&pdev->dev, "DMA map kernel buffer failed for component: %d\n",
					i);
				ret = -EIO;
				goto sg_cleanup;
			}
		}
	}

	components = buf_count / 4;
	sg_ptr = (struct sglist_component *)buffer;
	for (i = 0; i < components; i++) {
		sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
		sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
		sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
		sg_ptr->u.s.len3 = cpu_to_be16(list[i * 4 + 3].size);
		sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
		sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
		sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
		sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
		sg_ptr++;
	}

	components = buf_count % 4;

	switch (components) {
	case 3:
		sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
		sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
		fallthrough;
	case 2:
		sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
		sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
		fallthrough;
	case 1:
		sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
		sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
		break;
	default:
		break;
	}

	return ret;

sg_cleanup:
	for (j = 0; j < i; j++) {
		if (list[j].dma_addr) {
			dma_unmap_single(&pdev->dev, list[j].dma_addr,
					 list[j].size, DMA_BIDIRECTIONAL);
		}

		list[j].dma_addr = 0;
	}

	return ret;
}

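/**
 * setup_sgio_list - build gather/scatter lists and DPTR/RPTR for a request
 * @cptvf: CPT VF device
 * @info: per-request info buffer to populate
 * @req: request describing the input and output buffers
 */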
static inline int setup_sgio_list(struct cpt_vf *cptvf,
				  struct cpt_info_buffer *info,
				  struct cpt_request_info *req)
{
	u16 g_sz_bytes = 0, s_sz_bytes = 0;
	int ret = 0;
	struct pci_dev *pdev = cptvf->pdev;

	if (req->incnt > MAX_SG_IN_CNT || req->outcnt > MAX_SG_OUT_CNT) {
		dev_err(&pdev->dev, "Request SG components are higher than supported\n");
		ret = -EINVAL;
		goto scatter_gather_clean;
	}

	/* Setup gather (input) components */
	g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component);
	info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->gather_components) {
		ret = -ENOMEM;
		goto scatter_gather_clean;
	}

	ret = setup_sgio_components(cptvf, req->in,
				    req->incnt,
				    info->gather_components);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup gather list\n");
		ret = -EFAULT;
		goto scatter_gather_clean;
	}

	/* Setup scatter (output) components */
	s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component);
	info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->scatter_components) {
		ret = -ENOMEM;
		goto scatter_gather_clean;
	}

	ret = setup_sgio_components(cptvf, req->out,
				    req->outcnt,
				    info->scatter_components);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup scatter list\n");
		ret = -EFAULT;
		goto scatter_gather_clean;
	}

	/* Create and initialize DPTR */
	info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
	info->in_buffer = kzalloc(info->dlen, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->in_buffer) {
		ret = -ENOMEM;
		goto scatter_gather_clean;
	}

	((__be16 *)info->in_buffer)[0] = cpu_to_be16(req->outcnt);
	((__be16 *)info->in_buffer)[1] = cpu_to_be16(req->incnt);
	((__be16 *)info->in_buffer)[2] = 0;
	((__be16 *)info->in_buffer)[3] = 0;

	memcpy(&info->in_buffer[8], info->gather_components,
	       g_sz_bytes);
	memcpy(&info->in_buffer[8 + g_sz_bytes],
	       info->scatter_components, s_sz_bytes);

	info->dptr_baddr = dma_map_single(&pdev->dev,
					  (void *)info->in_buffer,
					  info->dlen,
					  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, info->dptr_baddr)) {
		dev_err(&pdev->dev, "Mapping DPTR Failed %d\n", info->dlen);
		ret = -EIO;
		goto scatter_gather_clean;
	}

	/* Create and initialize RPTR */
	info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->out_buffer) {
		ret = -ENOMEM;
		goto scatter_gather_clean;
	}

	*((u64 *)info->out_buffer) = ~((u64)COMPLETION_CODE_INIT);
	info->alternate_caddr = (u64 *)info->out_buffer;
	info->rptr_baddr = dma_map_single(&pdev->dev,
					  (void *)info->out_buffer,
					  COMPLETION_CODE_SIZE,
					  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, info->rptr_baddr)) {
		dev_err(&pdev->dev, "Mapping RPTR Failed %d\n",
			COMPLETION_CODE_SIZE);
		ret = -EIO;
		goto scatter_gather_clean;
	}

	return 0;

scatter_gather_clean:
	return ret;
}

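/**
 * send_cpt_command - post a CPT instruction to a command queue
 * @cptvf: CPT VF device
 * @cmd: CPT instruction to submit
 * @qno: command queue number
 *
 * Copies the instruction into the current command chunk and rings the
 * doorbell, all under the queue lock.
 */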
static int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
			    u32 qno)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct command_qinfo *qinfo = NULL;
	struct command_queue *queue;
	struct command_chunk *chunk;
	u8 *ent;
	int ret = 0;

	if (unlikely(qno >= cptvf->nr_queues)) {
		dev_err(&pdev->dev, "Invalid queue (qno: %d, nr_queues: %d)\n",
			qno, cptvf->nr_queues);
		return -EINVAL;
	}

	qinfo = &cptvf->cqinfo;
	queue = &qinfo->queue[qno];
	/* lock command queue */
	spin_lock(&queue->lock);
	ent = &queue->qhead->head[queue->idx * qinfo->cmd_size];
	memcpy(ent, (void *)cmd, qinfo->cmd_size);

	if (++queue->idx >= queue->qhead->size / 64) {
		hlist_for_each_entry(chunk, &queue->chead, nextchunk) {
			if (chunk == queue->qhead)
				continue;
			queue->qhead = chunk;
			break;
		}
		queue->idx = 0;
	}
	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();
	cptvf_write_vq_doorbell(cptvf, 1);
	/* unlock command queue */
	spin_unlock(&queue->lock);

	return ret;
}

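/**
 * do_request_cleanup - unmap DMA buffers and free per-request memory
 * @cptvf: CPT VF device
 * @info: per-request info buffer to tear down
 */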
static void do_request_cleanup(struct cpt_vf *cptvf,
			       struct cpt_info_buffer *info)
{
	int i;
	struct pci_dev *pdev = cptvf->pdev;
	struct cpt_request_info *req;

	if (info->dptr_baddr)
		dma_unmap_single(&pdev->dev, info->dptr_baddr,
				 info->dlen, DMA_BIDIRECTIONAL);

	if (info->rptr_baddr)
		dma_unmap_single(&pdev->dev, info->rptr_baddr,
				 COMPLETION_CODE_SIZE, DMA_BIDIRECTIONAL);

	if (info->comp_baddr)
		dma_unmap_single(&pdev->dev, info->comp_baddr,
				 sizeof(union cpt_res_s), DMA_BIDIRECTIONAL);

	if (info->req) {
		req = info->req;
		for (i = 0; i < req->outcnt; i++) {
			if (req->out[i].dma_addr)
				dma_unmap_single(&pdev->dev,
						 req->out[i].dma_addr,
						 req->out[i].size,
						 DMA_BIDIRECTIONAL);
		}

		for (i = 0; i < req->incnt; i++) {
			if (req->in[i].dma_addr)
				dma_unmap_single(&pdev->dev,
						 req->in[i].dma_addr,
						 req->in[i].size,
						 DMA_BIDIRECTIONAL);
		}
	}

	kfree_sensitive(info->scatter_components);
	kfree_sensitive(info->gather_components);
	kfree_sensitive(info->out_buffer);
	kfree_sensitive(info->in_buffer);
	kfree_sensitive((void *)info->completion_addr);
	kfree_sensitive(info);
}

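/**
 * do_post_process - post-processing performed once a request has completed
 * @cptvf: CPT VF device
 * @info: per-request info buffer
 */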
static void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!info) {
		dev_err(&pdev->dev, "incorrect cpt_info_buffer for post processing\n");
		return;
	}

	do_request_cleanup(cptvf, info);
}

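/**
 * process_pending_queue - reap completed or timed-out entries from a queue
 * @cptvf: CPT VF device
 * @pqinfo: pending queue info
 * @qno: pending queue number
 *
 * Walks the queue from the front, cleaning up faulted and timed-out
 * requests and invoking the completion callback for serviced entries.
 */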
static inline void process_pending_queue(struct cpt_vf *cptvf,
					 struct pending_qinfo *pqinfo,
					 int qno)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct pending_queue *pqueue = &pqinfo->queue[qno];
	struct pending_entry *pentry = NULL;
	struct cpt_info_buffer *info = NULL;
	union cpt_res_s *status = NULL;
	unsigned char ccode;

	while (1) {
		spin_lock_bh(&pqueue->lock);
		pentry = &pqueue->head[pqueue->front];
		if (unlikely(!pentry->busy)) {
			spin_unlock_bh(&pqueue->lock);
			break;
		}

		info = (struct cpt_info_buffer *)pentry->post_arg;
		if (unlikely(!info)) {
			dev_err(&pdev->dev, "Pending Entry post arg NULL\n");
			pending_queue_inc_front(pqinfo, qno);
			spin_unlock_bh(&pqueue->lock);
			continue;
		}

		status = (union cpt_res_s *)pentry->completion_addr;
		ccode = status->s.compcode;
		if ((status->s.compcode == CPT_COMP_E_FAULT) ||
		    (status->s.compcode == CPT_COMP_E_SWERR)) {
			dev_err(&pdev->dev, "Request failed with %s\n",
				(status->s.compcode == CPT_COMP_E_FAULT) ?
				"DMA Fault" : "Software error");
			pentry->completion_addr = NULL;
			pentry->busy = false;
			atomic64_dec(&pqueue->pending_count);
			pentry->post_arg = NULL;
			pending_queue_inc_front(pqinfo, qno);
			do_request_cleanup(cptvf, info);
			spin_unlock_bh(&pqueue->lock);
			break;
		} else if (status->s.compcode == COMPLETION_CODE_INIT) {
			/* check for timeout */
			if (time_after_eq(jiffies,
					  (info->time_in +
					  (CPT_COMMAND_TIMEOUT * HZ)))) {
				dev_err(&pdev->dev, "Request timed out\n");
				pentry->completion_addr = NULL;
				pentry->busy = false;
				atomic64_dec(&pqueue->pending_count);
				pentry->post_arg = NULL;
				pending_queue_inc_front(pqinfo, qno);
				do_request_cleanup(cptvf, info);
				spin_unlock_bh(&pqueue->lock);
				break;
			} else if ((*info->alternate_caddr ==
				(~COMPLETION_CODE_INIT)) &&
				(info->extra_time < TIME_IN_RESET_COUNT)) {
				info->time_in = jiffies;
				info->extra_time++;
				spin_unlock_bh(&pqueue->lock);
				break;
			}
		}

		pentry->completion_addr = NULL;
		pentry->busy = false;
		pentry->post_arg = NULL;
		atomic64_dec(&pqueue->pending_count);
		pending_queue_inc_front(pqinfo, qno);
		spin_unlock_bh(&pqueue->lock);

		do_post_process(info->cptvf, info);
		/*
		 * Calling callback after we find
		 * that the request has been serviced
		 */
		pentry->callback(ccode, pentry->callback_arg);
	}
}

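/**
 * process_request - build and submit a CPT instruction for a request
 * @cptvf: CPT VF device
 * @req: request to submit
 *
 * Sets up the SG lists, completion buffer and pending queue entry, then
 * sends the CPT_INST_S command to the hardware.
 */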
int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
{
	int ret = 0, clear = 0, queue = 0;
	struct cpt_info_buffer *info = NULL;
	struct cptvf_request *cpt_req = NULL;
	union ctrl_info *ctrl = NULL;
	union cpt_res_s *result = NULL;
	struct pending_entry *pentry = NULL;
	struct pending_queue *pqueue = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	u8 group = 0;
	struct cpt_vq_command vq_cmd;
	union cpt_inst_s cptinst;

	info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (unlikely(!info)) {
		dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n");
		return -ENOMEM;
	}

	cpt_req = (struct cptvf_request *)&req->req;
	ctrl = (union ctrl_info *)&req->ctrl;

	info->cptvf = cptvf;
	group = ctrl->s.grp;
	ret = setup_sgio_list(cptvf, info, req);
	if (ret) {
		dev_err(&pdev->dev, "Setting up SG list failed\n");
		goto request_cleanup;
	}

	cpt_req->dlen = info->dlen;
	/*
	 * Get buffer for union cpt_res_s response
	 * structure and its physical address
	 */
	info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (unlikely(!info->completion_addr)) {
		dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
		ret = -ENOMEM;
		goto request_cleanup;
	}

	result = (union cpt_res_s *)info->completion_addr;
	result->s.compcode = COMPLETION_CODE_INIT;
	info->comp_baddr = dma_map_single(&pdev->dev,
					  (void *)info->completion_addr,
					  sizeof(union cpt_res_s),
					  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, info->comp_baddr)) {
		dev_err(&pdev->dev, "mapping compptr Failed %lu\n",
			sizeof(union cpt_res_s));
		ret = -EFAULT;
		goto request_cleanup;
	}

	/* Fill the VQ command */
	vq_cmd.cmd.u64 = 0;
	vq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags);
	vq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1);
	vq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
	vq_cmd.cmd.s.dlen   = cpu_to_be16(cpt_req->dlen);

	vq_cmd.dptr = info->dptr_baddr;
	vq_cmd.rptr = info->rptr_baddr;
	vq_cmd.cptr.u64 = 0;
	vq_cmd.cptr.s.grp = group;
	/* Get Pending Entry to submit command */
	/* Always queue 0, because 1 queue per VF */
	queue = 0;
	pqueue = &cptvf->pqinfo.queue[queue];

	if (atomic64_read(&pqueue->pending_count) > PENDING_THOLD) {
		dev_err(&pdev->dev, "pending threshold reached\n");
		process_pending_queue(cptvf, &cptvf->pqinfo, queue);
	}

get_pending_entry:
	spin_lock_bh(&pqueue->lock);
	pentry = get_free_pending_entry(pqueue, cptvf->pqinfo.qlen);
	if (unlikely(!pentry)) {
		spin_unlock_bh(&pqueue->lock);
		if (clear == 0) {
			process_pending_queue(cptvf, &cptvf->pqinfo, queue);
			clear = 1;
			goto get_pending_entry;
		}
		dev_err(&pdev->dev, "Get free entry failed\n");
		dev_err(&pdev->dev, "queue: %d, rear: %d, front: %d\n",
			queue, pqueue->rear, pqueue->front);
		ret = -EFAULT;
		goto request_cleanup;
	}

	pentry->completion_addr = info->completion_addr;
	pentry->post_arg = (void *)info;
	pentry->callback = req->callback;
	pentry->callback_arg = req->callback_arg;
	info->pentry = pentry;
	pentry->busy = true;
	atomic64_inc(&pqueue->pending_count);

	/* Send CPT command */
	info->pentry = pentry;
	info->time_in = jiffies;
	info->req = req;

	/* Create the CPT_INST_S type command for HW interpretation */
	cptinst.s.doneint = true;
	cptinst.s.res_addr = (u64)info->comp_baddr;
	cptinst.s.tag = 0;
	cptinst.s.grp = 0;
	cptinst.s.wq_ptr = 0;
	cptinst.s.ei0 = vq_cmd.cmd.u64;
	cptinst.s.ei1 = vq_cmd.dptr;
	cptinst.s.ei2 = vq_cmd.rptr;
	cptinst.s.ei3 = vq_cmd.cptr.u64;

	ret = send_cpt_command(cptvf, &cptinst, queue);
	spin_unlock_bh(&pqueue->lock);
	if (unlikely(ret)) {
		dev_err(&pdev->dev, "Send command failed for AE\n");
		ret = -EFAULT;
		goto request_cleanup;
	}

	return 0;

request_cleanup:
	dev_dbg(&pdev->dev, "Failed to submit CPT command\n");
	do_request_cleanup(cptvf, info);

	return ret;
}

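/**
 * vq_post_process - process completions pending on a virtual queue
 * @cptvf: CPT VF device
 * @qno: pending queue number
 */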
void vq_post_process(struct cpt_vf *cptvf, u32 qno)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (unlikely(qno >= cptvf->nr_queues)) {
		dev_err(&pdev->dev, "Request for post processing on invalid pending queue: %u\n",
			qno);
		return;
	}

	process_pending_queue(cptvf, &cptvf->pqinfo, qno);
}

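/**
 * cptvf_do_request - submit a request to a CPT VF device
 * @vfdev: CPT VF device (struct cpt_vf *)
 * @req: request to submit
 *
 * Rejects requests that do not match the VF type (SE vs AE) before
 * handing them to process_request().
 */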
int cptvf_do_request(void *vfdev, struct cpt_request_info *req)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)vfdev;
	struct pci_dev *pdev = cptvf->pdev;

	if (!cpt_device_ready(cptvf)) {
		dev_err(&pdev->dev, "CPT Device is not ready\n");
		return -ENODEV;
	}

	if ((cptvf->vftype == SE_TYPES) && (!req->ctrl.s.se_req)) {
		dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request\n",
			cptvf->vfid);
		return -EINVAL;
	} else if ((cptvf->vftype == AE_TYPES) && (req->ctrl.s.se_req)) {
		dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request\n",
			cptvf->vfid);
		return -EINVAL;
	}

	return process_request(cptvf, req);
}