xref: /linux/drivers/crypto/ccree/cc_request_mgr.c (revision 56fb34d86e875dbb0d3e6a81c5d3d035db373031)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 
4 #include <linux/kernel.h>
5 #include <linux/nospec.h>
6 #include "cc_driver.h"
7 #include "cc_buffer_mgr.h"
8 #include "cc_request_mgr.h"
9 #include "cc_pm.h"
10 
11 #define CC_MAX_POLL_ITER	10
12 /* The highest descriptor count in use */
13 #define CC_MAX_DESC_SEQ_LEN	23
14 
15 struct cc_req_mgr_handle {
16 	/* Request manager resources */
17 	unsigned int hw_queue_size; /* HW capability */
18 	unsigned int min_free_hw_slots;
19 	unsigned int max_used_sw_slots;
20 	struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
21 	u32 req_queue_head;
22 	u32 req_queue_tail;
23 	u32 axi_completed;
24 	u32 q_free_slots;
25 	/* This lock protects access to HW registers
26 	 * that must be accessed by a single request at a time
27 	 */
28 	spinlock_t hw_lock;
29 	struct cc_hw_desc compl_desc;
30 	u8 *dummy_comp_buff;
31 	dma_addr_t dummy_comp_buff_dma;
32 
33 	/* backlog queue */
34 	struct list_head backlog;
35 	unsigned int bl_len;
36 	spinlock_t bl_lock; /* protect backlog queue */
37 
38 #ifdef COMP_IN_WQ
39 	struct workqueue_struct *workq;
40 	struct delayed_work compwork;
41 #else
42 	struct tasklet_struct comptask;
43 #endif
44 	bool is_runtime_suspended;
45 };
46 
47 struct cc_bl_item {
48 	struct cc_crypto_req creq;
49 	struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
50 	unsigned int len;
51 	struct list_head list;
52 	bool notif;
53 };
54 
55 static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
56 	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
57 	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
58 	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
59 	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
60 	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
61 	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
62 	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
63 	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
64 	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
65 	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
66 	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
67 	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
68 	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
69 	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
70 	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
71 	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
72 };
73 
74 static void comp_handler(unsigned long devarg);
75 #ifdef COMP_IN_WQ
76 static void comp_work_handler(struct work_struct *work);
77 #endif
78 
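/*
 * cc_cpp_int_mask() - Look up the IRR "REE op aborted" interrupt bit for a
 * given CPP algorithm/slot pair. The indices are sanitized with
 * array_index_nospec() to avoid speculative out-of-bounds array reads.
 */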
79 static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
80 {
81 	alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
82 	slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);
83 
84 	return cc_cpp_int_masks[alg][slot];
85 }
86 
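/*
 * cc_req_mgr_fini() - Release request manager resources: free the dummy
 * completion DMA buffer, tear down the completion workqueue or tasklet and
 * free the handle itself.
 */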
87 void cc_req_mgr_fini(struct cc_drvdata *drvdata)
88 {
89 	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
90 	struct device *dev = drvdata_to_dev(drvdata);
91 
92 	if (!req_mgr_h)
93 		return; /* Not allocated */
94 
95 	if (req_mgr_h->dummy_comp_buff_dma) {
96 		dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
97 				  req_mgr_h->dummy_comp_buff_dma);
98 	}
99 
100 	dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
101 						req_mgr_h->min_free_hw_slots));
102 	dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
103 
104 #ifdef COMP_IN_WQ
105 	flush_workqueue(req_mgr_h->workq);
106 	destroy_workqueue(req_mgr_h->workq);
107 #else
108 	/* Kill tasklet */
109 	tasklet_kill(&req_mgr_h->comptask);
110 #endif
111 	kzfree(req_mgr_h);
112 	drvdata->request_mgr_handle = NULL;
113 }
114 
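/*
 * cc_req_mgr_init() - Allocate and set up the request manager: completion
 * workqueue/tasklet, HW queue size probing, the dummy completion DMA word
 * and the reusable "dummy" completion descriptor.
 */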
115 int cc_req_mgr_init(struct cc_drvdata *drvdata)
116 {
117 	struct cc_req_mgr_handle *req_mgr_h;
118 	struct device *dev = drvdata_to_dev(drvdata);
119 	int rc = 0;
120 
121 	req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
122 	if (!req_mgr_h) {
123 		rc = -ENOMEM;
124 		goto req_mgr_init_err;
125 	}
126 
127 	drvdata->request_mgr_handle = req_mgr_h;
128 
129 	spin_lock_init(&req_mgr_h->hw_lock);
130 	spin_lock_init(&req_mgr_h->bl_lock);
131 	INIT_LIST_HEAD(&req_mgr_h->backlog);
132 
133 #ifdef COMP_IN_WQ
134 	dev_dbg(dev, "Initializing completion workqueue\n");
135 	req_mgr_h->workq = create_singlethread_workqueue("ccree");
136 	if (!req_mgr_h->workq) {
137 		dev_err(dev, "Failed creating work queue\n");
138 		rc = -ENOMEM;
139 		goto req_mgr_init_err;
140 	}
141 	INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
142 #else
143 	dev_dbg(dev, "Initializing completion tasklet\n");
144 	tasklet_init(&req_mgr_h->comptask, comp_handler,
145 		     (unsigned long)drvdata);
146 #endif
147 	req_mgr_h->hw_queue_size = cc_ioread(drvdata,
148 					     CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
149 	dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
150 	if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
151 		dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
152 			req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
153 		rc = -ENOMEM;
154 		goto req_mgr_init_err;
155 	}
156 	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
157 	req_mgr_h->max_used_sw_slots = 0;
158 
159 	/* Allocate DMA word for "dummy" completion descriptor use */
160 	req_mgr_h->dummy_comp_buff =
161 		dma_alloc_coherent(dev, sizeof(u32),
162 				   &req_mgr_h->dummy_comp_buff_dma,
163 				   GFP_KERNEL);
164 	if (!req_mgr_h->dummy_comp_buff) {
165 		dev_err(dev, "Not enough memory to allocate DMA (%zu) dummy completion buffer\n",
166 			sizeof(u32));
167 		rc = -ENOMEM;
168 		goto req_mgr_init_err;
169 	}
170 
171 	/* Init. "dummy" completion descriptor */
172 	hw_desc_init(&req_mgr_h->compl_desc);
173 	set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
174 	set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
175 		      sizeof(u32), NS_BIT, 1);
176 	set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
177 	set_queue_last_ind(drvdata, &req_mgr_h->compl_desc);
178 
179 	return 0;
180 
181 req_mgr_init_err:
182 	cc_req_mgr_fini(drvdata);
183 	return rc;
184 }
185 
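/*
 * enqueue_seq() - Push a descriptor sequence into the HW queue by writing
 * all six words of each descriptor to the DSCRPTR_QUEUE_WORD0 register.
 */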
186 static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
187 			unsigned int seq_len)
188 {
189 	int i, w;
190 	void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
191 	struct device *dev = drvdata_to_dev(drvdata);
192 
193 	/*
194 	 * We do indeed write all 6 command words to the same
195 	 * register. The HW supports this.
196 	 */
197 
198 	for (i = 0; i < seq_len; i++) {
199 		for (w = 0; w <= 5; w++)
200 			writel_relaxed(seq[i].word[w], reg);
201 
202 		if (cc_dump_desc)
203 			dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
204 				i, seq[i].word[0], seq[i].word[1],
205 				seq[i].word[2], seq[i].word[3],
206 				seq[i].word[4], seq[i].word[5]);
207 	}
208 }
209 
210 /*!
211  * Completion will take place if and only if the user requested completion
212  * by cc_send_sync_request().
213  *
214  * \param dev
215  * \param dx_compl_h The completion event to signal
216  */
217 static void request_mgr_complete(struct device *dev, void *dx_compl_h,
218 				 int dummy)
219 {
220 	struct completion *this_compl = dx_compl_h;
221 
222 	complete(this_compl);
223 }
224 
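/*
 * cc_queues_status() - Check for room in both the SW request queue and the
 * HW descriptor queue. The SW queue is tested once; the HW free-slot count
 * is re-read up to CC_MAX_POLL_ITER times. Returns 0 if there is room for
 * total_seq_len descriptors, -ENOSPC otherwise.
 */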
225 static int cc_queues_status(struct cc_drvdata *drvdata,
226 			    struct cc_req_mgr_handle *req_mgr_h,
227 			    unsigned int total_seq_len)
228 {
229 	unsigned long poll_queue;
230 	struct device *dev = drvdata_to_dev(drvdata);
231 
232 	/* SW queue is checked only once as it will not
233 	 * be changed during the poll because the spinlock_bh
234 	 * is held by the thread
235 	 */
236 	if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
237 	    req_mgr_h->req_queue_tail) {
238 		dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
239 			req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
240 		return -ENOSPC;
241 	}
242 
243 	if (req_mgr_h->q_free_slots >= total_seq_len)
244 		return 0;
245 
246 	/* Wait for space in HW queue. Poll constant num of iterations. */
247 	for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER ; poll_queue++) {
248 		req_mgr_h->q_free_slots =
249 			cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
250 		if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
251 			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
252 
253 		if (req_mgr_h->q_free_slots >= total_seq_len) {
254 			/* If there is enough space, return */
255 			return 0;
256 		}
257 
258 		dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
259 			req_mgr_h->q_free_slots, total_seq_len);
260 	}
261 	/* No room in the HW queue; try again later */
262 	dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
263 		req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
264 		req_mgr_h->q_free_slots, total_seq_len);
265 	return -ENOSPC;
266 }
267 
268 /*!
269  * Enqueue caller request to crypto hardware.
270  * Must be called with the HW lock held and PM running.
271  *
272  * \param drvdata
273  * \param cc_req The request to enqueue
274  * \param desc The crypto sequence
275  * \param len The crypto sequence length
276  * \param add_comp If "true": add an artificial dout DMA to mark completion
277  *
278  * \return int Returns -EINPROGRESS or error code
279  */
280 static int cc_do_send_request(struct cc_drvdata *drvdata,
281 			      struct cc_crypto_req *cc_req,
282 			      struct cc_hw_desc *desc, unsigned int len,
283 				bool add_comp)
284 {
285 	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
286 	unsigned int used_sw_slots;
287 	unsigned int total_seq_len = len; /*initial sequence length*/
288 	struct device *dev = drvdata_to_dev(drvdata);
289 
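	/* Occupancy of the circular SW queue; the (head - tail) & mask arithmetic
	 * relies on MAX_REQUEST_QUEUE_SIZE being a power of two.
	 */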
290 	used_sw_slots = ((req_mgr_h->req_queue_head -
291 			  req_mgr_h->req_queue_tail) &
292 			 (MAX_REQUEST_QUEUE_SIZE - 1));
293 	if (used_sw_slots > req_mgr_h->max_used_sw_slots)
294 		req_mgr_h->max_used_sw_slots = used_sw_slots;
295 
296 	/* Enqueue request - must be done with the HW lock held */
297 	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
298 	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
299 				    (MAX_REQUEST_QUEUE_SIZE - 1);
300 	/* TODO: Use circ_buf.h ? */
301 
302 	dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
303 
304 	/*
305 	 * We are about to push a command to the HW via the command registers
306 	 * that may reference host memory. We need to issue a memory barrier
307 	 * to make sure there are no outstanding memory writes
308 	 */
309 	wmb();
310 
311 	/* STAT_PHASE_4: Push sequence */
312 
313 	enqueue_seq(drvdata, desc, len);
314 
315 	if (add_comp) {
316 		enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
317 		total_seq_len++;
318 	}
319 
320 	if (req_mgr_h->q_free_slots < total_seq_len) {
321 		/* This situation should never occur. It may indicate a problem
322 		 * with power resume. Set the free slot count to 0 and hope
323 		 * for the best.
324 		 */
325 		dev_err(dev, "HW free slot count mismatch.\n");
326 		req_mgr_h->q_free_slots = 0;
327 	} else {
328 		/* Update the free slots in HW queue */
329 		req_mgr_h->q_free_slots -= total_seq_len;
330 	}
331 
332 	/* Operation still in process */
333 	return -EINPROGRESS;
334 }
335 
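/*
 * cc_enqueue_backlog() - Append a backlog item to the backlog list under
 * bl_lock and schedule the completion tasklet so cc_proc_backlog() gets a
 * chance to push it to the HW queue.
 */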
336 static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
337 			       struct cc_bl_item *bli)
338 {
339 	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
340 	struct device *dev = drvdata_to_dev(drvdata);
341 
342 	spin_lock_bh(&mgr->bl_lock);
343 	list_add_tail(&bli->list, &mgr->backlog);
344 	++mgr->bl_len;
345 	dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
346 	spin_unlock_bh(&mgr->bl_lock);
347 	tasklet_schedule(&mgr->comptask);
348 }
349 
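/*
 * cc_proc_backlog() - Drain the backlog list: notify each request's owner
 * with -EINPROGRESS (once) and push the request to the HW queue when space
 * becomes available. Called from the completion handler.
 */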
350 static void cc_proc_backlog(struct cc_drvdata *drvdata)
351 {
352 	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
353 	struct cc_bl_item *bli;
354 	struct cc_crypto_req *creq;
355 	void *req;
356 	struct device *dev = drvdata_to_dev(drvdata);
357 	int rc;
358 
359 	spin_lock(&mgr->bl_lock);
360 
361 	while (mgr->bl_len) {
362 		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
363 		dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);
364 
365 		spin_unlock(&mgr->bl_lock);
366 
367 
368 		creq = &bli->creq;
369 		req = creq->user_arg;
370 
371 		/*
372 		 * Notify the request's owner that it is moving out of the backlog,
373 		 * but only if we haven't done so already.
374 		 */
375 		if (!bli->notif) {
376 			creq->user_cb(dev, req, -EINPROGRESS);
377 			bli->notif = true;
378 		}
379 
380 		spin_lock(&mgr->hw_lock);
381 
382 		rc = cc_queues_status(drvdata, mgr, bli->len);
383 		if (rc) {
384 			/*
385 			 * There is still no room in the FIFO for
386 			 * this request. Bail out. We'll return here
387 			 * on the next completion IRQ.
388 			 */
389 			spin_unlock(&mgr->hw_lock);
390 			return;
391 		}
392 
393 		rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
394 					bli->len, false);
395 
396 		spin_unlock(&mgr->hw_lock);
397 
398 		if (rc != -EINPROGRESS) {
399 			cc_pm_put_suspend(dev);
400 			creq->user_cb(dev, req, rc);
401 		}
402 
403 		/* Remove ourselves from the backlog list */
404 		spin_lock(&mgr->bl_lock);
405 		list_del(&bli->list);
406 		--mgr->bl_len;
407 	}
408 
409 	spin_unlock(&mgr->bl_lock);
410 }
411 
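/*
 * cc_send_request() - Main asynchronous entry point. Takes a PM reference,
 * checks queue space and, if there is room, pushes the descriptor sequence
 * to the HW. If the queues are full and the request allows backlogging
 * (CRYPTO_TFM_REQ_MAY_BACKLOG), the request is copied to a backlog item and
 * -EBUSY is returned.
 */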
412 int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
413 		    struct cc_hw_desc *desc, unsigned int len,
414 		    struct crypto_async_request *req)
415 {
416 	int rc;
417 	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
418 	struct device *dev = drvdata_to_dev(drvdata);
419 	bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
420 	gfp_t flags = cc_gfp_flags(req);
421 	struct cc_bl_item *bli;
422 
423 	rc = cc_pm_get(dev);
424 	if (rc) {
425 		dev_err(dev, "cc_pm_get returned %x\n", rc);
426 		return rc;
427 	}
428 
429 	spin_lock_bh(&mgr->hw_lock);
430 	rc = cc_queues_status(drvdata, mgr, len);
431 
432 #ifdef CC_DEBUG_FORCE_BACKLOG
433 	if (backlog_ok)
434 		rc = -ENOSPC;
435 #endif /* CC_DEBUG_FORCE_BACKLOG */
436 
437 	if (rc == -ENOSPC && backlog_ok) {
438 		spin_unlock_bh(&mgr->hw_lock);
439 
440 		bli = kmalloc(sizeof(*bli), flags);
441 		if (!bli) {
442 			cc_pm_put_suspend(dev);
443 			return -ENOMEM;
444 		}
445 
446 		memcpy(&bli->creq, cc_req, sizeof(*cc_req));
447 		memcpy(&bli->desc, desc, len * sizeof(*desc));
448 		bli->len = len;
449 		bli->notif = false;
450 		cc_enqueue_backlog(drvdata, bli);
451 		return -EBUSY;
452 	}
453 
454 	if (!rc)
455 		rc = cc_do_send_request(drvdata, cc_req, desc, len, false);
456 
457 	spin_unlock_bh(&mgr->hw_lock);
458 	return rc;
459 }
460 
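/*
 * cc_send_sync_request() - Synchronous variant: enqueues the sequence with an
 * extra completion descriptor and blocks on seq_compl until the HW signals
 * completion. Waits on hw_queue_avail while the queues are full.
 */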
461 int cc_send_sync_request(struct cc_drvdata *drvdata,
462 			 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
463 			 unsigned int len)
464 {
465 	int rc;
466 	struct device *dev = drvdata_to_dev(drvdata);
467 	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
468 
469 	init_completion(&cc_req->seq_compl);
470 	cc_req->user_cb = request_mgr_complete;
471 	cc_req->user_arg = &cc_req->seq_compl;
472 
473 	rc = cc_pm_get(dev);
474 	if (rc) {
475 		dev_err(dev, "cc_pm_get returned %x\n", rc);
476 		return rc;
477 	}
478 
479 	while (true) {
480 		spin_lock_bh(&mgr->hw_lock);
481 		rc = cc_queues_status(drvdata, mgr, len + 1);
482 
483 		if (!rc)
484 			break;
485 
486 		spin_unlock_bh(&mgr->hw_lock);
487 		if (rc != -EAGAIN) {
488 			cc_pm_put_suspend(dev);
489 			return rc;
490 		}
491 		wait_for_completion_interruptible(&drvdata->hw_queue_avail);
492 		reinit_completion(&drvdata->hw_queue_avail);
493 	}
494 
495 	rc = cc_do_send_request(drvdata, cc_req, desc, len, true);
496 	spin_unlock_bh(&mgr->hw_lock);
497 
498 	if (rc != -EINPROGRESS) {
499 		cc_pm_put_suspend(dev);
500 		return rc;
501 	}
502 
503 	wait_for_completion(&cc_req->seq_compl);
504 	return 0;
505 }
506 
507 /*!
508  * Enqueue caller request to crypto hardware during init process.
509  * Assumes this function is not called in the middle of a flow,
510  * since we set the QUEUE_LAST_IND flag in the last descriptor.
511  *
512  * \param drvdata
513  * \param desc The crypto sequence
514  * \param len The crypto sequence length
515  *
516  * \return int Returns "0" upon success
517  */
518 int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
519 		      unsigned int len)
520 {
521 	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
522 	unsigned int total_seq_len = len; /*initial sequence length*/
523 	int rc = 0;
524 
525 	/* Wait for space in the HW and SW FIFOs; polls up to CC_MAX_POLL_ITER times.
526 	 */
527 	rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
528 	if (rc)
529 		return rc;
530 
531 	set_queue_last_ind(drvdata, &desc[(len - 1)]);
532 
533 	/*
534 	 * We are about to push a command to the HW via the command registers
535 	 * that may reference host memory. We need to issue a memory barrier
536 	 * to make sure there are no outstanding memory writes
537 	 */
538 	wmb();
539 	enqueue_seq(drvdata, desc, len);
540 
541 	/* Update the free slots in HW queue */
542 	req_mgr_h->q_free_slots =
543 		cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
544 
545 	return 0;
546 }
547 
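/*
 * complete_request() - Called when a completion interrupt fires: wake up any
 * waiter in cc_send_sync_request() and defer completion processing to the
 * workqueue or tasklet.
 */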
548 void complete_request(struct cc_drvdata *drvdata)
549 {
550 	struct cc_req_mgr_handle *request_mgr_handle =
551 						drvdata->request_mgr_handle;
552 
553 	complete(&drvdata->hw_queue_avail);
554 #ifdef COMP_IN_WQ
555 	queue_delayed_work(request_mgr_handle->workq,
556 			   &request_mgr_handle->compwork, 0);
557 #else
558 	tasklet_schedule(&request_mgr_handle->comptask);
559 #endif
560 }
561 
562 #ifdef COMP_IN_WQ
563 static void comp_work_handler(struct work_struct *work)
564 {
565 	struct cc_drvdata *drvdata =
566 		container_of(work, struct cc_drvdata, compwork.work);
567 
568 	comp_handler((unsigned long)drvdata);
569 }
570 #endif
571 
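/*
 * proc_completions() - Dequeue completed requests from the SW queue and
 * invoke their user callbacks. For CPP requests, report -EPERM if the
 * corresponding "operation aborted" interrupt bit is set. Drops the PM
 * reference taken when the request was submitted.
 */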
572 static void proc_completions(struct cc_drvdata *drvdata)
573 {
574 	struct cc_crypto_req *cc_req;
575 	struct device *dev = drvdata_to_dev(drvdata);
576 	struct cc_req_mgr_handle *request_mgr_handle =
577 						drvdata->request_mgr_handle;
578 	unsigned int *tail = &request_mgr_handle->req_queue_tail;
579 	unsigned int *head = &request_mgr_handle->req_queue_head;
580 	int rc;
581 	u32 mask;
582 
583 	while (request_mgr_handle->axi_completed) {
584 		request_mgr_handle->axi_completed--;
585 
586 		/* Dequeue request */
587 		if (*head == *tail) {
588 			/* We are supposed to handle a completion but our
589 			 * queue is empty. This is not normal. Return and
590 			 * hope for the best.
591 			 */
592 			dev_err(dev, "Request queue is empty head == tail %u\n",
593 				*head);
594 			break;
595 		}
596 
597 		cc_req = &request_mgr_handle->req_queue[*tail];
598 
599 		if (cc_req->cpp.is_cpp) {
600 
601 			dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
602 				cc_req->cpp.slot, cc_req->cpp.alg);
603 			mask = cc_cpp_int_mask(cc_req->cpp.alg,
604 					       cc_req->cpp.slot);
605 			rc = (drvdata->irq & mask ? -EPERM : 0);
606 			dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
607 				drvdata->irq, rc);
608 		} else {
609 			dev_dbg(dev, "Non-CPP request completion\n");
610 			rc = 0;
611 		}
612 
613 		if (cc_req->user_cb)
614 			cc_req->user_cb(dev, cc_req->user_arg, rc);
615 		*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
616 		dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
617 		dev_dbg(dev, "Request completed. axi_completed=%d\n",
618 			request_mgr_handle->axi_completed);
619 		cc_pm_put_suspend(dev);
620 	}
621 }
622 
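/* Read the number of completed AXI transactions from the AXIM monitor. */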
623 static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
624 {
625 	return FIELD_GET(AXIM_MON_COMP_VALUE,
626 			 cc_ioread(drvdata, drvdata->axim_mon_offset));
627 }
628 
629 /* Deferred service handler, run as interrupt-fired tasklet */
630 static void comp_handler(unsigned long devarg)
631 {
632 	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
633 	struct cc_req_mgr_handle *request_mgr_handle =
634 						drvdata->request_mgr_handle;
635 	struct device *dev = drvdata_to_dev(drvdata);
636 	u32 irq;
637 
638 	dev_dbg(dev, "Completion handler called!\n");
639 	irq = (drvdata->irq & drvdata->comp_mask);
640 
641 	/* To avoid the interrupt from firing as we unmask it,
642 	 * we clear it now
643 	 */
644 	cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
645 
646 	/* Avoid race with above clear: Test completion counter once more */
647 
648 	request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
649 
650 	dev_dbg(dev, "AXI completed count after update: %d\n",
651 		request_mgr_handle->axi_completed);
652 
653 	while (request_mgr_handle->axi_completed) {
654 		do {
655 			drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
656 			irq = (drvdata->irq & drvdata->comp_mask);
657 			proc_completions(drvdata);
658 
659 			/* At this point (after proc_completions()),
660 			 * request_mgr_handle->axi_completed is 0.
661 			 */
662 			request_mgr_handle->axi_completed +=
663 						cc_axi_comp_count(drvdata);
664 		} while (request_mgr_handle->axi_completed > 0);
665 
666 		cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
667 
668 		request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
669 	}
670 
671 	/* After verifying that there is nothing to do,
672 	 * unmask AXI completion interrupt
673 	 */
674 	cc_iowrite(drvdata, CC_REG(HOST_IMR),
675 		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);
676 
677 	cc_proc_backlog(drvdata);
678 	dev_dbg(dev, "Comp. handler done.\n");
679 }
680 
681 /*
682  * Resume the queue configuration: clear the runtime-suspended flag under
683  * the hw_lock.
684  */
685 #if defined(CONFIG_PM)
686 int cc_resume_req_queue(struct cc_drvdata *drvdata)
687 {
688 	struct cc_req_mgr_handle *request_mgr_handle =
689 		drvdata->request_mgr_handle;
690 
691 	spin_lock_bh(&request_mgr_handle->hw_lock);
692 	request_mgr_handle->is_runtime_suspended = false;
693 	spin_unlock_bh(&request_mgr_handle->hw_lock);
694 
695 	return 0;
696 }
697 
698 /*
699  * Suspend the queue configuration. Since it is used for runtime suspend
700  * only, just verify that the queue is idle before marking it suspended.
701  */
702 int cc_suspend_req_queue(struct cc_drvdata *drvdata)
703 {
704 	struct cc_req_mgr_handle *request_mgr_handle =
705 						drvdata->request_mgr_handle;
706 
707 	/* lock the send_request */
708 	spin_lock_bh(&request_mgr_handle->hw_lock);
709 	if (request_mgr_handle->req_queue_head !=
710 	    request_mgr_handle->req_queue_tail) {
711 		spin_unlock_bh(&request_mgr_handle->hw_lock);
712 		return -EBUSY;
713 	}
714 	request_mgr_handle->is_runtime_suspended = true;
715 	spin_unlock_bh(&request_mgr_handle->hw_lock);
716 
717 	return 0;
718 }
719 
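/* Report whether the request queue is currently runtime-suspended. */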
720 bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
721 {
722 	struct cc_req_mgr_handle *request_mgr_handle =
723 						drvdata->request_mgr_handle;
724 
725 	return request_mgr_handle->is_runtime_suspended;
726 }
727 
728 #endif
729