// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf_controlq.h"

/**
 * idpf_ctlq_setup_regs - initialize control queue registers
 * @cq: pointer to the specific control queue
 * @q_create_info: struct containing info for the queue to be initialized
 */
static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
				 struct idpf_ctlq_create_info *q_create_info)
{
	/* set control queue registers in our local struct */
	cq->reg.head = q_create_info->reg.head;
	cq->reg.tail = q_create_info->reg.tail;
	cq->reg.len = q_create_info->reg.len;
	cq->reg.bah = q_create_info->reg.bah;
	cq->reg.bal = q_create_info->reg.bal;
	cq->reg.len_mask = q_create_info->reg.len_mask;
	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
	cq->reg.head_mask = q_create_info->reg.head_mask;
}

/**
 * idpf_ctlq_init_regs - Initialize control queue registers
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 * @is_rxq: true if receive control queue, false otherwise
 *
 * Initialize registers. The caller is expected to have already initialized the
 * descriptor ring memory and buffer memory
 */
static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
				bool is_rxq)
{
	/* Update tail to post pre-allocated buffers for rx queues */
	if (is_rxq)
		wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));

	/* For non-Mailbox control queues only TAIL needs to be set */
	if (cq->q_id != -1)
		return;

	/* Clear Head for both send and receive */
	wr32(hw, cq->reg.head, 0);

	/* set starting point */
	wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
	wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
}

/**
 * idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
 * @cq: pointer to the specific Control queue
 *
 * Record the address of the receive queue DMA buffers in the descriptors.
 * The buffers must have been previously allocated.
 */
static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
{
	int i;

	for (i = 0; i < cq->ring_size; i++) {
		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];

		/* No buffer to post to descriptor, continue */
		if (!bi)
			continue;

		desc->flags =
			cpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
		desc->opcode = 0;
		desc->datalen = cpu_to_le16(bi->size);
		desc->ret_val = 0;
		desc->v_opcode_dtype = 0;
		desc->v_retval = 0;
		desc->params.indirect.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.indirect.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.indirect.param0 = 0;
		desc->params.indirect.sw_cookie = 0;
		desc->params.indirect.v_flags = 0;
	}
}

/**
 * idpf_ctlq_shutdown - shutdown the CQ
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for any control queue
 */
static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	spin_lock(&cq->cq_lock);

	/* free ring buffers and the ring itself */
	idpf_ctlq_dealloc_ring_res(hw, cq);

	/* Set ring_size to 0 to indicate uninitialized queue */
	cq->ring_size = 0;

	spin_unlock(&cq->cq_lock);
}

/**
 * idpf_ctlq_add - add one control queue
 * @hw: pointer to hardware struct
 * @qinfo: info for queue to be created
 * @cq_out: (output) double pointer to control queue to be created
 *
 * Allocate and initialize a control queue and add it to the control queue list.
 * The cq parameter will be allocated/initialized and passed back to the caller
 * if no errors occur.
 *
 * Note: idpf_ctlq_init must be called prior to any calls to idpf_ctlq_add
 */
int idpf_ctlq_add(struct idpf_hw *hw,
		  struct idpf_ctlq_create_info *qinfo,
		  struct idpf_ctlq_info **cq_out)
{
	struct idpf_ctlq_info *cq;
	bool is_rxq = false;
	int err;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return -ENOMEM;

	cq->cq_type = qinfo->type;
	cq->q_id = qinfo->id;
	cq->buf_size = qinfo->buf_size;
	cq->ring_size = qinfo->len;

	cq->next_to_use = 0;
	cq->next_to_clean = 0;
	cq->next_to_post = cq->ring_size - 1;

	switch (qinfo->type) {
	case IDPF_CTLQ_TYPE_MAILBOX_RX:
		is_rxq = true;
		fallthrough;
	case IDPF_CTLQ_TYPE_MAILBOX_TX:
		err = idpf_ctlq_alloc_ring_res(hw, cq);
		break;
	default:
		err = -EBADR;
		break;
	}

	if (err)
		goto init_free_q;

	if (is_rxq) {
		idpf_ctlq_init_rxq_bufs(cq);
	} else {
		/* Allocate the array of msg pointers for TX queues */
		cq->bi.tx_msg = kcalloc(qinfo->len,
					sizeof(struct idpf_ctlq_msg *),
					GFP_KERNEL);
		if (!cq->bi.tx_msg) {
			err = -ENOMEM;
			goto init_dealloc_q_mem;
		}
	}

	idpf_ctlq_setup_regs(cq, qinfo);

	idpf_ctlq_init_regs(hw, cq, is_rxq);

	spin_lock_init(&cq->cq_lock);

	list_add(&cq->cq_list, &hw->cq_list_head);

	*cq_out = cq;

	return 0;

init_dealloc_q_mem:
	/* free ring buffers and the ring itself */
	idpf_ctlq_dealloc_ring_res(hw, cq);
init_free_q:
	kfree(cq);

	return err;
}
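
/*
 * Illustrative caller-side sketch (not part of the driver): adding a single
 * mailbox TX control queue once idpf_ctlq_init() has set up hw->cq_list_head.
 * The ring length and buffer size below are example values only, and a real
 * caller must also fill qinfo.reg with the device-specific register offsets.
 *
 *	struct idpf_ctlq_create_info qinfo = {
 *		.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
 *		.id = -1,
 *		.len = 64,
 *		.buf_size = 4096,
 *	};
 *	struct idpf_ctlq_info *cq;
 *	int err;
 *
 *	err = idpf_ctlq_add(hw, &qinfo, &cq);
 *	if (err)
 *		return err;
 */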

/**
 * idpf_ctlq_remove - deallocate and remove specified control queue
 * @hw: pointer to hardware struct
 * @cq: pointer to control queue to be removed
 */
void idpf_ctlq_remove(struct idpf_hw *hw,
		      struct idpf_ctlq_info *cq)
{
	list_del(&cq->cq_list);
	idpf_ctlq_shutdown(hw, cq);
	kfree(cq);
}

/**
 * idpf_ctlq_init - main initialization routine for all control queues
 * @hw: pointer to hardware struct
 * @num_q: number of queues to initialize
 * @q_info: array of structs containing info for each queue to be initialized
 *
 * This initializes any number and any type of control queues. This is an all
 * or nothing routine; if one fails, all previously allocated queues will be
 * destroyed. This must be called prior to using the individual add/remove
 * APIs.
 */
int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
		   struct idpf_ctlq_create_info *q_info)
{
	struct idpf_ctlq_info *cq, *tmp;
	int err;
	int i;

	INIT_LIST_HEAD(&hw->cq_list_head);

	for (i = 0; i < num_q; i++) {
		struct idpf_ctlq_create_info *qinfo = q_info + i;

		err = idpf_ctlq_add(hw, qinfo, &cq);
		if (err)
			goto init_destroy_qs;
	}

	return 0;

init_destroy_qs:
	list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
		idpf_ctlq_remove(hw, cq);

	return err;
}
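
/*
 * Illustrative sketch (not part of the driver): bringing up the default
 * mailbox pair. The create-info array is assumed to have been filled in by
 * the caller (types, lengths, buffer sizes and register offsets); only the
 * call pattern is shown here.
 *
 *	struct idpf_ctlq_create_info mbx_info[2];
 *	int err;
 *
 *	... fill mbx_info[0] as IDPF_CTLQ_TYPE_MAILBOX_TX and
 *	    mbx_info[1] as IDPF_CTLQ_TYPE_MAILBOX_RX ...
 *
 *	err = idpf_ctlq_init(hw, 2, mbx_info);
 *	if (err)
 *		return err;
 *
 * Teardown is symmetric: idpf_ctlq_deinit(hw) removes and frees every queue
 * left on hw->cq_list_head.
 */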

/**
 * idpf_ctlq_deinit - destroy all control queues
 * @hw: pointer to hw struct
 */
void idpf_ctlq_deinit(struct idpf_hw *hw)
{
	struct idpf_ctlq_info *cq, *tmp;

	list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
		idpf_ctlq_remove(hw, cq);
}

/**
 * idpf_ctlq_send - send command to Control Queue (CTQ)
 * @hw: pointer to hw struct
 * @cq: handle to control queue struct to send on
 * @num_q_msg: number of messages to send on control queue
 * @q_msg: pointer to array of queue messages to be sent
 *
 * The caller is expected to allocate DMAable buffers and pass them to the
 * send routine via the q_msg struct / control queue specific data struct.
 * The control queue will hold a reference to each send message until
 * the completion for that message has been cleaned.
 */
int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[])
{
	struct idpf_ctlq_desc *desc;
	int num_desc_avail;
	int err = 0;
	int i;

	spin_lock(&cq->cq_lock);

	/* Ensure there are enough descriptors to send all messages */
	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
		err = -ENOSPC;
		goto err_unlock;
	}

	for (i = 0; i < num_q_msg; i++) {
		struct idpf_ctlq_msg *msg = &q_msg[i];

		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);

		desc->opcode = cpu_to_le16(msg->opcode);
		desc->pfid_vfid = cpu_to_le16(msg->func_id);

		desc->v_opcode_dtype = cpu_to_le32(msg->cookie.mbx.chnl_opcode);
		desc->v_retval = cpu_to_le32(msg->cookie.mbx.chnl_retval);

		desc->flags = cpu_to_le16((msg->host_id & IDPF_HOST_ID_MASK) <<
					  IDPF_CTLQ_FLAG_HOST_ID_S);
		if (msg->data_len) {
			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;

			desc->datalen |= cpu_to_le16(msg->data_len);
			desc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_BUF);
			desc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_RD);

			/* Update the address values in the desc with the pa
			 * value for respective buffer
			 */
			desc->params.indirect.addr_high =
				cpu_to_le32(upper_32_bits(buff->pa));
			desc->params.indirect.addr_low =
				cpu_to_le32(lower_32_bits(buff->pa));

			memcpy(&desc->params, msg->ctx.indirect.context,
			       IDPF_INDIRECT_CTX_SIZE);
		} else {
			memcpy(&desc->params, msg->ctx.direct,
			       IDPF_DIRECT_CTX_SIZE);
		}

		/* Store buffer info */
		cq->bi.tx_msg[cq->next_to_use] = msg;

		(cq->next_to_use)++;
		if (cq->next_to_use == cq->ring_size)
			cq->next_to_use = 0;
	}

	/* Force memory write to complete before letting hardware
	 * know that there are new descriptors to fetch.
	 */
	dma_wmb();

	wr32(hw, cq->reg.tail, cq->next_to_use);

err_unlock:
	spin_unlock(&cq->cq_lock);

	return err;
}
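
/*
 * Illustrative sketch (not part of the driver): sending one message with an
 * indirect (DMA) payload on an assumed TX mailbox queue "asq". The opcode,
 * chnl_opcode, payload_len and dma_mem names are placeholders supplied by the
 * caller, not values defined in this file.
 *
 *	struct idpf_ctlq_msg *msg;
 *	int err;
 *
 *	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
 *	if (!msg)
 *		return -ENOMEM;
 *
 *	msg->opcode = send_opcode;
 *	msg->data_len = payload_len;
 *	msg->cookie.mbx.chnl_opcode = chnl_opcode;
 *	msg->ctx.indirect.payload = dma_mem;
 *
 *	err = idpf_ctlq_send(hw, asq, 1, msg);
 *
 * The queue keeps the msg pointer until idpf_ctlq_clean_sq() reports its
 * completion, so the message and its DMA buffer must stay valid until then.
 */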

/**
 * idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the
 * requested queue
 * @cq: pointer to the specific Control queue
 * @clean_count: (input|output) number of descriptors to clean as input, and
 * number of descriptors actually cleaned as output
 * @msg_status: (output) pointer to msg pointer array to be populated; needs
 * to be allocated by caller
 *
 * Returns an array of message pointers associated with the cleaned
 * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
 * descriptors. The status will be returned for each; any messages that failed
 * to send will have a non-zero status. The caller is expected to free original
 * ctlq_msgs and free or reuse the DMA buffers.
 */
int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
		       struct idpf_ctlq_msg *msg_status[])
{
	struct idpf_ctlq_desc *desc;
	u16 i, num_to_clean;
	u16 ntc, desc_err;

	if (*clean_count == 0)
		return 0;
	if (*clean_count > cq->ring_size)
		return -EBADR;

	spin_lock(&cq->cq_lock);

	ntc = cq->next_to_clean;

	num_to_clean = *clean_count;

	for (i = 0; i < num_to_clean; i++) {
		/* Fetch next descriptor and check if marked as done */
		desc = IDPF_CTLQ_DESC(cq, ntc);
		if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD))
			break;

		/* Ensure no other fields are read until DD flag is checked */
		dma_rmb();

		/* strip off FW internal code */
		desc_err = le16_to_cpu(desc->ret_val) & 0xff;

		msg_status[i] = cq->bi.tx_msg[ntc];
		msg_status[i]->status = desc_err;

		cq->bi.tx_msg[ntc] = NULL;

		/* Zero out any stale data */
		memset(desc, 0, sizeof(*desc));

		ntc++;
		if (ntc == cq->ring_size)
			ntc = 0;
	}

	cq->next_to_clean = ntc;

	spin_unlock(&cq->cq_lock);

	/* Return number of descriptors actually cleaned */
	*clean_count = i;

	return 0;
}
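
/*
 * Illustrative sketch (not part of the driver): reclaiming completed send
 * descriptors from an assumed TX mailbox queue "asq". The batch size of 16
 * and how the payloads are freed or reused are example choices by the caller.
 *
 *	struct idpf_ctlq_msg *stale[16];
 *	u16 num = 16;
 *	u16 i;
 *	int err;
 *
 *	err = idpf_ctlq_clean_sq(asq, &num, stale);
 *	for (i = 0; i < num; i++) {
 *		if (!stale[i])
 *			continue;
 *		... free or reuse stale[i]->ctx.indirect.payload ...
 *		kfree(stale[i]);
 *	}
 *
 * On return, num holds the number of descriptors actually cleaned, which may
 * be less than requested if hardware has not yet written back the DD flag.
 */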

/**
 * idpf_ctlq_post_rx_buffs - post buffers to descriptor ring
 * @hw: pointer to hw struct
 * @cq: pointer to control queue handle
 * @buff_count: (input|output) input is number of buffers caller is trying to
 * return; output is number of buffers that were not posted
 * @buffs: array of pointers to dma mem structs to be given to hardware
 *
 * Caller uses this function to return DMA buffers to the descriptor ring after
 * consuming them; buff_count will be the number of buffers.
 *
 * Note: this function needs to be called after a receive call even
 * if there are no DMA buffers to be returned, i.e. buff_count = 0,
 * buffs = NULL to support direct commands
 */
int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
			    u16 *buff_count, struct idpf_dma_mem **buffs)
{
	struct idpf_ctlq_desc *desc;
	u16 ntp = cq->next_to_post;
	bool buffs_avail = false;
	u16 tbp = ntp + 1;
	int i = 0;

	if (*buff_count > cq->ring_size)
		return -EBADR;

	if (*buff_count > 0)
		buffs_avail = true;

	spin_lock(&cq->cq_lock);

	if (tbp >= cq->ring_size)
		tbp = 0;

	if (tbp == cq->next_to_clean)
		/* Nothing to do */
		goto post_buffs_out;

	/* Post buffers for as many as provided or up until the last one used */
	while (ntp != cq->next_to_clean) {
		desc = IDPF_CTLQ_DESC(cq, ntp);

		if (cq->bi.rx_buff[ntp])
			goto fill_desc;
		if (!buffs_avail) {
			/* If the caller hasn't given us any buffers or
			 * there are none left, search the ring itself
			 * for an available buffer to move to this
			 * entry starting at the next entry in the ring
			 */
			tbp = ntp + 1;

			/* Wrap ring if necessary */
			if (tbp >= cq->ring_size)
				tbp = 0;

			while (tbp != cq->next_to_clean) {
				if (cq->bi.rx_buff[tbp]) {
					cq->bi.rx_buff[ntp] =
						cq->bi.rx_buff[tbp];
					cq->bi.rx_buff[tbp] = NULL;

					/* Found a buffer, no need to
					 * search anymore
					 */
					break;
				}

				/* Wrap ring if necessary */
				tbp++;
				if (tbp >= cq->ring_size)
					tbp = 0;
			}

			if (tbp == cq->next_to_clean)
				goto post_buffs_out;
		} else {
			/* Give back pointer to DMA buffer */
			cq->bi.rx_buff[ntp] = buffs[i];
			i++;

			if (i >= *buff_count)
				buffs_avail = false;
		}

fill_desc:
		desc->flags =
			cpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);

		/* Post buffers to descriptor */
		desc->datalen = cpu_to_le16(cq->bi.rx_buff[ntp]->size);
		desc->params.indirect.addr_high =
			cpu_to_le32(upper_32_bits(cq->bi.rx_buff[ntp]->pa));
		desc->params.indirect.addr_low =
			cpu_to_le32(lower_32_bits(cq->bi.rx_buff[ntp]->pa));

		ntp++;
		if (ntp == cq->ring_size)
			ntp = 0;
	}

post_buffs_out:
	/* Only update tail if buffers were actually posted */
	if (cq->next_to_post != ntp) {
		if (ntp)
			/* Update next_to_post to ntp - 1 since current ntp
			 * will not have a buffer
			 */
			cq->next_to_post = ntp - 1;
		else
			/* Wrap to end of ring since current ntp is 0 */
			cq->next_to_post = cq->ring_size - 1;

		dma_wmb();

		wr32(hw, cq->reg.tail, cq->next_to_post);
	}

	spin_unlock(&cq->cq_lock);

	/* return the number of buffers that were not posted */
	*buff_count = *buff_count - i;

	return 0;
}
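
/*
 * Illustrative sketch (not part of the driver): returning a single consumed
 * buffer to an assumed RX mailbox queue "arq"; dma_mem is a placeholder for
 * the buffer previously handed out by idpf_ctlq_recv(). Passing buff_count = 0
 * and buffs = NULL is also valid (and still required after receiving direct,
 * bufferless messages) so the ring can be refilled from buffers it already
 * holds.
 *
 *	struct idpf_dma_mem *bufs[] = { dma_mem };
 *	u16 nbuf = 1;
 *	int err;
 *
 *	err = idpf_ctlq_post_rx_buffs(hw, arq, &nbuf, bufs);
 *
 * On return, nbuf holds the number of buffers that could not be posted.
 */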

/**
 * idpf_ctlq_recv - receive control queue message call back
 * @cq: pointer to control queue handle to receive on
 * @num_q_msg: (input|output) input number of messages that should be received;
 * output number of messages actually received
 * @q_msg: (output) array of received control queue messages on this q;
 * needs to be pre-allocated by caller for as many messages as requested
 *
 * Called by interrupt handler or polling mechanism. Caller is expected
 * to free buffers
 */
int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
		   struct idpf_ctlq_msg *q_msg)
{
	u16 num_to_clean, ntc, flags;
	struct idpf_ctlq_desc *desc;
	int err = 0;
	u16 i;

	/* take the lock before we start messing with the ring */
	spin_lock(&cq->cq_lock);

	ntc = cq->next_to_clean;

	num_to_clean = *num_q_msg;

	for (i = 0; i < num_to_clean; i++) {
		/* Fetch next descriptor and check if marked as done */
		desc = IDPF_CTLQ_DESC(cq, ntc);
		flags = le16_to_cpu(desc->flags);

		if (!(flags & IDPF_CTLQ_FLAG_DD))
			break;

		/* Ensure no other fields are read until DD flag is checked */
		dma_rmb();

		q_msg[i].vmvf_type = (flags &
				      (IDPF_CTLQ_FLAG_FTYPE_VM |
				       IDPF_CTLQ_FLAG_FTYPE_PF)) >>
				       IDPF_CTLQ_FLAG_FTYPE_S;

		if (flags & IDPF_CTLQ_FLAG_ERR)
			err = -EBADMSG;

		q_msg[i].cookie.mbx.chnl_opcode =
				le32_to_cpu(desc->v_opcode_dtype);
		q_msg[i].cookie.mbx.chnl_retval =
				le32_to_cpu(desc->v_retval);

		q_msg[i].opcode = le16_to_cpu(desc->opcode);
		q_msg[i].data_len = le16_to_cpu(desc->datalen);
		q_msg[i].status = le16_to_cpu(desc->ret_val);

		if (desc->datalen) {
			memcpy(q_msg[i].ctx.indirect.context,
			       &desc->params.indirect, IDPF_INDIRECT_CTX_SIZE);

			/* Assign pointer to dma buffer to ctlq_msg array
			 * to be given to upper layer
			 */
			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];

			/* Zero out pointer to DMA buffer info;
			 * will be repopulated by post buffers API
			 */
			cq->bi.rx_buff[ntc] = NULL;
		} else {
			memcpy(q_msg[i].ctx.direct, desc->params.raw,
			       IDPF_DIRECT_CTX_SIZE);
		}

		/* Zero out stale data in descriptor */
		memset(desc, 0, sizeof(struct idpf_ctlq_desc));

		ntc++;
		if (ntc == cq->ring_size)
			ntc = 0;
	}

	cq->next_to_clean = ntc;

	spin_unlock(&cq->cq_lock);

	*num_q_msg = i;
	if (*num_q_msg == 0)
		err = -ENOMSG;

	return err;
}
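
/*
 * Illustrative sketch (not part of the driver): a polling receive loop on an
 * assumed RX mailbox queue "arq". The processing step is a placeholder; the
 * point is the recv -> process -> post-back ordering, since buffers handed out
 * by idpf_ctlq_recv() are only returned to hardware by
 * idpf_ctlq_post_rx_buffs().
 *
 *	struct idpf_ctlq_msg msg;
 *	u16 num, nbuf;
 *	int err;
 *
 *	for (;;) {
 *		num = 1;
 *		err = idpf_ctlq_recv(arq, &num, &msg);
 *		if (!num)
 *			break;	(err is -ENOMSG when nothing was received)
 *
 *		... process msg; err == -EBADMSG marks a message-level error ...
 *
 *		nbuf = msg.data_len ? 1 : 0;
 *		idpf_ctlq_post_rx_buffs(hw, arq, &nbuf,
 *					nbuf ? &msg.ctx.indirect.payload : NULL);
 *	}
 */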