xref: /linux/drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #include "idpf_controlq.h"
5 
6 /**
7  * idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings
8  * @hw: pointer to hw struct
9  * @cq: pointer to the specific Control queue
10  */
11 static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
12 				     struct idpf_ctlq_info *cq)
13 {
14 	size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc);
15 
16 	cq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size);
17 	if (!cq->desc_ring.va)
18 		return -ENOMEM;
19 
20 	return 0;
21 }
22 
23 /**
24  * idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers
25  * @hw: pointer to hw struct
26  * @cq: pointer to the specific Control queue
27  *
28  * Allocate the buffer head for all control queues, and if it's a receive
29  * queue, allocate DMA buffers
30  */
31 static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,
32 				struct idpf_ctlq_info *cq)
33 {
34 	int i;
35 
36 	/* Do not allocate DMA buffers for transmit queues */
37 	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
38 		return 0;
39 
40 	/* We'll be allocating the buffer info memory first, then we can
41 	 * allocate the mapped buffers for the event processing
42 	 */
43 	cq->bi.rx_buff = kzalloc_objs(struct idpf_dma_mem *, cq->ring_size);
44 	if (!cq->bi.rx_buff)
45 		return -ENOMEM;
46 
47 	/* allocate the mapped buffers (except for the last one) */
48 	for (i = 0; i < cq->ring_size - 1; i++) {
49 		struct idpf_dma_mem *bi;
50 		int num = 1; /* number of idpf_dma_mem to be allocated */
51 
52 		cq->bi.rx_buff[i] = kzalloc_objs(struct idpf_dma_mem, num);
53 		if (!cq->bi.rx_buff[i])
54 			goto unwind_alloc_cq_bufs;
55 
56 		bi = cq->bi.rx_buff[i];
57 
58 		bi->va = idpf_alloc_dma_mem(hw, bi, cq->buf_size);
59 		if (!bi->va) {
60 			/* unwind will not free the failed entry */
61 			kfree(cq->bi.rx_buff[i]);
62 			goto unwind_alloc_cq_bufs;
63 		}
64 	}
65 
66 	return 0;
67 
68 unwind_alloc_cq_bufs:
69 	/* don't try to free the one that failed... */
70 	i--;
71 	for (; i >= 0; i--) {
72 		idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
73 		kfree(cq->bi.rx_buff[i]);
74 	}
75 	kfree(cq->bi.rx_buff);
76 
77 	return -ENOMEM;
78 }
79 
80 /**
81  * idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings
82  * @hw: pointer to hw struct
83  * @cq: pointer to the specific Control queue
84  *
85  * This assumes the posted send buffers have already been cleaned
86  * and de-allocated
87  */
88 static void idpf_ctlq_free_desc_ring(struct idpf_hw *hw,
89 				     struct idpf_ctlq_info *cq)
90 {
91 	idpf_free_dma_mem(hw, &cq->desc_ring);
92 }
93 
94 /**
95  * idpf_ctlq_free_bufs - Free CQ buffer info elements
96  * @hw: pointer to hw struct
97  * @cq: pointer to the specific Control queue
98  *
99  * Free the DMA buffers for RX queues, and DMA buffer header for both RX and TX
100  * queues.  The upper layers are expected to manage freeing of TX DMA buffers
101  */
102 static void idpf_ctlq_free_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
103 {
104 	void *bi;
105 
106 	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX) {
107 		int i;
108 
109 		/* free DMA buffers for rx queues*/
110 		for (i = 0; i < cq->ring_size; i++) {
111 			if (cq->bi.rx_buff[i]) {
112 				idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
113 				kfree(cq->bi.rx_buff[i]);
114 			}
115 		}
116 
117 		bi = (void *)cq->bi.rx_buff;
118 	} else {
119 		bi = (void *)cq->bi.tx_msg;
120 	}
121 
122 	/* free the buffer header */
123 	kfree(bi);
124 }
125 
/**
 * idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Tear down everything idpf_ctlq_alloc_ring_res() set up: the per-queue
 * buffers first, then the descriptor ring itself.
 */
void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	/* buffers must go before the ring they are posted to */
	idpf_ctlq_free_bufs(hw, cq);
	idpf_ctlq_free_desc_ring(hw, cq);
}
139 
/**
 * idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs
 * @hw: pointer to hw struct
 * @cq: pointer to control queue struct
 *
 * Do *NOT* hold cq_lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 *
 * Return: 0 on success, negative errno on failure; on failure the descriptor
 * ring is freed again so the queue is left fully unallocated.
 */
int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	int err;

	/* allocate the ring memory */
	err = idpf_ctlq_alloc_desc_ring(hw, cq);
	if (err)
		return err;

	/* allocate buffers in the rings */
	err = idpf_ctlq_alloc_bufs(hw, cq);
	if (err)
		goto idpf_init_cq_free_ring;

	/* success! */
	return 0;

idpf_init_cq_free_ring:
	/* use the dedicated helper so the unwind path stays consistent with
	 * idpf_ctlq_dealloc_ring_res()
	 */
	idpf_ctlq_free_desc_ring(hw, cq);

	return err;
}
170