// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf_controlq.h"

/**
 * idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
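 *
 * Return: 0 on success, -ENOMEM if the descriptor ring could not be allocated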
 */
static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
				     struct idpf_ctlq_info *cq)
{
	size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc);

	cq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size);
	if (!cq->desc_ring.va)
		return -ENOMEM;

	return 0;
}

/**
 * idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Allocate the buffer head for all control queues, and if it's a receive
 * queue, allocate DMA buffers
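 *
 * Return: 0 on success, -ENOMEM on allocation failure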
 */
static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,
				struct idpf_ctlq_info *cq)
{
	int i;

	/* Do not allocate DMA buffers for transmit queues */
	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
		return 0;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->bi.rx_buff = kcalloc(cq->ring_size, sizeof(struct idpf_dma_mem *),
				 GFP_KERNEL);
	if (!cq->bi.rx_buff)
		return -ENOMEM;

	/* allocate the mapped buffers (except for the last one) */
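	/*
	 * Note that the final descriptor deliberately gets no buffer here;
	 * presumably this keeps one slot unposted so the receive ring is
	 * never completely full (an inference, not stated in this file).
	 */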
	for (i = 0; i < cq->ring_size - 1; i++) {
		struct idpf_dma_mem *bi;
		int num = 1; /* number of idpf_dma_mem to be allocated */

		cq->bi.rx_buff[i] = kcalloc(num, sizeof(struct idpf_dma_mem),
					    GFP_KERNEL);
		if (!cq->bi.rx_buff[i])
			goto unwind_alloc_cq_bufs;

		bi = cq->bi.rx_buff[i];

		bi->va = idpf_alloc_dma_mem(hw, bi, cq->buf_size);
		if (!bi->va) {
			/* unwind will not free the failed entry */
			kfree(cq->bi.rx_buff[i]);
			goto unwind_alloc_cq_bufs;
		}
	}

	return 0;

unwind_alloc_cq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
		kfree(cq->bi.rx_buff[i]);
	}
	kfree(cq->bi.rx_buff);

	return -ENOMEM;
}

/**
 * idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 */
static void idpf_ctlq_free_desc_ring(struct idpf_hw *hw,
				     struct idpf_ctlq_info *cq)
{
	idpf_free_dma_mem(hw, &cq->desc_ring);
}

/**
 * idpf_ctlq_free_bufs - Free CQ buffer info elements
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Free the DMA buffers for RX queues, and DMA buffer header for both RX and TX
 * queues.  The upper layers are expected to manage freeing of TX DMA buffers
 */
static void idpf_ctlq_free_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	void *bi;

	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX) {
		int i;

		/* free DMA buffers for rx queues */
		for (i = 0; i < cq->ring_size; i++) {
			if (cq->bi.rx_buff[i]) {
				idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
				kfree(cq->bi.rx_buff[i]);
			}
		}

		bi = (void *)cq->bi.rx_buff;
	} else {
		bi = (void *)cq->bi.tx_msg;
	}

	/* free the buffer header */
	kfree(bi);
}

/**
 * idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Free the memory used by the ring, buffers and other related structures
 */
void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	/* free ring buffers and the ring itself */
	idpf_ctlq_free_bufs(hw, cq);
	idpf_ctlq_free_desc_ring(hw, cq);
}

/**
 * idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs
 * @hw: pointer to hw struct
 * @cq: pointer to control queue struct
 *
 * Do *NOT* hold cq_lock when calling this, as the memory allocation routines
 * called are not safe in atomic context
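 *
 * Return: 0 on success, -ENOMEM on allocation failure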
 */
int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	int err;

	/* allocate the ring memory */
	err = idpf_ctlq_alloc_desc_ring(hw, cq);
	if (err)
		return err;

	/* allocate buffers in the rings */
	err = idpf_ctlq_alloc_bufs(hw, cq);
	if (err)
		goto idpf_init_cq_free_ring;

	/* success! */
	return 0;

idpf_init_cq_free_ring:
	idpf_free_dma_mem(hw, &cq->desc_ring);

	return err;
}
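
/*
 * Usage sketch (illustrative only, not taken from this file): a caller is
 * expected to fill in the queue type, ring size and buffer size before
 * allocating ring resources, to do so outside cq_lock since the allocations
 * above may sleep, and to release everything with
 * idpf_ctlq_dealloc_ring_res() on teardown. The surrounding setup code and
 * sizing variables below are assumptions, not part of the driver.
 *
 *	cq->cq_type = IDPF_CTLQ_TYPE_MAILBOX_RX;
 *	cq->ring_size = ring_size;
 *	cq->buf_size = buf_size;
 *
 *	err = idpf_ctlq_alloc_ring_res(hw, cq);
 *	if (err)
 *		return err;
 *
 *	... use the control queue ...
 *
 *	idpf_ctlq_dealloc_ring_res(hw, cq);
 */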