// SPDX-License-Identifier: GPL-2.0
/* Copyright Sunplus Technology Co., Ltd.
 *       All rights reserved.
 */

#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>

#include "spl2sw_define.h"
#include "spl2sw_desc.h"

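/* Restore the buffer address and size in every rx descriptor, mark the last
 * descriptor of each queue with RXD_EOR, and hand ownership back to the MAC
 * by setting RXD_OWN.
 */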
void spl2sw_rx_descs_flush(struct spl2sw_common *comm)
{
	struct spl2sw_skb_info *rx_skbinfo;
	struct spl2sw_mac_desc *rx_desc;
	u32 i, j;

	for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
		rx_desc = comm->rx_desc[i];
		rx_skbinfo = comm->rx_skb_info[i];
		for (j = 0; j < comm->rx_desc_num[i]; j++) {
			rx_desc[j].addr1 = rx_skbinfo[j].mapping;
			rx_desc[j].cmd2 = (j == comm->rx_desc_num[i] - 1) ?
					  RXD_EOR | comm->rx_desc_buff_size :
					  comm->rx_desc_buff_size;
			wmb();	/* Set RXD_OWN after other fields are ready. */
			rx_desc[j].cmd1 = RXD_OWN;
		}
	}
}

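/* Clear all tx descriptors, unmap any buffer still mapped for DMA and free
 * the associated socket buffers.
 */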
void spl2sw_tx_descs_clean(struct spl2sw_common *comm)
{
	u32 i;

	if (!comm->tx_desc)
		return;

	for (i = 0; i < TX_DESC_NUM; i++) {
		comm->tx_desc[i].cmd1 = 0;
		wmb();	/* Clear TXD_OWN and then set other fields. */
		comm->tx_desc[i].cmd2 = 0;
		comm->tx_desc[i].addr1 = 0;
		comm->tx_desc[i].addr2 = 0;

		if (comm->tx_temp_skb_info[i].mapping) {
			dma_unmap_single(&comm->pdev->dev, comm->tx_temp_skb_info[i].mapping,
					 comm->tx_temp_skb_info[i].skb->len, DMA_TO_DEVICE);
			comm->tx_temp_skb_info[i].mapping = 0;
		}

		if (comm->tx_temp_skb_info[i].skb) {
			dev_kfree_skb_any(comm->tx_temp_skb_info[i].skb);
			comm->tx_temp_skb_info[i].skb = NULL;
		}
	}
}

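/* Clear all rx descriptors, unmap and free the receive buffers, and release
 * the per-queue skb-info arrays.
 */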
void spl2sw_rx_descs_clean(struct spl2sw_common *comm)
{
	struct spl2sw_skb_info *rx_skbinfo;
	struct spl2sw_mac_desc *rx_desc;
	u32 i, j;

	for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
		if (!comm->rx_skb_info[i])
			continue;

		rx_desc = comm->rx_desc[i];
		rx_skbinfo = comm->rx_skb_info[i];
		for (j = 0; j < comm->rx_desc_num[i]; j++) {
			rx_desc[j].cmd1 = 0;
			wmb();	/* Clear RXD_OWN and then set other fields. */
			rx_desc[j].cmd2 = 0;
			rx_desc[j].addr1 = 0;

			if (rx_skbinfo[j].skb) {
				dma_unmap_single(&comm->pdev->dev, rx_skbinfo[j].mapping,
						 comm->rx_desc_buff_size, DMA_FROM_DEVICE);
				dev_kfree_skb_any(rx_skbinfo[j].skb);
				rx_skbinfo[j].skb = NULL;
				rx_skbinfo[j].mapping = 0;
			}
		}

		kfree(rx_skbinfo);
		comm->rx_skb_info[i] = NULL;
	}
}

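/* Clean both the rx and tx descriptor rings. */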
void spl2sw_descs_clean(struct spl2sw_common *comm)
{
	spl2sw_rx_descs_clean(comm);
	spl2sw_tx_descs_clean(comm);
}

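/* Clean the descriptor rings and free the shared descriptor area. */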
void spl2sw_descs_free(struct spl2sw_common *comm)
{
	u32 i;

	spl2sw_descs_clean(comm);
	comm->tx_desc = NULL;
	for (i = 0; i < RX_DESC_QUEUE_NUM; i++)
		comm->rx_desc[i] = NULL;

	/* Free descriptor area */
	if (comm->desc_base) {
		dma_free_coherent(&comm->pdev->dev, comm->desc_size, comm->desc_base,
				  comm->desc_dma);
		comm->desc_base = NULL;
		comm->desc_dma = 0;
		comm->desc_size = 0;
	}
}

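/* Zero the tx descriptors, including the guard descriptors. */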
void spl2sw_tx_descs_init(struct spl2sw_common *comm)
{
	memset(comm->tx_desc, '\0', sizeof(struct spl2sw_mac_desc) *
	       (TX_DESC_NUM + MAC_GUARD_DESC_NUM));
}

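/* Allocate the skb-info arrays and receive buffers for every rx queue, map
 * the buffers for DMA and hand the descriptors to the MAC. On allocation or
 * mapping failure, everything set up so far is cleaned up and -ENOMEM is
 * returned.
 */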
int spl2sw_rx_descs_init(struct spl2sw_common *comm)
{
	struct spl2sw_skb_info *rx_skbinfo;
	struct spl2sw_mac_desc *rx_desc;
	struct sk_buff *skb;
	u32 mapping;
	u32 i, j;

	for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
		comm->rx_skb_info[i] = kcalloc(comm->rx_desc_num[i], sizeof(*rx_skbinfo),
					       GFP_KERNEL | GFP_DMA);
		if (!comm->rx_skb_info[i])
			goto mem_alloc_fail;

		rx_skbinfo = comm->rx_skb_info[i];
		rx_desc = comm->rx_desc[i];
		for (j = 0; j < comm->rx_desc_num[i]; j++) {
			skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size);
			if (!skb)
				goto mem_alloc_fail;

			rx_skbinfo[j].skb = skb;
			mapping = dma_map_single(&comm->pdev->dev, skb->data,
						 comm->rx_desc_buff_size,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(&comm->pdev->dev, mapping))
				goto mem_alloc_fail;

			rx_skbinfo[j].mapping = mapping;
			rx_desc[j].addr1 = mapping;
			rx_desc[j].addr2 = 0;
			rx_desc[j].cmd2 = (j == comm->rx_desc_num[i] - 1) ?
					  RXD_EOR | comm->rx_desc_buff_size :
					  comm->rx_desc_buff_size;
			wmb();	/* Set RXD_OWN after other fields are effective. */
			rx_desc[j].cmd1 = RXD_OWN;
		}
	}

	return 0;

mem_alloc_fail:
	spl2sw_rx_descs_clean(comm);
	return -ENOMEM;
}

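/* Allocate one coherent DMA area holding the tx descriptors (plus the guard
 * descriptors) followed by the rx descriptors of all queues, and point the
 * per-ring pointers into it.
 */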
int spl2sw_descs_alloc(struct spl2sw_common *comm)
{
	s32 desc_size;
	u32 i;

	/* Alloc descriptor area */
	desc_size = (TX_DESC_NUM + MAC_GUARD_DESC_NUM) * sizeof(struct spl2sw_mac_desc);
	for (i = 0; i < RX_DESC_QUEUE_NUM; i++)
		desc_size += comm->rx_desc_num[i] * sizeof(struct spl2sw_mac_desc);

	comm->desc_base = dma_alloc_coherent(&comm->pdev->dev, desc_size, &comm->desc_dma,
					     GFP_KERNEL);
	if (!comm->desc_base)
		return -ENOMEM;

	comm->desc_size = desc_size;

	/* Setup Tx descriptor */
	comm->tx_desc = comm->desc_base;

	/* Setup Rx descriptor */
	comm->rx_desc[0] = &comm->tx_desc[TX_DESC_NUM + MAC_GUARD_DESC_NUM];
	for (i = 1; i < RX_DESC_QUEUE_NUM; i++)
		comm->rx_desc[i] = comm->rx_desc[i - 1] + comm->rx_desc_num[i - 1];

	return 0;
}

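/* Reset the descriptor bookkeeping, allocate the descriptor area and set up
 * the tx and rx rings.
 */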
int spl2sw_descs_init(struct spl2sw_common *comm)
{
	u32 i, ret;

	/* Initialize rx descriptor's data */
	comm->rx_desc_num[0] = RX_QUEUE0_DESC_NUM;
	comm->rx_desc_num[1] = RX_QUEUE1_DESC_NUM;

	for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
		comm->rx_desc[i] = NULL;
		comm->rx_skb_info[i] = NULL;
		comm->rx_pos[i] = 0;
	}
	comm->rx_desc_buff_size = MAC_RX_LEN_MAX;

	/* Initialize tx descriptor's data */
	comm->tx_done_pos = 0;
	comm->tx_desc = NULL;
	comm->tx_pos = 0;
	comm->tx_desc_full = 0;
	for (i = 0; i < TX_DESC_NUM; i++)
		comm->tx_temp_skb_info[i].skb = NULL;

	/* Allocate tx & rx descriptors. */
	ret = spl2sw_descs_alloc(comm);
	if (ret)
		return ret;

	spl2sw_tx_descs_init(comm);

	return spl2sw_rx_descs_init(comm);
}