/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#ifndef OTX2_TXRX_H
#define OTX2_TXRX_H

#include <linux/etherdevice.h>
#include <linux/iommu.h>
#include <linux/if_vlan.h>
#include <net/xdp.h>
#include <net/xdp_sock_drv.h>

#define LBK_CHAN_BASE	0x000
#define SDP_CHAN_BASE	0x700
#define CGX_CHAN_BASE	0x800

#define OTX2_DATA_ALIGN(X)	ALIGN(X, OTX2_ALIGN)
#define OTX2_HEAD_ROOM		OTX2_ALIGN

#define	OTX2_ETH_HLEN		(VLAN_ETH_HLEN + VLAN_HLEN)
#define	OTX2_MIN_MTU		60

#define OTX2_PAGE_POOL_SZ	2048

#define OTX2_MAX_GSO_SEGS	255
#define OTX2_MAX_FRAGS_IN_SQE	9

#define MAX_XDP_MTU	(1530 - OTX2_ETH_HLEN)

/* Rx buffer size should be in multiples of 128 bytes */
#define RCV_FRAG_LEN1(x)				\
		((OTX2_HEAD_ROOM + OTX2_DATA_ALIGN(x)) + \
		OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* Prefer 2048 byte buffers for better last level cache
 * utilization or data distribution across regions.
 */
#define RCV_FRAG_LEN(x)	\
		((RCV_FRAG_LEN1(x) < 2048) ? 2048 : RCV_FRAG_LEN1(x))
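
/* Worked example (a sketch under stated assumptions: OTX2_ALIGN is 128 and
 * sizeof(struct skb_shared_info) is roughly 320 bytes on a typical 64-bit
 * build):
 *
 *	RCV_FRAG_LEN1(1500) = 128 + ALIGN(1500, 128) + ALIGN(320, 128)
 *			    = 128 + 1536 + 384
 *			    = 2048
 *
 * so RCV_FRAG_LEN(1500) stays at the preferred 2048 bytes; only larger
 * requests grow the receive buffer beyond that floor.
 */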

#define DMA_BUFFER_LEN(x)	((x) - OTX2_HEAD_ROOM)

/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
 * is equal to this value.
 */
#define CQ_CQE_THRESH_DEFAULT	10

/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
 * is nonzero and this much time elapses after that.
 */
#define CQ_TIMER_THRESH_DEFAULT	1  /* 1 usec */
#define CQ_TIMER_THRESH_MAX     25 /* 25 usec */
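
/* Putting the two thresholds together (an illustrative reading, not a
 * definition from this header): with the defaults the completion interrupt
 * is expected once 10 CQEs have accumulated, or about 1 usec after the first
 * CQE shows up, whichever comes first; the exact behaviour depends on how
 * the driver programs the NIX_LF_CINTX_* registers.
 */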

/* Min number of CQs (of the ones mapped to this CINT)
 * with valid CQEs.
 */
#define CQ_QCOUNT_DEFAULT	1

#define CQ_OP_STAT_OP_ERR       63
#define CQ_OP_STAT_CQ_ERR       46

/* Packet mark mask */
#define OTX2_RX_MATCH_ID_MASK 0x0000ffff
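
/* Only the low 16 bits of the Rx CQE match_id carry the packet mark.
 * Illustrative use in an Rx handler (an assumption, not defined here):
 *
 *	skb->mark = match_id & OTX2_RX_MATCH_ID_MASK;
 */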

struct queue_stats {
	u64	bytes;
	u64	pkts;
};

struct otx2_rcv_queue {
	struct queue_stats	stats;
};

struct sg_list {
	u16	num_segs;
	u16	flags;
	u64	skb;
	u64	size[OTX2_MAX_FRAGS_IN_SQE];
	u64	dma_addr[OTX2_MAX_FRAGS_IN_SQE];
};

struct otx2_snd_queue {
	u8			aura_id;
	u16			head;
	u16			cons_head;
	u16			sqe_size;
	u32			sqe_cnt;
	u16			num_sqbs;
	u16			sqe_thresh;
	u8			sqe_per_sqb;
	u64			io_addr;
	u64			*aura_fc_addr;
	u64			*lmt_addr;
	void			*sqe_base;
	struct qmem		*sqe;
	struct qmem		*tso_hdrs;
	struct sg_list		*sg;
	struct qmem		*timestamps;
	struct queue_stats	stats;
	u16			sqb_count;
	u64			*sqb_ptrs;
	/* SQE ring and CPT response queue for Inline IPSEC */
	struct qmem		*sqe_ring;
	struct qmem		*cpt_resp;
	/* Buffer pool for af_xdp zero-copy */
	struct xsk_buff_pool    *xsk_pool;
} ____cacheline_aligned_in_smp;

enum cq_type {
	CQ_RX,
	CQ_TX,
	CQ_XDP,
	CQ_QOS,
	CQS_PER_CINT = 4, /* RQ + SQ + XDP + QOS_SQ */
};

struct otx2_cq_poll {
	void			*dev;
#define CINT_INVALID_CQ		255
	u8			cint_idx;
	u8			cq_ids[CQS_PER_CINT];
	struct dim		dim;
	struct napi_struct	napi;
};
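
/* Orientation note (a reading of the surrounding driver code, not something
 * this header defines): cq_ids[] has one slot per enum cq_type entry above
 * (RX, TX, XDP, QOS), and a slot with no CQ mapped to this CINT is expected
 * to hold CINT_INVALID_CQ.
 */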

struct otx2_pool {
	struct qmem		*stack;
	struct qmem		*fc_addr;
	struct page_pool	*page_pool;
	struct xsk_buff_pool	*xsk_pool;
	struct xdp_buff		**xdp;
	u16			xdp_cnt;
	u16			rbsize;
	u16			xdp_top;
};

struct otx2_cq_queue {
	u8			cq_idx;
	u8			cq_type;
	u8			cint_idx; /* CQ interrupt id */
	u8			refill_task_sched;
	u16			cqe_size;
	u16			pool_ptrs;
	u32			cqe_cnt;
	u32			cq_head;
	u32			cq_tail;
	u32			pend_cqe;
	void			*cqe_base;
	struct qmem		*cqe;
	struct otx2_pool	*rbpool;
	bool			xsk_zc_en;
	struct xdp_rxq_info	xdp_rxq;
} ____cacheline_aligned_in_smp;

struct otx2_qset {
	u32			rqe_cnt;
	u32			sqe_cnt; /* Keep these two at top */
#define OTX2_MAX_CQ_CNT		64
	u16			cq_cnt;
	u16			xqe_size;
	struct otx2_pool	*pool;
	struct otx2_cq_poll	*napi;
	struct otx2_cq_queue	*cq;
	struct otx2_snd_queue	*sq;
	struct otx2_rcv_queue	*rq;
};

/* Translate IOVA to physical address */
static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr)
{
	/* Translation is installed only when IOMMU is present */
	if (likely(iommu_domain))
		return iommu_iova_to_phys(iommu_domain, dma_addr);
	return dma_addr;
}
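
/* Illustrative use (a sketch, assuming the otx2_nic instance exposes its
 * iommu_domain as in the rest of the driver): recover the virtual address
 * and page backing a receive buffer from the IOVA handed to hardware.
 *
 *	u64 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
 *	struct page *page = virt_to_head_page(phys_to_virt(pa));
 */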

int otx2_napi_handler(struct napi_struct *napi, int budget);
bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
			struct otx2_snd_queue *sq,
			struct sk_buff *skb, u16 qidx);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
		     int size, int qidx);
void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
		    int size, int qidx);
int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
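
/* Note on the pairs above (an interpretation of the naming, for orientation
 * only): otx2_* and cn10k_* are silicon-specific implementations of the same
 * operation; the driver is expected to select one of each pair at probe time
 * (e.g. through its hw_ops table) rather than call both.
 */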
#endif /* OTX2_TXRX_H */