/* drivers/net/ethernet/hisilicon/hns3/hns3_enet.h */
1 /* SPDX-License-Identifier: GPL-2.0+ */
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #ifndef __HNS3_ENET_H
5 #define __HNS3_ENET_H
6 
7 #include <linux/dim.h>
8 #include <linux/if_vlan.h>
9 #include <net/page_pool/types.h>
10 #include <asm/barrier.h>
11 
12 #include "hnae3.h"
13 
14 struct iphdr;
15 struct ipv6hdr;
16 
/* Bit positions for the per-netdev state word (hns3_nic_priv::state),
 * manipulated with test_bit()/set_bit() and friends — see
 * hns3_nic_resetting() below.
 */
enum hns3_nic_state {
	HNS3_NIC_STATE_TESTING,
	HNS3_NIC_STATE_RESETTING,
	HNS3_NIC_STATE_INITED,
	HNS3_NIC_STATE_DOWN,
	HNS3_NIC_STATE_DISABLED,
	HNS3_NIC_STATE_REMOVING,
	HNS3_NIC_STATE_SERVICE_INITED,
	HNS3_NIC_STATE_SERVICE_SCHED,
	/* NOTE(review): "STATE2" breaks the naming pattern of its siblings;
	 * presumably historical — confirm before renaming.
	 */
	HNS3_NIC_STATE2_RESET_REQUESTED,
	HNS3_NIC_STATE_HW_TX_CSUM_ENABLE,
	HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE,
	HNS3_NIC_STATE_TX_PUSH_ENABLE,
	HNS3_NIC_STATE_MAX	/* number of state bits, keep last */
};
32 
#define HNS3_MAX_PUSH_BD_NUM		2

/* Per-queue-pair ring register offsets, relative to the TQP io_base
 * (accessed via hns3_tqp_read_reg()/hns3_write_dev()).
 */
#define HNS3_RING_RX_RING_BASEADDR_L_REG	0x00000
#define HNS3_RING_RX_RING_BASEADDR_H_REG	0x00004
#define HNS3_RING_RX_RING_BD_NUM_REG		0x00008
#define HNS3_RING_RX_RING_BD_LEN_REG		0x0000C
#define HNS3_RING_RX_RING_TAIL_REG		0x00018
#define HNS3_RING_RX_RING_HEAD_REG		0x0001C
#define HNS3_RING_RX_RING_FBDNUM_REG		0x00020
#define HNS3_RING_RX_RING_PKTNUM_RECORD_REG	0x0002C

#define HNS3_RING_TX_RING_BASEADDR_L_REG	0x00040
#define HNS3_RING_TX_RING_BASEADDR_H_REG	0x00044
#define HNS3_RING_TX_RING_BD_NUM_REG		0x00048
#define HNS3_RING_TX_RING_TC_REG		0x00050
#define HNS3_RING_TX_RING_TAIL_REG		0x00058
#define HNS3_RING_TX_RING_HEAD_REG		0x0005C
#define HNS3_RING_TX_RING_FBDNUM_REG		0x00060
#define HNS3_RING_TX_RING_OFFSET_REG		0x00064
#define HNS3_RING_TX_RING_EBDNUM_REG		0x00068
#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG	0x0006C
#define HNS3_RING_TX_RING_EBD_OFFSET_REG	0x00070
#define HNS3_RING_TX_RING_BD_ERR_REG		0x00074
#define HNS3_RING_EN_REG			0x00090
#define HNS3_RING_RX_EN_REG			0x00098
#define HNS3_RING_TX_EN_REG			0x000D4
59 
#define HNS3_RX_HEAD_SIZE			256

#define HNS3_TX_TIMEOUT (5 * HZ)
#define HNS3_RING_NAME_LEN			16
#define HNS3_BUFFER_SIZE_2048			2048
#define HNS3_RING_MAX_PENDING			32760
#define HNS3_RING_MIN_PENDING			72
#define HNS3_RING_BD_MULTIPLE			8
/* MTU supported for a given MAC max frame size: strip the L2 header,
 * FCS and room for two (stacked) VLAN tags.
 */
#define HNS3_MAX_MTU(max_frm_size) \
	((max_frm_size) - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))

/* rx buffer size type encodings used in descriptors */
#define HNS3_BD_SIZE_512_TYPE			0
#define HNS3_BD_SIZE_1024_TYPE			1
#define HNS3_BD_SIZE_2048_TYPE			2
#define HNS3_BD_SIZE_4096_TYPE			3
76 
#define HNS3_RX_FLAG_VLAN_PRESENT		0x1
#define HNS3_RX_FLAG_L3ID_IPV4			0x0
#define HNS3_RX_FLAG_L3ID_IPV6			0x1
#define HNS3_RX_FLAG_L4ID_UDP			0x0
#define HNS3_RX_FLAG_L4ID_TCP			0x1

/* RX descriptor field layout; suffix convention: _S = bit shift,
 * _M = field mask, _B = single-bit position.
 * This group decodes the inner-header info word (l234_info).
 */
#define HNS3_RXD_DMAC_S				0
#define HNS3_RXD_DMAC_M				(0x3 << HNS3_RXD_DMAC_S)
#define HNS3_RXD_VLAN_S				2
#define HNS3_RXD_VLAN_M				(0x3 << HNS3_RXD_VLAN_S)
#define HNS3_RXD_L3ID_S				4
#define HNS3_RXD_L3ID_M				(0xf << HNS3_RXD_L3ID_S)
#define HNS3_RXD_L4ID_S				8
#define HNS3_RXD_L4ID_M				(0xf << HNS3_RXD_L4ID_S)
#define HNS3_RXD_FRAG_B				12
#define HNS3_RXD_STRP_TAGP_S			13
#define HNS3_RXD_STRP_TAGP_M			(0x3 << HNS3_RXD_STRP_TAGP_S)

/* error/GRO bits of the same word */
#define HNS3_RXD_L2E_B				16
#define HNS3_RXD_L3E_B				17
#define HNS3_RXD_L4E_B				18
#define HNS3_RXD_TRUNCAT_B			19
#define HNS3_RXD_HOI_B				20
#define HNS3_RXD_DOI_B				21
#define HNS3_RXD_OL3E_B				22
#define HNS3_RXD_OL4E_B				23
#define HNS3_RXD_GRO_COUNT_S			24
#define HNS3_RXD_GRO_COUNT_M			(0x3f << HNS3_RXD_GRO_COUNT_S)
#define HNS3_RXD_GRO_FIXID_B			30
#define HNS3_RXD_GRO_ECN_B			31

/* outer-header (tunnel) info fields */
#define HNS3_RXD_ODMAC_S			0
#define HNS3_RXD_ODMAC_M			(0x3 << HNS3_RXD_ODMAC_S)
#define HNS3_RXD_OVLAN_S			2
#define HNS3_RXD_OVLAN_M			(0x3 << HNS3_RXD_OVLAN_S)
#define HNS3_RXD_OL3ID_S			4
#define HNS3_RXD_OL3ID_M			(0xf << HNS3_RXD_OL3ID_S)
#define HNS3_RXD_OL4ID_S			8
#define HNS3_RXD_OL4ID_M			(0xf << HNS3_RXD_OL4ID_S)
#define HNS3_RXD_FBHI_S				12
#define HNS3_RXD_FBHI_M				(0x3 << HNS3_RXD_FBHI_S)
#define HNS3_RXD_FBLI_S				14
#define HNS3_RXD_FBLI_M				(0x3 << HNS3_RXD_FBLI_S)

/* packet type index (advanced RXD layout) */
#define HNS3_RXD_PTYPE_S			4
#define HNS3_RXD_PTYPE_M			GENMASK(11, 4)

/* bd_base_info word */
#define HNS3_RXD_BDTYPE_S			0
#define HNS3_RXD_BDTYPE_M			(0xf << HNS3_RXD_BDTYPE_S)
#define HNS3_RXD_VLD_B				4
#define HNS3_RXD_UDP0_B				5
#define HNS3_RXD_EXTEND_B			7
#define HNS3_RXD_FE_B				8
#define HNS3_RXD_LUM_B				9
#define HNS3_RXD_CRCP_B				10
#define HNS3_RXD_L3L4P_B			11
#define HNS3_RXD_TSIDX_S			12
#define HNS3_RXD_TSIDX_M			(0x3 << HNS3_RXD_TSIDX_S)
#define HNS3_RXD_TS_VLD_B			14
#define HNS3_RXD_LKBK_B				15
#define HNS3_RXD_GRO_SIZE_S			16
#define HNS3_RXD_GRO_SIZE_M			(0x3fff << HNS3_RXD_GRO_SIZE_S)
139 
/* TX descriptor field layout; suffix convention: _S = bit shift,
 * _M = field mask, _B = single-bit position.
 * First group: type_cs_vlan_tso byte (inner header type/offload flags).
 */
#define HNS3_TXD_L3T_S				0
#define HNS3_TXD_L3T_M				(0x3 << HNS3_TXD_L3T_S)
#define HNS3_TXD_L4T_S				2
#define HNS3_TXD_L4T_M				(0x3 << HNS3_TXD_L4T_S)
#define HNS3_TXD_L3CS_B				4
#define HNS3_TXD_L4CS_B				5
#define HNS3_TXD_VLAN_B				6
#define HNS3_TXD_TSO_B				7

/* inner header lengths within type_cs_vlan_tso_len */
#define HNS3_TXD_L2LEN_S			8
#define HNS3_TXD_L2LEN_M			(0xff << HNS3_TXD_L2LEN_S)
#define HNS3_TXD_L3LEN_S			16
#define HNS3_TXD_L3LEN_M			(0xff << HNS3_TXD_L3LEN_S)
#define HNS3_TXD_L4LEN_S			24
#define HNS3_TXD_L4LEN_M			(0xff << HNS3_TXD_L4LEN_S)

/* generic HW checksum: start offset of the checksummed region */
#define HNS3_TXD_CSUM_START_S		8
#define HNS3_TXD_CSUM_START_M		(0xffff << HNS3_TXD_CSUM_START_S)

/* ol_type_vlan_msec byte (outer/tunnel header flags) */
#define HNS3_TXD_OL3T_S				0
#define HNS3_TXD_OL3T_M				(0x3 << HNS3_TXD_OL3T_S)
#define HNS3_TXD_OVLAN_B			2
#define HNS3_TXD_MACSEC_B			3
#define HNS3_TXD_TUNTYPE_S			4
#define HNS3_TXD_TUNTYPE_M			(0xf << HNS3_TXD_TUNTYPE_S)

/* generic HW checksum: offset where the result is written */
#define HNS3_TXD_CSUM_OFFSET_S		8
#define HNS3_TXD_CSUM_OFFSET_M		(0xffff << HNS3_TXD_CSUM_OFFSET_S)

/* bdtp_fe_sc_vld_ra_ri word */
#define HNS3_TXD_BDTYPE_S			0
#define HNS3_TXD_BDTYPE_M			(0xf << HNS3_TXD_BDTYPE_S)
#define HNS3_TXD_FE_B				4
#define HNS3_TXD_SC_S				5
#define HNS3_TXD_SC_M				(0x3 << HNS3_TXD_SC_S)
#define HNS3_TXD_EXTEND_B			7
#define HNS3_TXD_VLD_B				8
#define HNS3_TXD_RI_B				9
#define HNS3_TXD_RA_B				10
#define HNS3_TXD_TSYN_B				11
#define HNS3_TXD_DECTTL_S			12
#define HNS3_TXD_DECTTL_M			(0xf << HNS3_TXD_DECTTL_S)

/* outer L4 checksum bit in paylen_ol4cs */
#define HNS3_TXD_OL4CS_B			22

/* mss_hw_csum halfword */
#define HNS3_TXD_MSS_S				0
#define HNS3_TXD_MSS_M				(0x3fff << HNS3_TXD_MSS_S)
#define HNS3_TXD_HW_CS_B			14
187 
/* which ring directions an interrupt vector services */
#define HNS3_VECTOR_TX_IRQ			BIT_ULL(0)
#define HNS3_VECTOR_RX_IRQ			BIT_ULL(1)

/* values for hns3_enet_tqp_vector::irq_init_flag */
#define HNS3_VECTOR_NOT_INITED			0
#define HNS3_VECTOR_INITED			1

#define HNS3_MAX_BD_SIZE			65535
#define HNS3_MAX_TSO_BD_NUM			63U
#define HNS3_MAX_TSO_SIZE			1048576U
#define HNS3_MAX_NON_TSO_SIZE			9728U

/* interrupt coalescing (GL/RL/QL) register offsets and masks, written
 * by the hns3_set_vector_coalesce_*() helpers declared below
 */
#define HNS3_VECTOR_GL_MASK			GENMASK(11, 0)
#define HNS3_VECTOR_GL0_OFFSET			0x100
#define HNS3_VECTOR_GL1_OFFSET			0x200
#define HNS3_VECTOR_GL2_OFFSET			0x300
#define HNS3_VECTOR_RL_OFFSET			0x900
#define HNS3_VECTOR_RL_EN_B			6
#define HNS3_VECTOR_QL_MASK			GENMASK(9, 0)
#define HNS3_VECTOR_TX_QL_OFFSET		0xe00
#define HNS3_VECTOR_RX_QL_OFFSET		0xf00

#define HNS3_RING_EN_B				0

/* completion queue mode selection per GL level */
#define HNS3_GL0_CQ_MODE_REG			0x20d00
#define HNS3_GL1_CQ_MODE_REG			0x20d04
#define HNS3_GL2_CQ_MODE_REG			0x20d08
#define HNS3_CQ_MODE_EQE			1U
#define HNS3_CQ_MODE_CQE			0U
216 
/* Packet type encodings the driver writes into TX descriptor fields.
 * The numeric values are consumed by hardware, so enumerator order is
 * part of the device ABI and must not change.
 */

enum hns3_pkt_l2t_type {
	HNS3_L2_TYPE_UNICAST,
	HNS3_L2_TYPE_MULTICAST,
	HNS3_L2_TYPE_BROADCAST,
	HNS3_L2_TYPE_INVALID,
};

/* inner L3 type for the HNS3_TXD_L3T field: IPv4 = 1, IPv6 = 2 */
enum hns3_pkt_l3t_type {
	HNS3_L3T_NONE,
	HNS3_L3T_IPV4,
	HNS3_L3T_IPV6,
	HNS3_L3T_RESERVED
};

/* inner L4 type for the HNS3_TXD_L4T field */
enum hns3_pkt_l4t_type {
	HNS3_L4T_UNKNOWN,
	HNS3_L4T_TCP,
	HNS3_L4T_UDP,
	HNS3_L4T_SCTP
};

/* outer L3 type for the HNS3_TXD_OL3T field: IPv4 without HW checksum
 * = 1, IPv6 = 2, IPv4 with HW checksum = 3
 */
enum hns3_pkt_ol3t_type {
	HNS3_OL3T_NONE,
	HNS3_OL3T_IPV4_NO_CSUM,
	HNS3_OL3T_IPV6,
	HNS3_OL3T_IPV4_CSUM
};

/* tunnel encapsulation type for the HNS3_TXD_TUNTYPE field */
enum hns3_pkt_tun_type {
	HNS3_TUN_NONE,
	HNS3_TUN_MAC_IN_UDP,
	HNS3_TUN_NVGRE,
	HNS3_TUN_OTHER
};
251 
252 /* hardware spec ring buffer format */
253 struct __packed hns3_desc {
254 	union {
255 		__le64 addr;
256 		__le16 csum;
257 		struct {
258 			__le32 ts_nsec;
259 			__le32 ts_sec;
260 		};
261 	};
262 	union {
263 		struct {
264 			__le16 vlan_tag;
265 			__le16 send_size;
266 			union {
267 				__le32 type_cs_vlan_tso_len;
268 				struct {
269 					__u8 type_cs_vlan_tso;
270 					__u8 l2_len;
271 					__u8 l3_len;
272 					__u8 l4_len;
273 				};
274 			};
275 			__le16 outer_vlan_tag;
276 			__le16 tv;
277 
278 		union {
279 			__le32 ol_type_vlan_len_msec;
280 			struct {
281 				__u8 ol_type_vlan_msec;
282 				__u8 ol2_len;
283 				__u8 ol3_len;
284 				__u8 ol4_len;
285 			};
286 		};
287 
288 			__le32 paylen_ol4cs;
289 			__le16 bdtp_fe_sc_vld_ra_ri;
290 			__le16 mss_hw_csum;
291 		} tx;
292 
293 		struct {
294 			__le32 l234_info;
295 			__le16 pkt_len;
296 			__le16 size;
297 
298 			__le32 rss_hash;
299 			__le16 fd_id;
300 			__le16 vlan_tag;
301 
302 			union {
303 				__le32 ol_info;
304 				struct {
305 					__le16 o_dm_vlan_id_fb;
306 					__le16 ot_vlan_tag;
307 				};
308 			};
309 
310 			__le32 bd_base_info;
311 		} rx;
312 	};
313 };
314 
/* Bit flags stored in hns3_desc_cb::type describing what kind of
 * buffer/priv data a descriptor carries (flags, so they can combine).
 */
enum hns3_desc_type {
	DESC_TYPE_UNKNOWN		= 0,
	DESC_TYPE_SKB			= 1 << 0,
	DESC_TYPE_FRAGLIST_SKB		= 1 << 1,
	DESC_TYPE_PAGE			= 1 << 2,
	DESC_TYPE_BOUNCE_ALL		= 1 << 3,
	DESC_TYPE_BOUNCE_HEAD		= 1 << 4,
	DESC_TYPE_SGL_SKB		= 1 << 5,
	DESC_TYPE_PP_FRAG		= 1 << 6,
};
325 
/* per-descriptor software context: the buffer behind a BD, its DMA
 * mapping, and RX page-reuse bookkeeping
 */
struct hns3_desc_cb {
	dma_addr_t dma; /* dma address of this desc */
	void *buf;      /* cpu addr for a desc */

	/* priv data for the desc, e.g. skb when use with ip stack */
	void *priv;

	union {
		u32 page_offset;	/* for rx */
		u32 send_bytes;		/* for tx */
	};

	u32 length;     /* length of the buffer */

	u16 reuse_flag;
	u16 refill;

	/* desc type (enum hns3_desc_type), used by the ring user to mark
	 * the type of the priv data
	 */
	u16 type;
	u16 pagecnt_bias;
};
347 
/* L3 type reported by hardware in the RX descriptor — presumably the
 * HNS3_RXD_L3ID field values; confirm against the parsing code.
 */
enum hns3_pkt_l3type {
	HNS3_L3_TYPE_IPV4,
	HNS3_L3_TYPE_IPV6,
	HNS3_L3_TYPE_ARP,
	HNS3_L3_TYPE_RARP,
	HNS3_L3_TYPE_IPV4_OPT,
	HNS3_L3_TYPE_IPV6_EXT,
	HNS3_L3_TYPE_LLDP,
	HNS3_L3_TYPE_BPDU,
	HNS3_L3_TYPE_MAC_PAUSE,
	HNS3_L3_TYPE_PFC_PAUSE, /* 0x9 */

	/* reserved for 0xA~0xB */

	HNS3_L3_TYPE_CNM = 0xc,

	/* reserved for 0xD~0xE */

	HNS3_L3_TYPE_PARSE_FAIL	= 0xf /* must be last */
};

/* L4 type reported by hardware in the RX descriptor */
enum hns3_pkt_l4type {
	HNS3_L4_TYPE_UDP,
	HNS3_L4_TYPE_TCP,
	HNS3_L4_TYPE_GRE,
	HNS3_L4_TYPE_SCTP,
	HNS3_L4_TYPE_IGMP,
	HNS3_L4_TYPE_ICMP,

	/* reserved for 0x6~0xE */

	HNS3_L4_TYPE_PARSE_FAIL	= 0xf /* must be last */
};

/* outer (tunnel) L3 type reported by hardware on RX */
enum hns3_pkt_ol3type {
	HNS3_OL3_TYPE_IPV4 = 0,
	HNS3_OL3_TYPE_IPV6,
	/* reserved for 0x2~0x3 */
	HNS3_OL3_TYPE_IPV4_OPT = 4,
	HNS3_OL3_TYPE_IPV6_EXT,

	/* reserved for 0x6~0xE */

	HNS3_OL3_TYPE_PARSE_FAIL = 0xf	/* must be last */
};

/* outer (tunnel) L4 type reported by hardware on RX */
enum hns3_pkt_ol4type {
	HNS3_OL4_TYPE_NO_TUN,
	HNS3_OL4_TYPE_MAC_IN_UDP,
	HNS3_OL4_TYPE_NVGRE,
	HNS3_OL4_TYPE_UNKNOWN
};
400 
/* One decoded packet-type table entry, packed into 20 bits of a u32.
 * NOTE(review): presumably indexed by the RX descriptor PTYPE field
 * (HNS3_RXD_PTYPE_M) — confirm against the lookup code.
 */
struct hns3_rx_ptype {
	u32 ptype : 8;
	u32 csum_level : 2;
	u32 ip_summed : 2;
	u32 l3_type : 4;
	u32 valid : 1;
	u32 hash_type: 3;
};
409 
410 struct ring_stats {
411 	u64 sw_err_cnt;
412 	u64 seg_pkt_cnt;
413 	union {
414 		struct {
415 			u64 tx_pkts;
416 			u64 tx_bytes;
417 			u64 tx_more;
418 			u64 tx_push;
419 			u64 tx_mem_doorbell;
420 			u64 restart_queue;
421 			u64 tx_busy;
422 			u64 tx_copy;
423 			u64 tx_vlan_err;
424 			u64 tx_l4_proto_err;
425 			u64 tx_l2l3l4_err;
426 			u64 tx_tso_err;
427 			u64 over_max_recursion;
428 			u64 hw_limitation;
429 			u64 tx_bounce;
430 			u64 tx_spare_full;
431 			u64 copy_bits_err;
432 			u64 tx_sgl;
433 			u64 skb2sgl_err;
434 			u64 map_sg_err;
435 		};
436 		struct {
437 			u64 rx_pkts;
438 			u64 rx_bytes;
439 			u64 rx_err_cnt;
440 			u64 reuse_pg_cnt;
441 			u64 err_pkt_len;
442 			u64 err_bd_num;
443 			u64 l2_err;
444 			u64 l3l4_csum_err;
445 			u64 csum_complete;
446 			u64 rx_multicast;
447 			u64 non_reuse_pg;
448 			u64 frag_alloc_err;
449 			u64 frag_alloc;
450 		};
451 		__le16 csum;
452 	};
453 };
454 
/* Pre-mapped spare buffer attached to a TX ring, consumed/produced via
 * the offsets below.  NOTE(review): presumably backs the tx_bounce /
 * tx_copy paths counted in ring_stats — confirm in hns3_enet.c.
 */
struct hns3_tx_spare {
	dma_addr_t dma;		/* DMA address of the spare buffer */
	void *buf;		/* CPU address of the spare buffer */
	u32 next_to_use;	/* producer offset */
	u32 next_to_clean;	/* consumer offset */
	u32 last_to_clean;
	u32 len;		/* total buffer length in bytes */
};
463 
/* One TX or RX descriptor ring plus its software bookkeeping.
 * Aligned to its own cachelines (____cacheline_internodealigned_in_smp)
 * to limit false sharing between rings polled on different CPUs.
 */
struct hns3_enet_ring {
	struct hns3_desc *desc; /* dma map address space */
	struct hns3_desc_cb *desc_cb;
	struct hns3_enet_ring *next;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_queue *tqp;
	int queue_index;
	struct device *dev; /* will be used for DMA mapping of descriptors */
	struct page_pool *page_pool;

	/* statistic */
	struct ring_stats stats;
	struct u64_stats_sync syncp;

	dma_addr_t desc_dma_addr;
	u32 buf_size;       /* size for hnae_desc->addr, preset by AE */
	u16 desc_num;       /* total number of desc */
	int next_to_use;    /* idx of next spare desc */

	/* idx of latest sent desc, the ring is empty when equal to
	 * next_to_use (see ring_space() for the free-space calculation)
	 */
	int next_to_clean;
	u32 flag;          /* ring attribute */

	int pending_buf;
	union {
		/* for Tx ring */
		struct {
			u32 fd_qb_tx_sample;
			int last_to_use;        /* last idx used by xmit */
			u32 tx_copybreak;
			struct hns3_tx_spare *tx_spare;
		};

		/* for Rx ring */
		struct {
			u32 pull_len;   /* memcpy len for current rx packet */
			u32 rx_copybreak;
			u32 frag_num;
			/* first buffer address for current packet */
			unsigned char *va;
			struct sk_buff *skb;
			struct sk_buff *tail_skb;
		};
	};
} ____cacheline_internodealigned_in_smp;
511 
/* traffic flow level, stored in hns3_enet_coalesce::flow_level */
enum hns3_flow_level_range {
	HNS3_FLOW_LOW = 0,
	HNS3_FLOW_MID = 1,
	HNS3_FLOW_HIGH = 2,
	HNS3_FLOW_ULTRA = 3,
};

/* preset GL register values (names give the resulting interrupt rate) */
#define HNS3_INT_GL_50K			0x0014
#define HNS3_INT_GL_20K			0x0032
#define HNS3_INT_GL_18K			0x0036
#define HNS3_INT_GL_8K			0x007C

#define HNS3_INT_GL_1US			BIT(31)

#define HNS3_INT_RL_MAX			0x00EC
#define HNS3_INT_RL_ENABLE_MASK		0x40

#define HNS3_INT_QL_DEFAULT_CFG		0x20
530 
/* per-direction interrupt coalescing configuration */
struct hns3_enet_coalesce {
	u16 int_gl;		/* current GL setting */
	u16 int_ql;		/* current QL setting */
	u16 int_ql_max;
	u8 adapt_enable : 1;	/* adaptive coalescing enabled */
	u8 ql_enable : 1;
	/* NOTE(review): presumably selects 1us GL units, cf.
	 * HNS3_INT_GL_1US — confirm
	 */
	u8 unit_1us : 1;
	enum hns3_flow_level_range flow_level;
};
540 
/* all rings of one direction (TX or RX) served by one TQP vector */
struct hns3_enet_ring_group {
	/* head of the linked list of rings in this group, chained via
	 * hns3_enet_ring::next (see hns3_for_each_ring())
	 */
	struct hns3_enet_ring *ring;
	u64 total_bytes;	/* total bytes processed this group */
	u64 total_packets;	/* total packets processed this group */
	u16 count;
	struct hns3_enet_coalesce coal;
	struct dim dim;		/* dynamic interrupt moderation state */
};
550 
/* Per-interrupt-vector context: one NAPI instance polling the TX and
 * RX ring groups bound to this vector.  Cacheline-aligned to avoid
 * false sharing between vectors.
 */
struct hns3_enet_tqp_vector {
	struct hnae3_handle *handle;
	u8 __iomem *mask_addr;
	int vector_irq;
	int irq_init_flag;	/* HNS3_VECTOR_INITED / _NOT_INITED */

	u16 idx;		/* index in the TQP vector array per handle. */

	struct napi_struct napi;

	struct hns3_enet_ring_group rx_group;
	struct hns3_enet_ring_group tx_group;

	cpumask_t affinity_mask;
	u16 num_tqps;	/* total number of tqps in TQP vector */
	struct irq_affinity_notify affinity_notify;

	char name[HNAE3_INT_NAME_LEN];

	u64 event_cnt;
} ____cacheline_internodealigned_in_smp;
572 
/* per-netdev private data (netdev_priv()) for an hns3 interface */
struct hns3_nic_priv {
	struct hnae3_handle *ae_handle;
	struct net_device *netdev;
	struct device *dev;

	/* the cb for nic to manage the ring buffer, the first half of the
	 * array is for tx_ring and vice versa for the second half
	 */
	struct hns3_enet_ring *ring;
	struct hns3_enet_tqp_vector *tqp_vector;
	u16 vector_num;
	u8 max_non_tso_bd_num;

	u64 tx_timeout_count;

	unsigned long state;	/* bitmap of enum hns3_nic_state bits */

	enum dim_cq_period_mode tx_cqe_mode;
	enum dim_cq_period_mode rx_cqe_mode;
	struct hns3_enet_coalesce tx_coal;
	struct hns3_enet_coalesce rx_coal;
	u32 tx_copybreak;
	u32 rx_copybreak;
};
598 
/* typed views of an L3 header pointer during TX parsing */
union l3_hdr_info {
	struct iphdr *v4;
	struct ipv6hdr *v6;
	unsigned char *hdr;
};

/* typed views of an L4 header pointer during TX parsing */
union l4_hdr_info {
	struct tcphdr *tcp;
	struct udphdr *udp;
	struct gre_base_hdr *gre;
	unsigned char *hdr;
};

/* maps a hardware error type to a human-readable message */
struct hns3_hw_error_info {
	enum hnae3_hw_error_type type;
	const char *msg;
};

/* maps ethtool reset flags to the corresponding hnae3 reset type */
struct hns3_reset_type_map {
	enum ethtool_reset_flags rst_flags;
	enum hnae3_reset_type rst_type;
};
621 
622 static inline int ring_space(struct hns3_enet_ring *ring)
623 {
624 	/* This smp_load_acquire() pairs with smp_store_release() in
625 	 * hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring.
626 	 */
627 	int begin = smp_load_acquire(&ring->next_to_clean);
628 	int end = READ_ONCE(ring->next_to_use);
629 
630 	return ((end >= begin) ? (ring->desc_num - end + begin) :
631 			(begin - end)) - 1;
632 }
633 
/* Read a per-ring register at offset @reg from the ring's TQP io_base
 * (relaxed: no ordering guarantee versus other memory accesses).
 */
static inline u32 hns3_tqp_read_reg(struct hns3_enet_ring *ring, u32 reg)
{
	return readl_relaxed(ring->tqp->io_base + reg);
}
638 
/* Read a 32-bit device register at offset @reg from @base (ordered). */
static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
{
	return readl(base + reg);
}
643 
/* Write @value to the 32-bit device register at offset @reg from @base.
 * NOTE(review): the READ_ONCE() forces a single read of @base —
 * presumably to keep the compiler from re-fetching it; confirm intent
 * against the TX-push users.
 */
static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
{
	u8 __iomem *reg_addr = READ_ONCE(base);

	writel(value, reg_addr + reg);
}
650 
/* read a register of any object (tqp, handle...) exposing io_base */
#define hns3_read_dev(a, reg) \
	hns3_read_reg((a)->io_base, reg)
653 
/* true while a reset is in progress on @netdev (RESETTING state bit) */
static inline bool hns3_nic_resetting(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
}
660 
/* write a register of any object (tqp, handle...) exposing io_base */
#define hns3_write_dev(a, reg, value) \
	hns3_write_reg((a)->io_base, reg, value)

#define ring_to_dev(ring) ((ring)->dev)

#define ring_to_netdev(ring)	((ring)->tqp_vector->napi.dev)

/* TX rings DMA-map buffers toward the device, RX rings from it */
#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
	DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define hns3_buf_size(_ring) ((_ring)->buf_size)
672 
/* Bump the named per-ring stat counter under the ring's u64_stats sync.
 * @ring is expanded only once (into the local), so the argument is safe
 * against multiple evaluation.
 */
#define hns3_ring_stats_update(ring, cnt) do { \
	typeof(ring) (tmp) = (ring); \
	u64_stats_update_begin(&(tmp)->syncp); \
	((tmp)->stats.cnt)++; \
	u64_stats_update_end(&(tmp)->syncp); \
} while (0)
679 
680 static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
681 {
682 #if (PAGE_SIZE < 8192)
683 	if (ring->buf_size > (PAGE_SIZE / 2))
684 		return 1;
685 #endif
686 	return 0;
687 }
688 
#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring))

/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
	for (pos = (head).ring; (pos); pos = (pos)->next)

#define hns3_get_handle(ndev) \
	(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)

#define hns3_get_ae_dev(handle) \
	(pci_get_drvdata((handle)->pdev))

#define hns3_get_ops(handle) \
	((handle)->ae_algo->ops)

/* convert microseconds to register units and back: per the conversions
 * below, one GL register unit covers 2us and one RL unit covers 4us
 */
#define hns3_gl_usec_to_reg(int_gl) ((int_gl) >> 1)
#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)

#define hns3_rl_usec_to_reg(int_rl) ((int_rl) >> 2)
#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)
709 
/* entry points implemented in hns3_enet.c / hns3_ethtool.c / hns3_debugfs.c */
void hns3_ethtool_set_ops(struct net_device *netdev);
int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch);

void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
void hns3_fini_ring(struct hns3_enet_ring *ring);
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
bool hns3_is_phys_func(struct pci_dev *pdev);
int hns3_clean_rx_ring(
		struct hns3_enet_ring *ring, int budget,
		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));

/* program the interrupt coalescing (GL/RL/QL) vector registers */
void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value);
void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value);
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value);
void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value);
void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value);

void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
int hns3_reset_notify(struct hnae3_handle *handle,
		      enum hnae3_reset_notify_type type);

#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
#else
static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {}
#endif

int hns3_dbg_init(struct hnae3_handle *handle);
void hns3_dbg_uninit(struct hnae3_handle *handle);
void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
void hns3_dbg_unregister_debugfs(void);
void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);
u16 hns3_get_max_available_channels(struct hnae3_handle *h);
void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
			      enum dim_cq_period_mode tx_mode,
			      enum dim_cq_period_mode rx_mode);

void hns3_external_lb_prepare(struct net_device *ndev, bool if_running);
void hns3_external_lb_restore(struct net_device *ndev, bool if_running);
#endif /* __HNS3_ENET_H */
758