/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.

#ifndef __HNS3_ENET_H
#define __HNS3_ENET_H

#include <linux/if_vlan.h>

#include "hnae3.h"

enum hns3_nic_state {
	HNS3_NIC_STATE_TESTING,
	HNS3_NIC_STATE_RESETTING,
	HNS3_NIC_STATE_INITED,
	HNS3_NIC_STATE_DOWN,
	HNS3_NIC_STATE_DISABLED,
	HNS3_NIC_STATE_REMOVING,
	HNS3_NIC_STATE_SERVICE_INITED,
	HNS3_NIC_STATE_SERVICE_SCHED,
	HNS3_NIC_STATE2_RESET_REQUESTED,
	HNS3_NIC_STATE_MAX
};

#define HNS3_RING_RX_RING_BASEADDR_L_REG	0x00000
#define HNS3_RING_RX_RING_BASEADDR_H_REG	0x00004
#define HNS3_RING_RX_RING_BD_NUM_REG		0x00008
#define HNS3_RING_RX_RING_BD_LEN_REG		0x0000C
#define HNS3_RING_RX_RING_TAIL_REG		0x00018
#define HNS3_RING_RX_RING_HEAD_REG		0x0001C
#define HNS3_RING_RX_RING_FBDNUM_REG		0x00020
#define HNS3_RING_RX_RING_PKTNUM_RECORD_REG	0x0002C

#define HNS3_RING_TX_RING_BASEADDR_L_REG	0x00040
#define HNS3_RING_TX_RING_BASEADDR_H_REG	0x00044
#define HNS3_RING_TX_RING_BD_NUM_REG		0x00048
#define HNS3_RING_TX_RING_TC_REG		0x00050
#define HNS3_RING_TX_RING_TAIL_REG		0x00058
#define HNS3_RING_TX_RING_HEAD_REG		0x0005C
#define HNS3_RING_TX_RING_FBDNUM_REG		0x00060
#define HNS3_RING_TX_RING_OFFSET_REG		0x00064
#define HNS3_RING_TX_RING_EBDNUM_REG		0x00068
#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG	0x0006C
#define HNS3_RING_TX_RING_EBD_OFFSET_REG	0x00070
#define HNS3_RING_TX_RING_BD_ERR_REG		0x00074
#define HNS3_RING_PREFETCH_EN_REG		0x0007C
#define HNS3_RING_CFG_VF_NUM_REG		0x00080
#define HNS3_RING_ASID_REG			0x0008C
#define HNS3_RING_EN_REG			0x00090

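/* the TX ring register block starts HNS3_TX_REG_OFFSET (0x40) bytes
 * above the RX ring register block
 */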
#define HNS3_TX_REG_OFFSET			0x40

#define HNS3_RX_HEAD_SIZE			256

#define HNS3_TX_TIMEOUT (5 * HZ)
#define HNS3_RING_NAME_LEN			16
#define HNS3_BUFFER_SIZE_2048			2048
#define HNS3_RING_MAX_PENDING			32760
#define HNS3_RING_MIN_PENDING			72
#define HNS3_RING_BD_MULTIPLE			8
/* max frame size of mac */
#define HNS3_MAC_MAX_FRAME			9728
#define HNS3_MAX_MTU \
	(HNS3_MAC_MAX_FRAME - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
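/* with ETH_HLEN (14), ETH_FCS_LEN (4) and two VLAN tags (2 * 4) this
 * works out to an MTU of 9702 bytes
 */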

#define HNS3_BD_SIZE_512_TYPE			0
#define HNS3_BD_SIZE_1024_TYPE			1
#define HNS3_BD_SIZE_2048_TYPE			2
#define HNS3_BD_SIZE_4096_TYPE			3

#define HNS3_RX_FLAG_VLAN_PRESENT		0x1
#define HNS3_RX_FLAG_L3ID_IPV4			0x0
#define HNS3_RX_FLAG_L3ID_IPV6			0x1
#define HNS3_RX_FLAG_L4ID_UDP			0x0
#define HNS3_RX_FLAG_L4ID_TCP			0x1

#define HNS3_RXD_DMAC_S				0
#define HNS3_RXD_DMAC_M				(0x3 << HNS3_RXD_DMAC_S)
#define HNS3_RXD_VLAN_S				2
#define HNS3_RXD_VLAN_M				(0x3 << HNS3_RXD_VLAN_S)
#define HNS3_RXD_L3ID_S				4
#define HNS3_RXD_L3ID_M				(0xf << HNS3_RXD_L3ID_S)
#define HNS3_RXD_L4ID_S				8
#define HNS3_RXD_L4ID_M				(0xf << HNS3_RXD_L4ID_S)
#define HNS3_RXD_FRAG_B				12
#define HNS3_RXD_STRP_TAGP_S			13
#define HNS3_RXD_STRP_TAGP_M			(0x3 << HNS3_RXD_STRP_TAGP_S)

#define HNS3_RXD_L2E_B				16
#define HNS3_RXD_L3E_B				17
#define HNS3_RXD_L4E_B				18
#define HNS3_RXD_TRUNCAT_B			19
#define HNS3_RXD_HOI_B				20
#define HNS3_RXD_DOI_B				21
#define HNS3_RXD_OL3E_B				22
#define HNS3_RXD_OL4E_B				23
#define HNS3_RXD_GRO_COUNT_S			24
#define HNS3_RXD_GRO_COUNT_M			(0x3f << HNS3_RXD_GRO_COUNT_S)
#define HNS3_RXD_GRO_FIXID_B			30
#define HNS3_RXD_GRO_ECN_B			31

#define HNS3_RXD_ODMAC_S			0
#define HNS3_RXD_ODMAC_M			(0x3 << HNS3_RXD_ODMAC_S)
#define HNS3_RXD_OVLAN_S			2
#define HNS3_RXD_OVLAN_M			(0x3 << HNS3_RXD_OVLAN_S)
#define HNS3_RXD_OL3ID_S			4
#define HNS3_RXD_OL3ID_M			(0xf << HNS3_RXD_OL3ID_S)
#define HNS3_RXD_OL4ID_S			8
#define HNS3_RXD_OL4ID_M			(0xf << HNS3_RXD_OL4ID_S)
#define HNS3_RXD_FBHI_S				12
#define HNS3_RXD_FBHI_M				(0x3 << HNS3_RXD_FBHI_S)
#define HNS3_RXD_FBLI_S				14
#define HNS3_RXD_FBLI_M				(0x3 << HNS3_RXD_FBLI_S)

#define HNS3_RXD_BDTYPE_S			0
#define HNS3_RXD_BDTYPE_M			(0xf << HNS3_RXD_BDTYPE_S)
#define HNS3_RXD_VLD_B				4
#define HNS3_RXD_UDP0_B				5
#define HNS3_RXD_EXTEND_B			7
#define HNS3_RXD_FE_B				8
#define HNS3_RXD_LUM_B				9
#define HNS3_RXD_CRCP_B				10
#define HNS3_RXD_L3L4P_B			11
#define HNS3_RXD_TSIND_S			12
#define HNS3_RXD_TSIND_M			(0x7 << HNS3_RXD_TSIND_S)
#define HNS3_RXD_LKBK_B				15
#define HNS3_RXD_GRO_SIZE_S			16
#define HNS3_RXD_GRO_SIZE_M			(0x3fff << HNS3_RXD_GRO_SIZE_S)

#define HNS3_TXD_L3T_S				0
#define HNS3_TXD_L3T_M				(0x3 << HNS3_TXD_L3T_S)
#define HNS3_TXD_L4T_S				2
#define HNS3_TXD_L4T_M				(0x3 << HNS3_TXD_L4T_S)
#define HNS3_TXD_L3CS_B				4
#define HNS3_TXD_L4CS_B				5
#define HNS3_TXD_VLAN_B				6
#define HNS3_TXD_TSO_B				7

#define HNS3_TXD_L2LEN_S			8
#define HNS3_TXD_L2LEN_M			(0xff << HNS3_TXD_L2LEN_S)
#define HNS3_TXD_L3LEN_S			16
#define HNS3_TXD_L3LEN_M			(0xff << HNS3_TXD_L3LEN_S)
#define HNS3_TXD_L4LEN_S			24
#define HNS3_TXD_L4LEN_M			(0xff << HNS3_TXD_L4LEN_S)

#define HNS3_TXD_OL3T_S				0
#define HNS3_TXD_OL3T_M				(0x3 << HNS3_TXD_OL3T_S)
#define HNS3_TXD_OVLAN_B			2
#define HNS3_TXD_MACSEC_B			3
#define HNS3_TXD_TUNTYPE_S			4
#define HNS3_TXD_TUNTYPE_M			(0xf << HNS3_TXD_TUNTYPE_S)

#define HNS3_TXD_BDTYPE_S			0
#define HNS3_TXD_BDTYPE_M			(0xf << HNS3_TXD_BDTYPE_S)
#define HNS3_TXD_FE_B				4
#define HNS3_TXD_SC_S				5
#define HNS3_TXD_SC_M				(0x3 << HNS3_TXD_SC_S)
#define HNS3_TXD_EXTEND_B			7
#define HNS3_TXD_VLD_B				8
#define HNS3_TXD_RI_B				9
#define HNS3_TXD_RA_B				10
#define HNS3_TXD_TSYN_B				11
#define HNS3_TXD_DECTTL_S			12
#define HNS3_TXD_DECTTL_M			(0xf << HNS3_TXD_DECTTL_S)

#define HNS3_TXD_MSS_S				0
#define HNS3_TXD_MSS_M				(0x3fff << HNS3_TXD_MSS_S)

#define HNS3_VECTOR_TX_IRQ			BIT_ULL(0)
#define HNS3_VECTOR_RX_IRQ			BIT_ULL(1)

#define HNS3_VECTOR_NOT_INITED			0
#define HNS3_VECTOR_INITED			1

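/* a single BD can carry at most HNS3_MAX_BD_SIZE bytes, so the TSO and
 * non-TSO size limits below are simply that per-BD maximum times the
 * respective BD count limit
 */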
#define HNS3_MAX_BD_SIZE			65535
#define HNS3_MAX_NON_TSO_BD_NUM			8U
#define HNS3_MAX_TSO_BD_NUM			63U
#define HNS3_MAX_TSO_SIZE \
	(HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)

#define HNS3_MAX_NON_TSO_SIZE \
	(HNS3_MAX_BD_SIZE * HNS3_MAX_NON_TSO_BD_NUM)

#define HNS3_VECTOR_GL0_OFFSET			0x100
#define HNS3_VECTOR_GL1_OFFSET			0x200
#define HNS3_VECTOR_GL2_OFFSET			0x300
#define HNS3_VECTOR_RL_OFFSET			0x900
#define HNS3_VECTOR_RL_EN_B			6

#define HNS3_RING_EN_B				0

enum hns3_pkt_l2t_type {
	HNS3_L2_TYPE_UNICAST,
	HNS3_L2_TYPE_MULTICAST,
	HNS3_L2_TYPE_BROADCAST,
	HNS3_L2_TYPE_INVALID,
};

enum hns3_pkt_l3t_type {
	HNS3_L3T_NONE,
	HNS3_L3T_IPV6,
	HNS3_L3T_IPV4,
	HNS3_L3T_RESERVED
};

enum hns3_pkt_l4t_type {
	HNS3_L4T_UNKNOWN,
	HNS3_L4T_TCP,
	HNS3_L4T_UDP,
	HNS3_L4T_SCTP
};

enum hns3_pkt_ol3t_type {
	HNS3_OL3T_NONE,
	HNS3_OL3T_IPV6,
	HNS3_OL3T_IPV4_NO_CSUM,
	HNS3_OL3T_IPV4_CSUM
};

enum hns3_pkt_tun_type {
	HNS3_TUN_NONE,
	HNS3_TUN_MAC_IN_UDP,
	HNS3_TUN_NVGRE,
	HNS3_TUN_OTHER
};

/* hardware spec ring buffer format */
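/* each descriptor is 32 bytes: a 64-bit DMA address followed by 24
 * bytes of control/metadata, interpreted as the tx or the rx view
 * below depending on the ring direction
 */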
struct __packed hns3_desc {
	__le64 addr;
	union {
		struct {
			__le16 vlan_tag;
			__le16 send_size;
			union {
				__le32 type_cs_vlan_tso_len;
				struct {
					__u8 type_cs_vlan_tso;
					__u8 l2_len;
					__u8 l3_len;
					__u8 l4_len;
				};
			};
			__le16 outer_vlan_tag;
			__le16 tv;

			union {
				__le32 ol_type_vlan_len_msec;
				struct {
					__u8 ol_type_vlan_msec;
					__u8 ol2_len;
					__u8 ol3_len;
					__u8 ol4_len;
				};
			};

			__le32 paylen;
			__le16 bdtp_fe_sc_vld_ra_ri;
			__le16 mss;
		} tx;

		struct {
			__le32 l234_info;
			__le16 pkt_len;
			__le16 size;

			__le32 rss_hash;
			__le16 fd_id;
			__le16 vlan_tag;

			union {
				__le32 ol_info;
				struct {
					__le16 o_dm_vlan_id_fb;
					__le16 ot_vlan_tag;
				};
			};

			__le32 bd_base_info;
		} rx;
	};
};
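
/* illustrative sketch only, not part of the driver: to post a TX BD the
 * driver typically DMA-maps the buffer, writes the mapping into addr,
 * then encodes the per-BD control bits (e.g. descriptor valid via
 * HNS3_TXD_VLD_B, last BD of a packet via HNS3_TXD_FE_B) into
 * bdtp_fe_sc_vld_ra_ri before advancing next_to_use
 */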

struct hns3_desc_cb {
	dma_addr_t dma; /* dma address of this desc */
	void *buf;      /* cpu addr for a desc */

	/* priv data for the desc, e.g. skb when used with the IP stack */
	void *priv;
	u32 page_offset;
	u32 length;     /* length of the buffer */

	u16 reuse_flag;

	/* desc type, used by the ring user to mark the type of the priv data */
	u16 type;
};

enum hns3_pkt_l3type {
	HNS3_L3_TYPE_IPV4,
	HNS3_L3_TYPE_IPV6,
	HNS3_L3_TYPE_ARP,
	HNS3_L3_TYPE_RARP,
	HNS3_L3_TYPE_IPV4_OPT,
	HNS3_L3_TYPE_IPV6_EXT,
	HNS3_L3_TYPE_LLDP,
	HNS3_L3_TYPE_BPDU,
	HNS3_L3_TYPE_MAC_PAUSE,
	HNS3_L3_TYPE_PFC_PAUSE,	/* 0x9 */

	/* reserved for 0xA~0xB */

	HNS3_L3_TYPE_CNM = 0xc,

	/* reserved for 0xD~0xE */

	HNS3_L3_TYPE_PARSE_FAIL	= 0xf /* must be last */
};

enum hns3_pkt_l4type {
	HNS3_L4_TYPE_UDP,
	HNS3_L4_TYPE_TCP,
	HNS3_L4_TYPE_GRE,
	HNS3_L4_TYPE_SCTP,
	HNS3_L4_TYPE_IGMP,
	HNS3_L4_TYPE_ICMP,

	/* reserved for 0x6~0xE */

	HNS3_L4_TYPE_PARSE_FAIL	= 0xf /* must be last */
};

enum hns3_pkt_ol3type {
	HNS3_OL3_TYPE_IPV4 = 0,
	HNS3_OL3_TYPE_IPV6,
	/* reserved for 0x2~0x3 */
	HNS3_OL3_TYPE_IPV4_OPT = 4,
	HNS3_OL3_TYPE_IPV6_EXT,

	/* reserved for 0x6~0xE */

	HNS3_OL3_TYPE_PARSE_FAIL = 0xf	/* must be last */
};

enum hns3_pkt_ol4type {
	HNS3_OL4_TYPE_NO_TUN,
	HNS3_OL4_TYPE_MAC_IN_UDP,
	HNS3_OL4_TYPE_NVGRE,
	HNS3_OL4_TYPE_UNKNOWN
};

struct ring_stats {
	u64 io_err_cnt;
	u64 sw_err_cnt;
	u64 seg_pkt_cnt;
	union {
		struct {
			u64 tx_pkts;
			u64 tx_bytes;
			u64 tx_err_cnt;
			u64 restart_queue;
			u64 tx_busy;
			u64 tx_copy;
			u64 tx_vlan_err;
			u64 tx_l4_proto_err;
			u64 tx_l2l3l4_err;
			u64 tx_tso_err;
		};
		struct {
			u64 rx_pkts;
			u64 rx_bytes;
			u64 rx_err_cnt;
			u64 reuse_pg_cnt;
			u64 err_pkt_len;
			u64 err_bd_num;
			u64 l2_err;
			u64 l3l4_csum_err;
			u64 rx_multicast;
			u64 non_reuse_pg;
		};
	};
};

struct hns3_enet_ring {
	u8 __iomem *io_base; /* base io address for the ring */
	struct hns3_desc *desc; /* dma map address space */
	struct hns3_desc_cb *desc_cb;
	struct hns3_enet_ring *next;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_queue *tqp;
	int queue_index;
	struct device *dev; /* will be used for DMA mapping of descriptors */

	/* statistic */
	struct ring_stats stats;
	struct u64_stats_sync syncp;

	dma_addr_t desc_dma_addr;
	u32 buf_size;       /* size for hnae_desc->addr, preset by AE */
	u16 desc_num;       /* total number of desc */
	int next_to_use;    /* idx of next spare desc */
	/* idx of the latest sent desc; the ring is empty when it equals
	 * next_to_use
	 */
	int next_to_clean;

	u32 pull_len; /* head length for current packet */
	u32 frag_num;
	void *va; /* first buffer address for current packet */

	u32 flag;          /* ring attribute */

	int pending_buf;
	struct sk_buff *skb;
	struct sk_buff *tail_skb;
} ____cacheline_internodealigned_in_smp;

enum hns3_flow_level_range {
	HNS3_FLOW_LOW = 0,
	HNS3_FLOW_MID = 1,
	HNS3_FLOW_HIGH = 2,
	HNS3_FLOW_ULTRA = 3,
};

#define HNS3_INT_GL_MAX			0x1FE0
#define HNS3_INT_GL_50K			0x0014
#define HNS3_INT_GL_20K			0x0032
#define HNS3_INT_GL_18K			0x0036
#define HNS3_INT_GL_8K			0x007C

#define HNS3_INT_RL_MAX			0x00EC
#define HNS3_INT_RL_ENABLE_MASK		0x40

struct hns3_enet_coalesce {
	u16 int_gl;
	u8 gl_adapt_enable;
	enum hns3_flow_level_range flow_level;
};

struct hns3_enet_ring_group {
	/* rings in this group, chained via hns3_enet_ring->next */
	struct hns3_enet_ring *ring;
	u64 total_bytes;	/* total bytes processed this group */
	u64 total_packets;	/* total packets processed this group */
	u16 count;
	struct hns3_enet_coalesce coal;
};

struct hns3_enet_tqp_vector {
	struct hnae3_handle *handle;
	u8 __iomem *mask_addr;
	int vector_irq;
	int irq_init_flag;

	u16 idx;		/* index in the TQP vector array per handle. */

	struct napi_struct napi;

	struct hns3_enet_ring_group rx_group;
	struct hns3_enet_ring_group tx_group;

	cpumask_t affinity_mask;
	u16 num_tqps;	/* total number of tqps in TQP vector */
	struct irq_affinity_notify affinity_notify;

	char name[HNAE3_INT_NAME_LEN];

	unsigned long last_jiffies;
} ____cacheline_internodealigned_in_smp;

struct hns3_nic_priv {
	struct hnae3_handle *ae_handle;
	struct net_device *netdev;
	struct device *dev;
	/* the rings managed by this nic: the first half of the array holds
	 * the tx rings, the second half the rx rings
	 */
	struct hns3_enet_ring *ring;
	struct hns3_enet_tqp_vector *tqp_vector;
	u16 vector_num;

	u64 tx_timeout_count;

	unsigned long state;

	struct hns3_enet_coalesce tx_coal;
	struct hns3_enet_coalesce rx_coal;
};

union l3_hdr_info {
	struct iphdr *v4;
	struct ipv6hdr *v6;
	unsigned char *hdr;
};

union l4_hdr_info {
	struct tcphdr *tcp;
	struct udphdr *udp;
	struct gre_base_hdr *gre;
	unsigned char *hdr;
};

struct hns3_hw_error_info {
	enum hnae3_hw_error_type type;
	const char *msg;
};

static inline int ring_space(struct hns3_enet_ring *ring)
{
	/* This smp_load_acquire() pairs with smp_store_release() in
	 * hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring.
	 */
	int begin = smp_load_acquire(&ring->next_to_clean);
	int end = READ_ONCE(ring->next_to_use);

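	/* keep one descriptor slot unused so that a completely full ring
	 * can be told apart from an empty one (see is_ring_empty())
	 */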
	return ((end >= begin) ? (ring->desc_num - end + begin) :
			(begin - end)) - 1;
}

static inline int is_ring_empty(struct hns3_enet_ring *ring)
{
	return ring->next_to_use == ring->next_to_clean;
}

static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
{
	return readl(base + reg);
}

static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
{
	u8 __iomem *reg_addr = READ_ONCE(base);

	writel(value, reg_addr + reg);
}

#define hns3_read_dev(a, reg) \
	hns3_read_reg((a)->io_base, (reg))

static inline bool hns3_nic_resetting(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
}

#define hns3_write_dev(a, reg, value) \
	hns3_write_reg((a)->io_base, (reg), (value))

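/* ring the TX doorbell: report how many new BDs have been posted by
 * writing that count to the ring's tail register
 */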
#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
		(tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)

#define ring_to_dev(ring) ((ring)->dev)

#define ring_to_netdev(ring)	((ring)->tqp_vector->napi.dev)

#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
	DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define hns3_buf_size(_ring) ((_ring)->buf_size)

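/* on systems with pages smaller than 8K, an rx buffer bigger than half
 * a page is backed by an order-1 (two page) allocation, which leaves
 * room for the half-buffer page reuse scheme hinted at by
 * desc_cb->reuse_flag/page_offset
 */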
static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->buf_size > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring))

/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
	for (pos = (head).ring; pos; pos = pos->next)

#define hns3_get_handle(ndev) \
	(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)

#define hns3_gl_usec_to_reg(int_gl) (int_gl >> 1)
#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)

#define hns3_rl_usec_to_reg(int_rl) (int_rl >> 2)
#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)
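/* one GL register step appears to correspond to 2 usec and one RL step
 * to 4 usec, hence the shifts above and the matching round_down()
 * granularity
 */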

void hns3_ethtool_set_ops(struct net_device *netdev);
int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch);

void hns3_clean_tx_ring(struct hns3_enet_ring *ring);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
void hns3_fini_ring(struct hns3_enet_ring *ring);
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
bool hns3_is_phys_func(struct pci_dev *pdev);
int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
		       void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value);
void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value);
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value);

void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags);
void hns3_request_update_promisc_mode(struct hnae3_handle *handle);

#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
#else
static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {}
#endif

void hns3_dbg_init(struct hnae3_handle *handle);
void hns3_dbg_uninit(struct hnae3_handle *handle);
void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
void hns3_dbg_unregister_debugfs(void);
void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);
#endif
625