xref: /linux/drivers/net/ethernet/freescale/enetc/enetc.h (revision fcee7d82f27d6a8b1ddc5bbefda59b4e441e9bc0)
1 /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
2 /* Copyright 2017-2019 NXP */
3 
4 #include <linux/timer.h>
5 #include <linux/pci.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/skbuff.h>
10 #include <linux/ethtool.h>
11 #include <linux/fsl/ntmp.h>
12 #include <linux/if_vlan.h>
13 #include <linux/phylink.h>
14 #include <linux/dim.h>
15 #include <net/xdp.h>
16 
17 #include "enetc_hw.h"
18 #include "enetc4_hw.h"
19 
20 #define ENETC_MAC_MAXFRM_SIZE	9600
21 #define ENETC_MAX_MTU		(ENETC_MAC_MAXFRM_SIZE - \
22 				(ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN))
23 
24 #define ENETC_CBD_DATA_MEM_ALIGN 64
25 
26 #define ENETC_MADDR_HASH_TBL_SZ	64
27 
28 enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
29 
/* Aggregated MAC address filter state for one address type (UC or MC) */
struct enetc_mac_filter {
	union {
		char mac_addr[ETH_ALEN];	/* exact MAC address */
		DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ); /* hash filter bitmap */
	};
	int mac_addr_cnt;	/* number of addresses folded into this filter */
};
37 
/* Tx software BD: driver-side bookkeeping mirroring one hardware Tx BD */
struct enetc_tx_swbd {
	union {
		struct sk_buff *skb;		/* regular stack transmission */
		struct xdp_frame *xdp_frame;	/* XDP frame transmission */
	};
	dma_addr_t dma;		/* mapped buffer address, unmapped at cleanup */
	struct page *page;	/* valid only if is_xdp_tx */
	u16 page_offset;	/* valid only if is_xdp_tx */
	u16 len;		/* mapped length */
	enum dma_data_direction dir;	/* direction to use for the DMA unmap */
	u8 is_dma_page:1;	/* presumably selects page vs single unmap — TODO confirm */
	u8 check_wb:1;		/* descriptor writeback must be checked at cleanup */
	u8 do_twostep_tstamp:1;	/* two-step PTP timestamp requested for this skb */
	u8 is_eof:1;		/* last BD of the frame */
	u8 is_xdp_tx:1;		/* buffer belongs to an XDP_TX action */
	u8 is_xdp_redirect:1;	/* buffer belongs to an XDP_REDIRECT action */
	u8 qbv_en:1;		/* NOTE(review): appears to latch Qbv state at enqueue — confirm */
};
56 
/* Per-skb private data stored in skb->cb (see ENETC_SKB_CB below);
 * offsets presumably locate timestamp fields in the packet — TODO confirm
 * against the one-step timestamping Tx path.
 */
struct enetc_skb_cb {
	u8 flag;
	bool udp;		/* transport is UDP (affects checksum/tstamp handling) */
	u16 correction_off;	/* byte offset of the correction field */
	u16 origin_tstamp_off;	/* byte offset of the origin timestamp field */
};
63 
64 #define ENETC_SKB_CB(skb) ((struct enetc_skb_cb *)((skb)->cb))
65 
/* Parsed packet layout driving one large-send-offload (LSO) transmission */
struct enetc_lso_t {
	bool	ipv6;		/* L3 protocol is IPv6 */
	bool	tcp;		/* L4 protocol is TCP */
	u8	l3_hdr_len;	/* L3 header length */
	u8	hdr_len; /* LSO header length */
	u8	l3_start;	/* byte offset where the L3 header starts */
	u16	lso_seg_size;	/* payload size of each LSO segment */
	int	total_len; /* total data length, not include LSO header */
};
75 
76 #define ENETC_LSO_MAX_DATA_LEN		SZ_256K
77 
78 #define ENETC_RX_MAXFRM_SIZE	ENETC_MAC_MAXFRM_SIZE
79 #define ENETC_RXB_TRUESIZE	(PAGE_SIZE >> 1)
80 #define ENETC_RXB_PAD		NET_SKB_PAD /* add extra space if needed */
81 #define ENETC_RXB_DMA_SIZE	\
82 	min(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD, 0xffff)
83 #define ENETC_RXB_DMA_SIZE_XDP	\
84 	min(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - XDP_PACKET_HEADROOM, 0xffff)
85 
/* Rx software BD: tracks the page half backing one hardware Rx BD */
struct enetc_rx_swbd {
	dma_addr_t dma;		/* mapped page address */
	struct page *page;	/* backing page (shared between two BDs, see ENETC_RXB_TRUESIZE) */
	u16 page_offset;	/* offset of this buffer within the page */
	enum dma_data_direction dir;	/* direction used for DMA sync/unmap */
	u16 len;		/* bytes received into this buffer */
};
93 
94 /* ENETC overhead: optional extension BD + 1 BD gap */
95 #define ENETC_TXBDS_NEEDED(val)	((val) + 2)
96 /* For LS1028A, max # of chained Tx BDs is 15, including head and
97  * extension BD.
98  */
99 #define ENETC_MAX_SKB_FRAGS	13
100 /* For ENETC v4 and later versions, max # of chained Tx BDs is 63,
101  * including head and extension BD, but the range of MAX_SKB_FRAGS
102  * is 17 ~ 45, so set ENETC4_MAX_SKB_FRAGS to MAX_SKB_FRAGS.
103  */
104 #define ENETC4_MAX_SKB_FRAGS		MAX_SKB_FRAGS
105 #define ENETC_TXBDS_MAX_NEEDED(x)	ENETC_TXBDS_NEEDED((x) + 1)
106 
/* Per-ring software counters, exported via ethtool */
struct enetc_ring_stats {
	unsigned long packets;
	unsigned long bytes;
	unsigned long rx_alloc_errs;	/* Rx buffer allocation failures */
	unsigned long xdp_drops;	/* frames dropped by XDP verdict */
	unsigned long xdp_tx;
	unsigned long xdp_tx_drops;
	unsigned long xdp_redirect;
	unsigned long xdp_redirect_failures;
	unsigned long recycles;		/* Rx pages recycled back to the ring */
	unsigned long recycle_failures;
	unsigned long win_drop;		/* Qbv time-window drop count — TODO confirm */
};
120 
/* Per-Rx-ring XDP state */
struct enetc_xdp_data {
	struct xdp_rxq_info rxq;	/* registered XDP Rx queue info */
	struct bpf_prog *prog;		/* attached XDP program, NULL if none */
	int xdp_tx_in_flight;		/* XDP_TX buffers queued but not yet cleaned */
};
126 
127 #define ENETC_RX_RING_DEFAULT_SIZE	2048
128 #define ENETC_TX_RING_DEFAULT_SIZE	2048
129 #define ENETC_DEFAULT_TX_WORK		(ENETC_TX_RING_DEFAULT_SIZE / 2)
130 
/* Allocated backing memory of a BD ring, kept separate from the live ring
 * so resources can be allocated/freed independently (e.g. on reconfig).
 */
struct enetc_bdr_resource {
	/* Input arguments saved for teardown */
	struct device *dev; /* for DMA mapping */
	size_t bd_count;
	size_t bd_size;

	/* Resource proper */
	void *bd_base; /* points to Rx or Tx BD ring */
	dma_addr_t bd_dma_base;
	union {
		struct enetc_tx_swbd *tx_swbd;
		struct enetc_rx_swbd *rx_swbd;
	};
	char *tso_headers;
	dma_addr_t tso_headers_dma;
};
147 
/* Rx or Tx buffer descriptor ring (one per hardware queue) */
struct enetc_bdr {
	struct device *dev; /* for DMA mapping */
	struct net_device *ndev;
	void *bd_base; /* points to Rx or Tx BD ring */
	union {
		void __iomem *tpir;	/* Tx producer index register */
		void __iomem *rcir;	/* Rx consumer index register */
	};
	u16 index;	/* ring index within the SI */
	u16 prio;	/* priority/TC mapped to this ring — TODO confirm */
	int bd_count; /* # of BDs */
	int next_to_use;	/* producer position */
	int next_to_clean;	/* consumer (cleanup) position */
	union {
		struct enetc_tx_swbd *tx_swbd;
		struct enetc_rx_swbd *rx_swbd;
	};
	union {
		void __iomem *tcir; /* Tx */
		int next_to_alloc; /* Rx */
	};
	void __iomem *idr; /* Interrupt Detect Register pointer */

	/* Rx buffer headroom: ENETC_RXB_PAD or XDP_PACKET_HEADROOM */
	int buffer_offset;
	struct enetc_xdp_data xdp;

	struct enetc_ring_stats stats;

	dma_addr_t bd_dma_base;
	u8 tsd_enable; /* Time specific departure */
	bool ext_en; /* enable h/w descriptor extensions */

	/* DMA buffer for TSO headers */
	char *tso_headers;
	dma_addr_t tso_headers_dma;
} ____cacheline_aligned_in_smp;
184 
enetc_bdr_idx_inc(struct enetc_bdr * bdr,int * i)185 static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
186 {
187 	if (unlikely(++*i == bdr->bd_count))
188 		*i = 0;
189 }
190 
enetc_bd_unused(struct enetc_bdr * bdr)191 static inline int enetc_bd_unused(struct enetc_bdr *bdr)
192 {
193 	if (bdr->next_to_clean > bdr->next_to_use)
194 		return bdr->next_to_clean - bdr->next_to_use - 1;
195 
196 	return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1;
197 }
198 
enetc_swbd_unused(struct enetc_bdr * bdr)199 static inline int enetc_swbd_unused(struct enetc_bdr *bdr)
200 {
201 	if (bdr->next_to_clean > bdr->next_to_alloc)
202 		return bdr->next_to_clean - bdr->next_to_alloc - 1;
203 
204 	return bdr->bd_count + bdr->next_to_clean - bdr->next_to_alloc - 1;
205 }
206 
207 /* Control BD ring */
208 #define ENETC_CBDR_DEFAULT_SIZE	64
struct enetc_cbdr {
	void *bd_base; /* points to Rx or Tx BD ring */
	void __iomem *pir;	/* producer index register */
	void __iomem *cir;	/* consumer index register */
	void __iomem *mr; /* mode register */

	int bd_count; /* # of BDs */
	int next_to_use;
	int next_to_clean;

	dma_addr_t bd_dma_base;
	struct device *dma_dev;	/* device used for BD/data DMA allocations */
};
222 
223 #define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i]))
224 
enetc_rxbd(struct enetc_bdr * rx_ring,int i)225 static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i)
226 {
227 	int hw_idx = i;
228 
229 	if (rx_ring->ext_en)
230 		hw_idx = 2 * i;
231 
232 	return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]);
233 }
234 
enetc_rxbd_next(struct enetc_bdr * rx_ring,union enetc_rx_bd ** old_rxbd,int * old_index)235 static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring,
236 				   union enetc_rx_bd **old_rxbd, int *old_index)
237 {
238 	union enetc_rx_bd *new_rxbd = *old_rxbd;
239 	int new_index = *old_index;
240 
241 	new_rxbd++;
242 
243 	if (rx_ring->ext_en)
244 		new_rxbd++;
245 
246 	if (unlikely(++new_index == rx_ring->bd_count)) {
247 		new_rxbd = rx_ring->bd_base;
248 		new_index = 0;
249 	}
250 
251 	*old_rxbd = new_rxbd;
252 	*old_index = new_index;
253 }
254 
enetc_rxbd_ext(union enetc_rx_bd * rxbd)255 static inline union enetc_rx_bd *enetc_rxbd_ext(union enetc_rx_bd *rxbd)
256 {
257 	return ++rxbd;
258 }
259 
/* DMA buffer used for VF-PF message exchange */
struct enetc_msg_swbd {
	void *vaddr;		/* CPU address of the message buffer */
	dma_addr_t dma;		/* DMA address handed to hardware */
	int size;		/* buffer size in bytes */
};
265 
266 #define ENETC_REV1	0x1
267 #define ENETC_REV4	0x4
268 
269 enum enetc_errata {
270 	ENETC_ERR_VLAN_ISOL	= BIT(0),
271 	ENETC_ERR_UCMCSWP	= BIT(1),
272 };
273 
274 #define ENETC_SI_F_PSFP BIT(0)
275 #define ENETC_SI_F_QBV  BIT(1)
276 #define ENETC_SI_F_QBU  BIT(2)
277 #define ENETC_SI_F_LSO	BIT(3)
278 #define ENETC_SI_F_PPM	BIT(4) /* pseudo MAC */
279 
/* Per-SoC/IP-revision driver parameters, matched at probe time */
struct enetc_drvdata {
	u32 pmac_offset; /* Only valid for PSI which supports 802.1Qbu */
	u8 tx_csum:1;		/* hardware Tx checksum offload available */
	u8 max_frags;		/* max chained BDs for skb fragments */
	u64 sysclk_freq;	/* NETC system clock frequency in Hz */
	const struct ethtool_ops *eth_ops;
};
287 
/* Maps an IP revision + PCI device ID pair to its driver data */
struct enetc_platform_info {
	u16 revision;
	u16 dev_id;
	const struct enetc_drvdata *data;
};
293 
294 struct enetc_si;
295 
/*
 * This structure defines some common hooks for the ENETC PSI and VSI.
 * In addition, since the VSI only uses struct enetc_si as its private
 * driver data, this structure also defines some hooks specific to the
 * VSI. VSI-specific hooks use the "vf_*()" naming format.
 */
struct enetc_si_ops {
	/* read/write the RSS indirection table; count is the entry count */
	int (*get_rss_table)(struct enetc_si *si, u32 *table, int count);
	int (*set_rss_table)(struct enetc_si *si, const u32 *table, int count);
};
306 
307 /* PCI IEP device data */
/* PCI IEP device data */
struct enetc_si {
	struct pci_dev *pdev;
	struct enetc_hw hw;		/* mapped register blocks */
	enum enetc_errata errata;	/* active hardware errata workarounds */

	struct net_device *ndev; /* back ref. */

	union {
		struct enetc_cbdr cbd_ring; /* Only ENETC 1.0 */
		struct ntmp_user ntmp_user; /* ENETC 4.1 and later */
	};

	int num_rx_rings; /* how many rings are available in the SI */
	int num_tx_rings;
	int num_fs_entries;	/* number of flow steering entries */
	int num_rss; /* number of RSS buckets */
	unsigned short pad;
	u16 revision;		/* IP revision (ENETC_REV*) */
	int hw_features;	/* ENETC_SI_F_* capability bits */
	const struct enetc_drvdata *drvdata;
	const struct enetc_si_ops *ops;

	struct workqueue_struct *workqueue;
	struct work_struct rx_mode_task;	/* deferred Rx mode programming */
	struct dentry *debugfs_root;
	struct enetc_msg_swbd msg; /* Only valid for VSI */
};
335 
336 #define ENETC_SI_ALIGN	32
337 
is_enetc_rev1(struct enetc_si * si)338 static inline bool is_enetc_rev1(struct enetc_si *si)
339 {
340 	return si->pdev->revision == ENETC_REV1;
341 }
342 
enetc_si_priv(const struct enetc_si * si)343 static inline void *enetc_si_priv(const struct enetc_si *si)
344 {
345 	return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN);
346 }
347 
enetc_si_is_pf(struct enetc_si * si)348 static inline bool enetc_si_is_pf(struct enetc_si *si)
349 {
350 	return !!(si->hw.port);
351 }
352 
enetc_pf_to_port(struct pci_dev * pf_pdev)353 static inline int enetc_pf_to_port(struct pci_dev *pf_pdev)
354 {
355 	switch (pf_pdev->devfn) {
356 	case 0:
357 		return 0;
358 	case 1:
359 		return 1;
360 	case 2:
361 		return 2;
362 	case 6:
363 		return 3;
364 	default:
365 		return -1;
366 	}
367 }
368 
enetc_is_pseudo_mac(struct enetc_si * si)369 static inline bool enetc_is_pseudo_mac(struct enetc_si *si)
370 {
371 	return si->hw_features & ENETC_SI_F_PPM;
372 }
373 
374 #define ENETC_MAX_NUM_TXQS	8
375 #define ENETC_INT_NAME_MAX	(IFNAMSIZ + 8)
376 
/* One MSI-X interrupt vector: a NAPI context serving one Rx ring and
 * count_tx_rings Tx rings.
 */
struct enetc_int_vector {
	void __iomem *rbier;		/* Rx BD interrupt enable register */
	void __iomem *tbier_base;	/* base of Tx BD interrupt enable regs */
	void __iomem *ricr1;		/* Rx interrupt coalescing register — TODO confirm */
	unsigned long tx_rings_map;	/* bitmap of Tx ring indices served */
	int count_tx_rings;
	u32 rx_ictt;			/* Rx interrupt coalescing time threshold */
	u16 comp_cnt;
	bool rx_dim_en, rx_napi_work;	/* dynamic moderation state */
	struct napi_struct napi ____cacheline_aligned_in_smp;
	struct dim rx_dim ____cacheline_aligned_in_smp;
	char name[ENETC_INT_NAME_MAX];	/* IRQ name shown in /proc/interrupts */

	struct enetc_bdr rx_ring;
	struct enetc_bdr tx_ring[] __counted_by(count_tx_rings);
} ____cacheline_aligned_in_smp;
393 
/* One ethtool flow-steering classification rule slot */
struct enetc_cls_rule {
	struct ethtool_rx_flow_spec fs;	/* rule as passed in via ethtool */
	int used;			/* non-zero when the slot is occupied */
};
398 
399 #define ENETC_MAX_BDR_INT	6 /* fixed to max # of available cpus */
/* PSFP (802.1Qci) hardware capability limits, read from capability regs */
struct psfp_cap {
	u32 max_streamid;	/* stream identification entries */
	u32 max_psfp_filter;	/* stream filter entries */
	u32 max_psfp_gate;	/* stream gate instances */
	u32 max_psfp_gatelist;	/* gate control list length */
	u32 max_psfp_meter;	/* flow meter instances */
};
407 
408 #define ENETC_F_TX_TSTAMP_MASK	0xff
409 enum enetc_active_offloads {
410 	/* 8 bits reserved for TX timestamp types (hwtstamp_tx_types) */
411 	ENETC_F_TX_TSTAMP		= BIT(0),
412 	ENETC_F_TX_ONESTEP_SYNC_TSTAMP	= BIT(1),
413 
414 	ENETC_F_RX_TSTAMP		= BIT(8),
415 	ENETC_F_QBV			= BIT(9),
416 	ENETC_F_QCI			= BIT(10),
417 	ENETC_F_QBU			= BIT(11),
418 	ENETC_F_TXCSUM			= BIT(12),
419 	ENETC_F_LSO			= BIT(13),
420 };
421 
422 enum enetc_flags_bit {
423 	ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0,
424 	ENETC_TX_DOWN,
425 };
426 
427 /* interrupt coalescing modes */
428 enum enetc_ic_mode {
429 	/* one interrupt per frame */
430 	ENETC_IC_NONE = 0,
431 	/* activated when int coalescing time is set to a non-0 value */
432 	ENETC_IC_RX_MANUAL = BIT(0),
433 	ENETC_IC_TX_MANUAL = BIT(1),
434 	/* use dynamic interrupt moderation */
435 	ENETC_IC_RX_ADAPTIVE = BIT(2),
436 };
437 
438 #define ENETC_RXIC_PKTTHR	min_t(u32, 256, ENETC_RX_RING_DEFAULT_SIZE / 2)
439 #define ENETC_TXIC_PKTTHR	min_t(u32, 128, ENETC_TX_RING_DEFAULT_SIZE / 2)
440 
/* Per-netdev private driver state (netdev_priv(ndev)) */
struct enetc_ndev_priv {
	struct net_device *ndev;
	struct device *dev; /* dma-mapping device */
	struct enetc_si *si;

	int bdr_int_num; /* number of Rx/Tx ring interrupts */
	struct enetc_int_vector *int_vector[ENETC_MAX_BDR_INT];
	u16 num_rx_rings, num_tx_rings;
	u16 rx_bd_count, tx_bd_count;	/* BDs per Rx/Tx ring */

	u16 msg_enable;		/* netif message level bitmap */

	u8 preemptible_tcs;	/* bitmap of frame-preemptible traffic classes */
	u8 max_frags; /* The maximum number of BDs for fragments */

	enum enetc_active_offloads active_offloads;

	u32 speed; /* store speed for compare update pspeed */

	struct enetc_bdr **xdp_tx_ring;	/* per-CPU view into tx_ring for XDP */
	struct enetc_bdr *tx_ring[16];
	struct enetc_bdr *rx_ring[16];
	const struct enetc_bdr_resource *tx_res;
	const struct enetc_bdr_resource *rx_res;

	struct enetc_cls_rule *cls_rules;	/* flow steering rule table */

	struct psfp_cap psfp_cap;

	/* Minimum number of TX queues required by the network stack */
	unsigned int min_num_stack_tx_queues;

	struct phylink *phylink;
	int ic_mode;		/* enum enetc_ic_mode bits */
	u32 tx_ictt;		/* Tx interrupt coalescing time threshold */

	struct bpf_prog *xdp_prog;

	unsigned long flags;	/* enum enetc_flags_bit */

	struct work_struct	tx_onestep_tstamp;	/* deferred one-step tstamp Tx */
	struct sk_buff_head	tx_skbs;		/* skbs awaiting one-step tstamp */

	/* Serialize access to MAC Merge state between ethtool requests
	 * and link state updates
	 */
	struct mutex		mm_lock;

	struct clk *ref_clk; /* RGMII/RMII reference clock */
	u64 sysclk_freq; /* NETC system clock frequency */
};
492 
493 /* Messaging */
494 
495 /* VF-PF set primary MAC address message format */
/* VF-PF set primary MAC address message format */
struct enetc_msg_cmd_set_primary_mac {
	struct enetc_msg_cmd_header header;
	struct sockaddr mac;	/* new MAC address in sa_data */
};
500 
501 #define ENETC_CBD(R, i)	(&(((struct enetc_cbd *)((R).bd_base))[i]))
502 
503 #define ENETC_CBDR_TIMEOUT	1000 /* usecs */
504 
505 /* SI common */
506 u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg);
507 void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val);
508 int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv);
509 void enetc_pci_remove(struct pci_dev *pdev);
510 int enetc_alloc_msix(struct enetc_ndev_priv *priv);
511 void enetc_free_msix(struct enetc_ndev_priv *priv);
512 void enetc_get_si_caps(struct enetc_si *si);
513 void enetc_init_si_rings_params(struct enetc_ndev_priv *priv);
514 int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
515 void enetc_free_si_resources(struct enetc_ndev_priv *priv);
516 int enetc_configure_si(struct enetc_ndev_priv *priv);
517 int enetc_get_driver_data(struct enetc_si *si);
518 void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
519 				  const unsigned char *addr);
520 void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter);
521 
522 int enetc_open(struct net_device *ndev);
523 int enetc_close(struct net_device *ndev);
524 void enetc_start(struct net_device *ndev);
525 void enetc_stop(struct net_device *ndev);
526 netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
527 struct net_device_stats *enetc_get_stats(struct net_device *ndev);
528 void enetc_set_features(struct net_device *ndev, netdev_features_t features);
529 int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
530 int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data);
531 void enetc_reset_tc_mqprio(struct net_device *ndev);
532 int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
533 int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
534 		   struct xdp_frame **frames, u32 flags);
535 
536 int enetc_hwtstamp_get(struct net_device *ndev,
537 		       struct kernel_hwtstamp_config *config);
538 int enetc_hwtstamp_set(struct net_device *ndev,
539 		       struct kernel_hwtstamp_config *config,
540 		       struct netlink_ext_ack *extack);
541 
542 /* ethtool */
543 extern const struct ethtool_ops enetc_pf_ethtool_ops;
544 extern const struct ethtool_ops enetc4_pf_ethtool_ops;
545 extern const struct ethtool_ops enetc_vf_ethtool_ops;
546 extern const struct ethtool_ops enetc4_ppm_ethtool_ops;
547 
548 void enetc_set_ethtool_ops(struct net_device *ndev);
549 void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link);
550 void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv);
551 
552 /* control buffer descriptor ring (CBDR) */
553 int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
554 		     struct enetc_cbdr *cbdr);
555 void enetc_teardown_cbdr(struct enetc_cbdr *cbdr);
556 int enetc4_setup_cbdr(struct enetc_si *si);
557 void enetc4_teardown_cbdr(struct enetc_si *si);
558 int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
559 			    char *mac_addr, int si_map);
560 int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
561 int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
562 		       int index);
563 void enetc_set_rss_key(struct enetc_si *si, const u8 *bytes);
564 int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
565 int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
566 int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
567 int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count);
568 int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count);
569 
/* Allocate a DMA-coherent data buffer for a control BD and program the
 * BD's address/length fields with it.
 *
 * The allocation is padded by ENETC_CBD_DATA_MEM_ALIGN so both the CPU
 * pointer (*data_align) and the DMA address written into the BD can be
 * rounded up to the 64-byte alignment the hardware requires.
 *
 * Returns the raw (unaligned) CPU address — keep it and pass it to
 * enetc_cbd_free_data_mem() on release — or NULL on allocation failure.
 * Uses si->cbd_ring, i.e. the ENETC 1.0 side of the union in enetc_si.
 */
static inline void *enetc_cbd_alloc_data_mem(struct enetc_si *si,
					     struct enetc_cbd *cbd,
					     int size, dma_addr_t *dma,
					     void **data_align)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	dma_addr_t dma_align;
	void *data;

	data = dma_alloc_coherent(ring->dma_dev,
				  size + ENETC_CBD_DATA_MEM_ALIGN,
				  dma, GFP_KERNEL);
	if (!data) {
		dev_err(ring->dma_dev, "CBD alloc data memory failed!\n");
		return NULL;
	}

	/* Round both views of the buffer up to the hardware alignment */
	dma_align = ALIGN(*dma, ENETC_CBD_DATA_MEM_ALIGN);
	*data_align = PTR_ALIGN(data, ENETC_CBD_DATA_MEM_ALIGN);

	cbd->addr[0] = cpu_to_le32(lower_32_bits(dma_align));
	cbd->addr[1] = cpu_to_le32(upper_32_bits(dma_align));
	cbd->length = cpu_to_le16(size);

	return data;
}
596 
/* Release a buffer obtained from enetc_cbd_alloc_data_mem(); @data and
 * *@dma must be the raw (unaligned) values that call returned.
 */
static inline void enetc_cbd_free_data_mem(struct enetc_si *si, int size,
					   void *data, dma_addr_t *dma)
{
	dma_free_coherent(si->cbd_ring.dma_dev,
			  size + ENETC_CBD_DATA_MEM_ALIGN, data, *dma);
}
605 
606 void enetc_reset_ptcmsdur(struct enetc_hw *hw);
607 void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *queue_max_sdu);
608 
enetc_ptp_clock_is_enabled(struct enetc_si * si)609 static inline bool enetc_ptp_clock_is_enabled(struct enetc_si *si)
610 {
611 	if (is_enetc_rev1(si))
612 		return IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK);
613 
614 	return IS_ENABLED(CONFIG_PTP_NETC_V4_TIMER);
615 }
616 
617 #ifdef CONFIG_FSL_ENETC_QOS
618 int enetc_qos_query_caps(struct net_device *ndev, void *type_data);
619 int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
620 void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed);
621 int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
622 int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data);
623 int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
624 			    void *cb_priv);
625 int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
626 int enetc_psfp_init(struct enetc_ndev_priv *priv);
627 int enetc_psfp_clean(struct enetc_ndev_priv *priv);
628 int enetc_set_psfp(struct net_device *ndev, bool en);
629 
/* Read the PSFP capability registers and cache the limits in
 * priv->psfp_cap.
 */
static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	u32 reg;

	/* Port stream identification capability */
	reg = enetc_port_rd(hw, ENETC_PSIDCAPR);
	priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK;
	/* Port stream filter capability */
	reg = enetc_port_rd(hw, ENETC_PSFCAPR);
	priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK;
	/* Port stream gate capability; the gate control list capacity is
	 * held in the upper half of the same register
	 */
	reg = enetc_port_rd(hw, ENETC_PSGCAPR);
	priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK);
	priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16;
	/* Port flow meter capability */
	reg = enetc_port_rd(hw, ENETC_PFMCAPR);
	priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK;
}
648 
/* Discover PSFP capabilities, initialize PSFP software state, then turn
 * the feature on in hardware.
 *
 * Returns 0 on success or the negative error from enetc_psfp_init();
 * on failure the hardware enable bits are left untouched.
 */
static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	int err;

	enetc_get_max_cap(priv);

	err = enetc_psfp_init(priv);
	if (err)
		return err;

	/* Read-modify-write: set PSFP enable plus the VLAN stream check bits */
	enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) |
		 ENETC_PPSFPMR_PSFPEN | ENETC_PPSFPMR_VS |
		 ENETC_PPSFPMR_PVC | ENETC_PPSFPMR_PVZC);

	return 0;
}
666 
/* Tear down PSFP software state and turn the feature off in hardware,
 * clearing the cached capability limits.
 *
 * Returns 0 on success or the negative error from enetc_psfp_clean();
 * on failure the hardware enable bits are left untouched.
 */
static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	int err;

	err = enetc_psfp_clean(priv);
	if (err)
		return err;

	/* Read-modify-write: clear PSFP enable and the VLAN stream check bits */
	enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) &
		 ~ENETC_PPSFPMR_PSFPEN & ~ENETC_PPSFPMR_VS &
		 ~ENETC_PPSFPMR_PVC & ~ENETC_PPSFPMR_PVZC);

	memset(&priv->psfp_cap, 0, sizeof(struct psfp_cap));

	return 0;
}
684 
685 #else
686 #define enetc_qos_query_caps(ndev, type_data) -EOPNOTSUPP
687 #define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
688 #define enetc_sched_speed_set(priv, speed) (void)0
689 #define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
690 #define enetc_setup_tc_txtime(ndev, type_data) -EOPNOTSUPP
691 #define enetc_setup_tc_psfp(ndev, type_data) -EOPNOTSUPP
692 #define enetc_setup_tc_block_cb NULL
693 
694 #define enetc_get_max_cap(p)		\
695 	memset(&((p)->psfp_cap), 0, sizeof(struct psfp_cap))
696 
/* PSFP support not compiled in: report success so callers proceed */
static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv)
{
	return 0;
}
701 
/* PSFP support not compiled in: nothing to tear down */
static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
{
	return 0;
}
706 
/* PSFP support not compiled in: accept the request as a no-op */
static inline int enetc_set_psfp(struct net_device *ndev, bool en)
{
	return 0;
}
711 #endif
712