xref: /linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
 * Copyright 2008 - 2016 Freescale Semiconductor Inc.
 */

#ifndef __DPAA_H
#define __DPAA_H

#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <net/xdp.h>
#include <soc/fsl/qman.h>
#include <soc/fsl/bman.h>

#include "fman.h"
#include "mac.h"
#include "dpaa_eth_trace.h"

/* Number of prioritised traffic classes */
#define DPAA_TC_NUM		4
21 
/* More detailed FQ types - used for fine-grained WQ assignments.
 * Note: FQ_TYPE_RX_DEFAULT deliberately starts at 1 so that a
 * zero-initialized dpaa_fq has no valid type.
 */
enum dpaa_fq_type {
	FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
	FQ_TYPE_RX_ERROR,	/* Rx Error FQs */
	FQ_TYPE_RX_PCD,		/* Rx Parse Classify Distribute FQs */
	FQ_TYPE_TX,		/* "Real" Tx FQs */
	FQ_TYPE_TX_CONFIRM,	/* Tx default Conf FQ (actually an Rx FQ) */
	FQ_TYPE_TX_CONF_MQ,	/* Tx conf FQs (one for each Tx FQ) */
	FQ_TYPE_TX_ERROR,	/* Tx Error FQs (these are actually Rx FQs) */
};
32 
/* Driver-side frame queue descriptor: wraps the QMan FQ object and the
 * bookkeeping needed to create, track and tear down the queue.
 */
struct dpaa_fq {
	/* Must stay the first member so a qman_fq pointer received in a
	 * callback can be converted back to the dpaa_fq
	 * (NOTE(review): container_of usage assumed — confirm in dpaa_eth.c).
	 */
	struct qman_fq fq_base;
	struct list_head list;		/* linkage into dpaa_priv::dpaa_fq_list */
	struct net_device *net_dev;	/* owning net device */
	bool init;			/* true once the FQ has been initialized */
	u32 fqid;			/* frame queue id */
	u32 flags;			/* QMan FQ flags used at init time */
	u16 channel;			/* QMan channel the FQ is scheduled to */
	u8 wq;				/* work queue within the channel */
	enum dpaa_fq_type fq_type;	/* role of this FQ (see enum above) */
	struct xdp_rxq_info xdp_rxq;	/* XDP Rx queue info (Rx FQs only) */
};
45 
/* Template qman_fq objects, one per FQ role, carrying the callbacks to be
 * installed on the corresponding frame queues
 * (NOTE(review): presumably copied into each dpaa_fq at setup — confirm
 * against dpaa_eth.c).
 */
struct dpaa_fq_cbs {
	struct qman_fq rx_defq;		/* Rx default */
	struct qman_fq tx_defq;		/* Tx confirmation (default) */
	struct qman_fq rx_errq;		/* Rx error */
	struct qman_fq tx_errq;		/* Tx error */
	struct qman_fq egress_ern;	/* egress rejection (ERN) */
};
53 
struct dpaa_priv;
55 
/* Driver-side descriptor of a BMan buffer pool and its refill policy. */
struct dpaa_bp {
	/* used in the DMA mapping operations */
	struct dpaa_priv *priv;
	/* current number of buffers in the buffer pool alloted to each CPU */
	int __percpu *percpu_count;
	/* all buffers allocated for this pool have this raw size */
	size_t raw_size;
	/* all buffers in this pool have this same usable size */
	size_t size;
	/* the buffer pools are initialized with config_count buffers for each
	 * CPU; at runtime the number of buffers per CPU is constantly brought
	 * back to this level
	 */
	u16 config_count;
	/* BMan buffer pool id */
	u8 bpid;
	/* underlying BMan pool object */
	struct bman_pool *pool;
	/* bpool can be seeded before use by this cb */
	int (*seed_cb)(struct dpaa_bp *);
	/* bpool can be emptied before freeing by this cb */
	void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *);
	/* reference count; the pool is freed when this drops to zero */
	refcount_t refs;
};
78 
/* Per-CPU Rx error counters, one per FMan-reported error type. */
struct dpaa_rx_errors {
	u64 dme;		/* DMA Error */
	u64 fpe;		/* Frame Physical Error */
	u64 fse;		/* Frame Size Error */
	u64 phe;		/* Header Error */
};
85 
/* Counters for QMan ERN frames - one counter per rejection code */
struct dpaa_ern_cnt {
	u64 cg_tdrop;		/* Congestion group taildrop */
	u64 wred;		/* WRED congestion */
	u64 err_cond;		/* Error condition */
	u64 early_window;	/* Order restoration, frame too early */
	u64 late_window;	/* Order restoration, frame too late */
	u64 fq_tdrop;		/* FQ taildrop */
	u64 fq_retired;		/* FQ is retired */
	u64 orp_zero;		/* ORP disabled */
};
97 
/* NAPI context tied to a QMan portal (one instance per CPU, embedded in
 * dpaa_percpu_priv).
 */
struct dpaa_napi_portal {
	struct napi_struct napi;	/* NAPI instance for this portal */
	struct qman_portal *p;		/* the QMan portal being polled */
	bool down;			/* NOTE(review): presumably set while the
					 * interface/portal is disabled — confirm
					 * in dpaa_eth.c
					 */
	int xdp_act;			/* XDP verdict bookkeeping for the current
					 * poll cycle (NOTE(review): exact
					 * semantics — confirm in dpaa_eth.c)
					 */
};
104 
/* Per-CPU private data: NAPI context plus per-CPU statistics. */
struct dpaa_percpu_priv {
	struct net_device *net_dev;	/* owning net device */
	struct dpaa_napi_portal np;	/* per-CPU NAPI/portal context */
	u64 in_interrupt;		/* counter (per-CPU stat) */
	u64 tx_confirm;			/* Tx confirmations processed */
	/* fragmented (non-linear) skbuffs received from the stack */
	u64 tx_frag_skbuffs;
	struct rtnl_link_stats64 stats;	/* standard netdev stats, per CPU */
	struct dpaa_rx_errors rx_errors;	/* Rx error breakdown */
	struct dpaa_ern_cnt ern_cnt;	/* egress rejection breakdown */
};
116 
/* Describes the layout of a frame buffer; currently only the size of the
 * private data area stored ahead of the frame.
 */
struct dpaa_buffer_layout {
	u16 priv_data_size;	/* bytes reserved before the frame start */
};
120 
/* Information to be used on the Tx confirmation path. Stored just
 * before the start of the transmit buffer. Maximum size allowed
 * is DPAA_TX_PRIV_DATA_SIZE bytes.
 */
struct dpaa_eth_swbp {
	struct sk_buff *skb;	/* set for skb-originated frames */
	struct xdp_frame *xdpf;	/* set for XDP-originated frames */
};
129 
/* Per-netdevice driver private data. */
struct dpaa_priv {
	struct dpaa_percpu_priv __percpu *percpu_priv;
	/* the buffer pool used by this interface */
	struct dpaa_bp *dpaa_bp;
	/* Store here the needed Tx headroom for convenience and speed
	 * (even though it can be computed based on the fields of buf_layout)
	 */
	u16 tx_headroom;
	struct net_device *net_dev;
	struct mac_device *mac_dev;
	/* devices used for DMA mapping of Rx and Tx buffers, respectively */
	struct device *rx_dma_dev;
	struct device *tx_dma_dev;
	/* per-queue pointers to the Tx FQs and their confirmation FQs
	 * (NOTE(review): array sizing/indexing assumed — see dpaa_eth.c)
	 */
	struct qman_fq **egress_fqs;
	struct qman_fq **conf_fqs;

	/* QMan channel used by this interface's Rx FQs */
	u16 channel;
	/* list of all dpaa_fq structures belonging to this net device */
	struct list_head dpaa_fq_list;

	/* number of traffic classes in use (at most DPAA_TC_NUM) */
	u8 num_tc;
	/* true while the FMan KeyGen scheme is active for Rx distribution */
	bool keygen_in_use;
	u32 msg_enable;	/* net_device message level */

	struct {
		/* All egress queues to a given net device belong to one
		 * (and the same) congestion group.
		 */
		struct qman_cgr cgr;
		/* If congested, when it began. Used for performance stats. */
		u32 congestion_start_jiffies;
		/* Number of jiffies the Tx port was congested. */
		u32 congested_jiffies;
		/* Counter for the number of times the CGR
		 * entered congestion state
		 */
		u32 cgr_congested_count;
	} cgr_data;
	/* Use a per-port CGR for ingress traffic. */
	bool use_ingress_cgr;
	struct qman_cgr ingress_cgr;

	/* one layout per direction (NOTE(review): presumably indexed by an
	 * RX/TX constant — confirm against dpaa_eth.c)
	 */
	struct dpaa_buffer_layout buf_layout[2];
	u16 rx_headroom;	/* headroom reserved in Rx buffers */

	bool tx_tstamp; /* Tx timestamping enabled */
	bool rx_tstamp; /* Rx timestamping enabled */

	/* attached XDP program, or NULL */
	struct bpf_prog *xdp_prog;
};
177 
/* from dpaa_ethtool.c */
extern const struct ethtool_ops dpaa_ethtool_ops;

/* from dpaa_eth_sysfs.c */
void dpaa_eth_sysfs_remove(struct device *dev);
void dpaa_eth_sysfs_init(struct device *dev);
184 
dpaa_num_txqs_per_tc(void)185 static inline size_t dpaa_num_txqs_per_tc(void)
186 {
187 	return num_possible_cpus();
188 }
189 
190 /* Total number of Tx queues */
dpaa_max_num_txqs(void)191 static inline size_t dpaa_max_num_txqs(void)
192 {
193 	return DPAA_TC_NUM * dpaa_num_txqs_per_tc();
194 }
195 
#endif	/* __DPAA_H */
197