xref: /linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h (revision 24bce201d79807b668bf9d9e0aca801c5c0d5f78)
1 /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
2 /*
3  * Copyright 2008 - 2016 Freescale Semiconductor Inc.
4  */
5 
6 #ifndef __DPAA_H
7 #define __DPAA_H
8 
9 #include <linux/netdevice.h>
10 #include <linux/refcount.h>
11 #include <soc/fsl/qman.h>
12 #include <soc/fsl/bman.h>
13 
14 #include "fman.h"
15 #include "mac.h"
16 #include "dpaa_eth_trace.h"
17 
18 /* Number of prioritised traffic classes */
19 #define DPAA_TC_NUM		4
20 /* Number of Tx queues per traffic class */
21 #define DPAA_TC_TXQ_NUM		NR_CPUS
22 /* Total number of Tx queues */
23 #define DPAA_ETH_TXQ_NUM	(DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
24 
/* More detailed FQ types - used for fine-grained WQ assignments */
enum dpaa_fq_type {
	FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
	FQ_TYPE_RX_ERROR,	/* Rx Error FQs */
	FQ_TYPE_RX_PCD,		/* Rx Parse Classify Distribute FQs */
	FQ_TYPE_TX,		/* "Real" Tx FQs */
	FQ_TYPE_TX_CONFIRM,	/* Tx default Conf FQ (actually an Rx FQ) */
	FQ_TYPE_TX_CONF_MQ,	/* Tx conf FQs (one for each Tx FQ) */
	FQ_TYPE_TX_ERROR,	/* Tx Error FQs (these are actually Rx FQs) */
};
35 
/* Driver-side state for one QMan frame queue.
 * Embeds the qman_fq object first, so a qman_fq pointer handed back by the
 * QMan API can be converted to the enclosing dpaa_fq
 * (NOTE(review): presumably via container_of - confirm in dpaa_eth.c).
 */
struct dpaa_fq {
	struct qman_fq fq_base;		/* embedded QMan FQ; must stay first */
	struct list_head list;		/* linkage, presumably into
					 * dpaa_priv::dpaa_fq_list
					 */
	struct net_device *net_dev;	/* owning net device */
	bool init;			/* set once the FQ has been initialized */
	u32 fqid;			/* frame queue ID */
	u32 flags;			/* FQ creation/init flags */
	u16 channel;			/* QMan channel the FQ is scheduled on */
	u8 wq;				/* work queue (priority) within channel */
	enum dpaa_fq_type fq_type;	/* role of this FQ, see dpaa_fq_type */
	struct xdp_rxq_info xdp_rxq;	/* XDP Rx queue info (for Rx FQs) */
};
48 
/* Template qman_fq objects, one per FQ class, carrying the callbacks
 * (DQRR/ERN handlers) to be copied into each dpaa_fq of that class.
 * NOTE(review): the "template" use is inferred from the naming - confirm
 * against the FQ setup code in dpaa_eth.c.
 */
struct dpaa_fq_cbs {
	struct qman_fq rx_defq;		/* Rx default FQs */
	struct qman_fq tx_defq;		/* Tx confirmation default FQ */
	struct qman_fq rx_errq;		/* Rx error FQs */
	struct qman_fq tx_errq;		/* Tx error FQs */
	struct qman_fq egress_ern;	/* egress Enqueue Rejection Notifications */
};
56 
57 struct dpaa_priv;
58 
/* Driver-side state for one BMan buffer pool */
struct dpaa_bp {
	/* used in the DMA mapping operations */
	struct dpaa_priv *priv;
	/* current number of buffers in the buffer pool allotted to each CPU */
	int __percpu *percpu_count;
	/* all buffers allocated for this pool have this raw size */
	size_t raw_size;
	/* all buffers in this pool have this same usable size */
	size_t size;
	/* the buffer pools are initialized with config_count buffers for each
	 * CPU; at runtime the number of buffers per CPU is constantly brought
	 * back to this level
	 */
	u16 config_count;
	/* BMan buffer pool ID */
	u8 bpid;
	/* underlying BMan pool object */
	struct bman_pool *pool;
	/* bpool can be seeded before use by this cb */
	int (*seed_cb)(struct dpaa_bp *);
	/* bpool can be emptied before freeing by this cb */
	void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *);
	/* reference count; the pool may be shared between users */
	refcount_t refs;
};
81 
/* Counters for Rx error frames, broken down by error type */
struct dpaa_rx_errors {
	u64 dme;		/* DMA Error */
	u64 fpe;		/* Frame Physical Error */
	u64 fse;		/* Frame Size Error */
	u64 phe;		/* Header Error */
};
88 
/* Counters for QMan ERN frames - one counter per rejection code */
struct dpaa_ern_cnt {
	u64 cg_tdrop;		/* Congestion group taildrop */
	u64 wred;		/* WRED congestion */
	u64 err_cond;		/* Error condition */
	u64 early_window;	/* Order restoration, frame too early */
	u64 late_window;	/* Order restoration, frame too late */
	u64 fq_tdrop;		/* FQ taildrop */
	u64 fq_retired;		/* FQ is retired */
	u64 orp_zero;		/* ORP disabled */
};
100 
/* NAPI context tied to one QMan portal */
struct dpaa_napi_portal {
	struct napi_struct napi;	/* NAPI instance for this portal */
	struct qman_portal *p;		/* the QMan portal being polled */
	bool down;			/* NOTE(review): presumably set while the
					 * portal/NAPI is disabled - confirm in
					 * dpaa_eth.c
					 */
	int xdp_act;			/* XDP action accumulator for the current
					 * poll cycle (exact semantics defined by
					 * the Rx path in dpaa_eth.c)
					 */
};
107 
/* Per-CPU driver state: NAPI context plus per-CPU statistics */
struct dpaa_percpu_priv {
	struct net_device *net_dev;	/* owning net device */
	struct dpaa_napi_portal np;	/* per-CPU NAPI/portal context */
	u64 in_interrupt;		/* counter, incremented on the IRQ path */
	u64 tx_confirm;			/* number of Tx confirmations processed */
	/* fragmented (non-linear) skbuffs received from the stack */
	u64 tx_frag_skbuffs;
	struct rtnl_link_stats64 stats;	/* standard netdev stats, per CPU */
	struct dpaa_rx_errors rx_errors; /* Rx error breakdown */
	struct dpaa_ern_cnt ern_cnt;	/* egress rejection breakdown */
};
119 
/* Frame buffer layout parameters (see buf_layout[] in struct dpaa_priv,
 * indexed per direction)
 */
struct dpaa_buffer_layout {
	u16 priv_data_size;	/* bytes reserved for driver private data at the
				 * start of the buffer
				 */
};
123 
/* Information to be used on the Tx confirmation path. Stored just
 * before the start of the transmit buffer. Maximum size allowed
 * is DPAA_TX_PRIV_DATA_SIZE bytes.
 */
struct dpaa_eth_swbp {
	struct sk_buff *skb;	/* originating skb, for stack-originated frames */
	struct xdp_frame *xdpf;	/* originating XDP frame, for XDP_TX/redirect
				 * frames
				 */
};
132 
/* Per-interface driver private data, reached from the net_device */
struct dpaa_priv {
	struct dpaa_percpu_priv __percpu *percpu_priv; /* per-CPU state/stats */
	struct dpaa_bp *dpaa_bp;	/* buffer pool backing this interface */
	/* Store here the needed Tx headroom for convenience and speed
	 * (even though it can be computed based on the fields of buf_layout)
	 */
	u16 tx_headroom;
	struct net_device *net_dev;	/* the netdev this private area belongs to */
	struct mac_device *mac_dev;	/* underlying FMan MAC device */
	struct device *rx_dma_dev;	/* device used for Rx DMA mapping */
	struct device *tx_dma_dev;	/* device used for Tx DMA mapping */
	/* one egress FQ and one conf FQ per (traffic class, CPU) pair */
	struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
	struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];

	u16 channel;			/* "fsl,qman-channel-id" */
	struct list_head dpaa_fq_list;	/* all dpaa_fq objects of this interface */

	u8 num_tc;			/* number of traffic classes in use */
	bool keygen_in_use;		/* FMan KeyGen (Rx hashing/PCD) active */
	u32 msg_enable;	/* net_device message level */

	struct {
		/* All egress queues to a given net device belong to one
		 * (and the same) congestion group.
		 */
		struct qman_cgr cgr;
		/* If congested, when it began. Used for performance stats. */
		u32 congestion_start_jiffies;
		/* Number of jiffies the Tx port was congested. */
		u32 congested_jiffies;
		/* Counter for the number of times the CGR
		 * entered congestion state
		 */
		u32 cgr_congested_count;
	} cgr_data;
	/* Use a per-port CGR for ingress traffic. */
	bool use_ingress_cgr;
	struct qman_cgr ingress_cgr;	/* the ingress congestion group itself */

	/* buffer layout per direction; NOTE(review): presumably indexed by an
	 * Rx/Tx constant defined in dpaa_eth.c - confirm there
	 */
	struct dpaa_buffer_layout buf_layout[2];
	u16 rx_headroom;		/* headroom required in Rx buffers */

	bool tx_tstamp; /* Tx timestamping enabled */
	bool rx_tstamp; /* Rx timestamping enabled */

	struct bpf_prog *xdp_prog;	/* attached XDP program, or NULL */
};
180 
181 /* from dpaa_ethtool.c */
182 extern const struct ethtool_ops dpaa_ethtool_ops;
183 
184 /* from dpaa_eth_sysfs.c */
185 void dpaa_eth_sysfs_remove(struct device *dev);
186 void dpaa_eth_sysfs_init(struct device *dev);
187 #endif	/* __DPAA_H */
188