/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 * Copyright 2019-2020 Xilinx Inc.
 */

#ifndef EFX_NIC_COMMON_H
#define EFX_NIC_COMMON_H

#include "net_driver.h"
#include "efx_common.h"
#include "mcdi.h"
#include "ptp.h"

enum {
	/* Revisions 0-2 were Falcon A0, A1 and B0 respectively.
	 * They are not supported by this driver but these revision numbers
	 * form part of the ethtool API for register dumping.
	 */
	EFX_REV_SIENA_A0 = 3,
	EFX_REV_HUNT_A0 = 4,
};

static inline int efx_nic_rev(struct efx_nic *efx)
{
	return efx->type->revision;
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.buf.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		  EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
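
/* Illustrative only: a caller polling for a pending event would typically
 * combine the two helpers above, roughly as efx_nic_event_present()
 * (declared further down) does:
 *
 *	efx_event_present(efx_event(channel, channel->eventq_read_ptr))
 */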

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
}

/* Report whether this TX queue would be empty for the given write_count.
 * May return a false negative.
 */
static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
					 unsigned int write_count)
{
	unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
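
/* Illustrative note (an assumption about how empty_read_count is maintained,
 * not something this header defines): empty_read_count is expected to hold
 * the read_count observed when the queue was last seen empty, with
 * EFX_EMPTY_COUNT_VALID set so that zero means "no valid snapshot".  The XOR
 * above then reports "empty" only while write_count still matches that
 * snapshot.  For example, if EFX_EMPTY_COUNT_VALID were the top bit:
 *
 *	empty_read_count = EFX_EMPTY_COUNT_VALID | 0x10
 *	write_count == 0x10  ->  true (queue still empty at this write_count)
 *	write_count == 0x12  ->  false
 */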

/* Report whether the NIC considers this TX queue empty, using
 * packet_write_count (the write count recorded for the last completable
 * doorbell push).  May return a false negative.  EF10 only, which is OK
 * because only EF10 supports PIO.
 */
static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
{
	EFX_WARN_ON_ONCE_PARANOID(!tx_queue->efx->type->option_descriptors);
	return __efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count);
}

/* Get the partner of a TX queue, seen as part of the same net core queue */
/* XXX is this a thing on EF100? */
static inline struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
{
	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
		return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
	else
		return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
}
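
/* Note (assumption about queue numbering, not guaranteed by this header):
 * the pairing above relies on the offload and non-offload TX queues of a
 * core queue being numbered adjacently, with EFX_TXQ_TYPE_OFFLOAD acting as
 * the distinguishing bit, so adding or subtracting it steps between partners.
 */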

/* Decide whether we can use TX PIO, i.e. write packet data directly into
 * a buffer on the device.  This can reduce latency at the expense of
 * throughput, so we only do this if both hardware and software TX rings
 * are empty.  This also ensures that only one packet at a time can be
 * using the PIO buffer.
 */
static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue);

	return tx_queue->piobuf && efx_nic_tx_is_empty(tx_queue) &&
	       efx_nic_tx_is_empty(partner);
}
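
/* Illustrative only: a transmit path might consult this when choosing how to
 * hand a small packet to the hardware, along the lines of
 *
 *	if (efx_nic_may_tx_pio(tx_queue))
 *		... copy the packet straight into tx_queue->piobuf ...
 *	else
 *		... fall back to ordinary DMA descriptors ...
 *
 * The actual PIO copy routine is the caller's concern, not this header's.
 */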

int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
			bool *data_mapped);

/* Decide whether to push a TX descriptor to the NIC vs merely writing
 * the doorbell.  This can reduce latency when we are adding a single
 * descriptor to an empty queue, but is otherwise pointless.  Further,
 * Falcon and Siena have hardware bugs (SF bug 33851) that may be
 * triggered if we don't check this.
 * We use the write_count from the last doorbell push to get the NIC's
 * view of the TX queue.
 */
static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
					    unsigned int write_count)
{
	bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count);

	tx_queue->empty_read_count = 0;
	return was_empty && tx_queue->write_count - write_count == 1;
}
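
/* Illustrative only (a sketch of the intended calling pattern, not a copy of
 * any particular tx_write() implementation): capture write_count before
 * filling in descriptors, then decide between a descriptor push and a plain
 * doorbell:
 *
 *	unsigned int old_write_count = tx_queue->write_count;
 *	... fill in descriptors, advancing tx_queue->write_count ...
 *	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count))
 *		... push the single new descriptor along with the doorbell ...
 *	else
 *		... just ring the doorbell ...
 */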

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
}

/* Alignment of PCIe DMA boundaries (4KB) */
#define EFX_PAGE_SIZE	4096
/* Size and alignment of buffer table entries (same) */
#define EFX_BUF_SIZE	EFX_PAGE_SIZE

/* NIC-generic software stats */
enum {
	GENERIC_STAT_rx_noskb_drops,
	GENERIC_STAT_rx_nodesc_trunc,
	GENERIC_STAT_COUNT
};

#define EFX_GENERIC_SW_STAT(ext_name)				\
	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
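
/* Illustrative only: a NIC type's stat descriptor table would use this macro
 * to name the generic software stats alongside its hardware stats, e.g.
 *
 *	static const struct efx_hw_stat_desc example_stat_desc[] = {
 *		EFX_GENERIC_SW_STAT(rx_noskb_drops),
 *		EFX_GENERIC_SW_STAT(rx_nodesc_trunc),
 *		...
 *	};
 *
 * example_stat_desc is a made-up name; the real tables live in the
 * per-NIC-type code.
 */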

/* TX data path */
static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	return tx_queue->efx->type->tx_probe(tx_queue);
}
static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_init(tx_queue);
}
static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	if (tx_queue->efx->type->tx_remove)
		tx_queue->efx->type->tx_remove(tx_queue);
}
static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_write(tx_queue);
}

/* RX data path */
static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	return rx_queue->efx->type->rx_probe(rx_queue);
}
static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_init(rx_queue);
}
static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_remove(rx_queue);
}
static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_write(rx_queue);
}
static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_defer_refill(rx_queue);
}

/* Event data path */
static inline int efx_nic_probe_eventq(struct efx_channel *channel)
{
	return channel->efx->type->ev_probe(channel);
}
static inline int efx_nic_init_eventq(struct efx_channel *channel)
{
	return channel->efx->type->ev_init(channel);
}
static inline void efx_nic_fini_eventq(struct efx_channel *channel)
{
	channel->efx->type->ev_fini(channel);
}
static inline void efx_nic_remove_eventq(struct efx_channel *channel)
{
	channel->efx->type->ev_remove(channel);
}
static inline int
efx_nic_process_eventq(struct efx_channel *channel, int quota)
{
	return channel->efx->type->ev_process(channel, quota);
}
static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	channel->efx->type->ev_read_ack(channel);
}

void efx_nic_event_test_start(struct efx_channel *channel);

bool efx_nic_event_present(struct efx_channel *channel);

/* Some statistics are computed as A - B where A and B each increase
 * linearly with some hardware counter(s) and the counters are read
 * asynchronously.  If the counters contributing to B are always read
 * after those contributing to A, the computed value may be lower than
 * the true value by some variable amount, and may decrease between
 * subsequent computations.
 *
 * We should never allow statistics to decrease or to exceed the true
 * value.  Since the computed value will never be greater than the
 * true value, we can achieve this by only storing the computed value
 * when it increases.
 */
static inline void efx_update_diff_stat(u64 *stat, u64 diff)
{
	if ((s64)(diff - *stat) > 0)
		*stat = diff;
}
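
/* Worked example (illustrative): successive computed values 100, 90, 110
 * leave the stored stat at 100, 100, 110; the transient dip to 90 caused by
 * racing counter reads is never exposed.  The signed comparison of the
 * unsigned difference also keeps the update correct across u64 wrap-around.
 */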

/* Interrupts */
int efx_nic_init_interrupt(struct efx_nic *efx);
int efx_nic_irq_test_start(struct efx_nic *efx);
void efx_nic_fini_interrupt(struct efx_nic *efx);

static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
	return READ_ONCE(channel->event_test_cpu);
}
static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
{
	return READ_ONCE(efx->last_irq_cpu);
}

/* Global Resources */
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len, gfp_t gfp_flags);
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
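
/* Illustrative only: callers pair these as with any DMA buffer helper, e.g.
 *
 *	struct efx_buffer buf;
 *
 *	if (efx_nic_alloc_buffer(efx, &buf, EFX_BUF_SIZE, GFP_KERNEL))
 *		return -ENOMEM;
 *	... use buf.addr and buf.dma_addr ...
 *	efx_nic_free_buffer(efx, &buf);
 *
 * The field names come from struct efx_buffer in net_driver.h; the error
 * handling shown is only a sketch.
 */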

size_t efx_nic_get_regs_len(struct efx_nic *efx);
void efx_nic_get_regs(struct efx_nic *efx, void *buf);

#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))

size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
			      const unsigned long *mask, u8 *names);
int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest);
void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
			  const unsigned long *mask, u64 *stats,
			  const void *dma_buf, bool accumulate);
void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);

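/* Maximum time to wait for DMA queue flushes to complete (assumption: the
 * teardown paths that use this treat the value as milliseconds).
 */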
#define EFX_MAX_FLUSH_TIME 5000

#endif /* EFX_NIC_COMMON_H */