/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 * Copyright 2019-2020 Xilinx Inc.
 */

#ifndef EFX_NIC_COMMON_H
#define EFX_NIC_COMMON_H

#include "net_driver.h"
#include "efx_common.h"
#include "mcdi.h"
#include "ptp.h"

enum {
	/* Revisions 0-2 were Falcon A0, A1 and B0 respectively.
	 * They are not supported by this driver but these revision numbers
	 * form part of the ethtool API for register dumping.
	 */
	EFX_REV_SIENA_A0 = 3,
	EFX_REV_HUNT_A0 = 4,
	EFX_REV_EF100 = 5,
};

static inline int efx_nic_rev(struct efx_nic *efx)
{
	return efx->type->revision;
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.buf.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones. We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords. This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
}

/* Report whether this TX queue would be empty for the given write_count.
 * May return a false negative.
 */
static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
					 unsigned int write_count)
{
	unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}

/* Report whether the NIC considers this TX queue empty, using
 * packet_write_count (the write count recorded for the last completable
 * doorbell push). May return a false negative. EF10 only, which is OK
 * because only EF10 supports PIO.
 */
static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
{
	EFX_WARN_ON_ONCE_PARANOID(!tx_queue->efx->type->option_descriptors);
	return __efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count);
}

/* Get the partner of a TX queue, seen as part of the same net core queue */
/* XXX is this a thing on EF100? */
static inline struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
{
	if (tx_queue->label & EFX_TXQ_TYPE_OFFLOAD)
		return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
	else
		return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
}
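
/* Illustrative note (not part of the driver API): a worked example of
 * the empty_read_count check in __efx_nic_tx_is_empty() above. This
 * assumes EFX_EMPTY_COUNT_VALID is a single flag bit, set in
 * empty_read_count when the queue was last observed empty, with the
 * remaining bits recording the write_count at that observation. The
 * values below are made up:
 *
 *	empty_read_count = EFX_EMPTY_COUNT_VALID | 42;
 *	((empty_read_count ^ 42) & ~EFX_EMPTY_COUNT_VALID) == 0
 *		-> still empty for write_count 42
 *	((empty_read_count ^ 43) & ~EFX_EMPTY_COUNT_VALID) != 0
 *		-> a descriptor was added since the queue was seen empty
 *
 * An empty_read_count of 0 (flag bit clear) is the invalidated state,
 * which is why the function returns false without comparing counts.
 */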
/* Decide whether we can use TX PIO, i.e. write packet data directly into
 * a buffer on the device. This can reduce latency at the expense of
 * throughput, so we only do this if both hardware and software TX rings
 * are empty. This also ensures that only one packet at a time can be
 * using the PIO buffer.
 */
static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue);

	return tx_queue->piobuf && efx_nic_tx_is_empty(tx_queue) &&
	       efx_nic_tx_is_empty(partner);
}

int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
			bool *data_mapped);

/* Decide whether to push a TX descriptor to the NIC vs. merely writing
 * the doorbell. This can reduce latency when we are adding a single
 * descriptor to an empty queue, but is otherwise pointless. Further,
 * Falcon and Siena have hardware bugs (SF bug 33851) that may be
 * triggered if we don't check this.
 * We use the write_count used for the last doorbell push, to get the
 * NIC's view of the TX queue.
 */
static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
					    unsigned int write_count)
{
	bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count);

	tx_queue->empty_read_count = 0;
	return was_empty && tx_queue->write_count - write_count == 1;
}

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
}

/* Alignment of PCIe DMA boundaries (4KB) */
#define EFX_PAGE_SIZE	4096
/* Size and alignment of buffer table entries (same) */
#define EFX_BUF_SIZE	EFX_PAGE_SIZE

/* NIC-generic software stats */
enum {
	GENERIC_STAT_rx_noskb_drops,
	GENERIC_STAT_rx_nodesc_trunc,
	GENERIC_STAT_COUNT
};

#define EFX_GENERIC_SW_STAT(ext_name)				\
	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }

/* TX data path */
static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	return tx_queue->efx->type->tx_probe(tx_queue);
}
static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_init(tx_queue);
}
static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	if (tx_queue->efx->type->tx_remove)
		tx_queue->efx->type->tx_remove(tx_queue);
}
static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_write(tx_queue);
}

/* RX data path */
static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	return rx_queue->efx->type->rx_probe(rx_queue);
}
static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_init(rx_queue);
}
static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_remove(rx_queue);
}
static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_write(rx_queue);
}
static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_defer_refill(rx_queue);
}
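
/* Illustrative note (hypothetical caller, not part of this header):
 * one way a TX write path could use efx_nic_may_push_tx_desc().
 * old_write_count stands for the write_count recorded before the new
 * descriptor was added:
 *
 *	unsigned int old_write_count = tx_queue->write_count;
 *	// ... queue one descriptor, incrementing tx_queue->write_count ...
 *	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count))
 *		// write the descriptor together with the doorbell
 *	else
 *		// write the doorbell alone
 *
 * The tx_queue->write_count - write_count == 1 test in the helper
 * ensures a push is only attempted when exactly one descriptor has
 * been added to a queue the NIC last saw empty.
 */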
/* Event data path */
static inline int efx_nic_probe_eventq(struct efx_channel *channel)
{
	return channel->efx->type->ev_probe(channel);
}
static inline int efx_nic_init_eventq(struct efx_channel *channel)
{
	return channel->efx->type->ev_init(channel);
}
static inline void efx_nic_fini_eventq(struct efx_channel *channel)
{
	channel->efx->type->ev_fini(channel);
}
static inline void efx_nic_remove_eventq(struct efx_channel *channel)
{
	channel->efx->type->ev_remove(channel);
}
static inline int
efx_nic_process_eventq(struct efx_channel *channel, int quota)
{
	return channel->efx->type->ev_process(channel, quota);
}
static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	channel->efx->type->ev_read_ack(channel);
}

void efx_nic_event_test_start(struct efx_channel *channel);

bool efx_nic_event_present(struct efx_channel *channel);

static inline void efx_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
{
	if (efx->type->sensor_event)
		efx->type->sensor_event(efx, ev);
}

/* Some statistics are computed as A - B where A and B each increase
 * linearly with some hardware counter(s) and the counters are read
 * asynchronously. If the counters contributing to B are always read
 * after those contributing to A, the computed value may be lower than
 * the true value by some variable amount, and may decrease between
 * subsequent computations.
 *
 * We should never allow statistics to decrease or to exceed the true
 * value. Since the computed value will never be greater than the
 * true value, we can achieve this by only storing the computed value
 * when it increases.
 */
static inline void efx_update_diff_stat(u64 *stat, u64 diff)
{
	if ((s64)(diff - *stat) > 0)
		*stat = diff;
}

/* Interrupts */
int efx_nic_init_interrupt(struct efx_nic *efx);
int efx_nic_irq_test_start(struct efx_nic *efx);
void efx_nic_fini_interrupt(struct efx_nic *efx);

static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
	return READ_ONCE(channel->event_test_cpu);
}
static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
{
	return READ_ONCE(efx->last_irq_cpu);
}

/* Global Resources */
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len, gfp_t gfp_flags);
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);

size_t efx_nic_get_regs_len(struct efx_nic *efx);
void efx_nic_get_regs(struct efx_nic *efx, void *buf);

#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))

size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
			      const unsigned long *mask, u8 *names);
int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest);
void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
			  const unsigned long *mask, u64 *stats,
			  const void *dma_buf, bool accumulate);
void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);

#define EFX_MAX_FLUSH_TIME 5000

#endif /* EFX_NIC_COMMON_H */