1 /*
2 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34 #ifndef _MLX4_EN_H_
35 #define _MLX4_EN_H_
36
37 #include <linux/bitops.h>
38 #include <linux/compiler.h>
39 #include <linux/list.h>
40 #include <linux/mutex.h>
41 #include <linux/kobject.h>
42 #include <linux/if_vlan.h>
43 #include <linux/if_ether.h>
44 #ifdef CONFIG_MLX4_EN_DCB
45 #include <linux/dcbnl.h>
46 #endif
47
48 #include <sys/socket.h>
49 #include <sys/taskqueue.h>
50
51 #include <net/if_types.h>
52 #include <net/if.h>
53 #include <net/if_var.h>
54 #include <net/if_dl.h>
55
56 #include <dev/mlx4/device.h>
57 #include <dev/mlx4/qp.h>
58 #include <dev/mlx4/cq.h>
59 #include <dev/mlx4/srq.h>
60 #include <dev/mlx4/doorbell.h>
61 #include <dev/mlx4/cmd.h>
62
63 #include <net/debugnet.h>
64 #include <netinet/tcp_lro.h>
65
66 #include "en_port.h"
67 #include <dev/mlx4/stats.h>
68
69 #define DRV_NAME "mlx4_en"
70
71 #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
72
73 /*
74 * Device constants
75 */
76
77
78 #define MLX4_EN_PAGE_SHIFT 12
79 #define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
80 #define MLX4_NET_IP_ALIGN 2 /* bytes */
81 #define DEF_RX_RINGS 16
82 #define MAX_RX_RINGS 128
83 #define MIN_RX_RINGS 4
84 #define TXBB_SIZE 64
85
86 #ifndef MLX4_EN_MAX_RX_SEGS
87 #define MLX4_EN_MAX_RX_SEGS 1 /* or 8 */
88 #endif
89
90 #ifndef MLX4_EN_MAX_RX_BYTES
91 #define MLX4_EN_MAX_RX_BYTES MCLBYTES
92 #endif
93
94 #define HEADROOM (2048 / TXBB_SIZE + 1)
95 #define INIT_OWNER_BIT 0xffffffff
96 #define STAMP_STRIDE 64
97 #define STAMP_DWORDS (STAMP_STRIDE / 4)
98 #define STAMP_SHIFT 31
99 #define STAMP_VAL 0x7fffffff
100 #define STATS_DELAY (HZ / 4)
101 #define SERVICE_TASK_DELAY (HZ / 4)
102 #define MAX_NUM_OF_FS_RULES 256
103
104 #define MLX4_EN_FILTER_HASH_SHIFT 4
105 #define MLX4_EN_FILTER_EXPIRY_QUOTA 60
106
107 #ifdef CONFIG_NET_RX_BUSY_POLL
108 #define LL_EXTENDED_STATS
109 #endif
110
111 /* vlan valid range */
112 #define VLAN_MIN_VALUE 1
113 #define VLAN_MAX_VALUE 4094
114
115 /*
116 * OS related constants and tunables
117 */
118
119 #define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ)
120
121 #define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(PAGE_SIZE)
122 #define MLX4_EN_ALLOC_ORDER get_order(MLX4_EN_ALLOC_SIZE)
123
/* RX buffer allocation context: initial ring fill vs. replacing a buffer
 * that was handed up the stack (see en_rx.c for usage). */
enum mlx4_en_alloc_type {
	MLX4_EN_ALLOC_NEW = 0,
	MLX4_EN_ALLOC_REPLACEMENT = 1,
};
128
129 /* Maximum ring sizes */
130 #define MLX4_EN_DEF_TX_QUEUE_SIZE 4096
131
132 /* Minimum packet number till arming the CQ */
133 #define MLX4_EN_MIN_RX_ARM 2048
134 #define MLX4_EN_MIN_TX_ARM 2048
135
136 /* Maximum ring sizes */
137 #define MLX4_EN_MAX_TX_SIZE 8192
138 #define MLX4_EN_MAX_RX_SIZE 8192
139
140 /* Minimum ring sizes */
141 #define MLX4_EN_MIN_RX_SIZE (4096 / TXBB_SIZE)
142 #define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE)
143
144 #define MLX4_EN_SMALL_PKT_SIZE 64
145
146 #define MLX4_EN_MAX_TX_RING_P_UP 32
147 #define MLX4_EN_NUM_UP 1
148
149 #define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
150 MLX4_EN_NUM_UP)
151
152 #define MLX4_EN_NO_VLAN 0xffff
153
154 #define MLX4_EN_DEF_TX_RING_SIZE 1024
155 #define MLX4_EN_DEF_RX_RING_SIZE 1024
156
157 /* Target number of bytes to coalesce with interrupt moderation */
158 #define MLX4_EN_RX_COAL_TARGET 44
159 #define MLX4_EN_RX_COAL_TIME 0x10
160
161 #define MLX4_EN_TX_COAL_PKTS 64
162 #define MLX4_EN_TX_COAL_TIME 64
163
164 #define MLX4_EN_RX_RATE_LOW 400000
165 #define MLX4_EN_RX_COAL_TIME_LOW 0
166 #define MLX4_EN_RX_RATE_HIGH 450000
167 #define MLX4_EN_RX_COAL_TIME_HIGH 128
168 #define MLX4_EN_RX_SIZE_THRESH 1024
169 #define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
170 #define MLX4_EN_SAMPLE_INTERVAL 0
171 #define MLX4_EN_AVG_PKT_SMALL 256
172
173 #define MLX4_EN_AUTO_CONF 0xffff
174
175 #define MLX4_EN_DEF_RX_PAUSE 1
176 #define MLX4_EN_DEF_TX_PAUSE 1
177
178 /* Interval between successive polls in the Tx routine when polling is used
179 instead of interrupts (in per-core Tx rings) - should be power of 2 */
180 #define MLX4_EN_TX_POLL_MODER 16
181 #define MLX4_EN_TX_POLL_TIMEOUT (HZ / 4)
182
183 #define MLX4_EN_64_ALIGN (64 - NET_SKB_PAD)
184 #define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
185 #define HEADER_COPY_SIZE (128)
186 #define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETHER_HDR_LEN)
187
188 #define MLX4_EN_MIN_MTU 46
189 #define ETH_BCAST 0xffffffffffffULL
190
191 #define MLX4_EN_LOOPBACK_RETRIES 5
192 #define MLX4_EN_LOOPBACK_TIMEOUT 100
193
194 #ifdef MLX4_EN_PERF_STAT
195 /* Number of samples to 'average' */
196 #define AVG_SIZE 128
197 #define AVG_FACTOR 1024
198
199 #define INC_PERF_COUNTER(cnt) (++(cnt))
200 #define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add))
201 #define AVG_PERF_COUNTER(cnt, sample) \
202 ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
203 #define GET_PERF_COUNTER(cnt) (cnt)
204 #define GET_AVG_PERF_COUNTER(cnt) ((cnt) / AVG_FACTOR)
205
206 #else
207
208 #define INC_PERF_COUNTER(cnt) do {} while (0)
209 #define ADD_PERF_COUNTER(cnt, add) do {} while (0)
210 #define AVG_PERF_COUNTER(cnt, sample) do {} while (0)
211 #define GET_PERF_COUNTER(cnt) (0)
212 #define GET_AVG_PERF_COUNTER(cnt) (0)
213 #endif /* MLX4_EN_PERF_STAT */
214
/* Constants for TX flow */
enum {
	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
	MAX_BF = 256,     /* max bytes pushed through a BlueFlame doorbell — confirm in en_tx.c */
	MIN_PKT_LEN = 17, /* minimum TX packet length accepted by the send path — confirm in en_tx.c */
};
221
222 /*
223 * Configurables
224 */
225
/* Direction a completion queue services: each CQ is either RX or TX. */
enum cq_type {
	RX = 0,
	TX = 1,
};
230
231
232 /*
233 * Useful macros
234 */
235 #define ROUNDUP_LOG2(x) order_base_2(x)
236 #define XNOR(x, y) (!(x) == !(y))
237 #define ILLEGAL_MAC(addr) (addr == 0xffffffffffffULL || addr == 0x0)
238
/* Per-WQE bookkeeping for a transmit in flight. */
struct mlx4_en_tx_info {
	bus_dmamap_t dma_map;	/* busdma map covering the mbuf chain */
	struct mbuf *mb;	/* mbuf for this send */
	u32 nr_txbb;		/* TXBBs consumed by the WQE */
	u32 nr_bytes;		/* byte count, used for completion statistics */
};
245
246
247 #define MLX4_EN_BIT_DESC_OWN 0x80000000
248 #define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg)
249 #define MLX4_EN_MEMTYPE_PAD 0x100
250 #define DS_SIZE sizeof(struct mlx4_wqe_data_seg)
251
252
/* TX WQE layout: control segment followed by data, LSO or inline segments. */
struct mlx4_en_tx_desc {
	struct mlx4_wqe_ctrl_seg ctrl;
	union {
		struct mlx4_wqe_data_seg data; /* at least one data segment */
		struct mlx4_wqe_lso_seg lso;
		struct mlx4_wqe_inline_seg inl;
	};
};
261
262 #define MLX4_EN_USE_SRQ 0x01000000
263
264 #define MLX4_EN_RX_BUDGET 64
265
266 #define MLX4_EN_TX_MAX_DESC_SIZE 512 /* bytes */
267 #define MLX4_EN_TX_MAX_MBUF_SIZE 65536 /* bytes */
268 #define MLX4_EN_TX_MAX_PAYLOAD_SIZE 65536 /* bytes */
269 #define MLX4_EN_TX_MAX_MBUF_FRAGS \
270 ((MLX4_EN_TX_MAX_DESC_SIZE - 128) / DS_SIZE_ALIGNMENT) /* units */
271 #define MLX4_EN_TX_WQE_MAX_WQEBBS \
272 (MLX4_EN_TX_MAX_DESC_SIZE / TXBB_SIZE) /* units */
273
274 #define MLX4_EN_CX3_LOW_ID 0x1000
275 #define MLX4_EN_CX3_HIGH_ID 0x1005
276
/*
 * Per-ring transmit state: the send QP, its WQE buffer, producer/consumer
 * bookkeeping and per-ring statistics counters.
 */
struct mlx4_en_tx_ring {
	spinlock_t tx_lock;		/* serializes enqueue into this ring */
	bus_dma_tag_t dma_tag;
	struct mlx4_hwq_resources wqres;
	u32 size ; /* number of TXBBs */
	u32 size_mask;			/* size - 1, for index wrapping */
	u16 stride;
	u16 cqn;	/* index of port CQ associated with this ring */
	u32 prod;	/* producer index */
	u32 cons;	/* consumer index */
	u32 buf_size;
	u32 doorbell_qpn;
	u8 *buf;	/* WQE buffer */
	u16 poll_cnt;
	struct mlx4_en_tx_info *tx_info;	/* one entry per descriptor slot */
	u8 queue_index;
	u32 last_nr_txbb;
	struct mlx4_qp qp;
	struct mlx4_qp_context context;
	int qpn;
	enum mlx4_qp_state qp_state;
	struct mlx4_srq dummy;
	/* statistics counters */
	u64 bytes;
	u64 packets;
	u64 tx_csum;
	u64 queue_stopped;
	u64 oversized_packets;
	u64 wake_queue;
	u64 tso_packets;
	u64 defrag_attempts;
	struct mlx4_bf bf;		/* BlueFlame register state */
	bool bf_enabled;
	int hwtstamp_tx_type;
	spinlock_t comp_lock;		/* serializes completion processing */
	int inline_thold;		/* inline data below this length — see en_tx.c */
	u64 watchdog_time;
};
314
/* HW RX descriptor: one scatter entry per configured RX segment. */
struct mlx4_en_rx_desc {
	struct mlx4_wqe_data_seg data[MLX4_EN_MAX_RX_SEGS];
};

/* the size of the structure above must be power of two */
CTASSERT(powerof2(sizeof(struct mlx4_en_rx_desc)));
321
/* Per-slot RX bookkeeping: the posted mbuf and its busdma map. */
struct mlx4_en_rx_mbuf {
	bus_dmamap_t dma_map;
	struct mbuf *mbuf;
};
326
/* Pre-loaded spare RX buffer, presumably swapped in when a refill
 * allocation fails — confirm against en_rx.c. */
struct mlx4_en_rx_spare {
	bus_dmamap_t dma_map;
	struct mbuf *mbuf;
	bus_dma_segment_t segs[MLX4_EN_MAX_RX_SEGS];
};
332
/*
 * Per-ring receive state: the RX descriptor buffer, posted-mbuf table,
 * LRO context and per-ring statistics counters.
 */
struct mlx4_en_rx_ring {
	struct mlx4_hwq_resources wqres;
	bus_dma_tag_t dma_tag;
	struct mlx4_en_rx_spare spare;	/* fallback buffer */
	u32 size ; /* number of Rx descs*/
	u32 actual_size;
	u32 size_mask;			/* size - 1, for index wrapping */
	u16 log_stride;
	u16 cqn;	/* index of port CQ associated with this ring */
	u32 prod;	/* producer index */
	u32 cons;	/* consumer index */
	u32 buf_size;
	u8 fcs_del;
	u32 rx_mb_size;
	u32 rx_mr_key_be;	/* memory key, big-endian, for data segments */
	int qpn;
	u8 *buf;		/* RX descriptor buffer */
	struct mlx4_en_rx_mbuf *mbuf;	/* one entry per descriptor slot */
	/* statistics counters */
	u64 errors;
	u64 bytes;
	u64 packets;
#ifdef LL_EXTENDED_STATS
	u64 yields;
	u64 misses;
	u64 cleaned;
#endif
	u64 csum_ok;
	u64 csum_none;
	int hwtstamp_rx_filter;
	int numa_node;
	struct lro_ctrl lro;	/* software LRO state (tcp_lro) */
};
365
/*
 * Decide whether a received frame is eligible for LRO aggregation based
 * on the CQE status bits: it must be a TCP segment over IPv4 or IPv6
 * with the IP-OK bit set, and none of the disqualifying bits (IPv4
 * fragment, IPv4 options, UDP) may be present in the masked field.
 */
static inline int mlx4_en_can_lro(__be16 status)
{
	const __be16 mask = cpu_to_be16(
	    MLX4_CQE_STATUS_IPV4 |
	    MLX4_CQE_STATUS_IPV4F |
	    MLX4_CQE_STATUS_IPV6 |
	    MLX4_CQE_STATUS_IPV4OPT |
	    MLX4_CQE_STATUS_TCP |
	    MLX4_CQE_STATUS_UDP |
	    MLX4_CQE_STATUS_IPOK);
	const __be16 ipv4_ipok_tcp = cpu_to_be16(
	    MLX4_CQE_STATUS_IPV4 |
	    MLX4_CQE_STATUS_IPOK |
	    MLX4_CQE_STATUS_TCP);
	const __be16 ipv6_ipok_tcp = cpu_to_be16(
	    MLX4_CQE_STATUS_IPV6 |
	    MLX4_CQE_STATUS_IPOK |
	    MLX4_CQE_STATUS_TCP);
	const __be16 masked = status & mask;

	return (masked == ipv4_ipok_tcp || masked == ipv6_ipok_tcp);
}
389
/*
 * Completion queue wrapper: the low-level mlx4 CQ plus the taskqueue and
 * moderation state used to process its completions, and (optionally) the
 * busy-poll ownership state machine.
 */
struct mlx4_en_cq {
	struct mlx4_cq mcq;
	struct mlx4_hwq_resources wqres;
	int ring;		/* index of the ring this CQ serves */
	spinlock_t lock;
	if_t dev;
	/* Per-core Tx cq processing support */
	struct timer_list timer;
	int size;
	int buf_size;
	unsigned vector;	/* interrupt vector */
	enum cq_type is_tx;	/* RX or TX */
	/* interrupt moderation parameters */
	u16 moder_time;
	u16 moder_cnt;
	struct mlx4_cqe *buf;
	struct task cq_task;
	struct taskqueue *tq;
#define MLX4_EN_OPCODE_ERROR 0x1e
	u32 tot_rx;
	u32 tot_tx;
	u32 curr_poll_rx_cpu_id;

#ifdef CONFIG_NET_RX_BUSY_POLL
	/* busy-poll vs. NAPI ownership state, protected by poll_lock */
	unsigned int state;
#define MLX4_EN_CQ_STATE_IDLE 0
#define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */
#define MLX4_EN_CQ_STATE_POLL 2 /* poll owns this CQ */
#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATE_NAPI | MLX4_EN_CQ_STATE_POLL)
#define MLX4_EN_CQ_STATE_NAPI_YIELD 4 /* NAPI yielded this CQ */
#define MLX4_EN_CQ_STATE_POLL_YIELD 8 /* poll yielded this CQ */
#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD)
#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
	spinlock_t poll_lock; /* protects from LLS/napi conflicts */
#endif /* CONFIG_NET_RX_BUSY_POLL */
};
425
/* Per-port configuration profile: ring counts/sizes and pause settings. */
struct mlx4_en_port_profile {
	u32 flags;
	u32 tx_ring_num;
	u32 rx_ring_num;
	u32 tx_ring_size;
	u32 rx_ring_size;
	u8 rx_pause;	/* RX pause-frame enable */
	u8 rx_ppp;	/* RX per-priority pause bitmap */
	u8 tx_pause;	/* TX pause-frame enable */
	u8 tx_ppp;	/* TX per-priority pause bitmap */
	int rss_rings;
	int inline_thold;
};
439
/* Device-wide configuration: RSS parameters plus one profile per port. */
struct mlx4_en_profile {
	int rss_xor;
	int udp_rss;
	u8 rss_mask;
	u32 active_ports;
	u32 small_pkt_int;
	u8 no_reset;
	u8 num_tx_rings_p_up;	/* TX rings per user priority */
	struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];	/* 1-based by port */
};
450
/*
 * Per-device (per-HCA) state shared by all Ethernet ports: core mlx4
 * handle, protection domain/MR/UAR resources and the per-port netdevs.
 */
struct mlx4_en_dev {
	struct mlx4_dev *dev;		/* core mlx4 device */
	struct pci_dev *pdev;
	struct mutex state_lock;	/* protects device/port bring-up state */
	if_t pndev[MLX4_MAX_PORTS + 1];	/* per-port network interfaces, 1-based */
	u32 port_cnt;
	bool device_up;
	struct mlx4_en_profile profile;
	u32 LSO_support;
	struct workqueue_struct *workqueue;
	struct device *dma_device;
	void __iomem *uar_map;
	struct mlx4_uar priv_uar;
	struct mlx4_mr mr;
	u32 priv_pdn;			/* private protection domain number */
	spinlock_t uar_lock;
	u8 mac_removed[MLX4_MAX_PORTS + 1];
	/* timestamp-overflow polling state — see mlx4_en_ptp_overflow_check() */
	unsigned long last_overflow_check;
	unsigned long overflow_period;
};
471
472
/* RSS steering state: one QP per RX ring plus the indirection QP. */
struct mlx4_en_rss_map {
	int base_qpn;
	struct mlx4_qp qps[MAX_RX_RINGS];
	enum mlx4_qp_state state[MAX_RX_RINGS];
	struct mlx4_qp indir_qp;
	enum mlx4_qp_state indir_state;
};
480
481 enum mlx4_en_port_flag {
482 MLX4_EN_PORT_ANC = 1<<0, /* Auto-negotiation complete */
483 MLX4_EN_PORT_ANE = 1<<1, /* Auto-negotiation enabled */
484 };
485
/* Cached physical port state, refreshed by mlx4_en_QUERY_PORT(). */
struct mlx4_en_port_state {
	int link_state;
	int link_speed;
	int transceiver;
	u32 flags;	/* MLX4_EN_PORT_ANC/ANE bits */
};
492
/* Pending action for an entry on a unicast/multicast address list. */
enum mlx4_en_addr_list_act {
	MLX4_ADDR_LIST_NONE,
	MLX4_ADDR_LIST_REM,
	MLX4_ADDR_LIST_ADD,
};
498
/* Entry on one of the priv->{mc,uc,curr_mc,curr_uc}_list address lists. */
struct mlx4_en_addr_list {
	struct list_head list;
	enum mlx4_en_addr_list_act action;	/* pending add/remove action */
	u8 addr[ETH_ALEN];
	u64 reg_id;		/* steering registration id */
	u64 tunnel_reg_id;
};
506
507 #ifdef CONFIG_MLX4_EN_DCB
508 /* Minimal TC BW - setting to 0 will block traffic */
509 #define MLX4_EN_BW_MIN 1
510 #define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
511
512 #define MLX4_EN_TC_VENDOR 0
513 #define MLX4_EN_TC_ETS 7
514
515 #endif
516
517
518 enum {
519 MLX4_EN_FLAG_PROMISC = (1 << 0),
520 MLX4_EN_FLAG_MC_PROMISC = (1 << 1),
521 /* whether we need to enable hardware loopback by putting dmac
522 * in Tx WQE
523 */
524 MLX4_EN_FLAG_ENABLE_HW_LOOPBACK = (1 << 2),
525 /* whether we need to drop packets that hardware loopback-ed */
526 MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3),
527 MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4),
528 #ifdef CONFIG_MLX4_EN_DCB
529 MLX4_EN_FLAG_DCB_ENABLED = (1 << 5)
530 #endif
531 };
532
533 #define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
534 #define MLX4_EN_MAC_HASH_IDX 5
535
/* kobject-backed handle for a virtual port (per-VF) on a physical port. */
struct en_port {
	struct kobject kobj;
	struct mlx4_dev *dev;
	u8 port_num;
	u8 vport_num;
};
542
/*
 * Per-port softc: everything the driver keeps for one Ethernet interface —
 * rings and CQs, interrupt-moderation state, address lists, statistics,
 * sysctl handles and deferred-work contexts.  Retrieved from the ifnet
 * via mlx4_netdev_priv().
 */
struct mlx4_en_priv {
	struct mlx4_en_dev *mdev;		/* owning device */
	struct mlx4_en_port_profile *prof;	/* this port's profile */
	if_t dev;				/* network interface */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct mlx4_en_port_state port_state;
	spinlock_t stats_lock;
	/* To allow rules removal while port is going down */
	struct list_head ethtool_list;

	/* adaptive interrupt moderation state */
	unsigned long last_moder_packets[MAX_RX_RINGS];
	unsigned long last_moder_tx_packets;
	unsigned long last_moder_bytes[MAX_RX_RINGS];
	unsigned long last_moder_jiffies;
	int last_moder_time[MAX_RX_RINGS];
	u16 rx_usecs;
	u16 rx_frames;
	u16 tx_usecs;
	u16 tx_frames;
	u32 pkt_rate_low;
	u32 rx_usecs_low;
	u32 pkt_rate_high;
	u32 rx_usecs_high;
	u32 sample_interval;
	u32 adaptive_rx_coal;
	u32 msg_enable;		/* NETIF_MSG_* bitmask, tested by en_dbg() */
	/* loopback selftest state */
	u32 loopback_ok;
	u32 validate_loopback;

	struct mlx4_hwq_resources res;
	int link_state;
	int last_link_state;
	bool port_up;
	int port;		/* physical port number */
	int registered;
	int gone;
	int allocated;
	unsigned char current_mac[ETH_ALEN + 2];
	u64 mac;
	int mac_index;
	unsigned max_mtu;
	int base_qpn;
	int cqe_factor;		/* CQE size scaling — see mlx4_en_get_cqe() callers */

	struct mlx4_en_rss_map rss_map;
	u32 flags;		/* MLX4_EN_FLAG_* bits */
	u8 num_tx_rings_p_up;
	u32 tx_ring_num;
	u32 rx_ring_num;
	u32 rx_mb_size;

	/* rings and their completion queues */
	struct mlx4_en_tx_ring **tx_ring;
	struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
	struct mlx4_en_cq **tx_cq;
	struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
	struct mlx4_qp drop_qp;
	/* deferred work contexts */
	struct work_struct rx_mode_task;
	struct work_struct watchdog_task;
	struct work_struct linkstate_task;
	struct delayed_work stats_task;
	struct delayed_work service_task;
	/* statistics */
	struct mlx4_en_perf_stats pstats;
	struct mlx4_en_pkt_stats pkstats;
	struct mlx4_en_pkt_stats pkstats_last;
	struct mlx4_en_flow_stats_rx rx_priority_flowstats[MLX4_NUM_PRIORITIES];
	struct mlx4_en_flow_stats_tx tx_priority_flowstats[MLX4_NUM_PRIORITIES];
	struct mlx4_en_flow_stats_rx rx_flowstats;
	struct mlx4_en_flow_stats_tx tx_flowstats;
	struct mlx4_en_port_stats port_stats;
	struct mlx4_en_vport_stats vport_stats;
	struct mlx4_en_vf_stats vf_stats;
	/* unicast/multicast address lists (current and pending) */
	struct list_head mc_list;
	struct list_head uc_list;
	struct list_head curr_mc_list;
	struct list_head curr_uc_list;
	u64 broadcast_id;
	struct mlx4_en_stat_out_mbox hw_stats;
	int vids[128];
	bool wol;
	struct device *ddev;
	struct dentry *dev_root;
	u32 counter_index;
	/* FreeBSD-specific glue: VLAN event hooks, watchdog, media, sysctl */
	eventhandler_tag vlan_attach;
	eventhandler_tag vlan_detach;
	struct callout watchdog_timer;
	struct ifmedia media;
	volatile int blocked;
	struct sysctl_oid *conf_sysctl;
	struct sysctl_oid *stat_sysctl;
	struct sysctl_ctx_list conf_ctx;
	struct sysctl_ctx_list stat_ctx;

#ifdef CONFIG_MLX4_EN_DCB
	struct ieee_ets ets;
	u16 maxrate[IEEE_8021QAZ_MAX_TCS];
	u8 dcbx_cap;
#endif
#ifdef CONFIG_RFS_ACCEL
	spinlock_t filters_lock;
	int last_filter_id;
	struct list_head filters;
	struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
#endif
	u64 tunnel_reg_id;
	struct en_port *vf_ports[MLX4_MAX_NUM_VF];
	unsigned long last_ifq_jiffies;
	u64 if_counters_rx_errors;
	u64 if_counters_rx_no_buffer;
};
652
/* Wake-on-LAN bits carried in a 64-bit WOL configuration word. */
enum mlx4_en_wol {
	MLX4_EN_WOL_MAGIC = (1ULL << 61),
	MLX4_EN_WOL_ENABLED = (1ULL << 62),
};
657
/* MAC hash-table entry; reg_id is the steering registration id. */
struct mlx4_mac_entry {
	struct hlist_node hlist;
	unsigned char mac[ETH_ALEN + 2];
	u64 reg_id;
};
663
664 static inline void *
mlx4_netdev_priv(const if_t dev)665 mlx4_netdev_priv(const if_t dev)
666 {
667 return (if_getsoftc(dev));
668 }
669
/* Return a pointer to the idx'th CQE in the CQ buffer, given the CQE
 * size in bytes (varies with the device's cqe_factor). */
static inline struct mlx4_cqe *mlx4_en_get_cqe(u8 *buf, int idx, int cqe_sz)
{
	const int offset = idx * cqe_sz;

	return ((struct mlx4_cqe *)(buf + offset));
}
674
675 #ifdef CONFIG_NET_RX_BUSY_POLL
/* Initialize the busy-poll ownership lock and mark the CQ idle. */
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{
	spin_lock_init(&cq->poll_lock);
	cq->state = MLX4_EN_CQ_STATE_IDLE;
}
681
/* called from the device poll routine to get ownership of a cq */
static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
{
	int rc = true;
	spin_lock(&cq->poll_lock);
	if (cq->state & MLX4_CQ_LOCKED) {
		/* busy-poll currently owns the CQ (NAPI owning it here
		 * would be a double-lock, hence the WARN); record that
		 * NAPI yielded and back off */
		WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI);
		cq->state |= MLX4_EN_CQ_STATE_NAPI_YIELD;
		rc = false;
	} else
		/* we don't care if someone yielded */
		cq->state = MLX4_EN_CQ_STATE_NAPI;
	spin_unlock(&cq->poll_lock);
	return rc;	/* true when NAPI now owns the CQ */
}
697
/* returns true if someone tried to get the cq while napi had it */
static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
{
	int rc = false;
	spin_lock(&cq->poll_lock);
	/* busy-poll ownership or a NAPI yield mark is impossible while
	 * NAPI holds the CQ */
	WARN_ON(cq->state & (MLX4_EN_CQ_STATE_POLL |
			       MLX4_EN_CQ_STATE_NAPI_YIELD));

	/* report whether a busy-poller tried and yielded in the meantime */
	if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
		rc = true;
	cq->state = MLX4_EN_CQ_STATE_IDLE;
	spin_unlock(&cq->poll_lock);
	return rc;
}
712
713 /* called from mlx4_en_low_latency_poll() */
static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
{
	int rc = true;
	spin_lock_bh(&cq->poll_lock);
	if ((cq->state & MLX4_CQ_LOCKED)) {
		/* NAPI (or another poller) owns the CQ: mark the yield,
		 * bump the stat and back off */
		if_t dev = cq->dev;
		struct mlx4_en_priv *priv = mlx4_netdev_priv(dev);
		struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];

		cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD;
		rc = false;
#ifdef LL_EXTENDED_STATS
		rx_ring->yields++;
#endif
	} else
		/* preserve yield marks */
		cq->state |= MLX4_EN_CQ_STATE_POLL;
	spin_unlock_bh(&cq->poll_lock);
	return rc;	/* true when the poller now owns the CQ */
}
734
735 /* returns true if someone tried to get the cq while it was locked */
static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
{
	int rc = false;
	spin_lock_bh(&cq->poll_lock);
	/* NAPI ownership is impossible while a busy-poller holds the CQ */
	WARN_ON(cq->state & (MLX4_EN_CQ_STATE_NAPI));

	/* report whether another poller tried and yielded in the meantime */
	if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
		rc = true;
	cq->state = MLX4_EN_CQ_STATE_IDLE;
	spin_unlock_bh(&cq->poll_lock);
	return rc;
}
748
749 /* true if a socket is polling, even if it did not get the lock */
static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{
	/* caller must hold ownership (NAPI or poll) before asking */
	WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
	return cq->state & CQ_USER_PEND;
}
755 #else
/*
 * CONFIG_NET_RX_BUSY_POLL disabled: the CQ ownership helpers collapse to
 * no-ops — NAPI "locking" always succeeds and busy-poll never owns a CQ.
 */
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{
}

static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
{
	return true;
}

static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
{
	return false;
}

static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
{
	return false;
}

static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
{
	return false;
}

static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{
	return false;
}
784 #endif /* CONFIG_NET_RX_BUSY_POLL */
785
786 #define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
787
788 void mlx4_en_destroy_netdev(if_t dev);
789 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
790 struct mlx4_en_port_profile *prof);
791
792 int mlx4_en_start_port(if_t dev);
793 void mlx4_en_stop_port(if_t dev);
794
795 void mlx4_en_free_resources(struct mlx4_en_priv *priv);
796 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
797
798 int mlx4_en_pre_config(struct mlx4_en_priv *priv);
799 int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
800 int entries, int ring, enum cq_type mode, int node);
801 void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
802 int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
803 int cq_idx);
804 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
805 int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
806 int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
807
808 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
809 u16 mlx4_en_select_queue(if_t dev, struct mbuf *mb);
810
811 int mlx4_en_xmit(struct mlx4_en_priv *priv, int tx_ind, struct mbuf **mbp);
812 int mlx4_en_transmit(if_t dev, struct mbuf *m);
813 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
814 struct mlx4_en_tx_ring **pring,
815 u32 size, u16 stride, int node, int queue_idx);
816 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
817 struct mlx4_en_tx_ring **pring);
818 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
819 struct mlx4_en_tx_ring *ring,
820 int cq, int user_prio);
821 void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
822 struct mlx4_en_tx_ring *ring);
823 void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
824 void mlx4_en_qflush(if_t dev);
825
826 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
827 struct mlx4_en_rx_ring **pring,
828 u32 size, int node);
829 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
830 struct mlx4_en_rx_ring **pring,
831 u32 size);
832 void mlx4_en_rx_que(void *context, int pending);
833 int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
834 void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
835 struct mlx4_en_rx_ring *ring);
836 int mlx4_en_process_rx_cq(if_t dev,
837 struct mlx4_en_cq *cq,
838 int budget);
839 void mlx4_en_poll_tx_cq(unsigned long data);
840 void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
841 int is_tx, int rss, int qpn, int cqn, int user_prio,
842 struct mlx4_qp_context *context);
843 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
844 int mlx4_en_map_buffer(struct mlx4_buf *buf);
845 void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
846 void mlx4_en_calc_rx_buf(if_t dev);
847
848 const u32 *mlx4_en_get_rss_key(struct mlx4_en_priv *priv, u16 *keylen);
849 u8 mlx4_en_get_rss_mask(struct mlx4_en_priv *priv);
850 int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
851 void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
852 int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv);
853 void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv);
854 int mlx4_en_free_tx_buf(if_t dev, struct mlx4_en_tx_ring *ring);
855 void mlx4_en_rx_irq(struct mlx4_cq *mcq);
856
857 int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);
858
859 int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
860 int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
861 int mlx4_en_get_vport_stats(struct mlx4_en_dev *mdev, u8 port);
862 void mlx4_en_create_debug_files(struct mlx4_en_priv *priv);
863 void mlx4_en_delete_debug_files(struct mlx4_en_priv *priv);
864 int mlx4_en_register_debugfs(void);
865 void mlx4_en_unregister_debugfs(void);
866
867 #ifdef CONFIG_MLX4_EN_DCB
868 extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
869 extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
870 #endif
871
872 int mlx4_en_setup_tc(if_t dev, u8 up);
873
874 #ifdef CONFIG_RFS_ACCEL
875 void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
876 struct mlx4_en_rx_ring *rx_ring);
877 #endif
878
879 #define MLX4_EN_NUM_SELF_TEST 5
880 void mlx4_en_ex_selftest(if_t dev, u32 *flags, u64 *buf);
881 void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
882
883 /*
884 * Functions for time stamping
885 */
886 #define SKBTX_HW_TSTAMP (1 << 0)
887 #define SKBTX_IN_PROGRESS (1 << 2)
888
889 u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);
890
891 /* Functions for caching and restoring statistics */
892 int mlx4_en_get_sset_count(if_t dev, int sset);
893 void mlx4_en_restore_ethtool_stats(struct mlx4_en_priv *priv,
894 u64 *data);
895
896 /*
897 * Globals
898 */
899 extern const struct ethtool_ops mlx4_en_ethtool_ops;
900
901 /*
902 * Defines for link speed - needed by selftest
903 */
904 #define MLX4_EN_LINK_SPEED_1G 1000
905 #define MLX4_EN_LINK_SPEED_10G 10000
906 #define MLX4_EN_LINK_SPEED_40G 40000
907
/* Message-level bits (Linux NETIF_MSG_* compatible) tested against
 * priv->msg_enable by the en_dbg() logging macro. */
enum {
	NETIF_MSG_DRV = 0x0001,
	NETIF_MSG_PROBE = 0x0002,
	NETIF_MSG_LINK = 0x0004,
	NETIF_MSG_TIMER = 0x0008,
	NETIF_MSG_IFDOWN = 0x0010,
	NETIF_MSG_IFUP = 0x0020,
	NETIF_MSG_RX_ERR = 0x0040,
	NETIF_MSG_TX_ERR = 0x0080,
	NETIF_MSG_TX_QUEUED = 0x0100,
	NETIF_MSG_INTR = 0x0200,
	NETIF_MSG_TX_DONE = 0x0400,
	NETIF_MSG_RX_STATUS = 0x0800,
	NETIF_MSG_PKTDATA = 0x1000,
	NETIF_MSG_HW = 0x2000,
	NETIF_MSG_WOL = 0x4000,
};
925
926
927 /*
928 * printk / logging functions
929 */
930
931 #define en_print(level, priv, format, arg...) \
932 { \
933 if ((priv)->registered) \
934 printk(level "%s: %s: " format, DRV_NAME, \
935 if_name((priv)->dev), ## arg); \
936 else \
937 printk(level "%s: %s: Port %d: " format, \
938 DRV_NAME, dev_name(&(priv)->mdev->pdev->dev), \
939 (priv)->port, ## arg); \
940 }
941
942
943 #define en_dbg(mlevel, priv, format, arg...) \
944 do { \
945 if (NETIF_MSG_##mlevel & priv->msg_enable) \
946 en_print(KERN_DEBUG, priv, format, ##arg); \
947 } while (0)
948 #define en_warn(priv, format, arg...) \
949 en_print(KERN_WARNING, priv, format, ##arg)
950 #define en_err(priv, format, arg...) \
951 en_print(KERN_ERR, priv, format, ##arg)
952 #define en_info(priv, format, arg...) \
953 en_print(KERN_INFO, priv, format, ## arg)
954
955 #define mlx4_err(mdev, format, arg...) \
956 pr_err("%s %s: " format, DRV_NAME, \
957 dev_name(&(mdev)->pdev->dev), ##arg)
958 #define mlx4_info(mdev, format, arg...) \
959 pr_info("%s %s: " format, DRV_NAME, \
960 dev_name(&(mdev)->pdev->dev), ##arg)
961 #define mlx4_warn(mdev, format, arg...) \
962 pr_warning("%s %s: " format, DRV_NAME, \
963 dev_name(&(mdev)->pdev->dev), ##arg)
964
965 #endif
966