xref: /linux/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h (revision bdd1a21b52557ea8f61d0a5dc2f77151b576eb70)
/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */

/* Header file for Gigabit Ethernet driver for Mellanox BlueField SoC
 * - this file contains software data structures and any chip-specific
 *   data structures (e.g. TX WQE format) that are memory resident.
 *
 * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
 */

#ifndef __MLXBF_GIGE_H__
#define __MLXBF_GIGE_H__

#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/irqreturn.h>
#include <linux/netdevice.h>
#include <linux/irq.h>

/* The silicon design supports a maximum RX ring size of
 * 32K entries. Based on current testing, supporting that
 * maximum is not required; instead the RX ring is capped
 * at a more realistic 1024 entries.
 */
#define MLXBF_GIGE_MIN_RXQ_SZ     32
#define MLXBF_GIGE_MAX_RXQ_SZ     1024
#define MLXBF_GIGE_DEFAULT_RXQ_SZ 128
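
/* Illustrative sketch, not part of the driver: a ring-resize path (for
 * example an ethtool .set_ringparam handler) would be expected to clamp a
 * requested RX ring size to the limits above before reinitializing the
 * ring.  The helper name and the power-of-two rounding are assumptions for
 * illustration only; clamp_t() is from <linux/minmax.h> and
 * rounddown_pow_of_two() is from <linux/log2.h>.
 *
 *	static u16 mlxbf_gige_clamp_rxq_sz(u32 req)
 *	{
 *		req = clamp_t(u32, req, MLXBF_GIGE_MIN_RXQ_SZ,
 *			      MLXBF_GIGE_MAX_RXQ_SZ);
 *		return rounddown_pow_of_two(req);
 *	}
 */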

#define MLXBF_GIGE_MIN_TXQ_SZ     4
#define MLXBF_GIGE_MAX_TXQ_SZ     256
#define MLXBF_GIGE_DEFAULT_TXQ_SZ 128

#define MLXBF_GIGE_DEFAULT_BUF_SZ 2048

#define MLXBF_GIGE_DMA_PAGE_SZ    4096
#define MLXBF_GIGE_DMA_PAGE_SHIFT 12

/* There are four individual MAC RX filters. Currently
 * only two of them are used: one for the broadcast MAC
 * (index 0) and one for the local MAC (index 1)
 */
#define MLXBF_GIGE_BCAST_MAC_FILTER_IDX 0
#define MLXBF_GIGE_LOCAL_MAC_FILTER_IDX 1

/* Define for broadcast MAC literal */
#define BCAST_MAC_ADDR 0xFFFFFFFFFFFF
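
/* Illustrative sketch, not part of the driver: during device init the two
 * filters above would typically be programmed with the broadcast address
 * and the interface's own station address via mlxbf_gige_set_mac_rx_filter()
 * (declared later in this file).  "local_mac" is an assumed u64 holding the
 * local MAC address in the register format the filter expects.
 *
 *	mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX,
 *				     BCAST_MAC_ADDR);
 *	mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
 *				     local_mac);
 */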

/* There are three individual interrupts:
 *   1) Errors, "OOB" interrupt line
 *   2) Receive Packet, "OOB_LLU" interrupt line
 *   3) LLU and PLU Events, "OOB_PLU" interrupt line
 */
#define MLXBF_GIGE_ERROR_INTR_IDX       0
#define MLXBF_GIGE_RECEIVE_PKT_INTR_IDX 1
#define MLXBF_GIGE_LLU_PLU_INTR_IDX     2
#define MLXBF_GIGE_PHY_INT_N            3
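
/* Illustrative sketch, not part of the driver: the indices above identify
 * the per-line interrupts among the platform IRQ resources, so probe code
 * could look them up roughly as follows (error handling omitted):
 *
 *	priv->error_irq = platform_get_irq(pdev, MLXBF_GIGE_ERROR_INTR_IDX);
 *	priv->rx_irq = platform_get_irq(pdev, MLXBF_GIGE_RECEIVE_PKT_INTR_IDX);
 *	priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);
 */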

#define MLXBF_GIGE_MDIO_DEFAULT_PHY_ADDR 0x3

#define MLXBF_GIGE_DEFAULT_PHY_INT_GPIO 12

struct mlxbf_gige_stats {
	u64 hw_access_errors;
	u64 tx_invalid_checksums;
	u64 tx_small_frames;
	u64 tx_index_errors;
	u64 sw_config_errors;
	u64 sw_access_errors;
	u64 rx_truncate_errors;
	u64 rx_mac_errors;
	u64 rx_din_dropped_pkts;
	u64 tx_fifo_full;
	u64 rx_filter_passed_pkts;
	u64 rx_filter_discard_pkts;
};

struct mlxbf_gige {
	void __iomem *base;     /* MAC block register space */
	void __iomem *llu_base; /* LLU block register space */
	void __iomem *plu_base; /* PLU block register space */
	struct device *dev;
	struct net_device *netdev;
	struct platform_device *pdev;
	void __iomem *mdio_io;  /* MDIO register space */
	struct mii_bus *mdiobus;
	void __iomem *gpio_io;  /* GPIO block used for PHY interrupt */
	struct irq_domain *irqdomain;
	u32 phy_int_gpio_mask;
	spinlock_t lock;      /* for packet processing indices */
	spinlock_t gpio_lock; /* for GPIO bus access */
	u16 rx_q_entries;
	u16 tx_q_entries;
	u64 *tx_wqe_base;           /* TX WQE ring */
	dma_addr_t tx_wqe_base_dma;
	u64 *tx_wqe_next;           /* next TX WQE to be posted */
	u64 *tx_cc;                 /* TX completion count buffer */
	dma_addr_t tx_cc_dma;
	dma_addr_t *rx_wqe_base;    /* RX WQE ring of buffer addresses */
	dma_addr_t rx_wqe_base_dma;
	u64 *rx_cqe_base;           /* RX CQE ring */
	dma_addr_t rx_cqe_base_dma;
	u16 tx_pi;                  /* TX producer index */
	u16 prev_tx_ci;             /* last observed TX consumer index */
	u64 error_intr_count;
	u64 rx_intr_count;
	u64 llu_plu_intr_count;
	struct sk_buff *rx_skb[MLXBF_GIGE_MAX_RXQ_SZ];
	struct sk_buff *tx_skb[MLXBF_GIGE_MAX_TXQ_SZ];
	int error_irq;
	int rx_irq;
	int llu_plu_irq;
	int phy_irq;
	int hw_phy_irq;
	bool promisc_enabled;
	u8 valid_polarity;          /* expected RX CQE valid bit polarity */
	struct napi_struct napi;
	struct mlxbf_gige_stats stats;
};

/* Rx Work Queue Element definitions */
#define MLXBF_GIGE_RX_WQE_SZ                   8

/* Rx Completion Queue Element definitions */
#define MLXBF_GIGE_RX_CQE_SZ                   8
#define MLXBF_GIGE_RX_CQE_PKT_LEN_MASK         GENMASK(10, 0)
#define MLXBF_GIGE_RX_CQE_VALID_MASK           GENMASK(11, 11)
#define MLXBF_GIGE_RX_CQE_PKT_STATUS_MASK      GENMASK(15, 12)
#define MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR   GENMASK(12, 12)
#define MLXBF_GIGE_RX_CQE_PKT_STATUS_TRUNCATED GENMASK(13, 13)
#define MLXBF_GIGE_RX_CQE_CHKSUM_MASK          GENMASK(31, 16)
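
/* Illustrative sketch, not part of the driver: the masks above describe
 * fields of a 64-bit RX CQE, so the receive path would be expected to
 * decode one with FIELD_GET() from <linux/bitfield.h>, roughly as below.
 * "rx_ci" is an assumed local RX consumer index used only for illustration.
 *
 *	u64 cqe = *(priv->rx_cqe_base + rx_ci);
 *
 *	if (FIELD_GET(MLXBF_GIGE_RX_CQE_VALID_MASK, cqe) ==
 *	    priv->valid_polarity) {
 *		u32 pkt_len = FIELD_GET(MLXBF_GIGE_RX_CQE_PKT_LEN_MASK, cqe);
 *		u32 status = FIELD_GET(MLXBF_GIGE_RX_CQE_PKT_STATUS_MASK, cqe);
 *		...
 *	}
 */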

/* Tx Work Queue Element definitions */
#define MLXBF_GIGE_TX_WQE_SZ_QWORDS            2
#define MLXBF_GIGE_TX_WQE_SZ                   16
#define MLXBF_GIGE_TX_WQE_PKT_LEN_MASK         GENMASK(10, 0)
#define MLXBF_GIGE_TX_WQE_UPDATE_MASK          GENMASK(31, 31)
#define MLXBF_GIGE_TX_WQE_CHKSUM_LEN_MASK      GENMASK(42, 32)
#define MLXBF_GIGE_TX_WQE_CHKSUM_START_MASK    GENMASK(55, 48)
#define MLXBF_GIGE_TX_WQE_CHKSUM_OFFSET_MASK   GENMASK(63, 56)
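
/* Illustrative sketch, not part of the driver: a TX WQE is two quadwords
 * (MLXBF_GIGE_TX_WQE_SZ_QWORDS).  Assuming the first quadword carries the
 * buffer DMA address and the second carries the fields masked above, the
 * transmit path could build one with FIELD_PREP() from <linux/bitfield.h>.
 * "buf_dma" is an assumed dma_addr_t from a streaming DMA mapping, and
 * setting the UPDATE bit is shown only as an example of requesting a
 * completion-count write-back.
 *
 *	u64 *tx_wqe = priv->tx_wqe_next;
 *
 *	tx_wqe[0] = (u64)buf_dma;
 *	tx_wqe[1] = FIELD_PREP(MLXBF_GIGE_TX_WQE_PKT_LEN_MASK, skb->len) |
 *		    FIELD_PREP(MLXBF_GIGE_TX_WQE_UPDATE_MASK, 1);
 */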

/* Macro to return packet length of specified TX WQE */
#define MLXBF_GIGE_TX_WQE_PKT_LEN(tx_wqe_addr) \
	(*((tx_wqe_addr) + 1) & MLXBF_GIGE_TX_WQE_PKT_LEN_MASK)

/* Tx Completion Count */
#define MLXBF_GIGE_TX_CC_SZ                    8

/* List of resources in ACPI table */
enum mlxbf_gige_res {
	MLXBF_GIGE_RES_MAC,
	MLXBF_GIGE_RES_MDIO9,
	MLXBF_GIGE_RES_GPIO0,
	MLXBF_GIGE_RES_LLU,
	MLXBF_GIGE_RES_PLU
};
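
/* Illustrative sketch, not part of the driver: the enum above gives the
 * order of the memory resources listed in the ACPI table, so probe code
 * could map the register blocks by index, for example:
 *
 *	priv->base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC);
 *	priv->llu_base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_LLU);
 *	priv->plu_base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_PLU);
 */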

/* Version of register data returned by mlxbf_gige_get_regs() */
#define MLXBF_GIGE_REGS_VERSION 1

int mlxbf_gige_mdio_probe(struct platform_device *pdev,
			  struct mlxbf_gige *priv);
void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv);
irqreturn_t mlxbf_gige_mdio_handle_phy_interrupt(int irq, void *dev_id);
void mlxbf_gige_mdio_enable_phy_int(struct mlxbf_gige *priv);

void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
				  unsigned int index, u64 dmac);
void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
				  unsigned int index, u64 *dmac);
void mlxbf_gige_enable_promisc(struct mlxbf_gige *priv);
void mlxbf_gige_disable_promisc(struct mlxbf_gige *priv);
int mlxbf_gige_rx_init(struct mlxbf_gige *priv);
void mlxbf_gige_rx_deinit(struct mlxbf_gige *priv);
int mlxbf_gige_tx_init(struct mlxbf_gige *priv);
void mlxbf_gige_tx_deinit(struct mlxbf_gige *priv);
bool mlxbf_gige_handle_tx_complete(struct mlxbf_gige *priv);
netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
				  struct net_device *netdev);
struct sk_buff *mlxbf_gige_alloc_skb(struct mlxbf_gige *priv,
				     unsigned int map_len,
				     dma_addr_t *buf_dma,
				     enum dma_data_direction dir);
int mlxbf_gige_request_irqs(struct mlxbf_gige *priv);
void mlxbf_gige_free_irqs(struct mlxbf_gige *priv);
int mlxbf_gige_poll(struct napi_struct *napi, int budget);
extern const struct ethtool_ops mlxbf_gige_ethtool_ops;
void mlxbf_gige_update_tx_wqe_next(struct mlxbf_gige *priv);

int mlxbf_gige_gpio_init(struct platform_device *pdev, struct mlxbf_gige *priv);
void mlxbf_gige_gpio_free(struct mlxbf_gige *priv);

#endif /* !defined(__MLXBF_GIGE_H__) */