/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */
#ifndef __BNA_H__
#define __BNA_H__

#include "bfa_defs.h"
#include "bfa_ioc.h"
#include "bfi_enet.h"
#include "bna_types.h"

extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];

/*  Macros and constants  */

#define bna_is_small_rxq(_id) ((_id) & 0x1)

/*
 * input  : _addr -> OS DMA address in host-endian format
 * output : _bna_dma_addr -> pointer to hw DMA address (big-endian msb/lsb)
 */
#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr)				\
do {									\
	u64 tmp_addr = cpu_to_be64((u64)(_addr));			\
	(_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
	(_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
} while (0)

/*
 * input  : _bna_dma_addr -> pointer to hw DMA address (big-endian msb/lsb)
 * output : _addr -> OS DMA address in host-endian format
 */
#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr)				\
do {									\
	(_addr) = (((u64)ntohl((_bna_dma_addr)->msb)) << 32)		\
		  | (ntohl((_bna_dma_addr)->lsb) & 0xffffffff);		\
} while (0)
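
/*
 * Usage sketch for the two conversion macros above (a minimal example only;
 * the local names dma_handle, hw_addr and host_addr are illustrative, not
 * part of the driver API):
 *
 *	dma_addr_t dma_handle;			e.g. from dma_map_single()
 *	struct bna_dma_addr hw_addr;		big-endian form used by hardware
 *	u64 host_addr;
 *
 *	BNA_SET_DMA_ADDR(dma_handle, &hw_addr);	host addr -> hw msb/lsb
 *	BNA_GET_DMA_ADDR(&hw_addr, host_addr);	hw msb/lsb -> host addr
 */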

#define BNA_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)
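/* i.e. one Tx work item for every four Tx vectors, rounded up */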

#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth)			\
	((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))

#define BNA_QE_INDX_INC(_idx, _q_depth) BNA_QE_INDX_ADD(_idx, 1, _q_depth)

#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth)		\
	(((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))

#define BNA_QE_FREE_CNT(_q_ptr, _q_depth)				\
	(((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) &	\
	 ((_q_depth) - 1))
#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth)				\
	(((_q_ptr)->producer_index - (_q_ptr)->consumer_index) &	\
	 ((_q_depth) - 1))
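
/*
 * The queue-index helpers above all mask with (q_depth - 1), so they assume
 * the queue depth is a power of two.  A minimal, illustrative producer-side
 * sketch (q stands for any object with producer_index/consumer_index fields
 * and q_depth for its depth; both names are hypothetical):
 *
 *	if (BNA_QE_FREE_CNT(q, q_depth)) {
 *		... fill the entry at q->producer_index ...
 *		BNA_QE_INDX_INC(q->producer_index, q_depth);
 *	}
 */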

#define BNA_LARGE_PKT_SIZE		1000

#define BNA_UPDATE_PKT_CNT(_pkt, _len)					\
do {									\
	if ((_len) > BNA_LARGE_PKT_SIZE) {				\
		(_pkt)->large_pkt_cnt++;				\
	} else {							\
		(_pkt)->small_pkt_cnt++;				\
	}								\
} while (0)
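
/*
 * BNA_UPDATE_PKT_CNT() buckets a completed packet as "large" or "small"
 * around the BNA_LARGE_PKT_SIZE threshold; these counters are consumed by
 * the receive dynamic interrupt moderation (DIM) logic.
 */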

#define	call_rxf_stop_cbfn(rxf)						\
do {									\
	if ((rxf)->stop_cbfn) {						\
		void (*cbfn)(struct bna_rx *);				\
		struct bna_rx *cbarg;					\
		cbfn = (rxf)->stop_cbfn;				\
		cbarg = (rxf)->stop_cbarg;				\
		(rxf)->stop_cbfn = NULL;				\
		(rxf)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

#define	call_rxf_start_cbfn(rxf)					\
do {									\
	if ((rxf)->start_cbfn) {					\
		void (*cbfn)(struct bna_rx *);				\
		struct bna_rx *cbarg;					\
		cbfn = (rxf)->start_cbfn;				\
		cbarg = (rxf)->start_cbarg;				\
		(rxf)->start_cbfn = NULL;				\
		(rxf)->start_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

#define	call_rxf_cam_fltr_cbfn(rxf)					\
do {									\
	if ((rxf)->cam_fltr_cbfn) {					\
		void (*cbfn)(struct bnad *, struct bna_rx *);		\
		struct bnad *cbarg;					\
		cbfn = (rxf)->cam_fltr_cbfn;				\
		cbarg = (rxf)->cam_fltr_cbarg;				\
		(rxf)->cam_fltr_cbfn = NULL;				\
		(rxf)->cam_fltr_cbarg = NULL;				\
		cbfn(cbarg, rxf->rx);					\
	}								\
} while (0)
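
/*
 * Each call_rxf_*_cbfn() helper clears the saved callback/argument pair
 * before invoking it, so a pending completion callback fires at most once.
 */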

#define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx))

#define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx))

#define xxx_enable(mode, bitmask, xxx)					\
do {									\
	bitmask |= xxx;							\
	mode |= xxx;							\
} while (0)

#define xxx_disable(mode, bitmask, xxx)					\
do {									\
	bitmask |= xxx;							\
	mode &= ~xxx;							\
} while (0)

#define xxx_inactive(mode, bitmask, xxx)				\
do {									\
	bitmask &= ~xxx;						\
	mode &= ~xxx;							\
} while (0)
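
/*
 * In the is_xxx_*()/xxx_*() helpers above, "bitmask" marks which rx-mode
 * bits currently have an operation requested and "mode" holds the desired
 * state of those bits: enable/disable mark the bit in the bitmask and
 * set/clear it in mode, while inactive drops the bit from both.  The
 * promisc, default and allmulti wrappers below instantiate these helpers
 * for the individual BNA_RXMODE_* flags.
 */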

#define is_promisc_enable(mode, bitmask)				\
	is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)

#define is_promisc_disable(mode, bitmask)				\
	is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_enable(mode, bitmask)					\
	xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_disable(mode, bitmask)					\
	xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_inactive(mode, bitmask)					\
	xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)

#define is_default_enable(mode, bitmask)				\
	is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define is_default_disable(mode, bitmask)				\
	is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_enable(mode, bitmask)					\
	xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_disable(mode, bitmask)					\
	xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_inactive(mode, bitmask)					\
	xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)

#define is_allmulti_enable(mode, bitmask)				\
	is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define is_allmulti_disable(mode, bitmask)				\
	is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_enable(mode, bitmask)					\
	xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_disable(mode, bitmask)					\
	xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_inactive(mode, bitmask)				\
	xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define	GET_RXQS(rxp, q0, q1)	do {					\
	switch ((rxp)->type) {						\
	case BNA_RXP_SINGLE:						\
		(q0) = rxp->rxq.single.only;				\
		(q1) = NULL;						\
		break;							\
	case BNA_RXP_SLR:						\
		(q0) = rxp->rxq.slr.large;				\
		(q1) = rxp->rxq.slr.small;				\
		break;							\
	case BNA_RXP_HDS:						\
		(q0) = rxp->rxq.hds.data;				\
		(q1) = rxp->rxq.hds.hdr;				\
		break;							\
	}								\
} while (0)
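
/*
 * GET_RXQS() resolves the receive queue pair behind an rx-path object:
 * q0 is the only/large/data queue and q1 is NULL, the small queue or the
 * header queue, depending on whether the path type is single, SLR
 * (separate small/large queues) or HDS (header-data split).
 */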

#define bna_tx_rid_mask(_bna) ((_bna)->tx_mod.rid_mask)

#define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask)

#define bna_tx_from_rid(_bna, _rid, _tx)				\
do {									\
	struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod;			\
	struct bna_tx *__tx;						\
	_tx = NULL;							\
	list_for_each_entry(__tx, &__tx_mod->tx_active_q, qe) {	\
		if (__tx->rid == (_rid)) {				\
			(_tx) = __tx;					\
			break;						\
		}							\
	}								\
} while (0)

#define bna_rx_from_rid(_bna, _rid, _rx)				\
do {									\
	struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod;			\
	struct bna_rx *__rx;						\
	_rx = NULL;							\
	list_for_each_entry(__rx, &__rx_mod->rx_active_q, qe) {	\
		if (__rx->rid == (_rid)) {				\
			(_rx) = __rx;					\
			break;						\
		}							\
	}								\
} while (0)
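
/*
 * bna_tx_from_rid()/bna_rx_from_rid() walk the active Tx/Rx lists looking
 * for the object whose resource id matches _rid; _tx/_rx is left NULL when
 * no match is found.
 */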

#define bna_mcam_mod_free_q(_bna) (&(_bna)->mcam_mod.free_q)

#define bna_mcam_mod_del_q(_bna) (&(_bna)->mcam_mod.del_q)

#define bna_ucam_mod_free_q(_bna) (&(_bna)->ucam_mod.free_q)

#define bna_ucam_mod_del_q(_bna) (&(_bna)->ucam_mod.del_q)

/*  Inline functions  */

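/* Return the entry on list @q whose MAC address equals @addr, or NULL. */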
static inline struct bna_mac *bna_mac_find(struct list_head *q, const u8 *addr)
{
	struct bna_mac *mac;

	list_for_each_entry(mac, q, qe)
		if (ether_addr_equal(mac->addr, addr))
			return mac;
	return NULL;
}

#define bna_attr(_bna) (&(_bna)->ioceth.attr)

/* Function prototypes */

/* BNA */

/* FW response handlers */
void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr);

/* APIs for BNAD */
void bna_res_req(struct bna_res_info *res_info);
void bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info);
void bna_init(struct bna *bna, struct bnad *bnad,
	      struct bfa_pcidev *pcidev,
	      struct bna_res_info *res_info);
void bna_mod_init(struct bna *bna, struct bna_res_info *res_info);
void bna_uninit(struct bna *bna);
int bna_num_txq_set(struct bna *bna, int num_txq);
int bna_num_rxp_set(struct bna *bna, int num_rxp);
void bna_hw_stats_get(struct bna *bna);

/* APIs for RxF */
struct bna_mac *bna_cam_mod_mac_get(struct list_head *head);
struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			     struct bna_mcam_handle *handle);

/* MBOX */

/* API for BNAD */
void bna_mbox_handler(struct bna *bna, u32 intr_status);

/* ETHPORT */

/* Callbacks for RX */
void bna_ethport_cb_rx_started(struct bna_ethport *ethport);
void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport);

/* TX MODULE AND TX */

/* FW response handlers */
void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx,
			       struct bfi_msgq_mhdr *msghdr);
void bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx,
			      struct bfi_msgq_mhdr *msghdr);
void bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod);

/* APIs for BNA */
void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		     struct bna_res_info *res_info);
void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);

/* APIs for ENET */
void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);

/* APIs for BNAD */
void bna_tx_res_req(int num_txq, int txq_depth,
		    struct bna_res_info *res_info);
struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
			     struct bna_tx_config *tx_cfg,
			     const struct bna_tx_event_cbfn *tx_cbfn,
			     struct bna_res_info *res_info, void *priv);
void bna_tx_destroy(struct bna_tx *tx);
void bna_tx_enable(struct bna_tx *tx);
void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		    void (*cbfn)(void *, struct bna_tx *));
void bna_tx_cleanup_complete(struct bna_tx *tx);
void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);

/* RX MODULE, RX, RXF */

/* FW response handlers */
void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx,
			       struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx,
			      struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
			       struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
			       struct bfi_msgq_mhdr *msghdr);

/* APIs for BNA */
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
		     struct bna_res_info *res_info);
void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);

/* APIs for ENET */
void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);

/* APIs for BNAD */
void bna_rx_res_req(struct bna_rx_config *rx_config,
		    struct bna_res_info *res_info);
struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
			     struct bna_rx_config *rx_cfg,
			     const struct bna_rx_event_cbfn *rx_cbfn,
			     struct bna_res_info *res_info, void *priv);
void bna_rx_destroy(struct bna_rx *rx);
void bna_rx_enable(struct bna_rx *rx);
void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		    void (*cbfn)(void *, struct bna_rx *));
void bna_rx_cleanup_complete(struct bna_rx *rx);
void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
void bna_rx_dim_update(struct bna_ccb *ccb);
enum bna_cb_status bna_rx_ucast_set(struct bna_rx *rx, const u8 *ucmac);
enum bna_cb_status bna_rx_ucast_listset(struct bna_rx *rx, int count,
					const u8 *uclist);
enum bna_cb_status bna_rx_mcast_add(struct bna_rx *rx, const u8 *mcmac,
				    void (*cbfn)(struct bnad *,
						 struct bna_rx *));
enum bna_cb_status bna_rx_mcast_listset(struct bna_rx *rx, int count,
					const u8 *mcmac);
void bna_rx_mcast_delall(struct bna_rx *rx);
enum bna_cb_status bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
				   enum bna_rxmode bitmask);
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
void bna_rx_vlan_strip_enable(struct bna_rx *rx);
void bna_rx_vlan_strip_disable(struct bna_rx *rx);

/* ENET */

/* API for RX */
int bna_enet_mtu_get(struct bna_enet *enet);

/* Callbacks for TX, RX */
void bna_enet_cb_tx_stopped(struct bna_enet *enet);
void bna_enet_cb_rx_stopped(struct bna_enet *enet);

/* API for BNAD */
void bna_enet_enable(struct bna_enet *enet);
void bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
		      void (*cbfn)(void *));
void bna_enet_pause_config(struct bna_enet *enet,
			   struct bna_pause_config *pause_config);
void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		      void (*cbfn)(struct bnad *));
void bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac);

/* IOCETH */

/* APIs for BNAD */
void bna_ioceth_enable(struct bna_ioceth *ioceth);
void bna_ioceth_disable(struct bna_ioceth *ioceth,
			enum bna_cleanup_type type);

/* BNAD */

/* Callbacks for ENET */
void bnad_cb_ethport_link_status(struct bnad *bnad,
				 enum bna_link_status status);

/* Callbacks for IOCETH */
void bnad_cb_ioceth_ready(struct bnad *bnad);
void bnad_cb_ioceth_failed(struct bnad *bnad);
void bnad_cb_ioceth_disabled(struct bnad *bnad);
void bnad_cb_mbox_intr_enable(struct bnad *bnad);
void bnad_cb_mbox_intr_disable(struct bnad *bnad);

/* Callbacks for BNA */
void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		       struct bna_stats *stats);

#endif  /* __BNA_H__ */