/*
 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __CHELSIO_COMMON_H
#define __CHELSIO_COMMON_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include "version.h"

#define CH_ERR(adap, fmt, ...)   dev_err(&adap->pdev->dev, fmt, ##__VA_ARGS__)
#define CH_WARN(adap, fmt, ...)  dev_warn(&adap->pdev->dev, fmt, ##__VA_ARGS__)
#define CH_ALERT(adap, fmt, ...) dev_alert(&adap->pdev->dev, fmt, ##__VA_ARGS__)

/*
 * A more powerful macro that prints a message only when the corresponding
 * NETIF_MSG_* category is enabled in the adapter's msg_enable.  Intended for
 * informational and debugging messages.
 */
#define CH_MSG(adapter, level, category, fmt, ...) do { \
	if ((adapter)->msg_enable & NETIF_MSG_##category) \
		dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \
			   ## __VA_ARGS__); \
} while (0)

#ifdef DEBUG
# define CH_DBG(adapter, category, fmt, ...) \
	CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
#else
# define CH_DBG(adapter, category, fmt, ...)
#endif
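
/*
 * Example usage (illustrative only; port_id, val and addr are hypothetical
 * locals): emit an informational message when the LINK category is enabled
 * in msg_enable, and an MMIO trace that is compiled in only with DEBUG.
 *
 *	CH_MSG(adapter, INFO, LINK, "port %d link is up\n", port_id);
 *	CH_DBG(adapter, MMIO, "read 0x%x from register 0x%x\n", val, addr);
 */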

/* Additional NETIF_MSG_* categories */
#define NETIF_MSG_MMIO 0x8000000

enum {
	MAX_NPORTS = 2,		/* max # of ports */
	MAX_FRAME_SIZE = 10240,	/* max MAC frame size, including header + FCS */
	EEPROMSIZE = 8192,	/* Serial EEPROM size */
	SERNUM_LEN     = 16,    /* Serial # length */
	RSS_TABLE_SIZE = 64,	/* size of RSS lookup and mapping tables */
	TCB_SIZE = 128,		/* TCB size */
	NMTUS = 16,		/* size of MTU table */
	NCCTRL_WIN = 32,	/* # of congestion control windows */
	PROTO_SRAM_LINES = 128, /* size of TP sram */
};

#define MAX_RX_COALESCING_LEN 12288U

enum {
	PAUSE_RX = 1 << 0,
	PAUSE_TX = 1 << 1,
	PAUSE_AUTONEG = 1 << 2
};

enum {
	SUPPORTED_IRQ      = 1 << 24
};

enum {				/* adapter interrupt-maintained statistics */
	STAT_ULP_CH0_PBL_OOB,
	STAT_ULP_CH1_PBL_OOB,
	STAT_PCI_CORR_ECC,

	IRQ_NUM_STATS		/* keep last */
};

#define TP_VERSION_MAJOR	1
#define TP_VERSION_MINOR	1
#define TP_VERSION_MICRO	0

#define S_TP_VERSION_MAJOR		16
#define M_TP_VERSION_MAJOR		0xFF
#define V_TP_VERSION_MAJOR(x)		((x) << S_TP_VERSION_MAJOR)
#define G_TP_VERSION_MAJOR(x)		\
	    (((x) >> S_TP_VERSION_MAJOR) & M_TP_VERSION_MAJOR)

#define S_TP_VERSION_MINOR		8
#define M_TP_VERSION_MINOR		0xFF
#define V_TP_VERSION_MINOR(x)		((x) << S_TP_VERSION_MINOR)
#define G_TP_VERSION_MINOR(x)		\
	    (((x) >> S_TP_VERSION_MINOR) & M_TP_VERSION_MINOR)

#define S_TP_VERSION_MICRO		0
#define M_TP_VERSION_MICRO		0xFF
#define V_TP_VERSION_MICRO(x)		((x) << S_TP_VERSION_MICRO)
#define G_TP_VERSION_MICRO(x)		\
	    (((x) >> S_TP_VERSION_MICRO) & M_TP_VERSION_MICRO)
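
/*
 * Illustration of the shift/mask accessors above (not part of the driver):
 * the three version fields pack into a single 32-bit word and can be
 * recovered with the matching G_ macros.
 *
 *	u32 vers = V_TP_VERSION_MAJOR(TP_VERSION_MAJOR) |
 *		   V_TP_VERSION_MINOR(TP_VERSION_MINOR) |
 *		   V_TP_VERSION_MICRO(TP_VERSION_MICRO);
 *
 *	G_TP_VERSION_MAJOR(vers) == TP_VERSION_MAJOR, and likewise for the
 *	minor and micro fields.
 */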

enum {
	SGE_QSETS = 8,		/* # of SGE Tx/Rx/RspQ sets */
	SGE_RXQ_PER_SET = 2,	/* # of Rx queues per set */
	SGE_TXQ_PER_SET = 3	/* # of Tx queues per set */
};

enum sge_context_type {		/* SGE egress context types */
	SGE_CNTXT_RDMA = 0,
	SGE_CNTXT_ETH = 2,
	SGE_CNTXT_OFLD = 4,
	SGE_CNTXT_CTRL = 5
};

enum {
	AN_PKT_SIZE = 32,	/* async notification packet size */
	IMMED_PKT_SIZE = 48	/* packet size for immediate data */
};

struct sg_ent {			/* SGE scatter/gather entry */
	__be32 len[2];
	__be64 addr[2];
};

#ifndef SGE_NUM_GENBITS
/* Must be 1 or 2 */
# define SGE_NUM_GENBITS 2
#endif

#define TX_DESC_FLITS 16U
#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
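
/*
 * With the default SGE_NUM_GENBITS of 2 this works out to
 * WR_FLITS = 16 + 1 - 2 = 15; with a single generation bit it would be 16.
 */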

struct cphy;
struct adapter;

struct mdio_ops {
	int (*read)(struct net_device *dev, int phy_addr, int mmd_addr,
		    u16 reg_addr);
	int (*write)(struct net_device *dev, int phy_addr, int mmd_addr,
		     u16 reg_addr, u16 val);
	unsigned mode_support;
};

struct adapter_info {
	unsigned char nports0;        /* # of ports on channel 0 */
	unsigned char nports1;        /* # of ports on channel 1 */
	unsigned char phy_base_addr;	/* MDIO PHY base address */
	unsigned int gpio_out;	/* GPIO output settings */
	unsigned char gpio_intr[MAX_NPORTS]; /* GPIO PHY IRQ pins */
	unsigned long caps;	/* adapter capabilities */
	const struct mdio_ops *mdio_ops;	/* MDIO operations */
	const char *desc;	/* product description */
};

struct mc5_stats {
	unsigned long parity_err;
	unsigned long active_rgn_full;
	unsigned long nfa_srch_err;
	unsigned long unknown_cmd;
	unsigned long reqq_parity_err;
	unsigned long dispq_parity_err;
	unsigned long del_act_empty;
};

struct mc7_stats {
	unsigned long corr_err;
	unsigned long uncorr_err;
	unsigned long parity_err;
	unsigned long addr_err;
};

struct mac_stats {
	u64 tx_octets;		/* total # of octets in good frames */
	u64 tx_octets_bad;	/* total # of octets in error frames */
	u64 tx_frames;		/* all good frames */
	u64 tx_mcast_frames;	/* good multicast frames */
	u64 tx_bcast_frames;	/* good broadcast frames */
	u64 tx_pause;		/* # of transmitted pause frames */
	u64 tx_deferred;	/* frames with deferred transmissions */
	u64 tx_late_collisions;	/* # of late collisions */
	u64 tx_total_collisions;	/* # of total collisions */
	u64 tx_excess_collisions;	/* frame errors from excessive collisions */
	u64 tx_underrun;	/* # of Tx FIFO underruns */
	u64 tx_len_errs;	/* # of Tx length errors */
	u64 tx_mac_internal_errs;	/* # of internal MAC errors on Tx */
	u64 tx_excess_deferral;	/* # of frames with excessive deferral */
	u64 tx_fcs_errs;	/* # of frames with bad FCS */

	u64 tx_frames_64;	/* # of Tx frames in a particular range */
	u64 tx_frames_65_127;
	u64 tx_frames_128_255;
	u64 tx_frames_256_511;
	u64 tx_frames_512_1023;
	u64 tx_frames_1024_1518;
	u64 tx_frames_1519_max;

	u64 rx_octets;		/* total # of octets in good frames */
	u64 rx_octets_bad;	/* total # of octets in error frames */
	u64 rx_frames;		/* all good frames */
	u64 rx_mcast_frames;	/* good multicast frames */
	u64 rx_bcast_frames;	/* good broadcast frames */
	u64 rx_pause;		/* # of received pause frames */
	u64 rx_fcs_errs;	/* # of received frames with bad FCS */
	u64 rx_align_errs;	/* alignment errors */
	u64 rx_symbol_errs;	/* symbol errors */
	u64 rx_data_errs;	/* data errors */
	u64 rx_sequence_errs;	/* sequence errors */
	u64 rx_runt;		/* # of runt frames */
	u64 rx_jabber;		/* # of jabber frames */
	u64 rx_short;		/* # of short frames */
	u64 rx_too_long;	/* # of oversized frames */
	u64 rx_mac_internal_errs;	/* # of internal MAC errors on Rx */

	u64 rx_frames_64;	/* # of Rx frames in a particular range */
	u64 rx_frames_65_127;
	u64 rx_frames_128_255;
	u64 rx_frames_256_511;
	u64 rx_frames_512_1023;
	u64 rx_frames_1024_1518;
	u64 rx_frames_1519_max;

	u64 rx_cong_drops;	/* # of Rx drops due to SGE congestion */

	unsigned long tx_fifo_parity_err;
	unsigned long rx_fifo_parity_err;
	unsigned long tx_fifo_urun;
	unsigned long rx_fifo_ovfl;
	unsigned long serdes_signal_loss;
	unsigned long xaui_pcs_ctc_err;
	unsigned long xaui_pcs_align_change;

	unsigned long num_toggled; /* # times toggled TxEn due to stuck TX */
	unsigned long num_resets;  /* # times reset due to stuck TX */

	unsigned long link_faults;  /* # detected link faults */
};

struct tp_mib_stats {
	u32 ipInReceive_hi;
	u32 ipInReceive_lo;
	u32 ipInHdrErrors_hi;
	u32 ipInHdrErrors_lo;
	u32 ipInAddrErrors_hi;
	u32 ipInAddrErrors_lo;
	u32 ipInUnknownProtos_hi;
	u32 ipInUnknownProtos_lo;
	u32 ipInDiscards_hi;
	u32 ipInDiscards_lo;
	u32 ipInDelivers_hi;
	u32 ipInDelivers_lo;
	u32 ipOutRequests_hi;
	u32 ipOutRequests_lo;
	u32 ipOutDiscards_hi;
	u32 ipOutDiscards_lo;
	u32 ipOutNoRoutes_hi;
	u32 ipOutNoRoutes_lo;
	u32 ipReasmTimeout;
	u32 ipReasmReqds;
	u32 ipReasmOKs;
	u32 ipReasmFails;

	u32 reserved[8];

	u32 tcpActiveOpens;
	u32 tcpPassiveOpens;
	u32 tcpAttemptFails;
	u32 tcpEstabResets;
	u32 tcpOutRsts;
	u32 tcpCurrEstab;
	u32 tcpInSegs_hi;
	u32 tcpInSegs_lo;
	u32 tcpOutSegs_hi;
	u32 tcpOutSegs_lo;
	u32 tcpRetransSeg_hi;
	u32 tcpRetransSeg_lo;
	u32 tcpInErrs_hi;
	u32 tcpInErrs_lo;
	u32 tcpRtoMin;
	u32 tcpRtoMax;
};

struct tp_params {
	unsigned int nchan;	/* # of channels */
	unsigned int pmrx_size;	/* total PMRX capacity */
	unsigned int pmtx_size;	/* total PMTX capacity */
	unsigned int cm_size;	/* total CM capacity */
	unsigned int chan_rx_size;	/* per channel Rx size */
	unsigned int chan_tx_size;	/* per channel Tx size */
	unsigned int rx_pg_size;	/* Rx page size */
	unsigned int tx_pg_size;	/* Tx page size */
	unsigned int rx_num_pgs;	/* # of Rx pages */
	unsigned int tx_num_pgs;	/* # of Tx pages */
	unsigned int ntimer_qs;	/* # of timer queues */
};

struct qset_params {		/* SGE queue set parameters */
	unsigned int polling;	/* polling/interrupt service for rspq */
	unsigned int coalesce_usecs;	/* irq coalescing timer */
	unsigned int rspq_size;	/* # of entries in response queue */
	unsigned int fl_size;	/* # of entries in regular free list */
	unsigned int jumbo_size;	/* # of entries in jumbo free list */
	unsigned int txq_size[SGE_TXQ_PER_SET];	/* Tx queue sizes */
	unsigned int cong_thres;	/* FL congestion threshold */
	unsigned int vector;		/* Interrupt (line or vector) number */
};

struct sge_params {
	unsigned int max_pkt_size;	/* max offload pkt size */
	struct qset_params qset[SGE_QSETS];
};

struct mc5_params {
	unsigned int mode;	/* selects MC5 width */
	unsigned int nservers;	/* size of server region */
	unsigned int nfilters;	/* size of filter region */
	unsigned int nroutes;	/* size of routing region */
};

/* Default MC5 region sizes */
enum {
	DEFAULT_NSERVERS = 512,
	DEFAULT_NFILTERS = 128
};

/* MC5 modes, these must be non-0 */
enum {
	MC5_MODE_144_BIT = 1,
	MC5_MODE_72_BIT = 2
};

/* MC5 min active region size */
enum { MC5_MIN_TIDS = 16 };

struct vpd_params {
	unsigned int cclk;
	unsigned int mclk;
	unsigned int uclk;
	unsigned int mdc;
	unsigned int mem_timing;
	u8 sn[SERNUM_LEN + 1];
	u8 eth_base[6];
	u8 port_type[MAX_NPORTS];
	unsigned short xauicfg[2];
};

struct pci_params {
	unsigned int vpd_cap_addr;
	unsigned short speed;
	unsigned char width;
	unsigned char variant;
};

enum {
	PCI_VARIANT_PCI,
	PCI_VARIANT_PCIX_MODE1_PARITY,
	PCI_VARIANT_PCIX_MODE1_ECC,
	PCI_VARIANT_PCIX_266_MODE2,
	PCI_VARIANT_PCIE
};

struct adapter_params {
	struct sge_params sge;
	struct mc5_params mc5;
	struct tp_params tp;
	struct vpd_params vpd;
	struct pci_params pci;

	const struct adapter_info *info;

	unsigned short mtus[NMTUS];
	unsigned short a_wnd[NCCTRL_WIN];
	unsigned short b_wnd[NCCTRL_WIN];

	unsigned int nports;	/* # of ethernet ports */
	unsigned int chan_map;  /* bitmap of in-use Tx channels */
	unsigned int stats_update_period;	/* MAC stats accumulation period */
	unsigned int linkpoll_period;	/* link poll period in 0.1s */
	unsigned int rev;	/* chip revision */
	unsigned int offload;
};

enum {					    /* chip revisions */
	T3_REV_A  = 0,
	T3_REV_B  = 2,
	T3_REV_B2 = 3,
	T3_REV_C  = 4,
};

struct trace_params {
	u32 sip;
	u32 sip_mask;
	u32 dip;
	u32 dip_mask;
	u16 sport;
	u16 sport_mask;
	u16 dport;
	u16 dport_mask;
	u32 vlan:12;
	u32 vlan_mask:12;
	u32 intf:4;
	u32 intf_mask:4;
	u8 proto;
	u8 proto_mask;
};

struct link_config {
	unsigned int supported;	/* link capabilities */
	unsigned int advertising;	/* advertised capabilities */
	unsigned short requested_speed;	/* speed user has requested */
	unsigned short speed;	/* actual link speed */
	unsigned char requested_duplex;	/* duplex user has requested */
	unsigned char duplex;	/* actual link duplex */
	unsigned char requested_fc;	/* flow control user has requested */
	unsigned char fc;	/* actual link flow control */
	unsigned char autoneg;	/* autonegotiating? */
	unsigned int link_ok;	/* link up? */
};

#define SPEED_INVALID   0xffff
#define DUPLEX_INVALID  0xff

struct mc5 {
	struct adapter *adapter;
	unsigned int tcam_size;
	unsigned char part_type;
	unsigned char parity_enabled;
	unsigned char mode;
	struct mc5_stats stats;
};

static inline unsigned int t3_mc5_size(const struct mc5 *p)
{
	return p->tcam_size;
}

struct mc7 {
	struct adapter *adapter;	/* backpointer to adapter */
	unsigned int size;	/* memory size in bytes */
	unsigned int width;	/* MC7 interface width */
	unsigned int offset;	/* register address offset for MC7 instance */
	const char *name;	/* name of MC7 instance */
	struct mc7_stats stats;	/* MC7 statistics */
};

static inline unsigned int t3_mc7_size(const struct mc7 *p)
{
	return p->size;
}

struct cmac {
	struct adapter *adapter;
	unsigned int offset;
	unsigned int nucast;	/* # of address filters for unicast MACs */
	unsigned int tx_tcnt;
	unsigned int tx_xcnt;
	u64 tx_mcnt;
	unsigned int rx_xcnt;
	unsigned int rx_ocnt;
	u64 rx_mcnt;
	unsigned int toggle_cnt;
	unsigned int txen;
	u64 rx_pause;
	struct mac_stats stats;
};

enum {
	MAC_DIRECTION_RX = 1,
	MAC_DIRECTION_TX = 2,
	MAC_RXFIFO_SIZE = 32768
};

/* PHY loopback direction */
enum {
	PHY_LOOPBACK_TX = 1,
	PHY_LOOPBACK_RX = 2
};

/* PHY interrupt types */
enum {
	cphy_cause_link_change = 1,
	cphy_cause_fifo_error = 2,
	cphy_cause_module_change = 4,
};

/* PHY module types */
enum {
	phy_modtype_none,
	phy_modtype_sr,
	phy_modtype_lr,
	phy_modtype_lrm,
	phy_modtype_twinax,
	phy_modtype_twinax_long,
	phy_modtype_unknown
};

/* PHY operations */
struct cphy_ops {
	int (*reset)(struct cphy *phy, int wait);

	int (*intr_enable)(struct cphy *phy);
	int (*intr_disable)(struct cphy *phy);
	int (*intr_clear)(struct cphy *phy);
	int (*intr_handler)(struct cphy *phy);

	int (*autoneg_enable)(struct cphy *phy);
	int (*autoneg_restart)(struct cphy *phy);

	int (*advertise)(struct cphy *phy, unsigned int advertise_map);
	int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable);
	int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
	int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
			       int *duplex, int *fc);
	int (*power_down)(struct cphy *phy, int enable);

	u32 mmds;
};

enum {
	EDC_OPT_AEL2005 = 0,
	EDC_OPT_AEL2005_SIZE = 1084,
	EDC_TWX_AEL2005 = 1,
	EDC_TWX_AEL2005_SIZE = 1464,
	EDC_TWX_AEL2020 = 2,
	EDC_TWX_AEL2020_SIZE = 1628,
	EDC_MAX_SIZE = EDC_TWX_AEL2020_SIZE, /* Max cache size */
};

/* A PHY instance */
struct cphy {
	u8 modtype;			/* PHY module type */
	short priv;			/* scratch pad */
	unsigned int caps;		/* PHY capabilities */
	struct adapter *adapter;	/* associated adapter */
	const char *desc;		/* PHY description */
	unsigned long fifo_errors;	/* FIFO over/under-flows */
	const struct cphy_ops *ops;	/* PHY operations */
	struct mdio_if_info mdio;
	u16 phy_cache[EDC_MAX_SIZE];	/* EDC cache */
};

/* Convenience MDIO read/write wrappers */
static inline int t3_mdio_read(struct cphy *phy, int mmd, int reg,
			       unsigned int *valp)
{
	int rc = phy->mdio.mdio_read(phy->mdio.dev, phy->mdio.prtad, mmd, reg);
	*valp = (rc >= 0) ? rc : -1;
	return (rc >= 0) ? 0 : rc;
}

static inline int t3_mdio_write(struct cphy *phy, int mmd, int reg,
				unsigned int val)
{
	return phy->mdio.mdio_write(phy->mdio.dev, phy->mdio.prtad, mmd,
				    reg, val);
}
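
/*
 * Example usage (illustrative only; err and stat are hypothetical locals):
 * read the PMA/PMD status register and request a PHY reset through the
 * wrappers above, using the MMD/register constants from <linux/mdio.h>.
 *
 *	err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &stat);
 *	if (!err)
 *		err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_CTRL1,
 *				    MDIO_CTRL1_RESET);
 */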

/* Convenience initializer */
static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
			     int phy_addr, const struct cphy_ops *phy_ops,
			     const struct mdio_ops *mdio_ops,
			     unsigned int caps, const char *desc)
{
	phy->caps = caps;
	phy->adapter = adapter;
	phy->desc = desc;
	phy->ops = phy_ops;
	if (mdio_ops) {
		phy->mdio.prtad = phy_addr;
		phy->mdio.mmds = phy_ops->mmds;
		phy->mdio.mode_support = mdio_ops->mode_support;
		phy->mdio.mdio_read = mdio_ops->read;
		phy->mdio.mdio_write = mdio_ops->write;
	}
}
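
/*
 * Typical use (sketch only; my_phy_ops, my_phy_prep and the my_phy_*
 * callbacks are hypothetical, not part of this driver): a PHY "prep"
 * routine fills in a cphy with its ops table, capabilities and the board's
 * MDIO accessors.
 *
 *	static const struct cphy_ops my_phy_ops = {
 *		.reset		 = my_phy_reset,
 *		.intr_enable	 = my_phy_intr_enable,
 *		.get_link_status = my_phy_get_link_status,
 *		.mmds		 = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS,
 *	};
 *
 *	int my_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
 *			const struct mdio_ops *mdio_ops)
 *	{
 *		cphy_init(phy, adapter, phy_addr, &my_phy_ops, mdio_ops,
 *			  SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
 *			  "hypothetical 10G PHY");
 *		return 0;
 *	}
 */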

/* Accumulate MAC statistics every 180 seconds.  For 1G we multiply by 10. */
#define MAC_STATS_ACCUM_SECS 180

#define XGM_REG(reg_addr, idx) \
	((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))

struct addr_val_pair {
	unsigned int reg_addr;
	unsigned int val;
};
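
/*
 * addr_val_pair tables are written in one go with t3_write_regs(), declared
 * below.  Illustrative sketch only; the register offsets and values here
 * are made up:
 *
 *	static const struct addr_val_pair init_regs[] = {
 *		{ 0x100, 0x1 },
 *		{ 0x104, 0xffffffff },
 *	};
 *
 *	t3_write_regs(adapter, init_regs, ARRAY_SIZE(init_regs), 0);
 */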

#include "adapter.h"

#ifndef PCI_VENDOR_ID_CHELSIO
# define PCI_VENDOR_ID_CHELSIO 0x1425
#endif

#define for_each_port(adapter, iter) \
	for (iter = 0; iter < (adapter)->params.nports; ++iter)

#define adapter_info(adap) ((adap)->params.info)

static inline int uses_xaui(const struct adapter *adap)
{
	return adapter_info(adap)->caps & SUPPORTED_AUI;
}

static inline int is_10G(const struct adapter *adap)
{
	return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full;
}

static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
	return adap->params.vpd.cclk / 1000;
}

static inline unsigned int is_pcie(const struct adapter *adap)
{
	return adap->params.pci.variant == PCI_VARIANT_PCIE;
}
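
/*
 * Example usage (illustrative only; configure_port(), usecs and ticks are
 * hypothetical): walk the adapter's ports and convert a microsecond value
 * into core clock ticks with the helpers above.
 *
 *	int i;
 *
 *	for_each_port(adapter, i)
 *		configure_port(adapter, i, is_10G(adapter));
 *
 *	ticks = usecs * core_ticks_per_usec(adapter);
 */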

void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
		      u32 val);
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset);
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp);
static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
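
/*
 * Example usage (illustrative only; MY_CMD_REG and MY_BUSY_BIT are made-up
 * names): poll up to 10 times, with a short delay between attempts, until a
 * busy bit deasserts (polarity 0 waits for the masked bits to read as clear).
 *
 *	ret = t3_wait_op_done(adapter, MY_CMD_REG, MY_BUSY_BIT, 0, 10, 2);
 */
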
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set);
int t3_phy_reset(struct cphy *phy, int mmd, int wait);
int t3_phy_advertise(struct cphy *phy, unsigned int advert);
int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert);
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);
int t3_phy_lasi_intr_enable(struct cphy *phy);
int t3_phy_lasi_intr_disable(struct cphy *phy);
int t3_phy_lasi_intr_clear(struct cphy *phy);
int t3_phy_lasi_intr_handler(struct cphy *phy);

void t3_intr_enable(struct adapter *adapter);
void t3_intr_disable(struct adapter *adapter);
void t3_intr_clear(struct adapter *adapter);
void t3_xgm_intr_enable(struct adapter *adapter, int idx);
void t3_xgm_intr_disable(struct adapter *adapter, int idx);
void t3_port_intr_enable(struct adapter *adapter, int idx);
void t3_port_intr_disable(struct adapter *adapter, int idx);
int t3_slow_intr_handler(struct adapter *adapter);
int t3_phy_intr_handler(struct adapter *adapter);

void t3_link_changed(struct adapter *adapter, int port_id);
void t3_link_fault(struct adapter *adapter, int port_id);
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
int t3_seeprom_wp(struct adapter *adapter, int enable);
int t3_get_tp_version(struct adapter *adapter, u32 *vers);
int t3_check_tpsram_version(struct adapter *adapter);
int t3_check_tpsram(struct adapter *adapter, const u8 *tp_ram,
		    unsigned int size);
int t3_set_proto_sram(struct adapter *adap, const u8 *data);
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
int t3_get_fw_version(struct adapter *adapter, u32 *vers);
int t3_check_fw_version(struct adapter *adapter);
int t3_init_hw(struct adapter *adapter, u32 fw_params);
int t3_reset_adapter(struct adapter *adapter);
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
		    int reset);
int t3_replay_prep_adapter(struct adapter *adapter);
void t3_led_ready(struct adapter *adapter);
void t3_fatal_err(struct adapter *adapter);
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
		   const u8 *cpus, const u16 *rspq);
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp);
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf);

int t3_mac_reset(struct cmac *mac);
void t3b_pcs_reset(struct cmac *mac);
void t3_mac_disable_exact_filters(struct cmac *mac);
void t3_mac_enable_exact_filters(struct cmac *mac);
int t3_mac_enable(struct cmac *mac, int which);
int t3_mac_disable(struct cmac *mac, int which);
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev);
int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6]);
int t3_mac_set_num_ucast(struct cmac *mac, int n);
const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
int t3b2_mac_watchdog_task(struct cmac *mac);

void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
		unsigned int nroutes);
void t3_mc5_intr_handler(struct mc5 *mc5);

void t3_tp_set_offload_mode(struct adapter *adap, int enable);
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps);
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
void t3_config_trace_filter(struct adapter *adapter,
			    const struct trace_params *tp, int filter_index,
			    int invert, int enable);
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);

void t3_sge_prep(struct adapter *adap, struct sge_params *p);
void t3_sge_init(struct adapter *adap, struct sge_params *p);
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
		       unsigned int cidx);
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
			int gts_enable, u64 base_addr, unsigned int size,
			unsigned int esize, unsigned int cong_thres, int gen,
			unsigned int cidx);
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
			 int irq_vec_idx, u64 base_addr, unsigned int size,
			 unsigned int fl_thres, int gen, unsigned int cidx);
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
			unsigned int size, int rspq, int ovfl_mode,
			unsigned int credits, unsigned int credit_thres);
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable);
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id);
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id);
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id);
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
		      unsigned int credits);

int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *mdio_ops);
int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *mdio_ops);
int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *mdio_ops);
int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *mdio_ops);
int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *mdio_ops);
int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
		       const struct mdio_ops *mdio_ops);
int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
			    int phy_addr, const struct mdio_ops *mdio_ops);
int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter,
		       int phy_addr, const struct mdio_ops *mdio_ops);

extern struct workqueue_struct *cxgb3_wq;

#endif				/* __CHELSIO_COMMON_H */