xref: /linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c (revision 957e3facd147510f2cf8780e38606f1d707f0e33)
1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36 
37 #include <linux/bitmap.h>
38 #include <linux/crc32.h>
39 #include <linux/ctype.h>
40 #include <linux/debugfs.h>
41 #include <linux/err.h>
42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h>
44 #include <linux/if.h>
45 #include <linux/if_vlan.h>
46 #include <linux/init.h>
47 #include <linux/log2.h>
48 #include <linux/mdio.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/mutex.h>
52 #include <linux/netdevice.h>
53 #include <linux/pci.h>
54 #include <linux/aer.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h>
57 #include <linux/seq_file.h>
58 #include <linux/sockios.h>
59 #include <linux/vmalloc.h>
60 #include <linux/workqueue.h>
61 #include <net/neighbour.h>
62 #include <net/netevent.h>
63 #include <net/addrconf.h>
64 #include <net/bonding.h>
65 #include <asm/uaccess.h>
66 
67 #include "cxgb4.h"
68 #include "t4_regs.h"
69 #include "t4_msg.h"
70 #include "t4fw_api.h"
71 #include "cxgb4_dcb.h"
72 #include "cxgb4_debugfs.h"
73 #include "l2t.h"
74 
75 #ifdef DRV_VERSION
76 #undef DRV_VERSION
77 #endif
78 #define DRV_VERSION "2.0.0-ko"
79 #define DRV_DESC "Chelsio T4/T5 Network Driver"
80 
81 /*
82  * Max interrupt hold-off timer value in us.  Queues fall back to this value
83  * under extreme memory pressure so it's largish to give the system time to
84  * recover.
85  */
86 #define MAX_SGE_TIMERVAL 200U
87 
88 enum {
89 	/*
90 	 * Physical Function provisioning constants.
91 	 */
92 	PFRES_NVI = 4,			/* # of Virtual Interfaces */
93 	PFRES_NETHCTRL = 128,		/* # of EQs used for ETH or CTRL Qs */
94 	PFRES_NIQFLINT = 128,		/* # of ingress Qs/w Free List(s)/intr
95 					 */
96 	PFRES_NEQ = 256,		/* # of egress queues */
97 	PFRES_NIQ = 0,			/* # of ingress queues */
98 	PFRES_TC = 0,			/* PCI-E traffic class */
99 	PFRES_NEXACTF = 128,		/* # of exact MPS filters */
100 
101 	PFRES_R_CAPS = FW_CMD_CAP_PF,
102 	PFRES_WX_CAPS = FW_CMD_CAP_PF,
103 
104 #ifdef CONFIG_PCI_IOV
105 	/*
106 	 * Virtual Function provisioning constants.  We need two extra Ingress
107 	 * Queues with Interrupt capability to serve as the VF's Firmware
108 	 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
109 	 * neither will have Free Lists associated with them).  For each
110 	 * Ethernet/Control Egress Queue and for each Free List, we need an
111 	 * Egress Context.
112 	 */
113 	VFRES_NPORTS = 1,		/* # of "ports" per VF */
114 	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */
115 
116 	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
117 	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
118 	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
119 	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
120 	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
121 	VFRES_TC = 0,			/* PCI-E traffic class */
122 	VFRES_NEXACTF = 16,		/* # of exact MPS filters */
123 
124 	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
125 	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
126 #endif
127 };
128 
129 /*
130  * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
131  * static and likely not to be useful in the long run.  We really need to
132  * implement some form of persistent configuration which the firmware
133  * controls.
134  */
135 static unsigned int pfvfres_pmask(struct adapter *adapter,
136 				  unsigned int pf, unsigned int vf)
137 {
138 	unsigned int portn, portvec;
139 
140 	/*
141 	 * Give PF's access to all of the ports.
142 	 */
143 	if (vf == 0)
144 		return FW_PFVF_CMD_PMASK_M;
145 
146 	/*
147 	 * For VFs, we'll assign them access to the ports based purely on the
148 	 * PF.  We assign active ports in order, wrapping around if there are
149 	 * fewer active ports than PFs: e.g. active port[pf % nports].
150 	 * Unfortunately the adapter's port_info structs haven't been
151 	 * initialized yet so we have to compute this.
152 	 */
153 	if (adapter->params.nports == 0)
154 		return 0;
155 
156 	portn = pf % adapter->params.nports;
157 	portvec = adapter->params.portvec;
158 	for (;;) {
159 		/*
160 		 * Isolate the lowest set bit in the port vector.  If we're at
161 		 * the port number that we want, return that as the pmask;
162 		 * otherwise mask that bit out of the port vector and
163 		 * decrement our port number ...
164 		 */
165 		unsigned int pmask = portvec ^ (portvec & (portvec-1));
166 		if (portn == 0)
167 			return pmask;
168 		portn--;
169 		portvec &= ~pmask;
170 	}
171 	/*NOTREACHED*/
172 }
173 
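/* Min/max bounds on the sizes of the various SGE queue types. */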
174 enum {
175 	MAX_TXQ_ENTRIES      = 16384,
176 	MAX_CTRL_TXQ_ENTRIES = 1024,
177 	MAX_RSPQ_ENTRIES     = 16384,
178 	MAX_RX_BUFFERS       = 16384,
179 	MIN_TXQ_ENTRIES      = 32,
180 	MIN_CTRL_TXQ_ENTRIES = 32,
181 	MIN_RSPQ_ENTRIES     = 128,
182 	MIN_FL_ENTRIES       = 16
183 };
184 
185 /* Host shadow copy of ingress filter entry.  This is in host native format
186  * and doesn't match the ordering or bit order, etc. of the hardware or the
187  * firmware command.  The use of bit-field structure elements is purely to
188  * remind ourselves of the field size limitations and save memory in the case
189  * where the filter table is large.
190  */
191 struct filter_entry {
192 	/* Administrative fields for filter.
193 	 */
194 	u32 valid:1;            /* filter allocated and valid */
195 	u32 locked:1;           /* filter is administratively locked */
196 
197 	u32 pending:1;          /* filter action is pending firmware reply */
198 	u32 smtidx:8;           /* Source MAC Table index for smac */
199 	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */
200 
201 	/* The filter itself.  Most of this is a straight copy of information
202 	 * provided by the extended ioctl().  Some fields are translated to
203 	 * internal forms -- for instance the Ingress Queue ID passed in from
204 	 * the ioctl() is translated into the Absolute Ingress Queue ID.
205 	 */
206 	struct ch_filter_specification fs;
207 };
208 
209 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
210 			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
211 			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
212 
213 /* Macros needed to support the PCI Device ID Table ...
214  */
215 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
216 	static struct pci_device_id cxgb4_pci_tbl[] = {
217 #define CH_PCI_DEVICE_ID_FUNCTION 0x4
218 
219 /* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
220  * called for both.
221  */
222 #define CH_PCI_DEVICE_ID_FUNCTION2 0x0
223 
224 #define CH_PCI_ID_TABLE_ENTRY(devid) \
225 		{PCI_VDEVICE(CHELSIO, (devid)), 4}
226 
227 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
228 		{ 0, } \
229 	}
230 
231 #include "t4_pci_id_tbl.h"
232 
233 #define FW4_FNAME "cxgb4/t4fw.bin"
234 #define FW5_FNAME "cxgb4/t5fw.bin"
235 #define FW4_CFNAME "cxgb4/t4-config.txt"
236 #define FW5_CFNAME "cxgb4/t5-config.txt"
237 
238 MODULE_DESCRIPTION(DRV_DESC);
239 MODULE_AUTHOR("Chelsio Communications");
240 MODULE_LICENSE("Dual BSD/GPL");
241 MODULE_VERSION(DRV_VERSION);
242 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
243 MODULE_FIRMWARE(FW4_FNAME);
244 MODULE_FIRMWARE(FW5_FNAME);
245 
246 /*
247  * Normally we're willing to become the firmware's Master PF but will be happy
248  * if another PF has already become the Master and initialized the adapter.
249  * Setting "force_init" will cause this driver to forcibly establish itself as
250  * the Master PF and initialize the adapter.
251  */
252 static uint force_init;
253 
254 module_param(force_init, uint, 0644);
255 MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
256 
257 /*
258  * Normally if the firmware we connect to has Configuration File support, we
259  * use that and only fall back to the old Driver-based initialization if the
260  * Configuration File fails for some reason.  If force_old_init is set, then
261  * we'll always use the old Driver-based initialization sequence.
262  */
263 static uint force_old_init;
264 
265 module_param(force_old_init, uint, 0644);
266 MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
267 
268 static int dflt_msg_enable = DFLT_MSG_ENABLE;
269 
270 module_param(dflt_msg_enable, int, 0644);
271 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
272 
273 /*
274  * The driver uses the best interrupt scheme available on a platform in the
275  * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
276  * of these schemes the driver may consider as follows:
277  *
278  * msi = 2: choose from among all three options
279  * msi = 1: only consider MSI and INTx interrupts
280  * msi = 0: force INTx interrupts
281  */
282 static int msi = 2;
283 
284 module_param(msi, int, 0644);
285 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
286 
287 /*
288  * Queue interrupt hold-off timer values.  Queues default to the first of these
289  * upon creation.
290  */
291 static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
292 
293 module_param_array(intr_holdoff, uint, NULL, 0644);
294 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
295 		 "0..4 in microseconds");
296 
297 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
298 
299 module_param_array(intr_cnt, uint, NULL, 0644);
300 MODULE_PARM_DESC(intr_cnt,
301 		 "thresholds 1..3 for queue interrupt packet counters");
302 
303 /*
304  * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
305  * offset by 2 bytes in order to have the IP headers line up on 4-byte
306  * boundaries.  This is a requirement for many architectures which will throw
307  * a machine check fault if an attempt is made to access one of the 4-byte IP
308  * header fields on a non-4-byte boundary.  And it's a major performance issue
309  * even on some architectures which allow it like some implementations of the
310  * x86 ISA.  However, some architectures don't mind this and for some very
311  * edge-case performance sensitive applications (like forwarding large volumes
312  * of small packets), setting this DMA offset to 0 will decrease the number of
313  * PCI-E Bus transfers enough to measurably affect performance.
314  */
315 static int rx_dma_offset = 2;
316 
317 static bool vf_acls;
318 
319 #ifdef CONFIG_PCI_IOV
320 module_param(vf_acls, bool, 0644);
321 MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
322 
323 /* Configure the number of PCI-E Virtual Functions which are to be instantiated
324  * on SR-IOV Capable Physical Functions.
325  */
326 static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
327 
328 module_param_array(num_vf, uint, NULL, 0644);
329 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
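/* For example, loading the driver with num_vf=4,4,0,0 would instantiate four
 * VFs on each of PF0 and PF1 and none on PF2 and PF3.
 */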
330 #endif
331 
332 /* TX Queue select used to determine which algorithm to use for selecting the
333  * TX queue. Select between the kernel-provided function (select_queue=0) or
334  * the driver's cxgb_select_queue() function (select_queue=1).
335  *
336  * Default: select_queue=0
337  */
338 static int select_queue;
339 module_param(select_queue, int, 0644);
340 MODULE_PARM_DESC(select_queue,
341 		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
342 
343 /*
344  * The filter TCAM has a fixed portion and a variable portion.  The fixed
345  * portion can match on source/destination IPv4/IPv6 addresses and TCP/UDP
346  * ports.  The variable portion is 36 bits which can include things like Exact
347  * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
348  * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
349  * far exceed the 36-bit budget for this "compressed" header portion of the
350  * filter.  Thus, we have a scarce resource which must be carefully managed.
351  *
352  * By default we set this up to mostly match the set of filter matching
353  * capabilities of T3 but with accommodations for some of T4's more
354  * interesting features:
355  *
356  *   { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
357  *     [Inner] VLAN (17), Port (3), FCoE (1) }
358  */
359 enum {
360 	TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
361 	TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
362 	TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
363 };
364 
365 static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
366 
367 module_param(tp_vlan_pri_map, uint, 0644);
368 MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
369 
370 static struct dentry *cxgb4_debugfs_root;
371 
372 static LIST_HEAD(adapter_list);
373 static DEFINE_MUTEX(uld_mutex);
374 /* Adapter list to be accessed from atomic context */
375 static LIST_HEAD(adap_rcu_list);
376 static DEFINE_SPINLOCK(adap_rcu_lock);
377 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
378 static const char *uld_str[] = { "RDMA", "iSCSI" };
379 
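/* Log the current link state (speed, duplex and pause settings) for a port's
 * net device.
 */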
380 static void link_report(struct net_device *dev)
381 {
382 	if (!netif_carrier_ok(dev))
383 		netdev_info(dev, "link down\n");
384 	else {
385 		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
386 
387 		const char *s = "10Mbps";
388 		const struct port_info *p = netdev_priv(dev);
389 
390 		switch (p->link_cfg.speed) {
391 		case 10000:
392 			s = "10Gbps";
393 			break;
394 		case 1000:
395 			s = "1000Mbps";
396 			break;
397 		case 100:
398 			s = "100Mbps";
399 			break;
400 		case 40000:
401 			s = "40Gbps";
402 			break;
403 		}
404 
405 		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
406 			    fc[p->link_cfg.fc]);
407 	}
408 }
409 
410 #ifdef CONFIG_CHELSIO_T4_DCB
411 /* Set up/tear down Data Center Bridging Priority mapping for a net device. */
412 static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
413 {
414 	struct port_info *pi = netdev_priv(dev);
415 	struct adapter *adap = pi->adapter;
416 	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
417 	int i;
418 
419 	/* We use a simple mapping of Port TX Queue Index to DCB
420 	 * Priority when we're enabling DCB.
421 	 */
422 	for (i = 0; i < pi->nqsets; i++, txq++) {
423 		u32 name, value;
424 		int err;
425 
426 		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
427 			FW_PARAMS_PARAM_X_V(
428 				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
429 			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
430 		value = enable ? i : 0xffffffff;
431 
432 		/* Since we can be called while atomic (from "interrupt
433 		 * level") we need to issue the Set Parameters Commannd
434 		 * without sleeping (timeout < 0).
435 		 */
436 		err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
437 					    &name, &value);
438 
439 		if (err)
440 			dev_err(adap->pdev_dev,
441 				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
442 				enable ? "set" : "unset", pi->port_id, i, -err);
443 		else
444 			txq->dcb_prio = value;
445 	}
446 }
447 #endif /* CONFIG_CHELSIO_T4_DCB */
448 
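/* OS callback invoked by the common code when a port's link state changes:
 * update the netif carrier state and log the change.
 */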
449 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
450 {
451 	struct net_device *dev = adapter->port[port_id];
452 
453 	/* Skip changes from disabled ports. */
454 	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
455 		if (link_stat)
456 			netif_carrier_on(dev);
457 		else {
458 #ifdef CONFIG_CHELSIO_T4_DCB
459 			cxgb4_dcb_state_init(dev);
460 			dcb_tx_queue_prio_enable(dev, false);
461 #endif /* CONFIG_CHELSIO_T4_DCB */
462 			netif_carrier_off(dev);
463 		}
464 
465 		link_report(dev);
466 	}
467 }
468 
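/* OS callback invoked by the common code when a port's transceiver module
 * changes: log the new module type (or its removal).
 */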
469 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
470 {
471 	static const char *mod_str[] = {
472 		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
473 	};
474 
475 	const struct net_device *dev = adap->port[port_id];
476 	const struct port_info *pi = netdev_priv(dev);
477 
478 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
479 		netdev_info(dev, "port module unplugged\n");
480 	else if (pi->mod_type < ARRAY_SIZE(mod_str))
481 		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
482 }
483 
484 /*
485  * Configure the exact and hash address filters to handle a port's multicast
486  * and secondary unicast MAC addresses.
487  */
488 static int set_addr_filters(const struct net_device *dev, bool sleep)
489 {
490 	u64 mhash = 0;
491 	u64 uhash = 0;
492 	bool free = true;
493 	u16 filt_idx[7];
494 	const u8 *addr[7];
495 	int ret, naddr = 0;
496 	const struct netdev_hw_addr *ha;
497 	int uc_cnt = netdev_uc_count(dev);
498 	int mc_cnt = netdev_mc_count(dev);
499 	const struct port_info *pi = netdev_priv(dev);
500 	unsigned int mb = pi->adapter->fn;
501 
502 	/* first do the secondary unicast addresses */
503 	netdev_for_each_uc_addr(ha, dev) {
504 		addr[naddr++] = ha->addr;
505 		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
506 			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
507 					naddr, addr, filt_idx, &uhash, sleep);
508 			if (ret < 0)
509 				return ret;
510 
511 			free = false;
512 			naddr = 0;
513 		}
514 	}
515 
516 	/* next set up the multicast addresses */
517 	netdev_for_each_mc_addr(ha, dev) {
518 		addr[naddr++] = ha->addr;
519 		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
520 			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
521 					naddr, addr, filt_idx, &mhash, sleep);
522 			if (ret < 0)
523 				return ret;
524 
525 			free = false;
526 			naddr = 0;
527 		}
528 	}
529 
530 	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
531 				uhash | mhash, sleep);
532 }
533 
534 int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
535 module_param(dbfifo_int_thresh, int, 0644);
536 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
537 
538 /*
539  * usecs to sleep while draining the dbfifo
540  */
541 static int dbfifo_drain_delay = 1000;
542 module_param(dbfifo_drain_delay, int, 0644);
543 MODULE_PARM_DESC(dbfifo_drain_delay,
544 		 "usecs to sleep while draining the dbfifo");
545 
546 /*
547  * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
548  * If @mtu is -1 it is left unchanged.
549  */
550 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
551 {
552 	int ret;
553 	struct port_info *pi = netdev_priv(dev);
554 
555 	ret = set_addr_filters(dev, sleep_ok);
556 	if (ret == 0)
557 		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
558 				    (dev->flags & IFF_PROMISC) ? 1 : 0,
559 				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
560 				    sleep_ok);
561 	return ret;
562 }
563 
564 /**
565  *	link_start - enable a port
566  *	@dev: the port to enable
567  *
568  *	Performs the MAC and PHY actions needed to enable a port.
569  */
570 static int link_start(struct net_device *dev)
571 {
572 	int ret;
573 	struct port_info *pi = netdev_priv(dev);
574 	unsigned int mb = pi->adapter->fn;
575 
576 	/*
577 	 * We do not set address filters and promiscuity here; the stack does
578 	 * that step explicitly.
579 	 */
580 	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
581 			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
582 	if (ret == 0) {
583 		ret = t4_change_mac(pi->adapter, mb, pi->viid,
584 				    pi->xact_addr_filt, dev->dev_addr, true,
585 				    true);
586 		if (ret >= 0) {
587 			pi->xact_addr_filt = ret;
588 			ret = 0;
589 		}
590 	}
591 	if (ret == 0)
592 		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
593 				    &pi->link_cfg);
594 	if (ret == 0) {
595 		local_bh_disable();
596 		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
597 					  true, CXGB4_DCB_ENABLED);
598 		local_bh_enable();
599 	}
600 
601 	return ret;
602 }
603 
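/* Return whether Data Center Bridging is currently enabled and operational
 * (firmware fully synced or host-managed) on the given net device.
 */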
604 int cxgb4_dcb_enabled(const struct net_device *dev)
605 {
606 #ifdef CONFIG_CHELSIO_T4_DCB
607 	struct port_info *pi = netdev_priv(dev);
608 
609 	if (!pi->dcb.enabled)
610 		return 0;
611 
612 	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
613 		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
614 #else
615 	return 0;
616 #endif
617 }
618 EXPORT_SYMBOL(cxgb4_dcb_enabled);
619 
620 #ifdef CONFIG_CHELSIO_T4_DCB
621 /* Handle a Data Center Bridging update message from the firmware. */
622 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
623 {
624 	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
625 	struct net_device *dev = adap->port[port];
626 	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
627 	int new_dcb_enabled;
628 
629 	cxgb4_dcb_handle_fw_update(adap, pcmd);
630 	new_dcb_enabled = cxgb4_dcb_enabled(dev);
631 
632 	/* If the DCB has become enabled or disabled on the port then we're
633 	 * going to need to set up/tear down DCB Priority parameters for the
634 	 * TX Queues associated with the port.
635 	 */
636 	if (new_dcb_enabled != old_dcb_enabled)
637 		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
638 }
639 #endif /* CONFIG_CHELSIO_T4_DCB */
640 
641 /* Clear a filter and release any of its resources that we own.  This also
642  * clears the filter's "pending" status.
643  */
644 static void clear_filter(struct adapter *adap, struct filter_entry *f)
645 {
646 	/* If the new or old filter has loopback rewriting rules then we'll
647 	 * need to free any existing Layer Two Table (L2T) entries of the old
648 	 * filter rule.  The firmware will handle freeing up any Source MAC
649 	 * Table (SMT) entries used for rewriting Source MAC Addresses in
650 	 * loopback rules.
651 	 */
652 	if (f->l2t)
653 		cxgb4_l2t_release(f->l2t);
654 
655 	/* The zeroing of the filter rule below clears the filter valid,
656 	 * pending, locked flags, l2t pointer, etc. so it's all we need for
657 	 * this operation.
658 	 */
659 	memset(f, 0, sizeof(*f));
660 }
661 
662 /* Handle a filter write/deletion reply.
663  */
664 static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
665 {
666 	unsigned int idx = GET_TID(rpl);
667 	unsigned int nidx = idx - adap->tids.ftid_base;
668 	unsigned int ret;
669 	struct filter_entry *f;
670 
671 	if (idx >= adap->tids.ftid_base && nidx <
672 	   (adap->tids.nftids + adap->tids.nsftids)) {
673 		idx = nidx;
674 		ret = GET_TCB_COOKIE(rpl->cookie);
675 		f = &adap->tids.ftid_tab[idx];
676 
677 		if (ret == FW_FILTER_WR_FLT_DELETED) {
678 			/* Clear the filter when we get confirmation from the
679 			 * hardware that the filter has been deleted.
680 			 */
681 			clear_filter(adap, f);
682 		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
683 			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
684 				idx);
685 			clear_filter(adap, f);
686 		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
687 			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
688 			f->pending = 0;  /* asynchronous setup completed */
689 			f->valid = 1;
690 		} else {
691 			/* Something went wrong.  Issue a warning about the
692 			 * problem and clear everything out.
693 			 */
694 			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
695 				idx, ret);
696 			clear_filter(adap, f);
697 		}
698 	}
699 }
700 
701 /* Response queue handler for the FW event queue.
702  */
703 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
704 			  const struct pkt_gl *gl)
705 {
706 	u8 opcode = ((const struct rss_header *)rsp)->opcode;
707 
708 	rsp++;                                          /* skip RSS header */
709 
710 	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
711 	 */
712 	if (unlikely(opcode == CPL_FW4_MSG &&
713 	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
714 		rsp++;
715 		opcode = ((const struct rss_header *)rsp)->opcode;
716 		rsp++;
717 		if (opcode != CPL_SGE_EGR_UPDATE) {
718 			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
719 				, opcode);
720 			goto out;
721 		}
722 	}
723 
724 	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
725 		const struct cpl_sge_egr_update *p = (void *)rsp;
726 		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
727 		struct sge_txq *txq;
728 
729 		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
730 		txq->restarts++;
731 		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
732 			struct sge_eth_txq *eq;
733 
734 			eq = container_of(txq, struct sge_eth_txq, q);
735 			netif_tx_wake_queue(eq->txq);
736 		} else {
737 			struct sge_ofld_txq *oq;
738 
739 			oq = container_of(txq, struct sge_ofld_txq, q);
740 			tasklet_schedule(&oq->qresume_tsk);
741 		}
742 	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
743 		const struct cpl_fw6_msg *p = (void *)rsp;
744 
745 #ifdef CONFIG_CHELSIO_T4_DCB
746 		const struct fw_port_cmd *pcmd = (const void *)p->data;
747 		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
748 		unsigned int action =
749 			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));
750 
751 		if (cmd == FW_PORT_CMD &&
752 		    action == FW_PORT_ACTION_GET_PORT_INFO) {
753 			int port = FW_PORT_CMD_PORTID_G(
754 					be32_to_cpu(pcmd->op_to_portid));
755 			struct net_device *dev = q->adap->port[port];
756 			int state_input = ((pcmd->u.info.dcbxdis_pkd &
757 					    FW_PORT_CMD_DCBXDIS_F)
758 					   ? CXGB4_DCB_INPUT_FW_DISABLED
759 					   : CXGB4_DCB_INPUT_FW_ENABLED);
760 
761 			cxgb4_dcb_state_fsm(dev, state_input);
762 		}
763 
764 		if (cmd == FW_PORT_CMD &&
765 		    action == FW_PORT_ACTION_L2_DCB_CFG)
766 			dcb_rpl(q->adap, pcmd);
767 		else
768 #endif
769 			if (p->type == 0)
770 				t4_handle_fw_rpl(q->adap, p->data);
771 	} else if (opcode == CPL_L2T_WRITE_RPL) {
772 		const struct cpl_l2t_write_rpl *p = (void *)rsp;
773 
774 		do_l2t_write_rpl(q->adap, p);
775 	} else if (opcode == CPL_SET_TCB_RPL) {
776 		const struct cpl_set_tcb_rpl *p = (void *)rsp;
777 
778 		filter_rpl(q->adap, p);
779 	} else
780 		dev_err(q->adap->pdev_dev,
781 			"unexpected CPL %#x on FW event queue\n", opcode);
782 out:
783 	return 0;
784 }
785 
786 /**
787  *	uldrx_handler - response queue handler for ULD queues
788  *	@q: the response queue that received the packet
789  *	@rsp: the response queue descriptor holding the offload message
790  *	@gl: the gather list of packet fragments
791  *
792  *	Deliver an ingress offload packet to a ULD.  All processing is done by
793  *	the ULD, we just maintain statistics.
794  */
795 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
796 			 const struct pkt_gl *gl)
797 {
798 	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
799 
800 	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
801 	 */
802 	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
803 	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
804 		rsp += 2;
805 
806 	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
807 		rxq->stats.nomem++;
808 		return -1;
809 	}
810 	if (gl == NULL)
811 		rxq->stats.imm++;
812 	else if (gl == CXGB4_MSG_AN)
813 		rxq->stats.an++;
814 	else
815 		rxq->stats.pkts++;
816 	return 0;
817 }
818 
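/* Disable whichever of MSI-X or MSI is currently in use and clear the
 * corresponding adapter flag.
 */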
819 static void disable_msi(struct adapter *adapter)
820 {
821 	if (adapter->flags & USING_MSIX) {
822 		pci_disable_msix(adapter->pdev);
823 		adapter->flags &= ~USING_MSIX;
824 	} else if (adapter->flags & USING_MSI) {
825 		pci_disable_msi(adapter->pdev);
826 		adapter->flags &= ~USING_MSI;
827 	}
828 }
829 
830 /*
831  * Interrupt handler for non-data events used with MSI-X.
832  */
833 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
834 {
835 	struct adapter *adap = cookie;
836 
837 	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
838 	if (v & PFSW) {
839 		adap->swintr = 1;
840 		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
841 	}
842 	t4_slow_intr_handler(adap);
843 	return IRQ_HANDLED;
844 }
845 
846 /*
847  * Name the MSI-X interrupts.
848  */
849 static void name_msix_vecs(struct adapter *adap)
850 {
851 	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
852 
853 	/* non-data interrupts */
854 	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
855 
856 	/* FW events */
857 	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
858 		 adap->port[0]->name);
859 
860 	/* Ethernet queues */
861 	for_each_port(adap, j) {
862 		struct net_device *d = adap->port[j];
863 		const struct port_info *pi = netdev_priv(d);
864 
865 		for (i = 0; i < pi->nqsets; i++, msi_idx++)
866 			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
867 				 d->name, i);
868 	}
869 
870 	/* offload queues */
871 	for_each_ofldrxq(&adap->sge, i)
872 		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
873 			 adap->port[0]->name, i);
874 
875 	for_each_rdmarxq(&adap->sge, i)
876 		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
877 			 adap->port[0]->name, i);
878 
879 	for_each_rdmaciq(&adap->sge, i)
880 		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
881 			 adap->port[0]->name, i);
882 }
883 
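/* Request the MSI-X vectors for the firmware event queue and all Ethernet,
 * offload and RDMA Rx queues, unwinding all earlier requests on failure.
 */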
884 static int request_msix_queue_irqs(struct adapter *adap)
885 {
886 	struct sge *s = &adap->sge;
887 	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
888 	int msi_index = 2;
889 
890 	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
891 			  adap->msix_info[1].desc, &s->fw_evtq);
892 	if (err)
893 		return err;
894 
895 	for_each_ethrxq(s, ethqidx) {
896 		err = request_irq(adap->msix_info[msi_index].vec,
897 				  t4_sge_intr_msix, 0,
898 				  adap->msix_info[msi_index].desc,
899 				  &s->ethrxq[ethqidx].rspq);
900 		if (err)
901 			goto unwind;
902 		msi_index++;
903 	}
904 	for_each_ofldrxq(s, ofldqidx) {
905 		err = request_irq(adap->msix_info[msi_index].vec,
906 				  t4_sge_intr_msix, 0,
907 				  adap->msix_info[msi_index].desc,
908 				  &s->ofldrxq[ofldqidx].rspq);
909 		if (err)
910 			goto unwind;
911 		msi_index++;
912 	}
913 	for_each_rdmarxq(s, rdmaqidx) {
914 		err = request_irq(adap->msix_info[msi_index].vec,
915 				  t4_sge_intr_msix, 0,
916 				  adap->msix_info[msi_index].desc,
917 				  &s->rdmarxq[rdmaqidx].rspq);
918 		if (err)
919 			goto unwind;
920 		msi_index++;
921 	}
922 	for_each_rdmaciq(s, rdmaciqqidx) {
923 		err = request_irq(adap->msix_info[msi_index].vec,
924 				  t4_sge_intr_msix, 0,
925 				  adap->msix_info[msi_index].desc,
926 				  &s->rdmaciq[rdmaciqqidx].rspq);
927 		if (err)
928 			goto unwind;
929 		msi_index++;
930 	}
931 	return 0;
932 
933 unwind:
934 	while (--rdmaciqqidx >= 0)
935 		free_irq(adap->msix_info[--msi_index].vec,
936 			 &s->rdmaciq[rdmaciqqidx].rspq);
937 	while (--rdmaqidx >= 0)
938 		free_irq(adap->msix_info[--msi_index].vec,
939 			 &s->rdmarxq[rdmaqidx].rspq);
940 	while (--ofldqidx >= 0)
941 		free_irq(adap->msix_info[--msi_index].vec,
942 			 &s->ofldrxq[ofldqidx].rspq);
943 	while (--ethqidx >= 0)
944 		free_irq(adap->msix_info[--msi_index].vec,
945 			 &s->ethrxq[ethqidx].rspq);
946 	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
947 	return err;
948 }
949 
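/* Release the MSI-X vectors acquired by request_msix_queue_irqs(). */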
950 static void free_msix_queue_irqs(struct adapter *adap)
951 {
952 	int i, msi_index = 2;
953 	struct sge *s = &adap->sge;
954 
955 	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
956 	for_each_ethrxq(s, i)
957 		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
958 	for_each_ofldrxq(s, i)
959 		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
960 	for_each_rdmarxq(s, i)
961 		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
962 	for_each_rdmaciq(s, i)
963 		free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
964 }
965 
966 /**
967  *	write_rss - write the RSS table for a given port
968  *	@pi: the port
969  *	@queues: array of queue indices for RSS
970  *
971  *	Sets up the portion of the HW RSS table for the port's VI to distribute
972  *	packets to the Rx queues in @queues.
973  */
974 static int write_rss(const struct port_info *pi, const u16 *queues)
975 {
976 	u16 *rss;
977 	int i, err;
978 	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
979 
980 	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
981 	if (!rss)
982 		return -ENOMEM;
983 
984 	/* map the queue indices to queue ids */
985 	for (i = 0; i < pi->rss_size; i++, queues++)
986 		rss[i] = q[*queues].rspq.abs_id;
987 
988 	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
989 				  pi->rss_size, rss, pi->rss_size);
990 	kfree(rss);
991 	return err;
992 }
993 
994 /**
995  *	setup_rss - configure RSS
996  *	@adap: the adapter
997  *
998  *	Sets up RSS for each port.
999  */
1000 static int setup_rss(struct adapter *adap)
1001 {
1002 	int i, err;
1003 
1004 	for_each_port(adap, i) {
1005 		const struct port_info *pi = adap2pinfo(adap, i);
1006 
1007 		err = write_rss(pi, pi->rss);
1008 		if (err)
1009 			return err;
1010 	}
1011 	return 0;
1012 }
1013 
1014 /*
1015  * Return the channel of the ingress queue with the given qid.
1016  */
1017 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
1018 {
1019 	qid -= p->ingr_start;
1020 	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
1021 }
1022 
1023 /*
1024  * Wait until all NAPI handlers are descheduled.
1025  */
1026 static void quiesce_rx(struct adapter *adap)
1027 {
1028 	int i;
1029 
1030 	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1031 		struct sge_rspq *q = adap->sge.ingr_map[i];
1032 
1033 		if (q && q->handler)
1034 			napi_disable(&q->napi);
1035 	}
1036 }
1037 
1038 /*
1039  * Enable NAPI scheduling and interrupt generation for all Rx queues.
1040  */
1041 static void enable_rx(struct adapter *adap)
1042 {
1043 	int i;
1044 
1045 	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1046 		struct sge_rspq *q = adap->sge.ingr_map[i];
1047 
1048 		if (!q)
1049 			continue;
1050 		if (q->handler)
1051 			napi_enable(&q->napi);
1052 		/* 0-increment GTS to start the timer and enable interrupts */
1053 		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
1054 			     SEINTARM(q->intr_params) |
1055 			     INGRESSQID(q->cntxt_id));
1056 	}
1057 }
1058 
1059 /**
1060  *	setup_sge_queues - configure SGE Tx/Rx/response queues
1061  *	@adap: the adapter
1062  *
1063  *	Determines how many sets of SGE queues to use and initializes them.
1064  *	We support multiple queue sets per port if we have MSI-X, otherwise
1065  *	just one queue set per port.
1066  */
1067 static int setup_sge_queues(struct adapter *adap)
1068 {
1069 	int err, msi_idx, i, j;
1070 	struct sge *s = &adap->sge;
1071 
1072 	bitmap_zero(s->starving_fl, MAX_EGRQ);
1073 	bitmap_zero(s->txq_maperr, MAX_EGRQ);
1074 
1075 	if (adap->flags & USING_MSIX)
1076 		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
1077 	else {
1078 		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
1079 				       NULL, NULL);
1080 		if (err)
1081 			return err;
1082 		msi_idx = -((int)s->intrq.abs_id + 1);
1083 	}
1084 
1085 	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
1086 			       msi_idx, NULL, fwevtq_handler);
1087 	if (err) {
1088 freeout:	t4_free_sge_resources(adap);
1089 		return err;
1090 	}
1091 
1092 	for_each_port(adap, i) {
1093 		struct net_device *dev = adap->port[i];
1094 		struct port_info *pi = netdev_priv(dev);
1095 		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
1096 		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
1097 
1098 		for (j = 0; j < pi->nqsets; j++, q++) {
1099 			if (msi_idx > 0)
1100 				msi_idx++;
1101 			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
1102 					       msi_idx, &q->fl,
1103 					       t4_ethrx_handler);
1104 			if (err)
1105 				goto freeout;
1106 			q->rspq.idx = j;
1107 			memset(&q->stats, 0, sizeof(q->stats));
1108 		}
1109 		for (j = 0; j < pi->nqsets; j++, t++) {
1110 			err = t4_sge_alloc_eth_txq(adap, t, dev,
1111 					netdev_get_tx_queue(dev, j),
1112 					s->fw_evtq.cntxt_id);
1113 			if (err)
1114 				goto freeout;
1115 		}
1116 	}
1117 
1118 	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
1119 	for_each_ofldrxq(s, i) {
1120 		struct sge_ofld_rxq *q = &s->ofldrxq[i];
1121 		struct net_device *dev = adap->port[i / j];
1122 
1123 		if (msi_idx > 0)
1124 			msi_idx++;
1125 		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
1126 				       q->fl.size ? &q->fl : NULL,
1127 				       uldrx_handler);
1128 		if (err)
1129 			goto freeout;
1130 		memset(&q->stats, 0, sizeof(q->stats));
1131 		s->ofld_rxq[i] = q->rspq.abs_id;
1132 		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
1133 					    s->fw_evtq.cntxt_id);
1134 		if (err)
1135 			goto freeout;
1136 	}
1137 
1138 	for_each_rdmarxq(s, i) {
1139 		struct sge_ofld_rxq *q = &s->rdmarxq[i];
1140 
1141 		if (msi_idx > 0)
1142 			msi_idx++;
1143 		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1144 				       msi_idx, q->fl.size ? &q->fl : NULL,
1145 				       uldrx_handler);
1146 		if (err)
1147 			goto freeout;
1148 		memset(&q->stats, 0, sizeof(q->stats));
1149 		s->rdma_rxq[i] = q->rspq.abs_id;
1150 	}
1151 
1152 	for_each_rdmaciq(s, i) {
1153 		struct sge_ofld_rxq *q = &s->rdmaciq[i];
1154 
1155 		if (msi_idx > 0)
1156 			msi_idx++;
1157 		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1158 				       msi_idx, q->fl.size ? &q->fl : NULL,
1159 				       uldrx_handler);
1160 		if (err)
1161 			goto freeout;
1162 		memset(&q->stats, 0, sizeof(q->stats));
1163 		s->rdma_ciq[i] = q->rspq.abs_id;
1164 	}
1165 
1166 	for_each_port(adap, i) {
1167 		/*
1168 		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
1169 		 * have RDMA queues, and that's the right value.
1170 		 */
1171 		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1172 					    s->fw_evtq.cntxt_id,
1173 					    s->rdmarxq[i].rspq.cntxt_id);
1174 		if (err)
1175 			goto freeout;
1176 	}
1177 
1178 	t4_write_reg(adap, is_t4(adap->params.chip) ?
1179 				MPS_TRC_RSS_CONTROL :
1180 				MPS_T5_TRC_RSS_CONTROL,
1181 		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
1182 		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
1183 	return 0;
1184 }
1185 
1186 /*
1187  * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1188  * The allocated memory is cleared.
1189  */
1190 void *t4_alloc_mem(size_t size)
1191 {
1192 	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1193 
1194 	if (!p)
1195 		p = vzalloc(size);
1196 	return p;
1197 }
1198 
1199 /*
1200  * Free memory allocated through t4_alloc_mem().
1201  */
1202 void t4_free_mem(void *addr)
1203 {
1204 	if (is_vmalloc_addr(addr))
1205 		vfree(addr);
1206 	else
1207 		kfree(addr);
1208 }
1209 
1210 /* Send a Work Request to write the filter at a specified index.  We construct
1211  * a Firmware Filter Work Request to have the work done and put the indicated
1212  * filter into "pending" mode which will prevent any further actions against
1213  * it till we get a reply from the firmware on the completion status of the
1214  * request.
1215  */
1216 static int set_filter_wr(struct adapter *adapter, int fidx)
1217 {
1218 	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1219 	struct sk_buff *skb;
1220 	struct fw_filter_wr *fwr;
1221 	unsigned int ftid;
1222 
1223 	/* If the new filter requires loopback Destination MAC and/or VLAN
1224 	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1225 	 * the filter.
1226 	 */
1227 	if (f->fs.newdmac || f->fs.newvlan) {
1228 		/* allocate L2T entry for new filter */
1229 		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1230 		if (f->l2t == NULL)
1231 			return -EAGAIN;
1232 		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1233 					f->fs.eport, f->fs.dmac)) {
1234 			cxgb4_l2t_release(f->l2t);
1235 			f->l2t = NULL;
1236 			return -ENOMEM;
1237 		}
1238 	}
1239 
1240 	ftid = adapter->tids.ftid_base + fidx;
1241 
1242 	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1243 	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1244 	memset(fwr, 0, sizeof(*fwr));
1245 
1246 	/* It would be nice to put most of the following in t4_hw.c but most
1247 	 * of the work is translating the cxgbtool ch_filter_specification
1248 	 * into the Work Request and the definition of that structure is
1249 	 * currently in cxgbtool.h which isn't appropriate to pull into the
1250 	 * common code.  We may eventually try to come up with a more neutral
1251 	 * filter specification structure but for now it's easiest to simply
1252 	 * put this fairly direct code in line ...
1253 	 */
1254 	fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
1255 	fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
1256 	fwr->tid_to_iq =
1257 		htonl(FW_FILTER_WR_TID_V(ftid) |
1258 		      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
1259 		      FW_FILTER_WR_NOREPLY_V(0) |
1260 		      FW_FILTER_WR_IQ_V(f->fs.iq));
1261 	fwr->del_filter_to_l2tix =
1262 		htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
1263 		      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
1264 		      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
1265 		      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
1266 		      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
1267 		      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
1268 		      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
1269 		      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
1270 		      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
1271 					     f->fs.newvlan == VLAN_REWRITE) |
1272 		      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
1273 					    f->fs.newvlan == VLAN_REWRITE) |
1274 		      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
1275 		      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
1276 		      FW_FILTER_WR_PRIO_V(f->fs.prio) |
1277 		      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
1278 	fwr->ethtype = htons(f->fs.val.ethtype);
1279 	fwr->ethtypem = htons(f->fs.mask.ethtype);
1280 	fwr->frag_to_ovlan_vldm =
1281 		(FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
1282 		 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
1283 		 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
1284 		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
1285 		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
1286 		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
1287 	fwr->smac_sel = 0;
1288 	fwr->rx_chan_rx_rpl_iq =
1289 		htons(FW_FILTER_WR_RX_CHAN_V(0) |
1290 		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
1291 	fwr->maci_to_matchtypem =
1292 		htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
1293 		      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
1294 		      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
1295 		      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
1296 		      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
1297 		      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
1298 		      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
1299 		      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
1300 	fwr->ptcl = f->fs.val.proto;
1301 	fwr->ptclm = f->fs.mask.proto;
1302 	fwr->ttyp = f->fs.val.tos;
1303 	fwr->ttypm = f->fs.mask.tos;
1304 	fwr->ivlan = htons(f->fs.val.ivlan);
1305 	fwr->ivlanm = htons(f->fs.mask.ivlan);
1306 	fwr->ovlan = htons(f->fs.val.ovlan);
1307 	fwr->ovlanm = htons(f->fs.mask.ovlan);
1308 	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1309 	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1310 	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1311 	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1312 	fwr->lp = htons(f->fs.val.lport);
1313 	fwr->lpm = htons(f->fs.mask.lport);
1314 	fwr->fp = htons(f->fs.val.fport);
1315 	fwr->fpm = htons(f->fs.mask.fport);
1316 	if (f->fs.newsmac)
1317 		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1318 
1319 	/* Mark the filter as "pending" and ship off the Filter Work Request.
1320 	 * When we get the Work Request Reply we'll clear the pending status.
1321 	 */
1322 	f->pending = 1;
1323 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1324 	t4_ofld_send(adapter, skb);
1325 	return 0;
1326 }
1327 
1328 /* Delete the filter at a specified index.
1329  */
1330 static int del_filter_wr(struct adapter *adapter, int fidx)
1331 {
1332 	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1333 	struct sk_buff *skb;
1334 	struct fw_filter_wr *fwr;
1335 	unsigned int len, ftid;
1336 
1337 	len = sizeof(*fwr);
1338 	ftid = adapter->tids.ftid_base + fidx;
1339 
1340 	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1341 	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1342 	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1343 
1344 	/* Mark the filter as "pending" and ship off the Filter Work Request.
1345 	 * When we get the Work Request Reply we'll clear the pending status.
1346 	 */
1347 	f->pending = 1;
1348 	t4_mgmt_tx(adapter, skb);
1349 	return 0;
1350 }
1351 
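/* ndo_select_queue handler: pick a TX queue based on the skb's DCB priority
 * when DCB is active, on the recorded Rx queue/CPU when select_queue=1, and
 * on the kernel's fallback otherwise.
 */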
1352 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
1353 			     void *accel_priv, select_queue_fallback_t fallback)
1354 {
1355 	int txq;
1356 
1357 #ifdef CONFIG_CHELSIO_T4_DCB
1358 	/* If Data Center Bridging has been successfully negotiated on this
1359 	 * link then we'll use the skb's priority to map it to a TX Queue.
1360 	 * The skb's priority is determined via the VLAN Tag Priority Code
1361 	 * Point field.
1362 	 */
1363 	if (cxgb4_dcb_enabled(dev)) {
1364 		u16 vlan_tci;
1365 		int err;
1366 
1367 		err = vlan_get_tag(skb, &vlan_tci);
1368 		if (unlikely(err)) {
1369 			if (net_ratelimit())
1370 				netdev_warn(dev,
1371 					    "TX Packet without VLAN Tag on DCB Link\n");
1372 			txq = 0;
1373 		} else {
1374 			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1375 		}
1376 		return txq;
1377 	}
1378 #endif /* CONFIG_CHELSIO_T4_DCB */
1379 
1380 	if (select_queue) {
1381 		txq = (skb_rx_queue_recorded(skb)
1382 			? skb_get_rx_queue(skb)
1383 			: smp_processor_id());
1384 
1385 		while (unlikely(txq >= dev->real_num_tx_queues))
1386 			txq -= dev->real_num_tx_queues;
1387 
1388 		return txq;
1389 	}
1390 
1391 	return fallback(dev, skb) % dev->real_num_tx_queues;
1392 }
1393 
1394 static inline int is_offload(const struct adapter *adap)
1395 {
1396 	return adap->params.offload;
1397 }
1398 
1399 /*
1400  * Implementation of ethtool operations.
1401  */
1402 
1403 static u32 get_msglevel(struct net_device *dev)
1404 {
1405 	return netdev2adap(dev)->msg_enable;
1406 }
1407 
1408 static void set_msglevel(struct net_device *dev, u32 val)
1409 {
1410 	netdev2adap(dev)->msg_enable = val;
1411 }
1412 
1413 static char stats_strings[][ETH_GSTRING_LEN] = {
1414 	"TxOctetsOK         ",
1415 	"TxFramesOK         ",
1416 	"TxBroadcastFrames  ",
1417 	"TxMulticastFrames  ",
1418 	"TxUnicastFrames    ",
1419 	"TxErrorFrames      ",
1420 
1421 	"TxFrames64         ",
1422 	"TxFrames65To127    ",
1423 	"TxFrames128To255   ",
1424 	"TxFrames256To511   ",
1425 	"TxFrames512To1023  ",
1426 	"TxFrames1024To1518 ",
1427 	"TxFrames1519ToMax  ",
1428 
1429 	"TxFramesDropped    ",
1430 	"TxPauseFrames      ",
1431 	"TxPPP0Frames       ",
1432 	"TxPPP1Frames       ",
1433 	"TxPPP2Frames       ",
1434 	"TxPPP3Frames       ",
1435 	"TxPPP4Frames       ",
1436 	"TxPPP5Frames       ",
1437 	"TxPPP6Frames       ",
1438 	"TxPPP7Frames       ",
1439 
1440 	"RxOctetsOK         ",
1441 	"RxFramesOK         ",
1442 	"RxBroadcastFrames  ",
1443 	"RxMulticastFrames  ",
1444 	"RxUnicastFrames    ",
1445 
1446 	"RxFramesTooLong    ",
1447 	"RxJabberErrors     ",
1448 	"RxFCSErrors        ",
1449 	"RxLengthErrors     ",
1450 	"RxSymbolErrors     ",
1451 	"RxRuntFrames       ",
1452 
1453 	"RxFrames64         ",
1454 	"RxFrames65To127    ",
1455 	"RxFrames128To255   ",
1456 	"RxFrames256To511   ",
1457 	"RxFrames512To1023  ",
1458 	"RxFrames1024To1518 ",
1459 	"RxFrames1519ToMax  ",
1460 
1461 	"RxPauseFrames      ",
1462 	"RxPPP0Frames       ",
1463 	"RxPPP1Frames       ",
1464 	"RxPPP2Frames       ",
1465 	"RxPPP3Frames       ",
1466 	"RxPPP4Frames       ",
1467 	"RxPPP5Frames       ",
1468 	"RxPPP6Frames       ",
1469 	"RxPPP7Frames       ",
1470 
1471 	"RxBG0FramesDropped ",
1472 	"RxBG1FramesDropped ",
1473 	"RxBG2FramesDropped ",
1474 	"RxBG3FramesDropped ",
1475 	"RxBG0FramesTrunc   ",
1476 	"RxBG1FramesTrunc   ",
1477 	"RxBG2FramesTrunc   ",
1478 	"RxBG3FramesTrunc   ",
1479 
1480 	"TSO                ",
1481 	"TxCsumOffload      ",
1482 	"RxCsumGood         ",
1483 	"VLANextractions    ",
1484 	"VLANinsertions     ",
1485 	"GROpackets         ",
1486 	"GROmerged          ",
1487 	"WriteCoalSuccess   ",
1488 	"WriteCoalFail      ",
1489 };
1490 
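/* Report how many statistics strings are available for an ethtool string set. */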
1491 static int get_sset_count(struct net_device *dev, int sset)
1492 {
1493 	switch (sset) {
1494 	case ETH_SS_STATS:
1495 		return ARRAY_SIZE(stats_strings);
1496 	default:
1497 		return -EOPNOTSUPP;
1498 	}
1499 }
1500 
1501 #define T4_REGMAP_SIZE (160 * 1024)
1502 #define T5_REGMAP_SIZE (332 * 1024)
1503 
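/* The size of the register dump depends on the chip generation (T4 vs T5). */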
1504 static int get_regs_len(struct net_device *dev)
1505 {
1506 	struct adapter *adap = netdev2adap(dev);
1507 	if (is_t4(adap->params.chip))
1508 		return T4_REGMAP_SIZE;
1509 	else
1510 		return T5_REGMAP_SIZE;
1511 }
1512 
1513 static int get_eeprom_len(struct net_device *dev)
1514 {
1515 	return EEPROMSIZE;
1516 }
1517 
1518 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1519 {
1520 	struct adapter *adapter = netdev2adap(dev);
1521 
1522 	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1523 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1524 	strlcpy(info->bus_info, pci_name(adapter->pdev),
1525 		sizeof(info->bus_info));
1526 
1527 	if (adapter->params.fw_vers)
1528 		snprintf(info->fw_version, sizeof(info->fw_version),
1529 			"%u.%u.%u.%u, TP %u.%u.%u.%u",
1530 			FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
1531 			FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
1532 			FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
1533 			FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
1534 			FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
1535 			FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
1536 			FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
1537 			FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
1538 }
1539 
1540 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1541 {
1542 	if (stringset == ETH_SS_STATS)
1543 		memcpy(data, stats_strings, sizeof(stats_strings));
1544 }
1545 
1546 /*
1547  * port stats maintained per queue of the port.  They should be in the same
1548  * order as in stats_strings above.
1549  */
1550 struct queue_port_stats {
1551 	u64 tso;
1552 	u64 tx_csum;
1553 	u64 rx_csum;
1554 	u64 vlan_ex;
1555 	u64 vlan_ins;
1556 	u64 gro_pkts;
1557 	u64 gro_merged;
1558 };
1559 
1560 static void collect_sge_port_stats(const struct adapter *adap,
1561 		const struct port_info *p, struct queue_port_stats *s)
1562 {
1563 	int i;
1564 	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1565 	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1566 
1567 	memset(s, 0, sizeof(*s));
1568 	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1569 		s->tso += tx->tso;
1570 		s->tx_csum += tx->tx_cso;
1571 		s->rx_csum += rx->stats.rx_cso;
1572 		s->vlan_ex += rx->stats.vlan_ex;
1573 		s->vlan_ins += tx->vlan_ins;
1574 		s->gro_pkts += rx->stats.lro_pkts;
1575 		s->gro_merged += rx->stats.lro_merged;
1576 	}
1577 }
1578 
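/* ethtool get_ethtool_stats handler: hardware MAC statistics for the port,
 * followed by the per-queue software statistics and, on T5, the SGE write
 * coalescing counters (zeros on T4).
 */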
1579 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1580 		      u64 *data)
1581 {
1582 	struct port_info *pi = netdev_priv(dev);
1583 	struct adapter *adapter = pi->adapter;
1584 	u32 val1, val2;
1585 
1586 	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1587 
1588 	data += sizeof(struct port_stats) / sizeof(u64);
1589 	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1590 	data += sizeof(struct queue_port_stats) / sizeof(u64);
1591 	if (!is_t4(adapter->params.chip)) {
1592 		t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1593 		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1594 		val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
1595 		*data = val1 - val2;
1596 		data++;
1597 		*data = val2;
1598 		data++;
1599 	} else {
1600 		memset(data, 0, 2 * sizeof(u64));
1601 		data += 2;
1602 	}
1603 }
1604 
1605 /*
1606  * Return a version number to identify the type of adapter.  The scheme is:
1607  * - bits 0..9: chip version
1608  * - bits 10..15: chip revision
1609  * - bits 16..23: register dump version
1610  */
1611 static inline unsigned int mk_adap_vers(const struct adapter *ap)
1612 {
1613 	return CHELSIO_CHIP_VERSION(ap->params.chip) |
1614 		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
1615 }
1616 
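/* Read the registers in the range [start, end] into the dump buffer at byte
 * offset start.
 */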
1617 static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1618 			   unsigned int end)
1619 {
1620 	u32 *p = buf + start;
1621 
1622 	for ( ; start <= end; start += sizeof(u32))
1623 		*p++ = t4_read_reg(ap, start);
1624 }
1625 
1626 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1627 		     void *buf)
1628 {
1629 	static const unsigned int t4_reg_ranges[] = {
1630 		0x1008, 0x1108,
1631 		0x1180, 0x11b4,
1632 		0x11fc, 0x123c,
1633 		0x1300, 0x173c,
1634 		0x1800, 0x18fc,
1635 		0x3000, 0x30d8,
1636 		0x30e0, 0x5924,
1637 		0x5960, 0x59d4,
1638 		0x5a00, 0x5af8,
1639 		0x6000, 0x6098,
1640 		0x6100, 0x6150,
1641 		0x6200, 0x6208,
1642 		0x6240, 0x6248,
1643 		0x6280, 0x6338,
1644 		0x6370, 0x638c,
1645 		0x6400, 0x643c,
1646 		0x6500, 0x6524,
1647 		0x6a00, 0x6a38,
1648 		0x6a60, 0x6a78,
1649 		0x6b00, 0x6b84,
1650 		0x6bf0, 0x6c84,
1651 		0x6cf0, 0x6d84,
1652 		0x6df0, 0x6e84,
1653 		0x6ef0, 0x6f84,
1654 		0x6ff0, 0x7084,
1655 		0x70f0, 0x7184,
1656 		0x71f0, 0x7284,
1657 		0x72f0, 0x7384,
1658 		0x73f0, 0x7450,
1659 		0x7500, 0x7530,
1660 		0x7600, 0x761c,
1661 		0x7680, 0x76cc,
1662 		0x7700, 0x7798,
1663 		0x77c0, 0x77fc,
1664 		0x7900, 0x79fc,
1665 		0x7b00, 0x7c38,
1666 		0x7d00, 0x7efc,
1667 		0x8dc0, 0x8e1c,
1668 		0x8e30, 0x8e78,
1669 		0x8ea0, 0x8f6c,
1670 		0x8fc0, 0x9074,
1671 		0x90fc, 0x90fc,
1672 		0x9400, 0x9458,
1673 		0x9600, 0x96bc,
1674 		0x9800, 0x9808,
1675 		0x9820, 0x983c,
1676 		0x9850, 0x9864,
1677 		0x9c00, 0x9c6c,
1678 		0x9c80, 0x9cec,
1679 		0x9d00, 0x9d6c,
1680 		0x9d80, 0x9dec,
1681 		0x9e00, 0x9e6c,
1682 		0x9e80, 0x9eec,
1683 		0x9f00, 0x9f6c,
1684 		0x9f80, 0x9fec,
1685 		0xd004, 0xd03c,
1686 		0xdfc0, 0xdfe0,
1687 		0xe000, 0xea7c,
1688 		0xf000, 0x11110,
1689 		0x11118, 0x11190,
1690 		0x19040, 0x1906c,
1691 		0x19078, 0x19080,
1692 		0x1908c, 0x19124,
1693 		0x19150, 0x191b0,
1694 		0x191d0, 0x191e8,
1695 		0x19238, 0x1924c,
1696 		0x193f8, 0x19474,
1697 		0x19490, 0x194f8,
1698 		0x19800, 0x19f30,
1699 		0x1a000, 0x1a06c,
1700 		0x1a0b0, 0x1a120,
1701 		0x1a128, 0x1a138,
1702 		0x1a190, 0x1a1c4,
1703 		0x1a1fc, 0x1a1fc,
1704 		0x1e040, 0x1e04c,
1705 		0x1e284, 0x1e28c,
1706 		0x1e2c0, 0x1e2c0,
1707 		0x1e2e0, 0x1e2e0,
1708 		0x1e300, 0x1e384,
1709 		0x1e3c0, 0x1e3c8,
1710 		0x1e440, 0x1e44c,
1711 		0x1e684, 0x1e68c,
1712 		0x1e6c0, 0x1e6c0,
1713 		0x1e6e0, 0x1e6e0,
1714 		0x1e700, 0x1e784,
1715 		0x1e7c0, 0x1e7c8,
1716 		0x1e840, 0x1e84c,
1717 		0x1ea84, 0x1ea8c,
1718 		0x1eac0, 0x1eac0,
1719 		0x1eae0, 0x1eae0,
1720 		0x1eb00, 0x1eb84,
1721 		0x1ebc0, 0x1ebc8,
1722 		0x1ec40, 0x1ec4c,
1723 		0x1ee84, 0x1ee8c,
1724 		0x1eec0, 0x1eec0,
1725 		0x1eee0, 0x1eee0,
1726 		0x1ef00, 0x1ef84,
1727 		0x1efc0, 0x1efc8,
1728 		0x1f040, 0x1f04c,
1729 		0x1f284, 0x1f28c,
1730 		0x1f2c0, 0x1f2c0,
1731 		0x1f2e0, 0x1f2e0,
1732 		0x1f300, 0x1f384,
1733 		0x1f3c0, 0x1f3c8,
1734 		0x1f440, 0x1f44c,
1735 		0x1f684, 0x1f68c,
1736 		0x1f6c0, 0x1f6c0,
1737 		0x1f6e0, 0x1f6e0,
1738 		0x1f700, 0x1f784,
1739 		0x1f7c0, 0x1f7c8,
1740 		0x1f840, 0x1f84c,
1741 		0x1fa84, 0x1fa8c,
1742 		0x1fac0, 0x1fac0,
1743 		0x1fae0, 0x1fae0,
1744 		0x1fb00, 0x1fb84,
1745 		0x1fbc0, 0x1fbc8,
1746 		0x1fc40, 0x1fc4c,
1747 		0x1fe84, 0x1fe8c,
1748 		0x1fec0, 0x1fec0,
1749 		0x1fee0, 0x1fee0,
1750 		0x1ff00, 0x1ff84,
1751 		0x1ffc0, 0x1ffc8,
1752 		0x20000, 0x2002c,
1753 		0x20100, 0x2013c,
1754 		0x20190, 0x201c8,
1755 		0x20200, 0x20318,
1756 		0x20400, 0x20528,
1757 		0x20540, 0x20614,
1758 		0x21000, 0x21040,
1759 		0x2104c, 0x21060,
1760 		0x210c0, 0x210ec,
1761 		0x21200, 0x21268,
1762 		0x21270, 0x21284,
1763 		0x212fc, 0x21388,
1764 		0x21400, 0x21404,
1765 		0x21500, 0x21518,
1766 		0x2152c, 0x2153c,
1767 		0x21550, 0x21554,
1768 		0x21600, 0x21600,
1769 		0x21608, 0x21628,
1770 		0x21630, 0x2163c,
1771 		0x21700, 0x2171c,
1772 		0x21780, 0x2178c,
1773 		0x21800, 0x21c38,
1774 		0x21c80, 0x21d7c,
1775 		0x21e00, 0x21e04,
1776 		0x22000, 0x2202c,
1777 		0x22100, 0x2213c,
1778 		0x22190, 0x221c8,
1779 		0x22200, 0x22318,
1780 		0x22400, 0x22528,
1781 		0x22540, 0x22614,
1782 		0x23000, 0x23040,
1783 		0x2304c, 0x23060,
1784 		0x230c0, 0x230ec,
1785 		0x23200, 0x23268,
1786 		0x23270, 0x23284,
1787 		0x232fc, 0x23388,
1788 		0x23400, 0x23404,
1789 		0x23500, 0x23518,
1790 		0x2352c, 0x2353c,
1791 		0x23550, 0x23554,
1792 		0x23600, 0x23600,
1793 		0x23608, 0x23628,
1794 		0x23630, 0x2363c,
1795 		0x23700, 0x2371c,
1796 		0x23780, 0x2378c,
1797 		0x23800, 0x23c38,
1798 		0x23c80, 0x23d7c,
1799 		0x23e00, 0x23e04,
1800 		0x24000, 0x2402c,
1801 		0x24100, 0x2413c,
1802 		0x24190, 0x241c8,
1803 		0x24200, 0x24318,
1804 		0x24400, 0x24528,
1805 		0x24540, 0x24614,
1806 		0x25000, 0x25040,
1807 		0x2504c, 0x25060,
1808 		0x250c0, 0x250ec,
1809 		0x25200, 0x25268,
1810 		0x25270, 0x25284,
1811 		0x252fc, 0x25388,
1812 		0x25400, 0x25404,
1813 		0x25500, 0x25518,
1814 		0x2552c, 0x2553c,
1815 		0x25550, 0x25554,
1816 		0x25600, 0x25600,
1817 		0x25608, 0x25628,
1818 		0x25630, 0x2563c,
1819 		0x25700, 0x2571c,
1820 		0x25780, 0x2578c,
1821 		0x25800, 0x25c38,
1822 		0x25c80, 0x25d7c,
1823 		0x25e00, 0x25e04,
1824 		0x26000, 0x2602c,
1825 		0x26100, 0x2613c,
1826 		0x26190, 0x261c8,
1827 		0x26200, 0x26318,
1828 		0x26400, 0x26528,
1829 		0x26540, 0x26614,
1830 		0x27000, 0x27040,
1831 		0x2704c, 0x27060,
1832 		0x270c0, 0x270ec,
1833 		0x27200, 0x27268,
1834 		0x27270, 0x27284,
1835 		0x272fc, 0x27388,
1836 		0x27400, 0x27404,
1837 		0x27500, 0x27518,
1838 		0x2752c, 0x2753c,
1839 		0x27550, 0x27554,
1840 		0x27600, 0x27600,
1841 		0x27608, 0x27628,
1842 		0x27630, 0x2763c,
1843 		0x27700, 0x2771c,
1844 		0x27780, 0x2778c,
1845 		0x27800, 0x27c38,
1846 		0x27c80, 0x27d7c,
1847 		0x27e00, 0x27e04
1848 	};
1849 
1850 	static const unsigned int t5_reg_ranges[] = {
1851 		0x1008, 0x1148,
1852 		0x1180, 0x11b4,
1853 		0x11fc, 0x123c,
1854 		0x1280, 0x173c,
1855 		0x1800, 0x18fc,
1856 		0x3000, 0x3028,
1857 		0x3060, 0x30d8,
1858 		0x30e0, 0x30fc,
1859 		0x3140, 0x357c,
1860 		0x35a8, 0x35cc,
1861 		0x35ec, 0x35ec,
1862 		0x3600, 0x5624,
1863 		0x56cc, 0x575c,
1864 		0x580c, 0x5814,
1865 		0x5890, 0x58bc,
1866 		0x5940, 0x59dc,
1867 		0x59fc, 0x5a18,
1868 		0x5a60, 0x5a9c,
1869 		0x5b9c, 0x5bfc,
1870 		0x6000, 0x6040,
1871 		0x6058, 0x614c,
1872 		0x7700, 0x7798,
1873 		0x77c0, 0x78fc,
1874 		0x7b00, 0x7c54,
1875 		0x7d00, 0x7efc,
1876 		0x8dc0, 0x8de0,
1877 		0x8df8, 0x8e84,
1878 		0x8ea0, 0x8f84,
1879 		0x8fc0, 0x90f8,
1880 		0x9400, 0x9470,
1881 		0x9600, 0x96f4,
1882 		0x9800, 0x9808,
1883 		0x9820, 0x983c,
1884 		0x9850, 0x9864,
1885 		0x9c00, 0x9c6c,
1886 		0x9c80, 0x9cec,
1887 		0x9d00, 0x9d6c,
1888 		0x9d80, 0x9dec,
1889 		0x9e00, 0x9e6c,
1890 		0x9e80, 0x9eec,
1891 		0x9f00, 0x9f6c,
1892 		0x9f80, 0xa020,
1893 		0xd004, 0xd03c,
1894 		0xdfc0, 0xdfe0,
1895 		0xe000, 0x11088,
1896 		0x1109c, 0x11110,
1897 		0x11118, 0x1117c,
1898 		0x11190, 0x11204,
1899 		0x19040, 0x1906c,
1900 		0x19078, 0x19080,
1901 		0x1908c, 0x19124,
1902 		0x19150, 0x191b0,
1903 		0x191d0, 0x191e8,
1904 		0x19238, 0x19290,
1905 		0x193f8, 0x19474,
1906 		0x19490, 0x194cc,
1907 		0x194f0, 0x194f8,
1908 		0x19c00, 0x19c60,
1909 		0x19c94, 0x19e10,
1910 		0x19e50, 0x19f34,
1911 		0x19f40, 0x19f50,
1912 		0x19f90, 0x19fe4,
1913 		0x1a000, 0x1a06c,
1914 		0x1a0b0, 0x1a120,
1915 		0x1a128, 0x1a138,
1916 		0x1a190, 0x1a1c4,
1917 		0x1a1fc, 0x1a1fc,
1918 		0x1e008, 0x1e00c,
1919 		0x1e040, 0x1e04c,
1920 		0x1e284, 0x1e290,
1921 		0x1e2c0, 0x1e2c0,
1922 		0x1e2e0, 0x1e2e0,
1923 		0x1e300, 0x1e384,
1924 		0x1e3c0, 0x1e3c8,
1925 		0x1e408, 0x1e40c,
1926 		0x1e440, 0x1e44c,
1927 		0x1e684, 0x1e690,
1928 		0x1e6c0, 0x1e6c0,
1929 		0x1e6e0, 0x1e6e0,
1930 		0x1e700, 0x1e784,
1931 		0x1e7c0, 0x1e7c8,
1932 		0x1e808, 0x1e80c,
1933 		0x1e840, 0x1e84c,
1934 		0x1ea84, 0x1ea90,
1935 		0x1eac0, 0x1eac0,
1936 		0x1eae0, 0x1eae0,
1937 		0x1eb00, 0x1eb84,
1938 		0x1ebc0, 0x1ebc8,
1939 		0x1ec08, 0x1ec0c,
1940 		0x1ec40, 0x1ec4c,
1941 		0x1ee84, 0x1ee90,
1942 		0x1eec0, 0x1eec0,
1943 		0x1eee0, 0x1eee0,
1944 		0x1ef00, 0x1ef84,
1945 		0x1efc0, 0x1efc8,
1946 		0x1f008, 0x1f00c,
1947 		0x1f040, 0x1f04c,
1948 		0x1f284, 0x1f290,
1949 		0x1f2c0, 0x1f2c0,
1950 		0x1f2e0, 0x1f2e0,
1951 		0x1f300, 0x1f384,
1952 		0x1f3c0, 0x1f3c8,
1953 		0x1f408, 0x1f40c,
1954 		0x1f440, 0x1f44c,
1955 		0x1f684, 0x1f690,
1956 		0x1f6c0, 0x1f6c0,
1957 		0x1f6e0, 0x1f6e0,
1958 		0x1f700, 0x1f784,
1959 		0x1f7c0, 0x1f7c8,
1960 		0x1f808, 0x1f80c,
1961 		0x1f840, 0x1f84c,
1962 		0x1fa84, 0x1fa90,
1963 		0x1fac0, 0x1fac0,
1964 		0x1fae0, 0x1fae0,
1965 		0x1fb00, 0x1fb84,
1966 		0x1fbc0, 0x1fbc8,
1967 		0x1fc08, 0x1fc0c,
1968 		0x1fc40, 0x1fc4c,
1969 		0x1fe84, 0x1fe90,
1970 		0x1fec0, 0x1fec0,
1971 		0x1fee0, 0x1fee0,
1972 		0x1ff00, 0x1ff84,
1973 		0x1ffc0, 0x1ffc8,
1974 		0x30000, 0x30030,
1975 		0x30100, 0x30144,
1976 		0x30190, 0x301d0,
1977 		0x30200, 0x30318,
1978 		0x30400, 0x3052c,
1979 		0x30540, 0x3061c,
1980 		0x30800, 0x30834,
1981 		0x308c0, 0x30908,
1982 		0x30910, 0x309ac,
1983 		0x30a00, 0x30a04,
1984 		0x30a0c, 0x30a2c,
1985 		0x30a44, 0x30a50,
1986 		0x30a74, 0x30c24,
1987 		0x30d08, 0x30d14,
1988 		0x30d1c, 0x30d20,
1989 		0x30d3c, 0x30d50,
1990 		0x31200, 0x3120c,
1991 		0x31220, 0x31220,
1992 		0x31240, 0x31240,
1993 		0x31600, 0x31600,
1994 		0x31608, 0x3160c,
1995 		0x31a00, 0x31a1c,
1996 		0x31e04, 0x31e20,
1997 		0x31e38, 0x31e3c,
1998 		0x31e80, 0x31e80,
1999 		0x31e88, 0x31ea8,
2000 		0x31eb0, 0x31eb4,
2001 		0x31ec8, 0x31ed4,
2002 		0x31fb8, 0x32004,
2003 		0x32208, 0x3223c,
2004 		0x32600, 0x32630,
2005 		0x32a00, 0x32abc,
2006 		0x32b00, 0x32b70,
2007 		0x33000, 0x33048,
2008 		0x33060, 0x3309c,
2009 		0x330f0, 0x33148,
2010 		0x33160, 0x3319c,
2011 		0x331f0, 0x332e4,
2012 		0x332f8, 0x333e4,
2013 		0x333f8, 0x33448,
2014 		0x33460, 0x3349c,
2015 		0x334f0, 0x33548,
2016 		0x33560, 0x3359c,
2017 		0x335f0, 0x336e4,
2018 		0x336f8, 0x337e4,
2019 		0x337f8, 0x337fc,
2020 		0x33814, 0x33814,
2021 		0x3382c, 0x3382c,
2022 		0x33880, 0x3388c,
2023 		0x338e8, 0x338ec,
2024 		0x33900, 0x33948,
2025 		0x33960, 0x3399c,
2026 		0x339f0, 0x33ae4,
2027 		0x33af8, 0x33b10,
2028 		0x33b28, 0x33b28,
2029 		0x33b3c, 0x33b50,
2030 		0x33bf0, 0x33c10,
2031 		0x33c28, 0x33c28,
2032 		0x33c3c, 0x33c50,
2033 		0x33cf0, 0x33cfc,
2034 		0x34000, 0x34030,
2035 		0x34100, 0x34144,
2036 		0x34190, 0x341d0,
2037 		0x34200, 0x34318,
2038 		0x34400, 0x3452c,
2039 		0x34540, 0x3461c,
2040 		0x34800, 0x34834,
2041 		0x348c0, 0x34908,
2042 		0x34910, 0x349ac,
2043 		0x34a00, 0x34a04,
2044 		0x34a0c, 0x34a2c,
2045 		0x34a44, 0x34a50,
2046 		0x34a74, 0x34c24,
2047 		0x34d08, 0x34d14,
2048 		0x34d1c, 0x34d20,
2049 		0x34d3c, 0x34d50,
2050 		0x35200, 0x3520c,
2051 		0x35220, 0x35220,
2052 		0x35240, 0x35240,
2053 		0x35600, 0x35600,
2054 		0x35608, 0x3560c,
2055 		0x35a00, 0x35a1c,
2056 		0x35e04, 0x35e20,
2057 		0x35e38, 0x35e3c,
2058 		0x35e80, 0x35e80,
2059 		0x35e88, 0x35ea8,
2060 		0x35eb0, 0x35eb4,
2061 		0x35ec8, 0x35ed4,
2062 		0x35fb8, 0x36004,
2063 		0x36208, 0x3623c,
2064 		0x36600, 0x36630,
2065 		0x36a00, 0x36abc,
2066 		0x36b00, 0x36b70,
2067 		0x37000, 0x37048,
2068 		0x37060, 0x3709c,
2069 		0x370f0, 0x37148,
2070 		0x37160, 0x3719c,
2071 		0x371f0, 0x372e4,
2072 		0x372f8, 0x373e4,
2073 		0x373f8, 0x37448,
2074 		0x37460, 0x3749c,
2075 		0x374f0, 0x37548,
2076 		0x37560, 0x3759c,
2077 		0x375f0, 0x376e4,
2078 		0x376f8, 0x377e4,
2079 		0x377f8, 0x377fc,
2080 		0x37814, 0x37814,
2081 		0x3782c, 0x3782c,
2082 		0x37880, 0x3788c,
2083 		0x378e8, 0x378ec,
2084 		0x37900, 0x37948,
2085 		0x37960, 0x3799c,
2086 		0x379f0, 0x37ae4,
2087 		0x37af8, 0x37b10,
2088 		0x37b28, 0x37b28,
2089 		0x37b3c, 0x37b50,
2090 		0x37bf0, 0x37c10,
2091 		0x37c28, 0x37c28,
2092 		0x37c3c, 0x37c50,
2093 		0x37cf0, 0x37cfc,
2094 		0x38000, 0x38030,
2095 		0x38100, 0x38144,
2096 		0x38190, 0x381d0,
2097 		0x38200, 0x38318,
2098 		0x38400, 0x3852c,
2099 		0x38540, 0x3861c,
2100 		0x38800, 0x38834,
2101 		0x388c0, 0x38908,
2102 		0x38910, 0x389ac,
2103 		0x38a00, 0x38a04,
2104 		0x38a0c, 0x38a2c,
2105 		0x38a44, 0x38a50,
2106 		0x38a74, 0x38c24,
2107 		0x38d08, 0x38d14,
2108 		0x38d1c, 0x38d20,
2109 		0x38d3c, 0x38d50,
2110 		0x39200, 0x3920c,
2111 		0x39220, 0x39220,
2112 		0x39240, 0x39240,
2113 		0x39600, 0x39600,
2114 		0x39608, 0x3960c,
2115 		0x39a00, 0x39a1c,
2116 		0x39e04, 0x39e20,
2117 		0x39e38, 0x39e3c,
2118 		0x39e80, 0x39e80,
2119 		0x39e88, 0x39ea8,
2120 		0x39eb0, 0x39eb4,
2121 		0x39ec8, 0x39ed4,
2122 		0x39fb8, 0x3a004,
2123 		0x3a208, 0x3a23c,
2124 		0x3a600, 0x3a630,
2125 		0x3aa00, 0x3aabc,
2126 		0x3ab00, 0x3ab70,
2127 		0x3b000, 0x3b048,
2128 		0x3b060, 0x3b09c,
2129 		0x3b0f0, 0x3b148,
2130 		0x3b160, 0x3b19c,
2131 		0x3b1f0, 0x3b2e4,
2132 		0x3b2f8, 0x3b3e4,
2133 		0x3b3f8, 0x3b448,
2134 		0x3b460, 0x3b49c,
2135 		0x3b4f0, 0x3b548,
2136 		0x3b560, 0x3b59c,
2137 		0x3b5f0, 0x3b6e4,
2138 		0x3b6f8, 0x3b7e4,
2139 		0x3b7f8, 0x3b7fc,
2140 		0x3b814, 0x3b814,
2141 		0x3b82c, 0x3b82c,
2142 		0x3b880, 0x3b88c,
2143 		0x3b8e8, 0x3b8ec,
2144 		0x3b900, 0x3b948,
2145 		0x3b960, 0x3b99c,
2146 		0x3b9f0, 0x3bae4,
2147 		0x3baf8, 0x3bb10,
2148 		0x3bb28, 0x3bb28,
2149 		0x3bb3c, 0x3bb50,
2150 		0x3bbf0, 0x3bc10,
2151 		0x3bc28, 0x3bc28,
2152 		0x3bc3c, 0x3bc50,
2153 		0x3bcf0, 0x3bcfc,
2154 		0x3c000, 0x3c030,
2155 		0x3c100, 0x3c144,
2156 		0x3c190, 0x3c1d0,
2157 		0x3c200, 0x3c318,
2158 		0x3c400, 0x3c52c,
2159 		0x3c540, 0x3c61c,
2160 		0x3c800, 0x3c834,
2161 		0x3c8c0, 0x3c908,
2162 		0x3c910, 0x3c9ac,
2163 		0x3ca00, 0x3ca04,
2164 		0x3ca0c, 0x3ca2c,
2165 		0x3ca44, 0x3ca50,
2166 		0x3ca74, 0x3cc24,
2167 		0x3cd08, 0x3cd14,
2168 		0x3cd1c, 0x3cd20,
2169 		0x3cd3c, 0x3cd50,
2170 		0x3d200, 0x3d20c,
2171 		0x3d220, 0x3d220,
2172 		0x3d240, 0x3d240,
2173 		0x3d600, 0x3d600,
2174 		0x3d608, 0x3d60c,
2175 		0x3da00, 0x3da1c,
2176 		0x3de04, 0x3de20,
2177 		0x3de38, 0x3de3c,
2178 		0x3de80, 0x3de80,
2179 		0x3de88, 0x3dea8,
2180 		0x3deb0, 0x3deb4,
2181 		0x3dec8, 0x3ded4,
2182 		0x3dfb8, 0x3e004,
2183 		0x3e208, 0x3e23c,
2184 		0x3e600, 0x3e630,
2185 		0x3ea00, 0x3eabc,
2186 		0x3eb00, 0x3eb70,
2187 		0x3f000, 0x3f048,
2188 		0x3f060, 0x3f09c,
2189 		0x3f0f0, 0x3f148,
2190 		0x3f160, 0x3f19c,
2191 		0x3f1f0, 0x3f2e4,
2192 		0x3f2f8, 0x3f3e4,
2193 		0x3f3f8, 0x3f448,
2194 		0x3f460, 0x3f49c,
2195 		0x3f4f0, 0x3f548,
2196 		0x3f560, 0x3f59c,
2197 		0x3f5f0, 0x3f6e4,
2198 		0x3f6f8, 0x3f7e4,
2199 		0x3f7f8, 0x3f7fc,
2200 		0x3f814, 0x3f814,
2201 		0x3f82c, 0x3f82c,
2202 		0x3f880, 0x3f88c,
2203 		0x3f8e8, 0x3f8ec,
2204 		0x3f900, 0x3f948,
2205 		0x3f960, 0x3f99c,
2206 		0x3f9f0, 0x3fae4,
2207 		0x3faf8, 0x3fb10,
2208 		0x3fb28, 0x3fb28,
2209 		0x3fb3c, 0x3fb50,
2210 		0x3fbf0, 0x3fc10,
2211 		0x3fc28, 0x3fc28,
2212 		0x3fc3c, 0x3fc50,
2213 		0x3fcf0, 0x3fcfc,
2214 		0x40000, 0x4000c,
2215 		0x40040, 0x40068,
2216 		0x40080, 0x40144,
2217 		0x40180, 0x4018c,
2218 		0x40200, 0x40298,
2219 		0x402ac, 0x4033c,
2220 		0x403f8, 0x403fc,
2221 		0x41304, 0x413c4,
2222 		0x41400, 0x4141c,
2223 		0x41480, 0x414d0,
2224 		0x44000, 0x44078,
2225 		0x440c0, 0x44278,
2226 		0x442c0, 0x44478,
2227 		0x444c0, 0x44678,
2228 		0x446c0, 0x44878,
2229 		0x448c0, 0x449fc,
2230 		0x45000, 0x45068,
2231 		0x45080, 0x45084,
2232 		0x450a0, 0x450b0,
2233 		0x45200, 0x45268,
2234 		0x45280, 0x45284,
2235 		0x452a0, 0x452b0,
2236 		0x460c0, 0x460e4,
2237 		0x47000, 0x4708c,
2238 		0x47200, 0x47250,
2239 		0x47400, 0x47420,
2240 		0x47600, 0x47618,
2241 		0x47800, 0x47814,
2242 		0x48000, 0x4800c,
2243 		0x48040, 0x48068,
2244 		0x48080, 0x48144,
2245 		0x48180, 0x4818c,
2246 		0x48200, 0x48298,
2247 		0x482ac, 0x4833c,
2248 		0x483f8, 0x483fc,
2249 		0x49304, 0x493c4,
2250 		0x49400, 0x4941c,
2251 		0x49480, 0x494d0,
2252 		0x4c000, 0x4c078,
2253 		0x4c0c0, 0x4c278,
2254 		0x4c2c0, 0x4c478,
2255 		0x4c4c0, 0x4c678,
2256 		0x4c6c0, 0x4c878,
2257 		0x4c8c0, 0x4c9fc,
2258 		0x4d000, 0x4d068,
2259 		0x4d080, 0x4d084,
2260 		0x4d0a0, 0x4d0b0,
2261 		0x4d200, 0x4d268,
2262 		0x4d280, 0x4d284,
2263 		0x4d2a0, 0x4d2b0,
2264 		0x4e0c0, 0x4e0e4,
2265 		0x4f000, 0x4f08c,
2266 		0x4f200, 0x4f250,
2267 		0x4f400, 0x4f420,
2268 		0x4f600, 0x4f618,
2269 		0x4f800, 0x4f814,
2270 		0x50000, 0x500cc,
2271 		0x50400, 0x50400,
2272 		0x50800, 0x508cc,
2273 		0x50c00, 0x50c00,
2274 		0x51000, 0x5101c,
2275 		0x51300, 0x51308,
2276 	};
2277 
2278 	int i;
2279 	struct adapter *ap = netdev2adap(dev);
2280 	static const unsigned int *reg_ranges;
2281 	int arr_size = 0, buf_size = 0;
2282 
2283 	if (is_t4(ap->params.chip)) {
2284 		reg_ranges = &t4_reg_ranges[0];
2285 		arr_size = ARRAY_SIZE(t4_reg_ranges);
2286 		buf_size = T4_REGMAP_SIZE;
2287 	} else {
2288 		reg_ranges = &t5_reg_ranges[0];
2289 		arr_size = ARRAY_SIZE(t5_reg_ranges);
2290 		buf_size = T5_REGMAP_SIZE;
2291 	}
2292 
2293 	regs->version = mk_adap_vers(ap);
2294 
2295 	memset(buf, 0, buf_size);
2296 	for (i = 0; i < arr_size; i += 2)
2297 		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2298 }
2299 
2300 static int restart_autoneg(struct net_device *dev)
2301 {
2302 	struct port_info *p = netdev_priv(dev);
2303 
2304 	if (!netif_running(dev))
2305 		return -EAGAIN;
2306 	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2307 		return -EINVAL;
2308 	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
2309 	return 0;
2310 }
2311 
2312 static int identify_port(struct net_device *dev,
2313 			 enum ethtool_phys_id_state state)
2314 {
2315 	unsigned int val;
2316 	struct adapter *adap = netdev2adap(dev);
2317 
2318 	if (state == ETHTOOL_ID_ACTIVE)
2319 		val = 0xffff;
2320 	else if (state == ETHTOOL_ID_INACTIVE)
2321 		val = 0;
2322 	else
2323 		return -EINVAL;
2324 
2325 	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
2326 }
2327 
2328 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2329 {
2330 	unsigned int v = 0;
2331 
2332 	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2333 	    type == FW_PORT_TYPE_BT_XAUI) {
2334 		v |= SUPPORTED_TP;
2335 		if (caps & FW_PORT_CAP_SPEED_100M)
2336 			v |= SUPPORTED_100baseT_Full;
2337 		if (caps & FW_PORT_CAP_SPEED_1G)
2338 			v |= SUPPORTED_1000baseT_Full;
2339 		if (caps & FW_PORT_CAP_SPEED_10G)
2340 			v |= SUPPORTED_10000baseT_Full;
2341 	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2342 		v |= SUPPORTED_Backplane;
2343 		if (caps & FW_PORT_CAP_SPEED_1G)
2344 			v |= SUPPORTED_1000baseKX_Full;
2345 		if (caps & FW_PORT_CAP_SPEED_10G)
2346 			v |= SUPPORTED_10000baseKX4_Full;
2347 	} else if (type == FW_PORT_TYPE_KR)
2348 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
2349 	else if (type == FW_PORT_TYPE_BP_AP)
2350 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2351 		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2352 	else if (type == FW_PORT_TYPE_BP4_AP)
2353 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2354 		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2355 		     SUPPORTED_10000baseKX4_Full;
2356 	else if (type == FW_PORT_TYPE_FIBER_XFI ||
2357 		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) {
2358 		v |= SUPPORTED_FIBRE;
2359 		if (caps & FW_PORT_CAP_SPEED_1G)
2360 			v |= SUPPORTED_1000baseT_Full;
2361 		if (caps & FW_PORT_CAP_SPEED_10G)
2362 			v |= SUPPORTED_10000baseT_Full;
2363 	} else if (type == FW_PORT_TYPE_BP40_BA)
2364 		v |= SUPPORTED_40000baseSR4_Full;
2365 
2366 	if (caps & FW_PORT_CAP_ANEG)
2367 		v |= SUPPORTED_Autoneg;
2368 	return v;
2369 }
2370 
2371 static unsigned int to_fw_linkcaps(unsigned int caps)
2372 {
2373 	unsigned int v = 0;
2374 
2375 	if (caps & ADVERTISED_100baseT_Full)
2376 		v |= FW_PORT_CAP_SPEED_100M;
2377 	if (caps & ADVERTISED_1000baseT_Full)
2378 		v |= FW_PORT_CAP_SPEED_1G;
2379 	if (caps & ADVERTISED_10000baseT_Full)
2380 		v |= FW_PORT_CAP_SPEED_10G;
2381 	if (caps & ADVERTISED_40000baseSR4_Full)
2382 		v |= FW_PORT_CAP_SPEED_40G;
2383 	return v;
2384 }
2385 
2386 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2387 {
2388 	const struct port_info *p = netdev_priv(dev);
2389 
2390 	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
2391 	    p->port_type == FW_PORT_TYPE_BT_XFI ||
2392 	    p->port_type == FW_PORT_TYPE_BT_XAUI)
2393 		cmd->port = PORT_TP;
2394 	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2395 		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
2396 		cmd->port = PORT_FIBRE;
2397 	else if (p->port_type == FW_PORT_TYPE_SFP ||
2398 		 p->port_type == FW_PORT_TYPE_QSFP_10G ||
2399 		 p->port_type == FW_PORT_TYPE_QSFP) {
2400 		if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
2401 		    p->mod_type == FW_PORT_MOD_TYPE_SR ||
2402 		    p->mod_type == FW_PORT_MOD_TYPE_ER ||
2403 		    p->mod_type == FW_PORT_MOD_TYPE_LRM)
2404 			cmd->port = PORT_FIBRE;
2405 		else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2406 			 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2407 			cmd->port = PORT_DA;
2408 		else
2409 			cmd->port = PORT_OTHER;
2410 	} else
2411 		cmd->port = PORT_OTHER;
2412 
2413 	if (p->mdio_addr >= 0) {
2414 		cmd->phy_address = p->mdio_addr;
2415 		cmd->transceiver = XCVR_EXTERNAL;
2416 		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2417 			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2418 	} else {
2419 		cmd->phy_address = 0;  /* not really, but no better option */
2420 		cmd->transceiver = XCVR_INTERNAL;
2421 		cmd->mdio_support = 0;
2422 	}
2423 
2424 	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2425 	cmd->advertising = from_fw_linkcaps(p->port_type,
2426 					    p->link_cfg.advertising);
2427 	ethtool_cmd_speed_set(cmd,
2428 			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
2429 	cmd->duplex = DUPLEX_FULL;
2430 	cmd->autoneg = p->link_cfg.autoneg;
2431 	cmd->maxtxpkt = 0;
2432 	cmd->maxrxpkt = 0;
2433 	return 0;
2434 }
2435 
2436 static unsigned int speed_to_caps(int speed)
2437 {
2438 	if (speed == 100)
2439 		return FW_PORT_CAP_SPEED_100M;
2440 	if (speed == 1000)
2441 		return FW_PORT_CAP_SPEED_1G;
2442 	if (speed == 10000)
2443 		return FW_PORT_CAP_SPEED_10G;
2444 	if (speed == 40000)
2445 		return FW_PORT_CAP_SPEED_40G;
2446 	return 0;
2447 }
2448 
2449 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2450 {
2451 	unsigned int cap;
2452 	struct port_info *p = netdev_priv(dev);
2453 	struct link_config *lc = &p->link_cfg;
2454 	u32 speed = ethtool_cmd_speed(cmd);
2455 
2456 	if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
2457 		return -EINVAL;
2458 
2459 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2460 		/*
2461 		 * PHY offers a single speed.  See if that's what's
2462 		 * being requested.
2463 		 */
2464 		if (cmd->autoneg == AUTONEG_DISABLE &&
2465 		    (lc->supported & speed_to_caps(speed)))
2466 			return 0;
2467 		return -EINVAL;
2468 	}
2469 
2470 	if (cmd->autoneg == AUTONEG_DISABLE) {
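		/* Only a forced speed of 100Mb/s is accepted here; the
		 * 1/10/40Gb/s speeds must be autonegotiated.
		 */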
2471 		cap = speed_to_caps(speed);
2472 
2473 		if (!(lc->supported & cap) ||
2474 		    (speed == 1000) ||
2475 		    (speed == 10000) ||
2476 		    (speed == 40000))
2477 			return -EINVAL;
2478 		lc->requested_speed = cap;
2479 		lc->advertising = 0;
2480 	} else {
2481 		cap = to_fw_linkcaps(cmd->advertising);
2482 		if (!(lc->supported & cap))
2483 			return -EINVAL;
2484 		lc->requested_speed = 0;
2485 		lc->advertising = cap | FW_PORT_CAP_ANEG;
2486 	}
2487 	lc->autoneg = cmd->autoneg;
2488 
2489 	if (netif_running(dev))
2490 		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2491 				     lc);
2492 	return 0;
2493 }
2494 
2495 static void get_pauseparam(struct net_device *dev,
2496 			   struct ethtool_pauseparam *epause)
2497 {
2498 	struct port_info *p = netdev_priv(dev);
2499 
2500 	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2501 	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2502 	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2503 }
2504 
2505 static int set_pauseparam(struct net_device *dev,
2506 			  struct ethtool_pauseparam *epause)
2507 {
2508 	struct port_info *p = netdev_priv(dev);
2509 	struct link_config *lc = &p->link_cfg;
2510 
2511 	if (epause->autoneg == AUTONEG_DISABLE)
2512 		lc->requested_fc = 0;
2513 	else if (lc->supported & FW_PORT_CAP_ANEG)
2514 		lc->requested_fc = PAUSE_AUTONEG;
2515 	else
2516 		return -EINVAL;
2517 
2518 	if (epause->rx_pause)
2519 		lc->requested_fc |= PAUSE_RX;
2520 	if (epause->tx_pause)
2521 		lc->requested_fc |= PAUSE_TX;
2522 	if (netif_running(dev))
2523 		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2524 				     lc);
2525 	return 0;
2526 }
2527 
2528 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2529 {
2530 	const struct port_info *pi = netdev_priv(dev);
2531 	const struct sge *s = &pi->adapter->sge;
2532 
2533 	e->rx_max_pending = MAX_RX_BUFFERS;
2534 	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2535 	e->rx_jumbo_max_pending = 0;
2536 	e->tx_max_pending = MAX_TXQ_ENTRIES;
2537 
2538 	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2539 	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2540 	e->rx_jumbo_pending = 0;
2541 	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2542 }
2543 
2544 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2545 {
2546 	int i;
2547 	const struct port_info *pi = netdev_priv(dev);
2548 	struct adapter *adapter = pi->adapter;
2549 	struct sge *s = &adapter->sge;
2550 
2551 	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2552 	    e->tx_pending > MAX_TXQ_ENTRIES ||
2553 	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2554 	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2555 	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2556 		return -EINVAL;
2557 
2558 	if (adapter->flags & FULL_INIT_DONE)
2559 		return -EBUSY;
2560 
2561 	for (i = 0; i < pi->nqsets; ++i) {
2562 		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2563 		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2564 		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2565 	}
2566 	return 0;
2567 }
2568 
2569 static int closest_timer(const struct sge *s, int time)
2570 {
2571 	int i, delta, match = 0, min_delta = INT_MAX;
2572 
2573 	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2574 		delta = time - s->timer_val[i];
2575 		if (delta < 0)
2576 			delta = -delta;
2577 		if (delta < min_delta) {
2578 			min_delta = delta;
2579 			match = i;
2580 		}
2581 	}
2582 	return match;
2583 }
2584 
2585 static int closest_thres(const struct sge *s, int thres)
2586 {
2587 	int i, delta, match = 0, min_delta = INT_MAX;
2588 
2589 	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2590 		delta = thres - s->counter_val[i];
2591 		if (delta < 0)
2592 			delta = -delta;
2593 		if (delta < min_delta) {
2594 			min_delta = delta;
2595 			match = i;
2596 		}
2597 	}
2598 	return match;
2599 }
2600 
2601 /*
2602  * Return a queue's interrupt hold-off time in us.  0 means no timer.
2603  */
2604 static unsigned int qtimer_val(const struct adapter *adap,
2605 			       const struct sge_rspq *q)
2606 {
2607 	unsigned int idx = q->intr_params >> 1;
2608 
2609 	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2610 }
2611 
2612 /**
2613  *	set_rspq_intr_params - set a queue's interrupt holdoff parameters
2614  *	@q: the Rx queue
2615  *	@us: the hold-off time in us, or 0 to disable timer
2616  *	@cnt: the hold-off packet count, or 0 to disable counter
2617  *
2618  *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
2619  *	one of the two needs to be enabled for the queue to generate interrupts.
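 *
 *	The requested values are rounded to the closest entries in the
 *	adapter's SGE timer and packet-count threshold tables.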
2620  */
2621 static int set_rspq_intr_params(struct sge_rspq *q,
2622 				unsigned int us, unsigned int cnt)
2623 {
2624 	struct adapter *adap = q->adap;
2625 
2626 	if ((us | cnt) == 0)
2627 		cnt = 1;
2628 
2629 	if (cnt) {
2630 		int err;
2631 		u32 v, new_idx;
2632 
2633 		new_idx = closest_thres(&adap->sge, cnt);
2634 		if (q->desc && q->pktcnt_idx != new_idx) {
2635 			/* the queue has already been created, update it */
2636 			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
2637 			    FW_PARAMS_PARAM_X_V(
2638 					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2639 			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
2640 			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2641 					    &new_idx);
2642 			if (err)
2643 				return err;
2644 		}
2645 		q->pktcnt_idx = new_idx;
2646 	}
2647 
	/* us == 0 means no hold-off timer: index 6 lies past the end of the
	 * SGE timer table, which qtimer_val() reports back as 0.
	 */
	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2649 	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2650 	return 0;
2651 }
2652 
2653 /**
 * set_rx_intr_params - set a net device's RX interrupt holdoff parameters
2655  * @dev: the network device
2656  * @us: the hold-off time in us, or 0 to disable timer
2657  * @cnt: the hold-off packet count, or 0 to disable counter
2658  *
2659  * Set the RX interrupt hold-off parameters for a network device.
2660  */
2661 static int set_rx_intr_params(struct net_device *dev,
2662 			      unsigned int us, unsigned int cnt)
2663 {
2664 	int i, err;
2665 	struct port_info *pi = netdev_priv(dev);
2666 	struct adapter *adap = pi->adapter;
2667 	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2668 
2669 	for (i = 0; i < pi->nqsets; i++, q++) {
2670 		err = set_rspq_intr_params(&q->rspq, us, cnt);
2671 		if (err)
2672 			return err;
2673 	}
2674 	return 0;
2675 }
2676 
2677 static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
2678 {
2679 	int i;
2680 	struct port_info *pi = netdev_priv(dev);
2681 	struct adapter *adap = pi->adapter;
2682 	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2683 
2684 	for (i = 0; i < pi->nqsets; i++, q++)
2685 		q->rspq.adaptive_rx = adaptive_rx;
2686 
2687 	return 0;
2688 }
2689 
2690 static int get_adaptive_rx_setting(struct net_device *dev)
2691 {
2692 	struct port_info *pi = netdev_priv(dev);
2693 	struct adapter *adap = pi->adapter;
2694 	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2695 
2696 	return q->rspq.adaptive_rx;
2697 }
2698 
2699 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2700 {
2701 	set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
2702 	return set_rx_intr_params(dev, c->rx_coalesce_usecs,
2703 				  c->rx_max_coalesced_frames);
2704 }
2705 
2706 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2707 {
2708 	const struct port_info *pi = netdev_priv(dev);
2709 	const struct adapter *adap = pi->adapter;
2710 	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2711 
2712 	c->rx_coalesce_usecs = qtimer_val(adap, rq);
2713 	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2714 		adap->sge.counter_val[rq->pktcnt_idx] : 0;
2715 	c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
2716 	return 0;
2717 }
2718 
2719 /**
2720  *	eeprom_ptov - translate a physical EEPROM address to virtual
2721  *	@phys_addr: the physical EEPROM address
2722  *	@fn: the PCI function number
2723  *	@sz: size of function-specific area
2724  *
2725  *	Translate a physical EEPROM address to virtual.  The first 1K is
2726  *	accessed through virtual addresses starting at 31K, the rest is
2727  *	accessed through virtual addresses starting at 0.
2728  *
2729  *	The mapping is as follows:
2730  *	[0..1K) -> [31K..32K)
2731  *	[1K..1K+A) -> [31K-A..31K)
2732  *	[1K+A..ES) -> [0..ES-A-1K)
2733  *
2734  *	where A = @fn * @sz, and ES = EEPROM size.
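 *
 *	For example, with @fn = 1 and @sz = 1K, physical address 512 maps to
 *	31K + 512, physical address 1536 maps to 30K + 512, and physical
 *	address 4K maps to 2K.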
2735  */
2736 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2737 {
2738 	fn *= sz;
2739 	if (phys_addr < 1024)
2740 		return phys_addr + (31 << 10);
2741 	if (phys_addr < 1024 + fn)
2742 		return 31744 - fn + phys_addr - 1024;
2743 	if (phys_addr < EEPROMSIZE)
2744 		return phys_addr - 1024 - fn;
2745 	return -EINVAL;
2746 }
2747 
2748 /*
2749  * The next two routines implement eeprom read/write from physical addresses.
2750  */
2751 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2752 {
2753 	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2754 
2755 	if (vaddr >= 0)
2756 		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2757 	return vaddr < 0 ? vaddr : 0;
2758 }
2759 
2760 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2761 {
2762 	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2763 
2764 	if (vaddr >= 0)
2765 		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2766 	return vaddr < 0 ? vaddr : 0;
2767 }
2768 
2769 #define EEPROM_MAGIC 0x38E2F10C
2770 
2771 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2772 		      u8 *data)
2773 {
2774 	int i, err = 0;
2775 	struct adapter *adapter = netdev2adap(dev);
2776 
2777 	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2778 	if (!buf)
2779 		return -ENOMEM;
2780 
2781 	e->magic = EEPROM_MAGIC;
2782 	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2783 		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2784 
2785 	if (!err)
2786 		memcpy(data, buf + e->offset, e->len);
2787 	kfree(buf);
2788 	return err;
2789 }
2790 
2791 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2792 		      u8 *data)
2793 {
2794 	u8 *buf;
2795 	int err = 0;
2796 	u32 aligned_offset, aligned_len, *p;
2797 	struct adapter *adapter = netdev2adap(dev);
2798 
2799 	if (eeprom->magic != EEPROM_MAGIC)
2800 		return -EINVAL;
2801 
2802 	aligned_offset = eeprom->offset & ~3;
2803 	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2804 
2805 	if (adapter->fn > 0) {
2806 		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2807 
2808 		if (aligned_offset < start ||
2809 		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
2810 			return -EPERM;
2811 	}
2812 
2813 	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2814 		/*
2815 		 * RMW possibly needed for first or last words.
2816 		 */
2817 		buf = kmalloc(aligned_len, GFP_KERNEL);
2818 		if (!buf)
2819 			return -ENOMEM;
2820 		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2821 		if (!err && aligned_len > 4)
2822 			err = eeprom_rd_phys(adapter,
2823 					     aligned_offset + aligned_len - 4,
2824 					     (u32 *)&buf[aligned_len - 4]);
2825 		if (err)
2826 			goto out;
2827 		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2828 	} else
2829 		buf = data;
2830 
2831 	err = t4_seeprom_wp(adapter, false);
2832 	if (err)
2833 		goto out;
2834 
2835 	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2836 		err = eeprom_wr_phys(adapter, aligned_offset, *p);
2837 		aligned_offset += 4;
2838 	}
2839 
2840 	if (!err)
2841 		err = t4_seeprom_wp(adapter, true);
2842 out:
2843 	if (buf != data)
2844 		kfree(buf);
2845 	return err;
2846 }
2847 
2848 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2849 {
2850 	int ret;
2851 	const struct firmware *fw;
2852 	struct adapter *adap = netdev2adap(netdev);
2853 	unsigned int mbox = PCIE_FW_MASTER_M + 1;
2854 
2855 	ef->data[sizeof(ef->data) - 1] = '\0';
2856 	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2857 	if (ret < 0)
2858 		return ret;
2859 
2860 	/* If the adapter has been fully initialized then we'll go ahead and
2861 	 * try to get the firmware's cooperation in upgrading to the new
	 * firmware image; otherwise we'll try to do the entire job from the
2863 	 * host ... and we always "force" the operation in this path.
2864 	 */
2865 	if (adap->flags & FULL_INIT_DONE)
2866 		mbox = adap->mbox;
2867 
2868 	ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
2869 	release_firmware(fw);
2870 	if (!ret)
		dev_info(adap->pdev_dev,
			 "loaded firmware %s, reload cxgb4 driver\n", ef->data);
2873 	return ret;
2874 }
2875 
2876 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2877 #define BCAST_CRC 0xa0ccc1a6
2878 
2879 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2880 {
2881 	wol->supported = WAKE_BCAST | WAKE_MAGIC;
2882 	wol->wolopts = netdev2adap(dev)->wol;
2883 	memset(&wol->sopass, 0, sizeof(wol->sopass));
2884 }
2885 
2886 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2887 {
2888 	int err = 0;
2889 	struct port_info *pi = netdev_priv(dev);
2890 
2891 	if (wol->wolopts & ~WOL_SUPPORTED)
2892 		return -EINVAL;
2893 	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2894 			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2895 	if (wol->wolopts & WAKE_BCAST) {
2896 		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2897 					~0ULL, 0, false);
2898 		if (!err)
2899 			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2900 						~6ULL, ~0ULL, BCAST_CRC, true);
2901 	} else
2902 		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2903 	return err;
2904 }
2905 
2906 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2907 {
2908 	const struct port_info *pi = netdev_priv(dev);
2909 	netdev_features_t changed = dev->features ^ features;
2910 	int err;
2911 
2912 	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2913 		return 0;
2914 
2915 	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2916 			    -1, -1, -1,
2917 			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2918 	if (unlikely(err))
2919 		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
2920 	return err;
2921 }
2922 
2923 static u32 get_rss_table_size(struct net_device *dev)
2924 {
2925 	const struct port_info *pi = netdev_priv(dev);
2926 
2927 	return pi->rss_size;
2928 }
2929 
2930 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
2931 {
2932 	const struct port_info *pi = netdev_priv(dev);
2933 	unsigned int n = pi->rss_size;
2934 
2935 	if (hfunc)
2936 		*hfunc = ETH_RSS_HASH_TOP;
2937 	if (!p)
2938 		return 0;
2939 	while (n--)
2940 		p[n] = pi->rss[n];
2941 	return 0;
2942 }
2943 
2944 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
2945 			 const u8 hfunc)
2946 {
2947 	unsigned int i;
2948 	struct port_info *pi = netdev_priv(dev);
2949 
2950 	/* We require at least one supported parameter to be changed and no
2951 	 * change in any of the unsupported parameters
2952 	 */
2953 	if (key ||
2954 	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
2955 		return -EOPNOTSUPP;
2956 	if (!p)
2957 		return 0;
2958 
2959 	for (i = 0; i < pi->rss_size; i++)
2960 		pi->rss[i] = p[i];
2961 	if (pi->adapter->flags & FULL_INIT_DONE)
2962 		return write_rss(pi, pi->rss);
2963 	return 0;
2964 }
2965 
2966 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2967 		     u32 *rules)
2968 {
2969 	const struct port_info *pi = netdev_priv(dev);
2970 
2971 	switch (info->cmd) {
2972 	case ETHTOOL_GRXFH: {
2973 		unsigned int v = pi->rss_mode;
2974 
2975 		info->data = 0;
2976 		switch (info->flow_type) {
2977 		case TCP_V4_FLOW:
2978 			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
2979 				info->data = RXH_IP_SRC | RXH_IP_DST |
2980 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
2981 			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
2982 				info->data = RXH_IP_SRC | RXH_IP_DST;
2983 			break;
2984 		case UDP_V4_FLOW:
2985 			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
2986 			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
2987 				info->data = RXH_IP_SRC | RXH_IP_DST |
2988 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
2989 			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
2990 				info->data = RXH_IP_SRC | RXH_IP_DST;
2991 			break;
2992 		case SCTP_V4_FLOW:
2993 		case AH_ESP_V4_FLOW:
2994 		case IPV4_FLOW:
2995 			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
2996 				info->data = RXH_IP_SRC | RXH_IP_DST;
2997 			break;
2998 		case TCP_V6_FLOW:
2999 			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
3000 				info->data = RXH_IP_SRC | RXH_IP_DST |
3001 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
3002 			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
3003 				info->data = RXH_IP_SRC | RXH_IP_DST;
3004 			break;
3005 		case UDP_V6_FLOW:
3006 			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
3007 			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
3008 				info->data = RXH_IP_SRC | RXH_IP_DST |
3009 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
3010 			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
3011 				info->data = RXH_IP_SRC | RXH_IP_DST;
3012 			break;
3013 		case SCTP_V6_FLOW:
3014 		case AH_ESP_V6_FLOW:
3015 		case IPV6_FLOW:
3016 			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
3017 				info->data = RXH_IP_SRC | RXH_IP_DST;
3018 			break;
3019 		}
3020 		return 0;
3021 	}
3022 	case ETHTOOL_GRXRINGS:
3023 		info->data = pi->nqsets;
3024 		return 0;
3025 	}
3026 	return -EOPNOTSUPP;
3027 }
3028 
3029 static const struct ethtool_ops cxgb_ethtool_ops = {
3030 	.get_settings      = get_settings,
3031 	.set_settings      = set_settings,
3032 	.get_drvinfo       = get_drvinfo,
3033 	.get_msglevel      = get_msglevel,
3034 	.set_msglevel      = set_msglevel,
3035 	.get_ringparam     = get_sge_param,
3036 	.set_ringparam     = set_sge_param,
3037 	.get_coalesce      = get_coalesce,
3038 	.set_coalesce      = set_coalesce,
3039 	.get_eeprom_len    = get_eeprom_len,
3040 	.get_eeprom        = get_eeprom,
3041 	.set_eeprom        = set_eeprom,
3042 	.get_pauseparam    = get_pauseparam,
3043 	.set_pauseparam    = set_pauseparam,
3044 	.get_link          = ethtool_op_get_link,
3045 	.get_strings       = get_strings,
3046 	.set_phys_id       = identify_port,
3047 	.nway_reset        = restart_autoneg,
3048 	.get_sset_count    = get_sset_count,
3049 	.get_ethtool_stats = get_stats,
3050 	.get_regs_len      = get_regs_len,
3051 	.get_regs          = get_regs,
3052 	.get_wol           = get_wol,
3053 	.set_wol           = set_wol,
3054 	.get_rxnfc         = get_rxnfc,
3055 	.get_rxfh_indir_size = get_rss_table_size,
3056 	.get_rxfh	   = get_rss_table,
3057 	.set_rxfh	   = set_rss_table,
3058 	.flash_device      = set_flash,
3059 };
3060 
3061 static int setup_debugfs(struct adapter *adap)
3062 {
3063 	if (IS_ERR_OR_NULL(adap->debugfs_root))
3064 		return -1;
3065 
3066 #ifdef CONFIG_DEBUG_FS
3067 	t4_setup_debugfs(adap);
3068 #endif
3069 	return 0;
3070 }
3071 
3072 /*
3073  * upper-layer driver support
3074  */
3075 
3076 /*
3077  * Allocate an active-open TID and set it to the supplied value.
3078  */
3079 int cxgb4_alloc_atid(struct tid_info *t, void *data)
3080 {
3081 	int atid = -1;
3082 
3083 	spin_lock_bh(&t->atid_lock);
3084 	if (t->afree) {
3085 		union aopen_entry *p = t->afree;
3086 
3087 		atid = (p - t->atid_tab) + t->atid_base;
3088 		t->afree = p->next;
3089 		p->data = data;
3090 		t->atids_in_use++;
3091 	}
3092 	spin_unlock_bh(&t->atid_lock);
3093 	return atid;
3094 }
3095 EXPORT_SYMBOL(cxgb4_alloc_atid);
3096 
3097 /*
3098  * Release an active-open TID.
3099  */
3100 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3101 {
3102 	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
3103 
3104 	spin_lock_bh(&t->atid_lock);
3105 	p->next = t->afree;
3106 	t->afree = p;
3107 	t->atids_in_use--;
3108 	spin_unlock_bh(&t->atid_lock);
3109 }
3110 EXPORT_SYMBOL(cxgb4_free_atid);
3111 
3112 /*
3113  * Allocate a server TID and set it to the supplied value.
3114  */
3115 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3116 {
3117 	int stid;
3118 
3119 	spin_lock_bh(&t->stid_lock);
3120 	if (family == PF_INET) {
3121 		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3122 		if (stid < t->nstids)
3123 			__set_bit(stid, t->stid_bmap);
3124 		else
3125 			stid = -1;
3126 	} else {
3127 		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
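		/* bitmap_find_free_region() returns a negative errno on
		 * failure; normalise that to -1 like the IPv4 path above.
		 */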
3128 		if (stid < 0)
3129 			stid = -1;
3130 	}
3131 	if (stid >= 0) {
3132 		t->stid_tab[stid].data = data;
3133 		stid += t->stid_base;
		/* IPv6 requires a maximum of 520 bits or 16 cells in the TCAM.
		 * This is equivalent to 4 TIDs. With CLIP enabled it
3136 		 * needs 2 TIDs.
3137 		 */
3138 		if (family == PF_INET)
3139 			t->stids_in_use++;
3140 		else
3141 			t->stids_in_use += 4;
3142 	}
3143 	spin_unlock_bh(&t->stid_lock);
3144 	return stid;
3145 }
3146 EXPORT_SYMBOL(cxgb4_alloc_stid);
3147 
3148 /* Allocate a server filter TID and set it to the supplied value.
3149  */
3150 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3151 {
3152 	int stid;
3153 
3154 	spin_lock_bh(&t->stid_lock);
3155 	if (family == PF_INET) {
3156 		stid = find_next_zero_bit(t->stid_bmap,
3157 				t->nstids + t->nsftids, t->nstids);
3158 		if (stid < (t->nstids + t->nsftids))
3159 			__set_bit(stid, t->stid_bmap);
3160 		else
3161 			stid = -1;
3162 	} else {
3163 		stid = -1;
3164 	}
3165 	if (stid >= 0) {
3166 		t->stid_tab[stid].data = data;
3167 		stid -= t->nstids;
3168 		stid += t->sftid_base;
3169 		t->stids_in_use++;
3170 	}
3171 	spin_unlock_bh(&t->stid_lock);
3172 	return stid;
3173 }
3174 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3175 
3176 /* Release a server TID.
3177  */
3178 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3179 {
3180 	/* Is it a server filter TID? */
3181 	if (t->nsftids && (stid >= t->sftid_base)) {
3182 		stid -= t->sftid_base;
3183 		stid += t->nstids;
3184 	} else {
3185 		stid -= t->stid_base;
3186 	}
3187 
3188 	spin_lock_bh(&t->stid_lock);
3189 	if (family == PF_INET)
3190 		__clear_bit(stid, t->stid_bmap);
3191 	else
3192 		bitmap_release_region(t->stid_bmap, stid, 2);
3193 	t->stid_tab[stid].data = NULL;
3194 	if (family == PF_INET)
3195 		t->stids_in_use--;
3196 	else
3197 		t->stids_in_use -= 4;
3198 	spin_unlock_bh(&t->stid_lock);
3199 }
3200 EXPORT_SYMBOL(cxgb4_free_stid);
3201 
3202 /*
3203  * Populate a TID_RELEASE WR.  Caller must properly size the skb.
3204  */
3205 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3206 			   unsigned int tid)
3207 {
3208 	struct cpl_tid_release *req;
3209 
3210 	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3211 	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3212 	INIT_TP_WR(req, tid);
3213 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3214 }
3215 
3216 /*
3217  * Queue a TID release request and if necessary schedule a work queue to
3218  * process it.
3219  */
3220 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3221 				    unsigned int tid)
3222 {
3223 	void **p = &t->tid_tab[tid];
3224 	struct adapter *adap = container_of(t, struct adapter, tids);
3225 
3226 	spin_lock_bh(&adap->tid_release_lock);
3227 	*p = adap->tid_release_head;
3228 	/* Low 2 bits encode the Tx channel number */
3229 	adap->tid_release_head = (void **)((uintptr_t)p | chan);
3230 	if (!adap->tid_release_task_busy) {
3231 		adap->tid_release_task_busy = true;
3232 		queue_work(adap->workq, &adap->tid_release_task);
3233 	}
3234 	spin_unlock_bh(&adap->tid_release_lock);
3235 }
3236 
3237 /*
3238  * Process the list of pending TID release requests.
3239  */
3240 static void process_tid_release_list(struct work_struct *work)
3241 {
3242 	struct sk_buff *skb;
3243 	struct adapter *adap;
3244 
3245 	adap = container_of(work, struct adapter, tid_release_task);
3246 
3247 	spin_lock_bh(&adap->tid_release_lock);
3248 	while (adap->tid_release_head) {
3249 		void **p = adap->tid_release_head;
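		/* the Tx channel is encoded in the low 2 bits of the pointer */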
3250 		unsigned int chan = (uintptr_t)p & 3;
3251 		p = (void *)p - chan;
3252 
3253 		adap->tid_release_head = *p;
3254 		*p = NULL;
3255 		spin_unlock_bh(&adap->tid_release_lock);
3256 
3257 		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3258 					 GFP_KERNEL)))
3259 			schedule_timeout_uninterruptible(1);
3260 
3261 		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3262 		t4_ofld_send(adap, skb);
3263 		spin_lock_bh(&adap->tid_release_lock);
3264 	}
3265 	adap->tid_release_task_busy = false;
3266 	spin_unlock_bh(&adap->tid_release_lock);
3267 }
3268 
3269 /*
3270  * Release a TID and inform HW.  If we are unable to allocate the release
3271  * message we defer to a work queue.
3272  */
3273 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3274 {
3275 	void *old;
3276 	struct sk_buff *skb;
3277 	struct adapter *adap = container_of(t, struct adapter, tids);
3278 
3279 	old = t->tid_tab[tid];
3280 	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3281 	if (likely(skb)) {
3282 		t->tid_tab[tid] = NULL;
3283 		mk_tid_release(skb, chan, tid);
3284 		t4_ofld_send(adap, skb);
3285 	} else
3286 		cxgb4_queue_tid_release(t, chan, tid);
3287 	if (old)
3288 		atomic_dec(&t->tids_in_use);
3289 }
3290 EXPORT_SYMBOL(cxgb4_remove_tid);
3291 
3292 /*
3293  * Allocate and initialize the TID tables.  Returns 0 on success.
3294  */
3295 static int tid_init(struct tid_info *t)
3296 {
3297 	size_t size;
3298 	unsigned int stid_bmap_size;
3299 	unsigned int natids = t->natids;
3300 	struct adapter *adap = container_of(t, struct adapter, tids);
3301 
3302 	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3303 	size = t->ntids * sizeof(*t->tid_tab) +
3304 	       natids * sizeof(*t->atid_tab) +
3305 	       t->nstids * sizeof(*t->stid_tab) +
3306 	       t->nsftids * sizeof(*t->stid_tab) +
3307 	       stid_bmap_size * sizeof(long) +
3308 	       t->nftids * sizeof(*t->ftid_tab) +
3309 	       t->nsftids * sizeof(*t->ftid_tab);
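	/* A single contiguous allocation backs all of the tables sized
	 * above; it is carved into its individual regions below.
	 */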
3310 
3311 	t->tid_tab = t4_alloc_mem(size);
3312 	if (!t->tid_tab)
3313 		return -ENOMEM;
3314 
3315 	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3316 	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3317 	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3318 	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3319 	spin_lock_init(&t->stid_lock);
3320 	spin_lock_init(&t->atid_lock);
3321 
3322 	t->stids_in_use = 0;
3323 	t->afree = NULL;
3324 	t->atids_in_use = 0;
3325 	atomic_set(&t->tids_in_use, 0);
3326 
3327 	/* Setup the free list for atid_tab and clear the stid bitmap. */
3328 	if (natids) {
3329 		while (--natids)
3330 			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3331 		t->afree = t->atid_tab;
3332 	}
3333 	bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3334 	/* Reserve stid 0 for T4/T5 adapters */
3335 	if (!t->stid_base &&
3336 	    (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3337 		__set_bit(0, t->stid_bmap);
3338 
3339 	return 0;
3340 }
3341 
3342 int cxgb4_clip_get(const struct net_device *dev,
3343 		   const struct in6_addr *lip)
3344 {
3345 	struct adapter *adap;
3346 	struct fw_clip_cmd c;
3347 
3348 	adap = netdev2adap(dev);
3349 	memset(&c, 0, sizeof(c));
3350 	c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
3351 			FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3352 	c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c));
3353 	c.ip_hi = *(__be64 *)(lip->s6_addr);
3354 	c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3355 	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3356 }
3357 EXPORT_SYMBOL(cxgb4_clip_get);
3358 
3359 int cxgb4_clip_release(const struct net_device *dev,
3360 		       const struct in6_addr *lip)
3361 {
3362 	struct adapter *adap;
3363 	struct fw_clip_cmd c;
3364 
3365 	adap = netdev2adap(dev);
3366 	memset(&c, 0, sizeof(c));
3367 	c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
3368 			FW_CMD_REQUEST_F | FW_CMD_READ_F);
3369 	c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c));
3370 	c.ip_hi = *(__be64 *)(lip->s6_addr);
3371 	c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3372 	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3373 }
3374 EXPORT_SYMBOL(cxgb4_clip_release);
3375 
3376 /**
3377  *	cxgb4_create_server - create an IP server
3378  *	@dev: the device
3379  *	@stid: the server TID
3380  *	@sip: local IP address to bind server to
3381  *	@sport: the server's TCP port
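 *	@vlan: the VLAN tag (not used by this routine)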
3382  *	@queue: queue to direct messages from this server to
3383  *
3384  *	Create an IP server for the given port and address.
3385  *	Returns <0 on error and one of the %NET_XMIT_* values on success.
3386  */
3387 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3388 			__be32 sip, __be16 sport, __be16 vlan,
3389 			unsigned int queue)
3390 {
3391 	unsigned int chan;
3392 	struct sk_buff *skb;
3393 	struct adapter *adap;
3394 	struct cpl_pass_open_req *req;
3395 	int ret;
3396 
3397 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3398 	if (!skb)
3399 		return -ENOMEM;
3400 
3401 	adap = netdev2adap(dev);
3402 	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3403 	INIT_TP_WR(req, 0);
3404 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3405 	req->local_port = sport;
3406 	req->peer_port = htons(0);
3407 	req->local_ip = sip;
3408 	req->peer_ip = htonl(0);
3409 	chan = rxq_to_chan(&adap->sge, queue);
3410 	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
3411 	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3412 				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3413 	ret = t4_mgmt_tx(adap, skb);
3414 	return net_xmit_eval(ret);
3415 }
3416 EXPORT_SYMBOL(cxgb4_create_server);
3417 
/**	cxgb4_create_server6 - create an IPv6 server
3419  *	@dev: the device
3420  *	@stid: the server TID
3421  *	@sip: local IPv6 address to bind server to
3422  *	@sport: the server's TCP port
3423  *	@queue: queue to direct messages from this server to
3424  *
3425  *	Create an IPv6 server for the given port and address.
3426  *	Returns <0 on error and one of the %NET_XMIT_* values on success.
3427  */
3428 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3429 			 const struct in6_addr *sip, __be16 sport,
3430 			 unsigned int queue)
3431 {
3432 	unsigned int chan;
3433 	struct sk_buff *skb;
3434 	struct adapter *adap;
3435 	struct cpl_pass_open_req6 *req;
3436 	int ret;
3437 
3438 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3439 	if (!skb)
3440 		return -ENOMEM;
3441 
3442 	adap = netdev2adap(dev);
3443 	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3444 	INIT_TP_WR(req, 0);
3445 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3446 	req->local_port = sport;
3447 	req->peer_port = htons(0);
3448 	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3449 	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3450 	req->peer_ip_hi = cpu_to_be64(0);
3451 	req->peer_ip_lo = cpu_to_be64(0);
3452 	chan = rxq_to_chan(&adap->sge, queue);
3453 	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
3454 	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3455 				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3456 	ret = t4_mgmt_tx(adap, skb);
3457 	return net_xmit_eval(ret);
3458 }
3459 EXPORT_SYMBOL(cxgb4_create_server6);
3460 
3461 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3462 			unsigned int queue, bool ipv6)
3463 {
3464 	struct sk_buff *skb;
3465 	struct adapter *adap;
3466 	struct cpl_close_listsvr_req *req;
3467 	int ret;
3468 
3469 	adap = netdev2adap(dev);
3470 
3471 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3472 	if (!skb)
3473 		return -ENOMEM;
3474 
3475 	req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3476 	INIT_TP_WR(req, 0);
3477 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3478 	req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3479 				LISTSVR_IPV6(0)) | QUEUENO(queue));
3480 	ret = t4_mgmt_tx(adap, skb);
3481 	return net_xmit_eval(ret);
3482 }
3483 EXPORT_SYMBOL(cxgb4_remove_server);
3484 
3485 /**
3486  *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3487  *	@mtus: the HW MTU table
3488  *	@mtu: the target MTU
3489  *	@idx: index of selected entry in the MTU table
3490  *
3491  *	Returns the index and the value in the HW MTU table that is closest to
3492  *	but does not exceed @mtu, unless @mtu is smaller than any value in the
3493  *	table, in which case that smallest available value is selected.
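 *
 *	For example, with table entries of 1500, 2002 and 9000 and @mtu set
 *	to 4000, the value 2002 is returned and, if @idx is supplied, *@idx
 *	is set to its position in the table.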
3494  */
3495 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3496 			    unsigned int *idx)
3497 {
3498 	unsigned int i = 0;
3499 
3500 	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3501 		++i;
3502 	if (idx)
3503 		*idx = i;
3504 	return mtus[i];
3505 }
3506 EXPORT_SYMBOL(cxgb4_best_mtu);
3507 
3508 /**
3509  *     cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3510  *     @mtus: the HW MTU table
3511  *     @header_size: Header Size
3512  *     @data_size_max: maximum Data Segment Size
3513  *     @data_size_align: desired Data Segment Size Alignment (2^N)
3514  *     @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3515  *
3516  *     Similar to cxgb4_best_mtu() but instead of searching the Hardware
3517  *     MTU Table based solely on a Maximum MTU parameter, we break that
3518  *     parameter up into a Header Size and Maximum Data Segment Size, and
3519  *     provide a desired Data Segment Size Alignment.  If we find an MTU in
3520  *     the Hardware MTU Table which will result in a Data Segment Size with
3521  *     the requested alignment _and_ that MTU isn't "too far" from the
3522  *     closest MTU, then we'll return that rather than the closest MTU.
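 *
 *     For example, with a 40-byte header and a 512-byte Data Segment Size
 *     Alignment, an MTU table entry of 4136 (Data Segment Size 4096) would
 *     be preferred over a slightly larger neighbouring entry whose Data
 *     Segment Size is not a multiple of 512.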
3523  */
3524 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3525 				    unsigned short header_size,
3526 				    unsigned short data_size_max,
3527 				    unsigned short data_size_align,
3528 				    unsigned int *mtu_idxp)
3529 {
3530 	unsigned short max_mtu = header_size + data_size_max;
3531 	unsigned short data_size_align_mask = data_size_align - 1;
3532 	int mtu_idx, aligned_mtu_idx;
3533 
3534 	/* Scan the MTU Table till we find an MTU which is larger than our
3535 	 * Maximum MTU or we reach the end of the table.  Along the way,
3536 	 * record the last MTU found, if any, which will result in a Data
3537 	 * Segment Length matching the requested alignment.
3538 	 */
3539 	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3540 		unsigned short data_size = mtus[mtu_idx] - header_size;
3541 
3542 		/* If this MTU minus the Header Size would result in a
3543 		 * Data Segment Size of the desired alignment, remember it.
3544 		 */
3545 		if ((data_size & data_size_align_mask) == 0)
3546 			aligned_mtu_idx = mtu_idx;
3547 
3548 		/* If we're not at the end of the Hardware MTU Table and the
3549 		 * next element is larger than our Maximum MTU, drop out of
3550 		 * the loop.
3551 		 */
3552 		if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3553 			break;
3554 	}
3555 
3556 	/* If we fell out of the loop because we ran to the end of the table,
3557 	 * then we just have to use the last [largest] entry.
3558 	 */
3559 	if (mtu_idx == NMTUS)
3560 		mtu_idx--;
3561 
3562 	/* If we found an MTU which resulted in the requested Data Segment
3563 	 * Length alignment and that's "not far" from the largest MTU which is
3564 	 * less than or equal to the maximum MTU, then use that.
3565 	 */
3566 	if (aligned_mtu_idx >= 0 &&
3567 	    mtu_idx - aligned_mtu_idx <= 1)
3568 		mtu_idx = aligned_mtu_idx;
3569 
3570 	/* If the caller has passed in an MTU Index pointer, pass the
3571 	 * MTU Index back.  Return the MTU value.
3572 	 */
3573 	if (mtu_idxp)
3574 		*mtu_idxp = mtu_idx;
3575 	return mtus[mtu_idx];
3576 }
3577 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
3578 
3579 /**
3580  *	cxgb4_port_chan - get the HW channel of a port
3581  *	@dev: the net device for the port
3582  *
3583  *	Return the HW Tx channel of the given port.
3584  */
3585 unsigned int cxgb4_port_chan(const struct net_device *dev)
3586 {
3587 	return netdev2pinfo(dev)->tx_chan;
3588 }
3589 EXPORT_SYMBOL(cxgb4_port_chan);
3590 
3591 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3592 {
3593 	struct adapter *adap = netdev2adap(dev);
3594 	u32 v1, v2, lp_count, hp_count;
3595 
3596 	v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3597 	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3598 	if (is_t4(adap->params.chip)) {
3599 		lp_count = G_LP_COUNT(v1);
3600 		hp_count = G_HP_COUNT(v1);
3601 	} else {
3602 		lp_count = G_LP_COUNT_T5(v1);
3603 		hp_count = G_HP_COUNT_T5(v2);
3604 	}
3605 	return lpfifo ? lp_count : hp_count;
3606 }
3607 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3608 
3609 /**
3610  *	cxgb4_port_viid - get the VI id of a port
3611  *	@dev: the net device for the port
3612  *
3613  *	Return the VI id of the given port.
3614  */
3615 unsigned int cxgb4_port_viid(const struct net_device *dev)
3616 {
3617 	return netdev2pinfo(dev)->viid;
3618 }
3619 EXPORT_SYMBOL(cxgb4_port_viid);
3620 
3621 /**
3622  *	cxgb4_port_idx - get the index of a port
3623  *	@dev: the net device for the port
3624  *
3625  *	Return the index of the given port.
3626  */
3627 unsigned int cxgb4_port_idx(const struct net_device *dev)
3628 {
3629 	return netdev2pinfo(dev)->port_id;
3630 }
3631 EXPORT_SYMBOL(cxgb4_port_idx);
3632 
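/**
 *	cxgb4_get_tcp_stats - read the adapter's TCP statistics
 *	@pdev: the PCI device of the adapter
 *	@v4: where to return the IPv4 TCP statistics
 *	@v6: where to return the IPv6 TCP statistics
 *
 *	Read the TP TCP statistics for IPv4 and IPv6 while holding the
 *	adapter's statistics lock.
 */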
3633 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3634 			 struct tp_tcp_stats *v6)
3635 {
3636 	struct adapter *adap = pci_get_drvdata(pdev);
3637 
3638 	spin_lock(&adap->stats_lock);
3639 	t4_tp_get_tcp_stats(adap, v4, v6);
3640 	spin_unlock(&adap->stats_lock);
3641 }
3642 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3643 
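/**
 *	cxgb4_iscsi_init - program the iSCSI ULP RX settings
 *	@dev: the net device for the port
 *	@tag_mask: the iSCSI tag mask to program
 *	@pgsz_order: the four page-size orders (HPZ0-HPZ3) to program
 *
 *	Write the ULP RX iSCSI tag mask and page-size registers on behalf of
 *	an iSCSI upper-layer driver.
 */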
3644 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3645 		      const unsigned int *pgsz_order)
3646 {
3647 	struct adapter *adap = netdev2adap(dev);
3648 
3649 	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3650 	t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3651 		     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3652 		     HPZ3(pgsz_order[3]));
3653 }
3654 EXPORT_SYMBOL(cxgb4_iscsi_init);
3655 
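/**
 *	cxgb4_flush_eq_cache - flush the SGE egress context cache
 *	@dev: the net device for the port
 *
 *	Ask the firmware to write back the adapter's cached egress queue
 *	contexts.  Returns 0 on success or a negative errno.
 */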
3656 int cxgb4_flush_eq_cache(struct net_device *dev)
3657 {
3658 	struct adapter *adap = netdev2adap(dev);
3659 	int ret;
3660 
3661 	ret = t4_fwaddrspace_write(adap, adap->mbox,
3662 				   0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3663 	return ret;
3664 }
3665 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3666 
3667 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3668 {
3669 	u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3670 	__be64 indices;
3671 	int ret;
3672 
3673 	spin_lock(&adap->win0_lock);
3674 	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
3675 			   sizeof(indices), (__be32 *)&indices,
3676 			   T4_MEMORY_READ);
3677 	spin_unlock(&adap->win0_lock);
3678 	if (!ret) {
3679 		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3680 		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3681 	}
3682 	return ret;
3683 }
3684 
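/**
 *	cxgb4_sync_txq_pidx - resynchronize a TX queue's producer index
 *	@dev: the net device for the port
 *	@qid: the egress queue context id
 *	@pidx: the software producer index
 *	@size: the size of the queue in descriptors
 *
 *	Read the hardware producer index of the given egress queue and, if
 *	it differs from @pidx, ring the kernel doorbell with the delta so
 *	the hardware catches up.  Returns 0 on success or a negative errno
 *	if the queue context couldn't be read.
 */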
3685 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3686 			u16 size)
3687 {
3688 	struct adapter *adap = netdev2adap(dev);
3689 	u16 hw_pidx, hw_cidx;
3690 	int ret;
3691 
3692 	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3693 	if (ret)
3694 		goto out;
3695 
3696 	if (pidx != hw_pidx) {
3697 		u16 delta;
3698 
3699 		if (pidx >= hw_pidx)
3700 			delta = pidx - hw_pidx;
3701 		else
3702 			delta = size - hw_pidx + pidx;
3703 		wmb();
3704 		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3705 			     QID(qid) | PIDX(delta));
3706 	}
3707 out:
3708 	return ret;
3709 }
3710 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3711 
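/**
 *	cxgb4_disable_db_coalescing - disable SGE doorbell coalescing
 *	@dev: the net device for the port
 *
 *	Set the NOCOALESCE bit in the SGE doorbell control register so that
 *	doorbell writes are processed individually.
 */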
3712 void cxgb4_disable_db_coalescing(struct net_device *dev)
3713 {
3714 	struct adapter *adap;
3715 
3716 	adap = netdev2adap(dev);
3717 	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3718 			 F_NOCOALESCE);
3719 }
3720 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3721 
3722 void cxgb4_enable_db_coalescing(struct net_device *dev)
3723 {
3724 	struct adapter *adap;
3725 
3726 	adap = netdev2adap(dev);
3727 	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3728 }
3729 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3730 
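/**
 *	cxgb4_read_tpte - read a TPT entry from adapter memory
 *	@dev: the net device for the port
 *	@stag: the steering tag identifying the TPT entry
 *	@tpte: buffer for the 32-byte TPT entry
 *
 *	Translate @stag into an offset within the adapter's STAG region,
 *	work out which memory (EDC0, EDC1, MC0 or MC1) that offset lands
 *	in, and read the 32-byte entry into @tpte.  Returns 0 on success or
 *	-EINVAL if the offset is beyond the end of adapter memory.
 */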
3731 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
3732 {
3733 	struct adapter *adap;
3734 	u32 offset, memtype, memaddr;
3735 	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
3736 	u32 edc0_end, edc1_end, mc0_end, mc1_end;
3737 	int ret;
3738 
3739 	adap = netdev2adap(dev);
3740 
3741 	offset = ((stag >> 8) * 32) + adap->vres.stag.start;
3742 
3743 	/* Figure out where the offset lands in the Memory Type/Address scheme.
3744 	 * This code assumes that the memory is laid out starting at offset 0
3745 	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
3746 	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
3747 	 * MC0, and some have both MC0 and MC1.
3748 	 */
3749 	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
3750 	edc0_size = EDRAM0_SIZE_G(size) << 20;
3751 	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
3752 	edc1_size = EDRAM1_SIZE_G(size) << 20;
3753 	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
3754 	mc0_size = EXT_MEM0_SIZE_G(size) << 20;
3755 
3756 	edc0_end = edc0_size;
3757 	edc1_end = edc0_end + edc1_size;
3758 	mc0_end = edc1_end + mc0_size;
3759 
3760 	if (offset < edc0_end) {
3761 		memtype = MEM_EDC0;
3762 		memaddr = offset;
3763 	} else if (offset < edc1_end) {
3764 		memtype = MEM_EDC1;
3765 		memaddr = offset - edc0_end;
3766 	} else {
3767 		if (offset < mc0_end) {
3768 			memtype = MEM_MC0;
3769 			memaddr = offset - edc1_end;
3770 		} else if (is_t4(adap->params.chip)) {
3771 			/* T4 only has a single memory channel */
3772 			goto err;
3773 		} else {
3774 			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
3775 			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
3776 			mc1_end = mc0_end + mc1_size;
3777 			if (offset < mc1_end) {
3778 				memtype = MEM_MC1;
3779 				memaddr = offset - mc0_end;
3780 			} else {
3781 				/* offset beyond the end of any memory */
3782 				goto err;
3783 			}
3784 		}
3785 	}
3786 
3787 	spin_lock(&adap->win0_lock);
3788 	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
3789 	spin_unlock(&adap->win0_lock);
3790 	return ret;
3791 
3792 err:
3793 	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
3794 		stag, offset);
3795 	return -EINVAL;
3796 }
3797 EXPORT_SYMBOL(cxgb4_read_tpte);
3798 
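/**
 *	cxgb4_read_sge_timestamp - read the adapter's SGE timestamp
 *	@dev: the net device for the port
 *
 *	Return the current 64-bit SGE timestamp, assembled from the LO and
 *	HI timestamp registers.
 */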
3799 u64 cxgb4_read_sge_timestamp(struct net_device *dev)
3800 {
3801 	u32 hi, lo;
3802 	struct adapter *adap;
3803 
3804 	adap = netdev2adap(dev);
3805 	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
3806 	hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
3807 
3808 	return ((u64)hi << 32) | (u64)lo;
3809 }
3810 EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
3811 
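/**
 *	cxgb4_bar2_sge_qregs - return BAR2 SGE Queue register information
 *	@dev: the net device for the port
 *	@qid: the Queue ID
 *	@qtype: the Ingress or Egress type of the queue
 *	@pbar2_qoffset: where to return the BAR2 Queue Offset
 *	@pbar2_qid: where to return the BAR2 Queue ID
 *
 *	Translate the public queue type into its internal equivalent and
 *	return the BAR2 offset and Queue ID to use when writing the queue's
 *	user doorbell.
 */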
3812 int cxgb4_bar2_sge_qregs(struct net_device *dev,
3813 			 unsigned int qid,
3814 			 enum cxgb4_bar2_qtype qtype,
3815 			 u64 *pbar2_qoffset,
3816 			 unsigned int *pbar2_qid)
3817 {
3818 	return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev),
3819 				 qid,
3820 				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
3821 				  ? T4_BAR2_QTYPE_EGRESS
3822 				  : T4_BAR2_QTYPE_INGRESS),
3823 				 pbar2_qoffset,
3824 				 pbar2_qid);
3825 }
3826 EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
3827 
3828 static struct pci_driver cxgb4_driver;
3829 
3830 static void check_neigh_update(struct neighbour *neigh)
3831 {
3832 	const struct device *parent;
3833 	const struct net_device *netdev = neigh->dev;
3834 
3835 	if (netdev->priv_flags & IFF_802_1Q_VLAN)
3836 		netdev = vlan_dev_real_dev(netdev);
3837 	parent = netdev->dev.parent;
3838 	if (parent && parent->driver == &cxgb4_driver.driver)
3839 		t4_l2t_update(dev_get_drvdata(parent), neigh);
3840 }
3841 
3842 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3843 		       void *data)
3844 {
3845 	switch (event) {
3846 	case NETEVENT_NEIGH_UPDATE:
3847 		check_neigh_update(data);
3848 		break;
3849 	case NETEVENT_REDIRECT:
3850 	default:
3851 		break;
3852 	}
3853 	return 0;
3854 }
3855 
3856 static bool netevent_registered;
3857 static struct notifier_block cxgb4_netevent_nb = {
3858 	.notifier_call = netevent_cb
3859 };
3860 
3861 static void drain_db_fifo(struct adapter *adap, int usecs)
3862 {
3863 	u32 v1, v2, lp_count, hp_count;
3864 
3865 	do {
3866 		v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3867 		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3868 		if (is_t4(adap->params.chip)) {
3869 			lp_count = G_LP_COUNT(v1);
3870 			hp_count = G_HP_COUNT(v1);
3871 		} else {
3872 			lp_count = G_LP_COUNT_T5(v1);
3873 			hp_count = G_HP_COUNT_T5(v2);
3874 		}
3875 
3876 		if (lp_count == 0 && hp_count == 0)
3877 			break;
3878 		set_current_state(TASK_UNINTERRUPTIBLE);
3879 		schedule_timeout(usecs_to_jiffies(usecs));
3880 	} while (1);
3881 }
3882 
3883 static void disable_txq_db(struct sge_txq *q)
3884 {
3885 	unsigned long flags;
3886 
3887 	spin_lock_irqsave(&q->db_lock, flags);
3888 	q->db_disabled = 1;
3889 	spin_unlock_irqrestore(&q->db_lock, flags);
3890 }
3891 
3892 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
3893 {
3894 	spin_lock_irq(&q->db_lock);
3895 	if (q->db_pidx_inc) {
3896 		/* Make sure that all writes to the TX descriptors
3897 		 * are committed before we tell HW about them.
3898 		 */
3899 		wmb();
3900 		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3901 			     QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
3902 		q->db_pidx_inc = 0;
3903 	}
3904 	q->db_disabled = 0;
3905 	spin_unlock_irq(&q->db_lock);
3906 }
3907 
3908 static void disable_dbs(struct adapter *adap)
3909 {
3910 	int i;
3911 
3912 	for_each_ethrxq(&adap->sge, i)
3913 		disable_txq_db(&adap->sge.ethtxq[i].q);
3914 	for_each_ofldrxq(&adap->sge, i)
3915 		disable_txq_db(&adap->sge.ofldtxq[i].q);
3916 	for_each_port(adap, i)
3917 		disable_txq_db(&adap->sge.ctrlq[i].q);
3918 }
3919 
3920 static void enable_dbs(struct adapter *adap)
3921 {
3922 	int i;
3923 
3924 	for_each_ethrxq(&adap->sge, i)
3925 		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
3926 	for_each_ofldrxq(&adap->sge, i)
3927 		enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
3928 	for_each_port(adap, i)
3929 		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
3930 }
3931 
3932 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3933 {
3934 	if (adap->uld_handle[CXGB4_ULD_RDMA])
3935 		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3936 				cmd);
3937 }
3938 
3939 static void process_db_full(struct work_struct *work)
3940 {
3941 	struct adapter *adap;
3942 
3943 	adap = container_of(work, struct adapter, db_full_task);
3944 
3945 	drain_db_fifo(adap, dbfifo_drain_delay);
3946 	enable_dbs(adap);
3947 	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3948 	t4_set_reg_field(adap, SGE_INT_ENABLE3,
3949 			 DBFIFO_HP_INT | DBFIFO_LP_INT,
3950 			 DBFIFO_HP_INT | DBFIFO_LP_INT);
3951 }
3952 
3953 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3954 {
3955 	u16 hw_pidx, hw_cidx;
3956 	int ret;
3957 
3958 	spin_lock_irq(&q->db_lock);
3959 	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3960 	if (ret)
3961 		goto out;
3962 	if (q->db_pidx != hw_pidx) {
3963 		u16 delta;
3964 
3965 		if (q->db_pidx >= hw_pidx)
3966 			delta = q->db_pidx - hw_pidx;
3967 		else
3968 			delta = q->size - hw_pidx + q->db_pidx;
3969 		wmb();
3970 		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3971 			     QID(q->cntxt_id) | PIDX(delta));
3972 	}
3973 out:
3974 	q->db_disabled = 0;
3975 	q->db_pidx_inc = 0;
3976 	spin_unlock_irq(&q->db_lock);
3977 	if (ret)
3978 		CH_WARN(adap, "DB drop recovery failed.\n");
3979 }
3980 static void recover_all_queues(struct adapter *adap)
3981 {
3982 	int i;
3983 
3984 	for_each_ethrxq(&adap->sge, i)
3985 		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3986 	for_each_ofldrxq(&adap->sge, i)
3987 		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3988 	for_each_port(adap, i)
3989 		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3990 }
3991 
3992 static void process_db_drop(struct work_struct *work)
3993 {
3994 	struct adapter *adap;
3995 
3996 	adap = container_of(work, struct adapter, db_drop_task);
3997 
3998 	if (is_t4(adap->params.chip)) {
3999 		drain_db_fifo(adap, dbfifo_drain_delay);
4000 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
4001 		drain_db_fifo(adap, dbfifo_drain_delay);
4002 		recover_all_queues(adap);
4003 		drain_db_fifo(adap, dbfifo_drain_delay);
4004 		enable_dbs(adap);
4005 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4006 	} else {
4007 		u32 dropped_db = t4_read_reg(adap, 0x010ac);
4008 		u16 qid = (dropped_db >> 15) & 0x1ffff;
4009 		u16 pidx_inc = dropped_db & 0x1fff;
4010 		u64 bar2_qoffset;
4011 		unsigned int bar2_qid;
4012 		int ret;
4013 
4014 		ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
4015 					&bar2_qoffset, &bar2_qid);
4016 		if (ret)
4017 			dev_err(adap->pdev_dev, "doorbell drop recovery: "
4018 				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
4019 		else
4020 			writel(PIDX_T5(pidx_inc) | QID(bar2_qid),
4021 			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
4022 
4023 		/* Re-enable BAR2 WC */
4024 		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
4025 	}
4026 
4027 	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
4028 }
4029 
4030 void t4_db_full(struct adapter *adap)
4031 {
4032 	if (is_t4(adap->params.chip)) {
4033 		disable_dbs(adap);
4034 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4035 		t4_set_reg_field(adap, SGE_INT_ENABLE3,
4036 				 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
4037 		queue_work(adap->workq, &adap->db_full_task);
4038 	}
4039 }
4040 
4041 void t4_db_dropped(struct adapter *adap)
4042 {
4043 	if (is_t4(adap->params.chip)) {
4044 		disable_dbs(adap);
4045 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4046 	}
4047 	queue_work(adap->workq, &adap->db_drop_task);
4048 }
4049 
4050 static void uld_attach(struct adapter *adap, unsigned int uld)
4051 {
4052 	void *handle;
4053 	struct cxgb4_lld_info lli;
4054 	unsigned short i;
4055 
4056 	lli.pdev = adap->pdev;
4057 	lli.pf = adap->fn;
4058 	lli.l2t = adap->l2t;
4059 	lli.tids = &adap->tids;
4060 	lli.ports = adap->port;
4061 	lli.vr = &adap->vres;
4062 	lli.mtus = adap->params.mtus;
4063 	if (uld == CXGB4_ULD_RDMA) {
4064 		lli.rxq_ids = adap->sge.rdma_rxq;
4065 		lli.ciq_ids = adap->sge.rdma_ciq;
4066 		lli.nrxq = adap->sge.rdmaqs;
4067 		lli.nciq = adap->sge.rdmaciqs;
4068 	} else if (uld == CXGB4_ULD_ISCSI) {
4069 		lli.rxq_ids = adap->sge.ofld_rxq;
4070 		lli.nrxq = adap->sge.ofldqsets;
4071 	}
4072 	lli.ntxq = adap->sge.ofldqsets;
4073 	lli.nchan = adap->params.nports;
4074 	lli.nports = adap->params.nports;
4075 	lli.wr_cred = adap->params.ofldq_wr_cred;
4076 	lli.adapter_type = adap->params.chip;
4077 	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
4078 	lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
4079 	lli.udb_density = 1 << adap->params.sge.eq_qpp;
4080 	lli.ucq_density = 1 << adap->params.sge.iq_qpp;
4081 	lli.filt_mode = adap->params.tp.vlan_pri_map;
4082 	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
4083 	for (i = 0; i < NCHAN; i++)
4084 		lli.tx_modq[i] = i;
4085 	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
4086 	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
4087 	lli.fw_vers = adap->params.fw_vers;
4088 	lli.dbfifo_int_thresh = dbfifo_int_thresh;
4089 	lli.sge_ingpadboundary = adap->sge.fl_align;
4090 	lli.sge_egrstatuspagesize = adap->sge.stat_len;
4091 	lli.sge_pktshift = adap->sge.pktshift;
4092 	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
4093 	lli.max_ordird_qp = adap->params.max_ordird_qp;
4094 	lli.max_ird_adapter = adap->params.max_ird_adapter;
4095 	lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
4096 
4097 	handle = ulds[uld].add(&lli);
4098 	if (IS_ERR(handle)) {
4099 		dev_warn(adap->pdev_dev,
4100 			 "could not attach to the %s driver, error %ld\n",
4101 			 uld_str[uld], PTR_ERR(handle));
4102 		return;
4103 	}
4104 
4105 	adap->uld_handle[uld] = handle;
4106 
4107 	if (!netevent_registered) {
4108 		register_netevent_notifier(&cxgb4_netevent_nb);
4109 		netevent_registered = true;
4110 	}
4111 
4112 	if (adap->flags & FULL_INIT_DONE)
4113 		ulds[uld].state_change(handle, CXGB4_STATE_UP);
4114 }
4115 
4116 static void attach_ulds(struct adapter *adap)
4117 {
4118 	unsigned int i;
4119 
4120 	spin_lock(&adap_rcu_lock);
4121 	list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
4122 	spin_unlock(&adap_rcu_lock);
4123 
4124 	mutex_lock(&uld_mutex);
4125 	list_add_tail(&adap->list_node, &adapter_list);
4126 	for (i = 0; i < CXGB4_ULD_MAX; i++)
4127 		if (ulds[i].add)
4128 			uld_attach(adap, i);
4129 	mutex_unlock(&uld_mutex);
4130 }
4131 
4132 static void detach_ulds(struct adapter *adap)
4133 {
4134 	unsigned int i;
4135 
4136 	mutex_lock(&uld_mutex);
4137 	list_del(&adap->list_node);
4138 	for (i = 0; i < CXGB4_ULD_MAX; i++)
4139 		if (adap->uld_handle[i]) {
4140 			ulds[i].state_change(adap->uld_handle[i],
4141 					     CXGB4_STATE_DETACH);
4142 			adap->uld_handle[i] = NULL;
4143 		}
4144 	if (netevent_registered && list_empty(&adapter_list)) {
4145 		unregister_netevent_notifier(&cxgb4_netevent_nb);
4146 		netevent_registered = false;
4147 	}
4148 	mutex_unlock(&uld_mutex);
4149 
4150 	spin_lock(&adap_rcu_lock);
4151 	list_del_rcu(&adap->rcu_node);
4152 	spin_unlock(&adap_rcu_lock);
4153 }
4154 
4155 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
4156 {
4157 	unsigned int i;
4158 
4159 	mutex_lock(&uld_mutex);
4160 	for (i = 0; i < CXGB4_ULD_MAX; i++)
4161 		if (adap->uld_handle[i])
4162 			ulds[i].state_change(adap->uld_handle[i], new_state);
4163 	mutex_unlock(&uld_mutex);
4164 }
4165 
4166 /**
4167  *	cxgb4_register_uld - register an upper-layer driver
4168  *	@type: the ULD type
4169  *	@p: the ULD methods
4170  *
4171  *	Registers an upper-layer driver with this driver and notifies the ULD
4172  *	about any presently available devices that support its type.  Returns
4173  *	%-EBUSY if a ULD of the same type is already registered.
4174  */
4175 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
4176 {
4177 	int ret = 0;
4178 	struct adapter *adap;
4179 
4180 	if (type >= CXGB4_ULD_MAX)
4181 		return -EINVAL;
4182 	mutex_lock(&uld_mutex);
4183 	if (ulds[type].add) {
4184 		ret = -EBUSY;
4185 		goto out;
4186 	}
4187 	ulds[type] = *p;
4188 	list_for_each_entry(adap, &adapter_list, list_node)
4189 		uld_attach(adap, type);
4190 out:	mutex_unlock(&uld_mutex);
4191 	return ret;
4192 }
4193 EXPORT_SYMBOL(cxgb4_register_uld);
4194 
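/* Illustrative sketch of ULD registration (the structure instance and its
 * callbacks below are hypothetical, not part of this driver):
 *
 *	static struct cxgb4_uld_info my_uld_info = {
 *		.add		= my_uld_add,
 *		.state_change	= my_uld_state_change,
 *		.control	= my_uld_control,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 *	if (err)
 *		pr_err("ULD registration failed: %d\n", err);
 */
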
4195 /**
4196  *	cxgb4_unregister_uld - unregister an upper-layer driver
4197  *	@type: the ULD type
4198  *
4199  *	Unregisters an existing upper-layer driver.
4200  */
4201 int cxgb4_unregister_uld(enum cxgb4_uld type)
4202 {
4203 	struct adapter *adap;
4204 
4205 	if (type >= CXGB4_ULD_MAX)
4206 		return -EINVAL;
4207 	mutex_lock(&uld_mutex);
4208 	list_for_each_entry(adap, &adapter_list, list_node)
4209 		adap->uld_handle[type] = NULL;
4210 	ulds[type].add = NULL;
4211 	mutex_unlock(&uld_mutex);
4212 	return 0;
4213 }
4214 EXPORT_SYMBOL(cxgb4_unregister_uld);
4215 
4216 /* Check if the netdev on which an event has occurred belongs to us or not.
4217  * Return success (true) if it belongs, otherwise failure (false).
4218  * Called with rcu_read_lock() held.
4219  */
4220 #if IS_ENABLED(CONFIG_IPV6)
4221 static bool cxgb4_netdev(const struct net_device *netdev)
4222 {
4223 	struct adapter *adap;
4224 	int i;
4225 
4226 	list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
4227 		for (i = 0; i < MAX_NPORTS; i++)
4228 			if (adap->port[i] == netdev)
4229 				return true;
4230 	return false;
4231 }
4232 
4233 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
4234 		    unsigned long event)
4235 {
4236 	int ret = NOTIFY_DONE;
4237 
4238 	rcu_read_lock();
4239 	if (cxgb4_netdev(event_dev)) {
4240 		switch (event) {
4241 		case NETDEV_UP:
4242 			ret = cxgb4_clip_get(event_dev, &ifa->addr);
4243 			if (ret < 0) {
4244 				rcu_read_unlock();
4245 				return ret;
4246 			}
4247 			ret = NOTIFY_OK;
4248 			break;
4249 		case NETDEV_DOWN:
4250 			cxgb4_clip_release(event_dev, &ifa->addr);
4251 			ret = NOTIFY_OK;
4252 			break;
4253 		default:
4254 			break;
4255 		}
4256 	}
4257 	rcu_read_unlock();
4258 	return ret;
4259 }
4260 
4261 static int cxgb4_inet6addr_handler(struct notifier_block *this,
4262 		unsigned long event, void *data)
4263 {
4264 	struct inet6_ifaddr *ifa = data;
4265 	struct net_device *event_dev;
4266 	int ret = NOTIFY_DONE;
4267 	struct bonding *bond = netdev_priv(ifa->idev->dev);
4268 	struct list_head *iter;
4269 	struct slave *slave;
4270 	struct pci_dev *first_pdev = NULL;
4271 
4272 	if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
4273 		event_dev = vlan_dev_real_dev(ifa->idev->dev);
4274 		ret = clip_add(event_dev, ifa, event);
4275 	} else if (ifa->idev->dev->flags & IFF_MASTER) {
4276 		/* Two different adapters may have their ports enslaved in the
4277 		 * same bond.  We need to find each such adapter and add the
4278 		 * CLIP entry to it exactly once.
4279 		 */
4280 		bond_for_each_slave(bond, slave, iter) {
4281 			if (!first_pdev) {
4282 				ret = clip_add(slave->dev, ifa, event);
4283 				/* Only initialize first_pdev if clip_add
4284 				 * succeeded, since that means it is our device
4285 				 */
4286 				if (ret == NOTIFY_OK)
4287 					first_pdev = to_pci_dev(
4288 							slave->dev->dev.parent);
4289 			} else if (first_pdev !=
4290 				   to_pci_dev(slave->dev->dev.parent))
4291 					ret = clip_add(slave->dev, ifa, event);
4292 		}
4293 	} else
4294 		ret = clip_add(ifa->idev->dev, ifa, event);
4295 
4296 	return ret;
4297 }
4298 
4299 static struct notifier_block cxgb4_inet6addr_notifier = {
4300 	.notifier_call = cxgb4_inet6addr_handler
4301 };
4302 
4303 /* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
4304  * a physical device.
4305  * The physical device reference is needed to send the actual CLIP command.
4306  */
4307 static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
4308 {
4309 	struct inet6_dev *idev = NULL;
4310 	struct inet6_ifaddr *ifa;
4311 	int ret = 0;
4312 
4313 	idev = __in6_dev_get(root_dev);
4314 	if (!idev)
4315 		return ret;
4316 
4317 	read_lock_bh(&idev->lock);
4318 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
4319 		ret = cxgb4_clip_get(dev, &ifa->addr);
4320 		if (ret < 0)
4321 			break;
4322 	}
4323 	read_unlock_bh(&idev->lock);
4324 
4325 	return ret;
4326 }
4327 
4328 static int update_root_dev_clip(struct net_device *dev)
4329 {
4330 	struct net_device *root_dev = NULL;
4331 	int i, ret = 0;
4332 
4333 	/* First populate the real net device's IPv6 addresses */
4334 	ret = update_dev_clip(dev, dev);
4335 	if (ret)
4336 		return ret;
4337 
4338 	/* Parse all bond and vlan devices layered on top of the physical dev */
4339 	root_dev = netdev_master_upper_dev_get_rcu(dev);
4340 	if (root_dev) {
4341 		ret = update_dev_clip(root_dev, dev);
4342 		if (ret)
4343 			return ret;
4344 	}
4345 
4346 	for (i = 0; i < VLAN_N_VID; i++) {
4347 		root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
4348 		if (!root_dev)
4349 			continue;
4350 
4351 		ret = update_dev_clip(root_dev, dev);
4352 		if (ret)
4353 			break;
4354 	}
4355 	return ret;
4356 }
4357 
4358 static void update_clip(const struct adapter *adap)
4359 {
4360 	int i;
4361 	struct net_device *dev;
4362 	int ret;
4363 
4364 	rcu_read_lock();
4365 
4366 	for (i = 0; i < MAX_NPORTS; i++) {
4367 		dev = adap->port[i];
4368 		ret = 0;
4369 
4370 		if (dev)
4371 			ret = update_root_dev_clip(dev);
4372 
4373 		if (ret < 0)
4374 			break;
4375 	}
4376 	rcu_read_unlock();
4377 }
4378 #endif /* IS_ENABLED(CONFIG_IPV6) */
4379 
4380 /**
4381  *	cxgb_up - enable the adapter
4382  *	@adap: adapter being enabled
4383  *
4384  *	Called when the first port is enabled, this function performs the
4385  *	actions necessary to make an adapter operational, such as completing
4386  *	the initialization of HW modules, and enabling interrupts.
4387  *
4388  *	Must be called with the rtnl lock held.
4389  */
4390 static int cxgb_up(struct adapter *adap)
4391 {
4392 	int err;
4393 
4394 	err = setup_sge_queues(adap);
4395 	if (err)
4396 		goto out;
4397 	err = setup_rss(adap);
4398 	if (err)
4399 		goto freeq;
4400 
4401 	if (adap->flags & USING_MSIX) {
4402 		name_msix_vecs(adap);
4403 		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4404 				  adap->msix_info[0].desc, adap);
4405 		if (err)
4406 			goto irq_err;
4407 
4408 		err = request_msix_queue_irqs(adap);
4409 		if (err) {
4410 			free_irq(adap->msix_info[0].vec, adap);
4411 			goto irq_err;
4412 		}
4413 	} else {
4414 		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4415 				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
4416 				  adap->port[0]->name, adap);
4417 		if (err)
4418 			goto irq_err;
4419 	}
4420 	enable_rx(adap);
4421 	t4_sge_start(adap);
4422 	t4_intr_enable(adap);
4423 	adap->flags |= FULL_INIT_DONE;
4424 	notify_ulds(adap, CXGB4_STATE_UP);
4425 #if IS_ENABLED(CONFIG_IPV6)
4426 	update_clip(adap);
4427 #endif
4428  out:
4429 	return err;
4430  irq_err:
4431 	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
4432  freeq:
4433 	t4_free_sge_resources(adap);
4434 	goto out;
4435 }
4436 
4437 static void cxgb_down(struct adapter *adapter)
4438 {
4439 	t4_intr_disable(adapter);
4440 	cancel_work_sync(&adapter->tid_release_task);
4441 	cancel_work_sync(&adapter->db_full_task);
4442 	cancel_work_sync(&adapter->db_drop_task);
4443 	adapter->tid_release_task_busy = false;
4444 	adapter->tid_release_head = NULL;
4445 
4446 	if (adapter->flags & USING_MSIX) {
4447 		free_msix_queue_irqs(adapter);
4448 		free_irq(adapter->msix_info[0].vec, adapter);
4449 	} else
4450 		free_irq(adapter->pdev->irq, adapter);
4451 	quiesce_rx(adapter);
4452 	t4_sge_stop(adapter);
4453 	t4_free_sge_resources(adapter);
4454 	adapter->flags &= ~FULL_INIT_DONE;
4455 }
4456 
4457 /*
4458  * net_device operations
4459  */
4460 static int cxgb_open(struct net_device *dev)
4461 {
4462 	int err;
4463 	struct port_info *pi = netdev_priv(dev);
4464 	struct adapter *adapter = pi->adapter;
4465 
4466 	netif_carrier_off(dev);
4467 
4468 	if (!(adapter->flags & FULL_INIT_DONE)) {
4469 		err = cxgb_up(adapter);
4470 		if (err < 0)
4471 			return err;
4472 	}
4473 
4474 	err = link_start(dev);
4475 	if (!err)
4476 		netif_tx_start_all_queues(dev);
4477 	return err;
4478 }
4479 
4480 static int cxgb_close(struct net_device *dev)
4481 {
4482 	struct port_info *pi = netdev_priv(dev);
4483 	struct adapter *adapter = pi->adapter;
4484 
4485 	netif_tx_stop_all_queues(dev);
4486 	netif_carrier_off(dev);
4487 	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
4488 }
4489 
4490 /* Return an error number if the indicated filter isn't writable ...
4491  */
4492 static int writable_filter(struct filter_entry *f)
4493 {
4494 	if (f->locked)
4495 		return -EPERM;
4496 	if (f->pending)
4497 		return -EBUSY;
4498 
4499 	return 0;
4500 }
4501 
4502 /* Delete the filter at the specified index (if valid).  This checks for all
4503  * of the common problems with doing this, like the filter being locked or
4504  * currently pending in another operation, etc.
4505  */
4506 static int delete_filter(struct adapter *adapter, unsigned int fidx)
4507 {
4508 	struct filter_entry *f;
4509 	int ret;
4510 
4511 	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
4512 		return -EINVAL;
4513 
4514 	f = &adapter->tids.ftid_tab[fidx];
4515 	ret = writable_filter(f);
4516 	if (ret)
4517 		return ret;
4518 	if (f->valid)
4519 		return del_filter_wr(adapter, fidx);
4520 
4521 	return 0;
4522 }
4523 
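/**
 *	cxgb4_create_server_filter - install a server hardware filter
 *	@dev: the net device for the port
 *	@stid: the server TID used to derive the filter index
 *	@sip: local IPv4 address to match, or 0 to match any address
 *	@sport: local TCP port to match
 *	@vlan: VLAN (not used when building the filter specification)
 *	@queue: ingress queue to which matching packets are steered
 *	@port: physical port to match when the PORT filter field is enabled
 *	@mask: physical port mask
 *
 *	Build and send a Filter Work Request which steers incoming TCP
 *	connection requests for the given address/port to @queue.  The
 *	filter is marked locked so it can't be modified out from under us.
 *	Returns 0 on success or a negative errno.
 */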
4524 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4525 		__be32 sip, __be16 sport, __be16 vlan,
4526 		unsigned int queue, unsigned char port, unsigned char mask)
4527 {
4528 	int ret;
4529 	struct filter_entry *f;
4530 	struct adapter *adap;
4531 	int i;
4532 	u8 *val;
4533 
4534 	adap = netdev2adap(dev);
4535 
4536 	/* Adjust stid to correct filter index */
4537 	stid -= adap->tids.sftid_base;
4538 	stid += adap->tids.nftids;
4539 
4540 	/* Check to make sure the filter requested is writable ...
4541 	 */
4542 	f = &adap->tids.ftid_tab[stid];
4543 	ret = writable_filter(f);
4544 	if (ret)
4545 		return ret;
4546 
4547 	/* Clear out any old resources being used by the filter before
4548 	 * we start constructing the new filter.
4549 	 */
4550 	if (f->valid)
4551 		clear_filter(adap, f);
4552 
4553 	/* Clear out filter specifications */
4554 	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4555 	f->fs.val.lport = cpu_to_be16(sport);
4556 	f->fs.mask.lport  = ~0;
4557 	val = (u8 *)&sip;
4558 	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
4559 		for (i = 0; i < 4; i++) {
4560 			f->fs.val.lip[i] = val[i];
4561 			f->fs.mask.lip[i] = ~0;
4562 		}
4563 		if (adap->params.tp.vlan_pri_map & F_PORT) {
4564 			f->fs.val.iport = port;
4565 			f->fs.mask.iport = mask;
4566 		}
4567 	}
4568 
4569 	if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4570 		f->fs.val.proto = IPPROTO_TCP;
4571 		f->fs.mask.proto = ~0;
4572 	}
4573 
4574 	f->fs.dirsteer = 1;
4575 	f->fs.iq = queue;
4576 	/* Mark filter as locked */
4577 	f->locked = 1;
4578 	f->fs.rpttid = 1;
4579 
4580 	ret = set_filter_wr(adap, stid);
4581 	if (ret) {
4582 		clear_filter(adap, f);
4583 		return ret;
4584 	}
4585 
4586 	return 0;
4587 }
4588 EXPORT_SYMBOL(cxgb4_create_server_filter);
4589 
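/**
 *	cxgb4_remove_server_filter - remove a server hardware filter
 *	@dev: the net device for the port
 *	@stid: the server TID the filter was created with
 *	@queue: the ingress queue the filter steered to
 *	@ipv6: whether the filter is for IPv6
 *
 *	Unlock and delete the filter previously installed by
 *	cxgb4_create_server_filter().  Returns 0 on success or a negative
 *	errno.
 */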
4590 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4591 		unsigned int queue, bool ipv6)
4592 {
4593 	int ret;
4594 	struct filter_entry *f;
4595 	struct adapter *adap;
4596 
4597 	adap = netdev2adap(dev);
4598 
4599 	/* Adjust stid to correct filter index */
4600 	stid -= adap->tids.sftid_base;
4601 	stid += adap->tids.nftids;
4602 
4603 	f = &adap->tids.ftid_tab[stid];
4604 	/* Unlock the filter */
4605 	f->locked = 0;
4606 
4607 	ret = delete_filter(adap, stid);
4608 	if (ret)
4609 		return ret;
4610 
4611 	return 0;
4612 }
4613 EXPORT_SYMBOL(cxgb4_remove_server_filter);
4614 
4615 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4616 						struct rtnl_link_stats64 *ns)
4617 {
4618 	struct port_stats stats;
4619 	struct port_info *p = netdev_priv(dev);
4620 	struct adapter *adapter = p->adapter;
4621 
4622 	/* Block retrieving statistics during EEH error
4623 	 * recovery. Otherwise, the recovery might fail
4624 	 * and the PCI device will be removed permanently
4625 	 */
4626 	spin_lock(&adapter->stats_lock);
4627 	if (!netif_device_present(dev)) {
4628 		spin_unlock(&adapter->stats_lock);
4629 		return ns;
4630 	}
4631 	t4_get_port_stats(adapter, p->tx_chan, &stats);
4632 	spin_unlock(&adapter->stats_lock);
4633 
4634 	ns->tx_bytes   = stats.tx_octets;
4635 	ns->tx_packets = stats.tx_frames;
4636 	ns->rx_bytes   = stats.rx_octets;
4637 	ns->rx_packets = stats.rx_frames;
4638 	ns->multicast  = stats.rx_mcast_frames;
4639 
4640 	/* detailed rx_errors */
4641 	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4642 			       stats.rx_runt;
4643 	ns->rx_over_errors   = 0;
4644 	ns->rx_crc_errors    = stats.rx_fcs_err;
4645 	ns->rx_frame_errors  = stats.rx_symbol_err;
4646 	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
4647 			       stats.rx_ovflow2 + stats.rx_ovflow3 +
4648 			       stats.rx_trunc0 + stats.rx_trunc1 +
4649 			       stats.rx_trunc2 + stats.rx_trunc3;
4650 	ns->rx_missed_errors = 0;
4651 
4652 	/* detailed tx_errors */
4653 	ns->tx_aborted_errors   = 0;
4654 	ns->tx_carrier_errors   = 0;
4655 	ns->tx_fifo_errors      = 0;
4656 	ns->tx_heartbeat_errors = 0;
4657 	ns->tx_window_errors    = 0;
4658 
4659 	ns->tx_errors = stats.tx_error_frames;
4660 	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4661 		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4662 	return ns;
4663 }
4664 
4665 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4666 {
4667 	unsigned int mbox;
4668 	int ret = 0, prtad, devad;
4669 	struct port_info *pi = netdev_priv(dev);
4670 	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4671 
4672 	switch (cmd) {
4673 	case SIOCGMIIPHY:
4674 		if (pi->mdio_addr < 0)
4675 			return -EOPNOTSUPP;
4676 		data->phy_id = pi->mdio_addr;
4677 		break;
4678 	case SIOCGMIIREG:
4679 	case SIOCSMIIREG:
4680 		if (mdio_phy_id_is_c45(data->phy_id)) {
4681 			prtad = mdio_phy_id_prtad(data->phy_id);
4682 			devad = mdio_phy_id_devad(data->phy_id);
4683 		} else if (data->phy_id < 32) {
4684 			prtad = data->phy_id;
4685 			devad = 0;
4686 			data->reg_num &= 0x1f;
4687 		} else
4688 			return -EINVAL;
4689 
4690 		mbox = pi->adapter->fn;
4691 		if (cmd == SIOCGMIIREG)
4692 			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4693 					 data->reg_num, &data->val_out);
4694 		else
4695 			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4696 					 data->reg_num, data->val_in);
4697 		break;
4698 	default:
4699 		return -EOPNOTSUPP;
4700 	}
4701 	return ret;
4702 }
4703 
4704 static void cxgb_set_rxmode(struct net_device *dev)
4705 {
4706 	/* unfortunately we can't return errors to the stack */
4707 	set_rxmode(dev, -1, false);
4708 }
4709 
4710 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4711 {
4712 	int ret;
4713 	struct port_info *pi = netdev_priv(dev);
4714 
4715 	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
4716 		return -EINVAL;
4717 	ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4718 			    -1, -1, -1, true);
4719 	if (!ret)
4720 		dev->mtu = new_mtu;
4721 	return ret;
4722 }
4723 
4724 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4725 {
4726 	int ret;
4727 	struct sockaddr *addr = p;
4728 	struct port_info *pi = netdev_priv(dev);
4729 
4730 	if (!is_valid_ether_addr(addr->sa_data))
4731 		return -EADDRNOTAVAIL;
4732 
4733 	ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4734 			    pi->xact_addr_filt, addr->sa_data, true, true);
4735 	if (ret < 0)
4736 		return ret;
4737 
4738 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4739 	pi->xact_addr_filt = ret;
4740 	return 0;
4741 }
4742 
4743 #ifdef CONFIG_NET_POLL_CONTROLLER
4744 static void cxgb_netpoll(struct net_device *dev)
4745 {
4746 	struct port_info *pi = netdev_priv(dev);
4747 	struct adapter *adap = pi->adapter;
4748 
4749 	if (adap->flags & USING_MSIX) {
4750 		int i;
4751 		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4752 
4753 		for (i = pi->nqsets; i; i--, rx++)
4754 			t4_sge_intr_msix(0, &rx->rspq);
4755 	} else
4756 		t4_intr_handler(adap)(0, adap);
4757 }
4758 #endif
4759 
4760 static const struct net_device_ops cxgb4_netdev_ops = {
4761 	.ndo_open             = cxgb_open,
4762 	.ndo_stop             = cxgb_close,
4763 	.ndo_start_xmit       = t4_eth_xmit,
4764 	.ndo_select_queue     =	cxgb_select_queue,
4765 	.ndo_get_stats64      = cxgb_get_stats,
4766 	.ndo_set_rx_mode      = cxgb_set_rxmode,
4767 	.ndo_set_mac_address  = cxgb_set_mac_addr,
4768 	.ndo_set_features     = cxgb_set_features,
4769 	.ndo_validate_addr    = eth_validate_addr,
4770 	.ndo_do_ioctl         = cxgb_ioctl,
4771 	.ndo_change_mtu       = cxgb_change_mtu,
4772 #ifdef CONFIG_NET_POLL_CONTROLLER
4773 	.ndo_poll_controller  = cxgb_netpoll,
4774 #endif
4775 };
4776 
4777 void t4_fatal_err(struct adapter *adap)
4778 {
4779 	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4780 	t4_intr_disable(adap);
4781 	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4782 }
4783 
4784 /* Return the specified PCI-E Configuration Space register from our Physical
4785  * Function.  We try first via a Firmware LDST Command since we prefer to let
4786  * the firmware own all of these registers, but if that fails we go for it
4787  * directly ourselves.
4788  */
4789 static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
4790 {
4791 	struct fw_ldst_cmd ldst_cmd;
4792 	u32 val;
4793 	int ret;
4794 
4795 	/* Construct and send the Firmware LDST Command to retrieve the
4796 	 * specified PCI-E Configuration Space register.
4797 	 */
4798 	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
4799 	ldst_cmd.op_to_addrspace =
4800 		htonl(FW_CMD_OP_V(FW_LDST_CMD) |
4801 		      FW_CMD_REQUEST_F |
4802 		      FW_CMD_READ_F |
4803 		      FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
4804 	ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
4805 	ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
4806 	ldst_cmd.u.pcie.ctrl_to_fn =
4807 		(FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn));
4808 	ldst_cmd.u.pcie.r = reg;
4809 	ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
4810 			 &ldst_cmd);
4811 
4812 	/* If the LDST Command succeeded, extract the returned register
4813 	 * value.  Otherwise read it directly ourselves.
4814 	 */
4815 	if (ret == 0)
4816 		val = ntohl(ldst_cmd.u.pcie.data[0]);
4817 	else
4818 		t4_hw_pci_read_cfg4(adap, reg, &val);
4819 
4820 	return val;
4821 }
4822 
4823 static void setup_memwin(struct adapter *adap)
4824 {
4825 	u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
4826 
4827 	if (is_t4(adap->params.chip)) {
4828 		u32 bar0;
4829 
4830 		/* Truncation intentional: we only read the bottom 32-bits of
4831 		 * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
4832 		 * mechanism to read BAR0 instead of using
4833 		 * pci_resource_start() because we could be operating from
4834 		 * within a Virtual Machine which is trapping our accesses to
4835 		 * our Configuration Space and we need to set up the PCI-E
4836 		 * Memory Window decoders with the actual addresses which will
4837 		 * be coming across the PCI-E link.
4838 		 */
4839 		bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
4840 		bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
4841 		adap->t4_bar0 = bar0;
4842 
4843 		mem_win0_base = bar0 + MEMWIN0_BASE;
4844 		mem_win1_base = bar0 + MEMWIN1_BASE;
4845 		mem_win2_base = bar0 + MEMWIN2_BASE;
4846 		mem_win2_aperture = MEMWIN2_APERTURE;
4847 	} else {
4848 		/* For T5, only relative offset inside the PCIe BAR is passed */
4849 		mem_win0_base = MEMWIN0_BASE;
4850 		mem_win1_base = MEMWIN1_BASE;
4851 		mem_win2_base = MEMWIN2_BASE_T5;
4852 		mem_win2_aperture = MEMWIN2_APERTURE_T5;
4853 	}
4854 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4855 		     mem_win0_base | BIR(0) |
4856 		     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4857 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4858 		     mem_win1_base | BIR(0) |
4859 		     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4860 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4861 		     mem_win2_base | BIR(0) |
4862 		     WINDOW(ilog2(mem_win2_aperture) - 10));
4863 	t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
4864 }
4865 
4866 static void setup_memwin_rdma(struct adapter *adap)
4867 {
4868 	if (adap->vres.ocq.size) {
4869 		u32 start;
4870 		unsigned int sz_kb;
4871 
4872 		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
4873 		start &= PCI_BASE_ADDRESS_MEM_MASK;
4874 		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4875 		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4876 		t4_write_reg(adap,
4877 			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4878 			     start | BIR(1) | WINDOW(ilog2(sz_kb)));
4879 		t4_write_reg(adap,
4880 			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4881 			     adap->vres.ocq.start);
4882 		t4_read_reg(adap,
4883 			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4884 	}
4885 }
4886 
4887 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4888 {
4889 	u32 v;
4890 	int ret;
4891 
4892 	/* get device capabilities */
4893 	memset(c, 0, sizeof(*c));
4894 	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4895 			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
4896 	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4897 	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
4898 	if (ret < 0)
4899 		return ret;
4900 
4901 	/* select capabilities we'll be using */
4902 	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4903 		if (!vf_acls)
4904 			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4905 		else
4906 			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4907 	} else if (vf_acls) {
4908 		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4909 		return ret;
4910 	}
4911 	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4912 			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
4913 	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
4914 	if (ret < 0)
4915 		return ret;
4916 
4917 	ret = t4_config_glbl_rss(adap, adap->fn,
4918 				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4919 				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
4920 				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
4921 	if (ret < 0)
4922 		return ret;
4923 
4924 	ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4925 			  0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
4926 	if (ret < 0)
4927 		return ret;
4928 
4929 	t4_sge_init(adap);
4930 
4931 	/* tweak some settings */
4932 	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
4933 	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
4934 	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
4935 	v = t4_read_reg(adap, TP_PIO_DATA);
4936 	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
4937 
4938 	/* first 4 Tx modulation queues point to consecutive Tx channels */
4939 	adap->params.tp.tx_modq_map = 0xE4;
4940 	t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
4941 		     V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
4942 
4943 	/* associate each Tx modulation queue with consecutive Tx channels */
4944 	v = 0x84218421;
4945 	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4946 			  &v, 1, A_TP_TX_SCHED_HDR);
4947 	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4948 			  &v, 1, A_TP_TX_SCHED_FIFO);
4949 	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4950 			  &v, 1, A_TP_TX_SCHED_PCMD);
4951 
4952 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4953 	if (is_offload(adap)) {
4954 		t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
4955 			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4956 			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4957 			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4958 			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4959 		t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
4960 			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4961 			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4962 			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4963 			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4964 	}
4965 
4966 	/* get basic stuff going */
4967 	return t4_early_init(adap, adap->fn);
4968 }
4969 
4970 /*
4971  * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
4972  */
4973 #define MAX_ATIDS 8192U
4974 
4975 /*
4976  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4977  *
4978  * If the firmware we're dealing with has Configuration File support, then
4979  * we use that to perform all configuration
4980  */
4981 
4982 /*
4983  * Tweak configuration based on module parameters, etc.  Most of these have
4984  * defaults assigned to them by Firmware Configuration Files (if we're using
4985  * them) but need to be explicitly set if we're using hard-coded
4986  * initialization.  But even in the case of using Firmware Configuration
4987  * Files, we'd like to expose the ability to change these via module
4988  * parameters so these are essentially common tweaks/settings for
4989  * Configuration Files and hard-coded initialization ...
4990  */
4991 static int adap_init0_tweaks(struct adapter *adapter)
4992 {
4993 	/*
4994 	 * Fix up various Host-Dependent Parameters like Page Size, Cache
4995 	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
4996 	 * 64B Cache Line Size ...
4997 	 */
4998 	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4999 
5000 	/*
5001 	 * Process module parameters which affect early initialization.
5002 	 */
5003 	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
5004 		dev_err(&adapter->pdev->dev,
5005 			"Ignoring illegal rx_dma_offset=%d, using 2\n",
5006 			rx_dma_offset);
5007 		rx_dma_offset = 2;
5008 	}
5009 	t4_set_reg_field(adapter, SGE_CONTROL,
5010 			 PKTSHIFT_MASK,
5011 			 PKTSHIFT(rx_dma_offset));
5012 
5013 	/*
5014 	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
5015 	 * adds the pseudo header itself.
5016 	 */
5017 	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
5018 			       CSUM_HAS_PSEUDO_HDR, 0);
5019 
5020 	return 0;
5021 }
5022 
5023 /*
5024  * Attempt to initialize the adapter via a Firmware Configuration File.
5025  */
5026 static int adap_init0_config(struct adapter *adapter, int reset)
5027 {
5028 	struct fw_caps_config_cmd caps_cmd;
5029 	const struct firmware *cf;
5030 	unsigned long mtype = 0, maddr = 0;
5031 	u32 finiver, finicsum, cfcsum;
5032 	int ret;
5033 	int config_issued = 0;
5034 	char *fw_config_file, fw_config_file_path[256];
5035 	char *config_name = NULL;
5036 
5037 	/*
5038 	 * Reset device if necessary.
5039 	 */
5040 	if (reset) {
5041 		ret = t4_fw_reset(adapter, adapter->mbox,
5042 				  PIORSTMODE | PIORST);
5043 		if (ret < 0)
5044 			goto bye;
5045 	}
5046 
5047 	/*
5048 	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
5049 	 * then use that.  Otherwise, use the configuration file stored
5050 	 * in the adapter flash ...
5051 	 */
5052 	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
5053 	case CHELSIO_T4:
5054 		fw_config_file = FW4_CFNAME;
5055 		break;
5056 	case CHELSIO_T5:
5057 		fw_config_file = FW5_CFNAME;
5058 		break;
5059 	default:
5060 		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
5061 		       adapter->pdev->device);
5062 		ret = -EINVAL;
5063 		goto bye;
5064 	}
5065 
5066 	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
5067 	if (ret < 0) {
5068 		config_name = "On FLASH";
5069 		mtype = FW_MEMTYPE_CF_FLASH;
5070 		maddr = t4_flash_cfg_addr(adapter);
5071 	} else {
5072 		u32 params[7], val[7];
5073 
5074 		sprintf(fw_config_file_path,
5075 			"/lib/firmware/%s", fw_config_file);
5076 		config_name = fw_config_file_path;
5077 
5078 		if (cf->size >= FLASH_CFG_MAX_SIZE)
5079 			ret = -ENOMEM;
5080 		else {
5081 			params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5082 			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
5083 			ret = t4_query_params(adapter, adapter->mbox,
5084 					      adapter->fn, 0, 1, params, val);
5085 			if (ret == 0) {
5086 				/*
5087 				 * For t4_memory_rw() below addresses and
5088 				 * sizes have to be in terms of multiples of 4
5089 				 * bytes.  So, if the Configuration File isn't
5090 				 * a multiple of 4 bytes in length we'll have
5091 				 * to write that out separately since we can't
5092 				 * guarantee that the bytes following the
5093 				 * residual byte in the buffer returned by
5094 				 * request_firmware() are zeroed out ...
5095 				 */
5096 				size_t resid = cf->size & 0x3;
5097 				size_t size = cf->size & ~0x3;
5098 				__be32 *data = (__be32 *)cf->data;
5099 
5100 				mtype = FW_PARAMS_PARAM_Y_G(val[0]);
5101 				maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
5102 
5103 				spin_lock(&adapter->win0_lock);
5104 				ret = t4_memory_rw(adapter, 0, mtype, maddr,
5105 						   size, data, T4_MEMORY_WRITE);
5106 				if (ret == 0 && resid != 0) {
5107 					union {
5108 						__be32 word;
5109 						char buf[4];
5110 					} last;
5111 					int i;
5112 
5113 					last.word = data[size >> 2];
5114 					for (i = resid; i < 4; i++)
5115 						last.buf[i] = 0;
5116 					ret = t4_memory_rw(adapter, 0, mtype,
5117 							   maddr + size,
5118 							   4, &last.word,
5119 							   T4_MEMORY_WRITE);
5120 				}
5121 				spin_unlock(&adapter->win0_lock);
5122 			}
5123 		}
5124 
5125 		release_firmware(cf);
5126 		if (ret)
5127 			goto bye;
5128 	}
5129 
5130 	/*
5131 	 * Issue a Capability Configuration command to the firmware to get it
5132 	 * to parse the Configuration File.  We don't use t4_fw_config_file()
5133 	 * because we want the ability to modify various features after we've
5134 	 * processed the configuration file ...
5135 	 */
5136 	memset(&caps_cmd, 0, sizeof(caps_cmd));
5137 	caps_cmd.op_to_write =
5138 		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5139 		      FW_CMD_REQUEST_F |
5140 		      FW_CMD_READ_F);
5141 	caps_cmd.cfvalid_to_len16 =
5142 		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
5143 		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
5144 		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
5145 		      FW_LEN16(caps_cmd));
5146 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5147 			 &caps_cmd);
5148 
5149 	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
5150 	 * Configuration File in FLASH), our last gasp effort is to use the
5151 	 * Firmware Configuration File which is embedded in the firmware.  A
5152 	 * very few early versions of the firmware didn't have one embedded
5153 	 * but we can ignore those.
5154 	 */
5155 	if (ret == -ENOENT) {
5156 		memset(&caps_cmd, 0, sizeof(caps_cmd));
5157 		caps_cmd.op_to_write =
5158 			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5159 					FW_CMD_REQUEST_F |
5160 					FW_CMD_READ_F);
5161 		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5162 		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
5163 				sizeof(caps_cmd), &caps_cmd);
5164 		config_name = "Firmware Default";
5165 	}
5166 
5167 	config_issued = 1;
5168 	if (ret < 0)
5169 		goto bye;
5170 
5171 	finiver = ntohl(caps_cmd.finiver);
5172 	finicsum = ntohl(caps_cmd.finicsum);
5173 	cfcsum = ntohl(caps_cmd.cfcsum);
5174 	if (finicsum != cfcsum)
5175 		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
5176 			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
5177 			 finicsum, cfcsum);
5178 
5179 	/*
5180 	 * And now tell the firmware to use the configuration we just loaded.
5181 	 */
5182 	caps_cmd.op_to_write =
5183 		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5184 		      FW_CMD_REQUEST_F |
5185 		      FW_CMD_WRITE_F);
5186 	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5187 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5188 			 NULL);
5189 	if (ret < 0)
5190 		goto bye;
5191 
5192 	/*
5193 	 * Tweak configuration based on system architecture, module
5194 	 * parameters, etc.
5195 	 */
5196 	ret = adap_init0_tweaks(adapter);
5197 	if (ret < 0)
5198 		goto bye;
5199 
5200 	/*
5201 	 * And finally tell the firmware to initialize itself using the
5202 	 * parameters from the Configuration File.
5203 	 */
5204 	ret = t4_fw_initialize(adapter, adapter->mbox);
5205 	if (ret < 0)
5206 		goto bye;
5207 
5208 	/*
5209 	 * Return successfully and note that we're operating with parameters
5210 	 * supplied via the Firmware Configuration File rather than from
5211 	 * hard-wired initialization constants buried in the driver.
5212 	 */
5213 	adapter->flags |= USING_SOFT_PARAMS;
5214 	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
5215 		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
5216 		 config_name, finiver, cfcsum);
5217 	return 0;
5218 
5219 	/*
5220 	 * Something bad happened.  Return the error ...  (If the "error"
5221 	 * is that there's no Configuration File on the adapter we don't
5222 	 * want to issue a warning since this is fairly common.)
5223 	 */
5224 bye:
5225 	if (config_issued && ret != -ENOENT)
5226 		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
5227 			 config_name, -ret);
5228 	return ret;
5229 }
5230 
5231 /*
5232  * Attempt to initialize the adapter via hard-coded, driver supplied
5233  * parameters ...
5234  */
5235 static int adap_init0_no_config(struct adapter *adapter, int reset)
5236 {
5237 	struct sge *s = &adapter->sge;
5238 	struct fw_caps_config_cmd caps_cmd;
5239 	u32 v;
5240 	int i, ret;
5241 
5242 	/*
5243 	 * Reset device if necessary
5244 	 */
5245 	if (reset) {
5246 		ret = t4_fw_reset(adapter, adapter->mbox,
5247 				  PIORSTMODE | PIORST);
5248 		if (ret < 0)
5249 			goto bye;
5250 	}
5251 
5252 	/*
5253 	 * Get device capabilities and select which we'll be using.
5254 	 */
5255 	memset(&caps_cmd, 0, sizeof(caps_cmd));
5256 	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5257 				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
5258 	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5259 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5260 			 &caps_cmd);
5261 	if (ret < 0)
5262 		goto bye;
5263 
5264 	if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5265 		if (!vf_acls)
5266 			caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5267 		else
5268 			caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5269 	} else if (vf_acls) {
5270 		dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
5271 		goto bye;
5272 	}
5273 	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5274 			      FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
5275 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5276 			 NULL);
5277 	if (ret < 0)
5278 		goto bye;
5279 
5280 	/*
5281 	 * Tweak configuration based on system architecture, module
5282 	 * parameters, etc.
5283 	 */
5284 	ret = adap_init0_tweaks(adapter);
5285 	if (ret < 0)
5286 		goto bye;
5287 
5288 	/*
5289 	 * Select RSS Global Mode we want to use.  We use "Basic Virtual"
5290 	 * mode which maps each Virtual Interface to its own section of
5291 	 * the RSS Table and we turn on all map and hash enables ...
5292 	 */
5293 	adapter->flags |= RSS_TNLALLLOOKUP;
5294 	ret = t4_config_glbl_rss(adapter, adapter->mbox,
5295 				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5296 				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
5297 				 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F |
5298 				 ((adapter->flags & RSS_TNLALLLOOKUP) ?
5299 					FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F : 0));
5300 	if (ret < 0)
5301 		goto bye;
5302 
5303 	/*
5304 	 * Set up our own fundamental resource provisioning ...
5305 	 */
5306 	ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
5307 			  PFRES_NEQ, PFRES_NETHCTRL,
5308 			  PFRES_NIQFLINT, PFRES_NIQ,
5309 			  PFRES_TC, PFRES_NVI,
5310 			  FW_PFVF_CMD_CMASK_M,
5311 			  pfvfres_pmask(adapter, adapter->fn, 0),
5312 			  PFRES_NEXACTF,
5313 			  PFRES_R_CAPS, PFRES_WX_CAPS);
5314 	if (ret < 0)
5315 		goto bye;
5316 
5317 	/*
5318 	 * Perform low level SGE initialization.  We need to do this before we
5319 	 * send the firmware the INITIALIZE command because that will cause
5320 	 * any other PF Drivers which are waiting for the Master
5321 	 * Initialization to proceed forward.
5322 	 */
5323 	for (i = 0; i < SGE_NTIMERS - 1; i++)
5324 		s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
5325 	s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
5326 	s->counter_val[0] = 1;
5327 	for (i = 1; i < SGE_NCOUNTERS; i++)
5328 		s->counter_val[i] = min(intr_cnt[i - 1],
5329 					THRESHOLD_0_GET(THRESHOLD_0_MASK));
5330 	t4_sge_init(adapter);
5331 
5332 #ifdef CONFIG_PCI_IOV
5333 	/*
5334 	 * Provision resource limits for Virtual Functions.  We currently
5335 	 * grant them all the same static resource limits except for the Port
5336 	 * Access Rights Mask which we're assigning based on the PF.  All of
5337 	 * the static provisioning stuff for both the PF and VF really needs
5338 	 * to be managed in a persistent manner for each device which the
5339 	 * firmware controls.
5340 	 */
5341 	{
5342 		int pf, vf;
5343 
5344 		for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
5345 			if (num_vf[pf] <= 0)
5346 				continue;
5347 
5348 			/* VF numbering starts at 1! */
5349 			for (vf = 1; vf <= num_vf[pf]; vf++) {
5350 				ret = t4_cfg_pfvf(adapter, adapter->mbox,
5351 						  pf, vf,
5352 						  VFRES_NEQ, VFRES_NETHCTRL,
5353 						  VFRES_NIQFLINT, VFRES_NIQ,
5354 						  VFRES_TC, VFRES_NVI,
5355 						  FW_PFVF_CMD_CMASK_M,
5356 						  pfvfres_pmask(
5357 						  adapter, pf, vf),
5358 						  VFRES_NEXACTF,
5359 						  VFRES_R_CAPS, VFRES_WX_CAPS);
5360 				if (ret < 0)
5361 					dev_warn(adapter->pdev_dev,
5362 						 "failed to "\
5363 						 "provision pf/vf=%d/%d; "
5364 						 "err=%d\n", pf, vf, ret);
5365 			}
5366 		}
5367 	}
5368 #endif
5369 
5370 	/*
5371 	 * Set up the default filter mode.  Later we'll want to implement this
5372 	 * via a firmware command, etc. ...  This needs to be done before the
5373 	 * firmare initialization command ...  If the selected set of fields
5374 	 * firmware initialization command ...  If the selected set of fields
5375 	 * field selections will fit in the 36-bit budget.
5376 	 */
5377 	if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
5378 		int j, bits = 0;
5379 
5380 		for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
5381 			switch (tp_vlan_pri_map & (1 << j)) {
5382 			case 0:
5383 				/* compressed filter field not enabled */
5384 				break;
5385 			case FCOE_MASK:
5386 				bits +=  1;
5387 				break;
5388 			case PORT_MASK:
5389 				bits +=  3;
5390 				break;
5391 			case VNIC_ID_MASK:
5392 				bits += 17;
5393 				break;
5394 			case VLAN_MASK:
5395 				bits += 17;
5396 				break;
5397 			case TOS_MASK:
5398 				bits +=  8;
5399 				break;
5400 			case PROTOCOL_MASK:
5401 				bits +=  8;
5402 				break;
5403 			case ETHERTYPE_MASK:
5404 				bits += 16;
5405 				break;
5406 			case MACMATCH_MASK:
5407 				bits +=  9;
5408 				break;
5409 			case MPSHITTYPE_MASK:
5410 				bits +=  3;
5411 				break;
5412 			case FRAGMENTATION_MASK:
5413 				bits +=  1;
5414 				break;
5415 			}
5416 
5417 		if (bits > 36) {
5418 			dev_err(adapter->pdev_dev,
5419 				"tp_vlan_pri_map=%#x needs %d bits > 36; using %#x\n",
5420 				tp_vlan_pri_map, bits, TP_VLAN_PRI_MAP_DEFAULT);
5422 			tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5423 		}
5424 	}
5425 	v = tp_vlan_pri_map;
5426 	t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5427 			  &v, 1, TP_VLAN_PRI_MAP);
5428 
5429 	/*
5430 	 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in
5431 	 * order to support any of the compressed filter fields above.  Newer
5432 	 * versions of the firmware do this automatically but it doesn't hurt
5433 	 * to set it here.  Meanwhile, we do _not_ need to set Lookup Every
5434 	 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
5435 	 * since the firmware automatically turns this on and off when we have
5436 	 * a non-zero number of filters active (since it does have a
5437 	 * performance impact).
5438 	 */
5439 	if (tp_vlan_pri_map)
5440 		t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5441 				 FIVETUPLELOOKUP_MASK,
5442 				 FIVETUPLELOOKUP_MASK);
5443 
5444 	/*
5445 	 * Tweak some settings.
5446 	 */
5447 	t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5448 		     RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5449 		     PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5450 		     KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5451 
5452 	/*
5453 	 * Get basic stuff going by issuing the Firmware Initialize command.
5454 	 * Note that this _must_ be after all PFVF commands ...
5455 	 */
5456 	ret = t4_fw_initialize(adapter, adapter->mbox);
5457 	if (ret < 0)
5458 		goto bye;
5459 
5460 	/*
5461 	 * Return successfully!
5462 	 */
5463 	dev_info(adapter->pdev_dev, "Successfully configured using built-in driver parameters\n");
5465 	return 0;
5466 
5467 	/*
5468 	 * Something bad happened.  Return the error ...
5469 	 */
5470 bye:
5471 	return ret;
5472 }
5473 
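/*
 * Firmware information for each supported chip generation: the names of
 * the Configuration File and firmware image under /lib/firmware and the
 * firmware/interface versions this driver was compiled against.
 */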
5474 static struct fw_info fw_info_array[] = {
5475 	{
5476 		.chip = CHELSIO_T4,
5477 		.fs_name = FW4_CFNAME,
5478 		.fw_mod_name = FW4_FNAME,
5479 		.fw_hdr = {
5480 			.chip = FW_HDR_CHIP_T4,
5481 			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5482 			.intfver_nic = FW_INTFVER(T4, NIC),
5483 			.intfver_vnic = FW_INTFVER(T4, VNIC),
5484 			.intfver_ri = FW_INTFVER(T4, RI),
5485 			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
5486 			.intfver_fcoe = FW_INTFVER(T4, FCOE),
5487 		},
5488 	}, {
5489 		.chip = CHELSIO_T5,
5490 		.fs_name = FW5_CFNAME,
5491 		.fw_mod_name = FW5_FNAME,
5492 		.fw_hdr = {
5493 			.chip = FW_HDR_CHIP_T5,
5494 			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5495 			.intfver_nic = FW_INTFVER(T5, NIC),
5496 			.intfver_vnic = FW_INTFVER(T5, VNIC),
5497 			.intfver_ri = FW_INTFVER(T5, RI),
5498 			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
5499 			.intfver_fcoe = FW_INTFVER(T5, FCOE),
5500 		},
5501 	}
5502 };
5503 
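/*
 * Return the fw_info_array[] entry for the given chip generation, or NULL
 * if the chip isn't known to this driver.
 */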
5504 static struct fw_info *find_fw_info(int chip)
5505 {
5506 	int i;
5507 
5508 	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5509 		if (fw_info_array[i].chip == chip)
5510 			return &fw_info_array[i];
5511 	}
5512 	return NULL;
5513 }
5514 
5515 /*
5516  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5517  */
5518 static int adap_init0(struct adapter *adap)
5519 {
5520 	int ret;
5521 	u32 v, port_vec;
5522 	enum dev_state state;
5523 	u32 params[7], val[7];
5524 	struct fw_caps_config_cmd caps_cmd;
5525 	int reset = 1;
5526 
5527 	/* Contact FW, advertising Master capability */
5528 	ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
5529 	if (ret < 0) {
5530 		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
5531 			ret);
5532 		return ret;
5533 	}
5534 	if (ret == adap->mbox)
5535 		adap->flags |= MASTER_PF;
5536 
5537 	/*
5538 	 * If we're the Master PF Driver and the device is uninitialized,
5539 	 * then let's consider upgrading the firmware ...  (We always want
5540 	 * to check the firmware version number in order to A. get it for
5541 	 * later reporting and B. to warn if the currently loaded firmware
5542 	 * is excessively mismatched relative to the driver.)
5543 	 */
5544 	t4_get_fw_version(adap, &adap->params.fw_vers);
5545 	t4_get_tp_version(adap, &adap->params.tp_vers);
5546 	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
5547 		struct fw_info *fw_info;
5548 		struct fw_hdr *card_fw;
5549 		const struct firmware *fw;
5550 		const u8 *fw_data = NULL;
5551 		unsigned int fw_size = 0;
5552 
5553 		/* This is the firmware whose headers the driver was compiled
5554 		 * against
5555 		 */
5556 		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5557 		if (fw_info == NULL) {
5558 			dev_err(adap->pdev_dev,
5559 				"unable to get firmware info for chip %d.\n",
5560 				CHELSIO_CHIP_VERSION(adap->params.chip));
5561 			return -EINVAL;
5562 		}
5563 
5564 		/* allocate memory to read the header of the firmware on the
5565 		 * card
5566 		 */
5567 		card_fw = t4_alloc_mem(sizeof(*card_fw));
		if (!card_fw) {
			ret = -ENOMEM;
			goto bye;
		}
5568 
5569 		/* Get FW from /lib/firmware/ */
5570 		ret = request_firmware(&fw, fw_info->fw_mod_name,
5571 				       adap->pdev_dev);
5572 		if (ret < 0) {
5573 			dev_err(adap->pdev_dev,
5574 				"unable to load firmware image %s, error %d\n",
5575 				fw_info->fw_mod_name, ret);
5576 		} else {
5577 			fw_data = fw->data;
5578 			fw_size = fw->size;
5579 		}
5580 
5581 		/* upgrade FW logic */
5582 		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5583 				 state, &reset);
5584 
5585 		/* Cleaning up */
5586 		if (fw != NULL)
5587 			release_firmware(fw);
5588 		t4_free_mem(card_fw);
5589 
5590 		if (ret < 0)
5591 			goto bye;
5592 	}
5593 
5594 	/*
5595 	 * Grab VPD parameters.  This should be done after we establish a
5596 	 * connection to the firmware since some of the VPD parameters
5597 	 * (notably the Core Clock frequency) are retrieved via requests to
5598 	 * the firmware.  On the other hand, we need these fairly early on
5599 	 * so we do this right after getting ahold of the firmware.
5600 	 */
5601 	ret = get_vpd_params(adap, &adap->params.vpd);
5602 	if (ret < 0)
5603 		goto bye;
5604 
5605 	/*
5606 	 * Find out what ports are available to us.  Note that we need to do
5607 	 * this before calling adap_init0_no_config() since it needs nports
5608 	 * and portvec ...
5609 	 */
5610 	v =
5611 	    FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5612 	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
5613 	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
5614 	if (ret < 0)
5615 		goto bye;
5616 
5617 	adap->params.nports = hweight32(port_vec);
5618 	adap->params.portvec = port_vec;
5619 
5620 	/*
5621 	 * If the firmware is initialized already (and we're not forcing a
5622 	 * master initialization), note that we're living with existing
5623 	 * adapter parameters.  Otherwise, it's time to try initializing the
5624 	 * adapter ...
5625 	 */
5626 	if (state == DEV_STATE_INIT) {
5627 		dev_info(adap->pdev_dev, "Coming up as %s: Adapter already initialized\n",
5628 			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
5630 		adap->flags |= USING_SOFT_PARAMS;
5631 	} else {
5632 		dev_info(adap->pdev_dev, "Coming up as MASTER: Initializing adapter\n");
5633 		/*
5634 		 * If the firmware doesn't support Configuration
5635 		 * Files, warn the user.
5636 		 */
5637 		if (ret < 0)
5638 			dev_warn(adap->pdev_dev,
5639 				 "Firmware doesn't support configuration file.\n");
5641 		if (force_old_init)
5642 			ret = adap_init0_no_config(adap, reset);
5643 		else {
5644 			/*
5645 			 * Find out whether we're dealing with a version of
5646 			 * the firmware which has configuration file support.
5647 			 */
5648 			params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5649 				     FW_PARAMS_PARAM_X_V(
5650 					     FW_PARAMS_PARAM_DEV_CF));
5651 			ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5652 					      params, val);
5653 
5654 			/*
5655 			 * If the firmware doesn't support Configuration
5656 			 * Files, use the old Driver-based, hard-wired
5657 			 * initialization.  Otherwise, try using the
5658 			 * Configuration File support and fall back to the
5659 			 * Driver-based initialization if there's no
5660 			 * Configuration File found.
5661 			 */
5662 			if (ret < 0)
5663 				ret = adap_init0_no_config(adap, reset);
5664 			else {
5665 				/*
5666 				 * The firmware provides us with a memory
5667 				 * buffer where we can load a Configuration
5668 				 * File from the host if we want to override
5669 				 * the Configuration File in flash.
5670 				 */
5671 
5672 				ret = adap_init0_config(adap, reset);
5673 				if (ret == -ENOENT) {
5674 					dev_info(adap->pdev_dev,
5675 					    "No Configuration File present on adapter. Using hard-wired configuration parameters.\n");
5678 					ret = adap_init0_no_config(adap, reset);
5679 				}
5680 			}
5681 		}
5682 		if (ret < 0) {
5683 			dev_err(adap->pdev_dev,
5684 				"could not initialize adapter, error %d\n",
5685 				-ret);
5686 			goto bye;
5687 		}
5688 	}
5689 
5690 	/*
5691 	 * If we're living with non-hard-coded parameters (either from a
5692 	 * Firmware Configuration File or values programmed by a different PF
5693 	 * Driver), give the SGE code a chance to pull in anything that it
5694 	 * needs ...  Note that this must be called after we retrieve our VPD
5695 	 * parameters in order to know how to convert core ticks to seconds.
5696 	 */
5697 	if (adap->flags & USING_SOFT_PARAMS) {
5698 		ret = t4_sge_init(adap);
5699 		if (ret < 0)
5700 			goto bye;
5701 	}
5702 
5703 	if (is_bypass_device(adap->pdev->device))
5704 		adap->params.bypass = 1;
5705 
5706 	/*
5707 	 * Grab some of our basic fundamental operating parameters.
5708 	 */
5709 #define FW_PARAM_DEV(param) \
5710 	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
5711 	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
5712 
5713 #define FW_PARAM_PFVF(param) \
5714 	FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
5715 	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)|  \
5716 	FW_PARAMS_PARAM_Y_V(0) | \
5717 	FW_PARAMS_PARAM_Z_V(0)
5718 
5719 	params[0] = FW_PARAM_PFVF(EQ_START);
5720 	params[1] = FW_PARAM_PFVF(L2T_START);
5721 	params[2] = FW_PARAM_PFVF(L2T_END);
5722 	params[3] = FW_PARAM_PFVF(FILTER_START);
5723 	params[4] = FW_PARAM_PFVF(FILTER_END);
5724 	params[5] = FW_PARAM_PFVF(IQFLINT_START);
5725 	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
5726 	if (ret < 0)
5727 		goto bye;
5728 	adap->sge.egr_start = val[0];
5729 	adap->l2t_start = val[1];
5730 	adap->l2t_end = val[2];
5731 	adap->tids.ftid_base = val[3];
5732 	adap->tids.nftids = val[4] - val[3] + 1;
5733 	adap->sge.ingr_start = val[5];
5734 
5735 	/* query params related to active filter region */
5736 	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5737 	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5738 	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5739 	/* If an Active Filter region is provisioned, we can establish
5740 	 * offloaded connections through firmware work requests.
5741 	 */
5742 	if ((val[0] != val[1]) && (ret >= 0)) {
5743 		adap->flags |= FW_OFLD_CONN;
5744 		adap->tids.aftid_base = val[0];
5745 		adap->tids.aftid_end = val[1];
5746 	}
5747 
5748 	/* If we're running on newer firmware, let it know that we're
5749 	 * prepared to deal with encapsulated CPL messages.  Older
5750 	 * firmware won't understand this and we'll just get
5751 	 * unencapsulated messages ...
5752 	 */
5753 	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5754 	val[0] = 1;
5755 	(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5756 
5757 	/*
5758 	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5759 	 * capability.  Earlier versions of the firmware didn't have the
5760 	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5761 	 * permission to use ULPTX MEMWRITE DSGL.
5762 	 */
5763 	if (is_t4(adap->params.chip)) {
5764 		adap->params.ulptx_memwrite_dsgl = false;
5765 	} else {
5766 		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5767 		ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
5768 				      1, params, val);
5769 		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5770 	}
5771 
5772 	/*
5773 	 * Get device capabilities so we can determine what resources we need
5774 	 * to manage.
5775 	 */
5776 	memset(&caps_cmd, 0, sizeof(caps_cmd));
5777 	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5778 				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
5779 	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5780 	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5781 			 &caps_cmd);
5782 	if (ret < 0)
5783 		goto bye;
5784 
5785 	if (caps_cmd.ofldcaps) {
5786 		/* query offload-related parameters */
5787 		params[0] = FW_PARAM_DEV(NTID);
5788 		params[1] = FW_PARAM_PFVF(SERVER_START);
5789 		params[2] = FW_PARAM_PFVF(SERVER_END);
5790 		params[3] = FW_PARAM_PFVF(TDDP_START);
5791 		params[4] = FW_PARAM_PFVF(TDDP_END);
5792 		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5793 		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5794 				      params, val);
5795 		if (ret < 0)
5796 			goto bye;
5797 		adap->tids.ntids = val[0];
5798 		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5799 		adap->tids.stid_base = val[1];
5800 		adap->tids.nstids = val[2] - val[1] + 1;
5801 		/*
5802 		 * Set up the server filter region.  Divide the available filter
5803 		 * region into two parts: regular filters get 1/3rd and server
5804 		 * filters get 2/3rds.  This is only done when the workaround
5805 		 * path (FW_OFLD_CONN) is enabled.
5806 		 * 1. Regular filters.
5807 		 * 2. Server filters: special filters which are used to
5808 		 * redirect SYN packets to the offload queue.
5809 		 */
5810 		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5811 			adap->tids.sftid_base = adap->tids.ftid_base +
5812 					DIV_ROUND_UP(adap->tids.nftids, 3);
5813 			adap->tids.nsftids = adap->tids.nftids -
5814 					 DIV_ROUND_UP(adap->tids.nftids, 3);
5815 			adap->tids.nftids = adap->tids.sftid_base -
5816 						adap->tids.ftid_base;
5817 		}
5818 		adap->vres.ddp.start = val[3];
5819 		adap->vres.ddp.size = val[4] - val[3] + 1;
5820 		adap->params.ofldq_wr_cred = val[5];
5821 
5822 		adap->params.offload = 1;
5823 	}
5824 	if (caps_cmd.rdmacaps) {
5825 		params[0] = FW_PARAM_PFVF(STAG_START);
5826 		params[1] = FW_PARAM_PFVF(STAG_END);
5827 		params[2] = FW_PARAM_PFVF(RQ_START);
5828 		params[3] = FW_PARAM_PFVF(RQ_END);
5829 		params[4] = FW_PARAM_PFVF(PBL_START);
5830 		params[5] = FW_PARAM_PFVF(PBL_END);
5831 		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5832 				      params, val);
5833 		if (ret < 0)
5834 			goto bye;
5835 		adap->vres.stag.start = val[0];
5836 		adap->vres.stag.size = val[1] - val[0] + 1;
5837 		adap->vres.rq.start = val[2];
5838 		adap->vres.rq.size = val[3] - val[2] + 1;
5839 		adap->vres.pbl.start = val[4];
5840 		adap->vres.pbl.size = val[5] - val[4] + 1;
5841 
5842 		params[0] = FW_PARAM_PFVF(SQRQ_START);
5843 		params[1] = FW_PARAM_PFVF(SQRQ_END);
5844 		params[2] = FW_PARAM_PFVF(CQ_START);
5845 		params[3] = FW_PARAM_PFVF(CQ_END);
5846 		params[4] = FW_PARAM_PFVF(OCQ_START);
5847 		params[5] = FW_PARAM_PFVF(OCQ_END);
5848 		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
5849 				      val);
5850 		if (ret < 0)
5851 			goto bye;
5852 		adap->vres.qp.start = val[0];
5853 		adap->vres.qp.size = val[1] - val[0] + 1;
5854 		adap->vres.cq.start = val[2];
5855 		adap->vres.cq.size = val[3] - val[2] + 1;
5856 		adap->vres.ocq.start = val[4];
5857 		adap->vres.ocq.size = val[5] - val[4] + 1;
5858 
5859 		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
5860 		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
5861 		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
5862 				      val);
5863 		if (ret < 0) {
5864 			adap->params.max_ordird_qp = 8;
5865 			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
5866 			ret = 0;
5867 		} else {
5868 			adap->params.max_ordird_qp = val[0];
5869 			adap->params.max_ird_adapter = val[1];
5870 		}
5871 		dev_info(adap->pdev_dev,
5872 			 "max_ordird_qp %d max_ird_adapter %d\n",
5873 			 adap->params.max_ordird_qp,
5874 			 adap->params.max_ird_adapter);
5875 	}
5876 	if (caps_cmd.iscsicaps) {
5877 		params[0] = FW_PARAM_PFVF(ISCSI_START);
5878 		params[1] = FW_PARAM_PFVF(ISCSI_END);
5879 		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5880 				      params, val);
5881 		if (ret < 0)
5882 			goto bye;
5883 		adap->vres.iscsi.start = val[0];
5884 		adap->vres.iscsi.size = val[1] - val[0] + 1;
5885 	}
5886 #undef FW_PARAM_PFVF
5887 #undef FW_PARAM_DEV
5888 
5889 	/* The MTU/MSS Table is initialized by now, so load their values.  If
5890 	 * we're initializing the adapter, then we'll make any modifications
5891 	 * we want to the MTU/MSS Table and also initialize the congestion
5892 	 * parameters.
5893 	 */
5894 	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5895 	if (state != DEV_STATE_INIT) {
5896 		int i;
5897 
5898 		/* The default MTU Table contains values 1492 and 1500.
5899 		 * However, for TCP, it's better to have two values which are
5900 		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
5901 		 * This allows us to have a TCP Data Payload which is a
5902 		 * multiple of 8 regardless of what combination of TCP Options
5903 		 * are in use (always a multiple of 4 bytes) which is
5904 		 * important for performance reasons.  For instance, if no
5905 		 * options are in use, then we have a 20-byte IP header and a
5906 		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
5907 		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
5908 		 * which is not a multiple of 8.  So using an MSS of 1488 in
5909 		 * this case results in a TCP Data Payload of 1448 bytes which
5910 		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
5911 		 * Stamps have been negotiated, then an MTU of 1500 bytes
5912 		 * results in a TCP Data Payload of 1448 bytes which, as
5913 		 * above, is a multiple of 8 bytes ...
5914 		 */
5915 		for (i = 0; i < NMTUS; i++)
5916 			if (adap->params.mtus[i] == 1492) {
5917 				adap->params.mtus[i] = 1488;
5918 				break;
5919 			}
5920 
5921 		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5922 			     adap->params.b_wnd);
5923 	}
5924 	t4_init_sge_params(adap);
5925 	t4_init_tp_params(adap);
5926 	adap->flags |= FW_OK;
5927 	return 0;
5928 
5929 	/*
5930 	 * Something bad happened.  If a command timed out or failed with EIO,
5931 	 * the FW is not operating within its spec or something catastrophic
5932 	 * happened to the HW/FW; in that case stop issuing commands.
5933 	 */
5934 bye:
5935 	if (ret != -ETIMEDOUT && ret != -EIO)
5936 		t4_fw_bye(adap, adap->mbox);
5937 	return ret;
5938 }
5939 
5940 /* EEH callbacks */
5941 
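/*
 * error_detected() callback: quiesce the adapter when a PCI error is
 * reported.  We detach the network devices, shut the adapter down if it
 * was fully initialized, and disable the PCI device, then tell the EEH
 * core whether to attempt a slot reset or give up on the device.
 */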
5942 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5943 					 pci_channel_state_t state)
5944 {
5945 	int i;
5946 	struct adapter *adap = pci_get_drvdata(pdev);
5947 
5948 	if (!adap)
5949 		goto out;
5950 
5951 	rtnl_lock();
5952 	adap->flags &= ~FW_OK;
5953 	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5954 	spin_lock(&adap->stats_lock);
5955 	for_each_port(adap, i) {
5956 		struct net_device *dev = adap->port[i];
5957 
5958 		netif_device_detach(dev);
5959 		netif_carrier_off(dev);
5960 	}
5961 	spin_unlock(&adap->stats_lock);
5962 	if (adap->flags & FULL_INIT_DONE)
5963 		cxgb_down(adap);
5964 	rtnl_unlock();
5965 	if ((adap->flags & DEV_ENABLED)) {
5966 		pci_disable_device(pdev);
5967 		adap->flags &= ~DEV_ENABLED;
5968 	}
5969 out:	return state == pci_channel_io_perm_failure ?
5970 		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5971 }
5972 
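/*
 * slot_reset() callback: the slot has been reset, so re-enable and restore
 * the PCI device, re-establish contact with the firmware as Master,
 * re-allocate a Virtual Interface for each port, and bring the adapter
 * back up.
 */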
5973 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
5974 {
5975 	int i, ret;
5976 	struct fw_caps_config_cmd c;
5977 	struct adapter *adap = pci_get_drvdata(pdev);
5978 
5979 	if (!adap) {
5980 		pci_restore_state(pdev);
5981 		pci_save_state(pdev);
5982 		return PCI_ERS_RESULT_RECOVERED;
5983 	}
5984 
5985 	if (!(adap->flags & DEV_ENABLED)) {
5986 		if (pci_enable_device(pdev)) {
5987 			dev_err(&pdev->dev,
5988 				"Cannot reenable PCI device after reset\n");
5989 			return PCI_ERS_RESULT_DISCONNECT;
5990 		}
5991 		adap->flags |= DEV_ENABLED;
5992 	}
5993 
5994 	pci_set_master(pdev);
5995 	pci_restore_state(pdev);
5996 	pci_save_state(pdev);
5997 	pci_cleanup_aer_uncorrect_error_status(pdev);
5998 
5999 	if (t4_wait_dev_ready(adap->regs) < 0)
6000 		return PCI_ERS_RESULT_DISCONNECT;
6001 	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
6002 		return PCI_ERS_RESULT_DISCONNECT;
6003 	adap->flags |= FW_OK;
6004 	if (adap_init1(adap, &c))
6005 		return PCI_ERS_RESULT_DISCONNECT;
6006 
6007 	for_each_port(adap, i) {
6008 		struct port_info *p = adap2pinfo(adap, i);
6009 
6010 		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
6011 				  NULL, NULL);
6012 		if (ret < 0)
6013 			return PCI_ERS_RESULT_DISCONNECT;
6014 		p->viid = ret;
6015 		p->xact_addr_filt = -1;
6016 	}
6017 
6018 	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
6019 		     adap->params.b_wnd);
6020 	setup_memwin(adap);
6021 	if (cxgb_up(adap))
6022 		return PCI_ERS_RESULT_DISCONNECT;
6023 	return PCI_ERS_RESULT_RECOVERED;
6024 }
6025 
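/*
 * resume() callback: recovery is complete, so restart the links and Rx
 * modes of any interfaces that were running and re-attach the net devices.
 */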
6026 static void eeh_resume(struct pci_dev *pdev)
6027 {
6028 	int i;
6029 	struct adapter *adap = pci_get_drvdata(pdev);
6030 
6031 	if (!adap)
6032 		return;
6033 
6034 	rtnl_lock();
6035 	for_each_port(adap, i) {
6036 		struct net_device *dev = adap->port[i];
6037 
6038 		if (netif_running(dev)) {
6039 			link_start(dev);
6040 			cxgb_set_rxmode(dev);
6041 		}
6042 		netif_device_attach(dev);
6043 	}
6044 	rtnl_unlock();
6045 }
6046 
6047 static const struct pci_error_handlers cxgb4_eeh = {
6048 	.error_detected = eeh_err_detected,
6049 	.slot_reset     = eeh_slot_reset,
6050 	.resume         = eeh_resume,
6051 };
6052 
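/* Return true if the port's link supports 10Gb/s or 40Gb/s speeds. */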
6053 static inline bool is_x_10g_port(const struct link_config *lc)
6054 {
6055 	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
6056 	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
6057 }
6058 
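/*
 * Fill in the basic parameters of a response queue: owning adapter,
 * interrupt hold-off time and packet count, queue size, and entry size.
 */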
6059 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
6060 			     unsigned int us, unsigned int cnt,
6061 			     unsigned int size, unsigned int iqe_size)
6062 {
6063 	q->adap = adap;
6064 	set_rspq_intr_params(q, us, cnt);
6065 	q->iqe_len = iqe_size;
6066 	q->size = size;
6067 }
6068 
6069 /*
6070  * Perform default configuration of DMA queues depending on the number and type
6071  * of ports we found and the number of available CPUs.  Most settings can be
6072  * modified by the admin prior to actual use.
6073  */
6074 static void cfg_queues(struct adapter *adap)
6075 {
6076 	struct sge *s = &adap->sge;
6077 	int i, n10g = 0, qidx = 0;
6078 #ifndef CONFIG_CHELSIO_T4_DCB
6079 	int q10g = 0;
6080 #endif
6081 	int ciq_size;
6082 
6083 	for_each_port(adap, i)
6084 		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
6085 #ifdef CONFIG_CHELSIO_T4_DCB
6086 	/* For Data Center Bridging support we need to be able to support up
6087 	 * to 8 Traffic Priorities; each of which will be assigned to its
6088 	 * own TX Queue in order to prevent Head-Of-Line Blocking.
6089 	 */
6090 	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
6091 		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
6092 			MAX_ETH_QSETS, adap->params.nports * 8);
6093 		BUG_ON(1);
6094 	}
6095 
6096 	for_each_port(adap, i) {
6097 		struct port_info *pi = adap2pinfo(adap, i);
6098 
6099 		pi->first_qset = qidx;
6100 		pi->nqsets = 8;
6101 		qidx += pi->nqsets;
6102 	}
6103 #else /* !CONFIG_CHELSIO_T4_DCB */
6104 	/*
6105 	 * We default to one queue set per non-10G port and up to the default
6106 	 * number of RSS queues (roughly one per CPU core) per 10G port.
6107 	 */
6108 	if (n10g)
6109 		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
6110 	if (q10g > netif_get_num_default_rss_queues())
6111 		q10g = netif_get_num_default_rss_queues();
6112 
6113 	for_each_port(adap, i) {
6114 		struct port_info *pi = adap2pinfo(adap, i);
6115 
6116 		pi->first_qset = qidx;
6117 		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
6118 		qidx += pi->nqsets;
6119 	}
6120 #endif /* !CONFIG_CHELSIO_T4_DCB */
6121 
6122 	s->ethqsets = qidx;
6123 	s->max_ethqsets = qidx;   /* MSI-X may lower it later */
6124 
6125 	if (is_offload(adap)) {
6126 		/*
6127 		 * For offload we use 1 queue/channel if all ports are up to 1G,
6128 		 * otherwise we divide all available queues amongst the channels
6129 		 * capped by the number of available cores.
6130 		 */
6131 		if (n10g) {
6132 			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
6133 				  num_online_cpus());
6134 			s->ofldqsets = roundup(i, adap->params.nports);
6135 		} else
6136 			s->ofldqsets = adap->params.nports;
6137 		/* For RDMA one Rx queue per channel suffices */
6138 		s->rdmaqs = adap->params.nports;
6139 		s->rdmaciqs = adap->params.nports;
6140 	}
6141 
6142 	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
6143 		struct sge_eth_rxq *r = &s->ethrxq[i];
6144 
6145 		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
6146 		r->fl.size = 72;
6147 	}
6148 
6149 	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
6150 		s->ethtxq[i].q.size = 1024;
6151 
6152 	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
6153 		s->ctrlq[i].q.size = 512;
6154 
6155 	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
6156 		s->ofldtxq[i].q.size = 1024;
6157 
6158 	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
6159 		struct sge_ofld_rxq *r = &s->ofldrxq[i];
6160 
6161 		init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
6162 		r->rspq.uld = CXGB4_ULD_ISCSI;
6163 		r->fl.size = 72;
6164 	}
6165 
6166 	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
6167 		struct sge_ofld_rxq *r = &s->rdmarxq[i];
6168 
6169 		init_rspq(adap, &r->rspq, 5, 1, 511, 64);
6170 		r->rspq.uld = CXGB4_ULD_RDMA;
6171 		r->fl.size = 72;
6172 	}
6173 
6174 	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
6175 	if (ciq_size > SGE_MAX_IQ_SIZE) {
6176 		CH_WARN(adap, "CIQ size too small for available IQs\n");
6177 		ciq_size = SGE_MAX_IQ_SIZE;
6178 	}
6179 
6180 	for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
6181 		struct sge_ofld_rxq *r = &s->rdmaciq[i];
6182 
6183 		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
6184 		r->rspq.uld = CXGB4_ULD_RDMA;
6185 	}
6186 
6187 	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
6188 	init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
6189 }
6190 
6191 /*
6192  * Reduce the number of Ethernet queues across all ports to at most n.
6193  * n provides at least one queue per port.
6194  */
6195 static void reduce_ethqs(struct adapter *adap, int n)
6196 {
6197 	int i;
6198 	struct port_info *pi;
6199 
6200 	while (n < adap->sge.ethqsets)
6201 		for_each_port(adap, i) {
6202 			pi = adap2pinfo(adap, i);
6203 			if (pi->nqsets > 1) {
6204 				pi->nqsets--;
6205 				adap->sge.ethqsets--;
6206 				if (adap->sge.ethqsets <= n)
6207 					break;
6208 			}
6209 		}
6210 
6211 	n = 0;
6212 	for_each_port(adap, i) {
6213 		pi = adap2pinfo(adap, i);
6214 		pi->first_qset = n;
6215 		n += pi->nqsets;
6216 	}
6217 }
6218 
6219 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
6220 #define EXTRA_VECS 2
6221 
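/*
 * Request MSI-X vectors: we ask for one vector per Ethernet queue set plus
 * the offload/RDMA queues and EXTRA_VECS, but will accept as few as the
 * per-port minimum.  If we get fewer vectors than we wanted, the Ethernet
 * and offload queue counts are scaled back to match.
 */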
6222 static int enable_msix(struct adapter *adap)
6223 {
6224 	int ofld_need = 0;
6225 	int i, want, need;
6226 	struct sge *s = &adap->sge;
6227 	unsigned int nchan = adap->params.nports;
6228 	struct msix_entry entries[MAX_INGQ + 1];
6229 
6230 	for (i = 0; i < ARRAY_SIZE(entries); ++i)
6231 		entries[i].entry = i;
6232 
6233 	want = s->max_ethqsets + EXTRA_VECS;
6234 	if (is_offload(adap)) {
6235 		want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
6236 		/* need nchan for each possible ULD */
6237 		ofld_need = 3 * nchan;
6238 	}
6239 #ifdef CONFIG_CHELSIO_T4_DCB
6240 	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
6241 	 * each port.
6242 	 */
6243 	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
6244 #else
6245 	need = adap->params.nports + EXTRA_VECS + ofld_need;
6246 #endif
6247 	want = pci_enable_msix_range(adap->pdev, entries, need, want);
6248 	if (want < 0)
6249 		return want;
6250 
6251 	/*
6252 	 * Distribute available vectors to the various queue groups.
6253 	 * Every group gets its minimum requirement and NIC gets top
6254 	 * priority for leftovers.
6255 	 */
6256 	i = want - EXTRA_VECS - ofld_need;
6257 	if (i < s->max_ethqsets) {
6258 		s->max_ethqsets = i;
6259 		if (i < s->ethqsets)
6260 			reduce_ethqs(adap, i);
6261 	}
6262 	if (is_offload(adap)) {
6263 		i = want - EXTRA_VECS - s->max_ethqsets;
6264 		i -= ofld_need - nchan;
6265 		s->ofldqsets = (i / nchan) * nchan;  /* round down */
6266 	}
6267 	for (i = 0; i < want; ++i)
6268 		adap->msix_info[i].vec = entries[i].vector;
6269 
6270 	return 0;
6271 }
6272 
6273 #undef EXTRA_VECS
6274 
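/*
 * Allocate each port's RSS indirection table and initialize it with the
 * default spreading of flows across that port's queue sets.
 */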
6275 static int init_rss(struct adapter *adap)
6276 {
6277 	unsigned int i, j;
6278 
6279 	for_each_port(adap, i) {
6280 		struct port_info *pi = adap2pinfo(adap, i);
6281 
6282 		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
6283 		if (!pi->rss)
6284 			return -ENOMEM;
6285 		for (j = 0; j < pi->rss_size; j++)
6286 			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
6287 	}
6288 	return 0;
6289 }
6290 
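/*
 * Log a one-line summary for a port: adapter name, supported link speeds,
 * PCIe link width/speed, interrupt mode, and the adapter's serial and part
 * numbers.
 */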
6291 static void print_port_info(const struct net_device *dev)
6292 {
6293 	char buf[80];
6294 	char *bufp = buf;
6295 	const char *spd = "";
6296 	const struct port_info *pi = netdev_priv(dev);
6297 	const struct adapter *adap = pi->adapter;
6298 
6299 	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
6300 		spd = " 2.5 GT/s";
6301 	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
6302 		spd = " 5 GT/s";
6303 	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
6304 		spd = " 8 GT/s";
6305 
6306 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
6307 		bufp += sprintf(bufp, "100/");
6308 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
6309 		bufp += sprintf(bufp, "1000/");
6310 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
6311 		bufp += sprintf(bufp, "10G/");
6312 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
6313 		bufp += sprintf(bufp, "40G/");
6314 	if (bufp != buf)
6315 		--bufp;
6316 	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
6317 
6318 	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
6319 		    adap->params.vpd.id,
6320 		    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
6321 		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
6322 		    (adap->flags & USING_MSIX) ? " MSI-X" :
6323 		    (adap->flags & USING_MSI) ? " MSI" : "");
6324 	netdev_info(dev, "S/N: %s, P/N: %s\n",
6325 		    adap->params.vpd.sn, adap->params.vpd.pn);
6326 }
6327 
6328 static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
6329 {
6330 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
6331 }
6332 
6333 /*
6334  * Free the following resources:
6335  * - memory used for tables
6336  * - MSI/MSI-X
6337  * - net devices
6338  * - resources FW is holding for us
6339  */
6340 static void free_some_resources(struct adapter *adapter)
6341 {
6342 	unsigned int i;
6343 
6344 	t4_free_mem(adapter->l2t);
6345 	t4_free_mem(adapter->tids.tid_tab);
6346 	disable_msi(adapter);
6347 
6348 	for_each_port(adapter, i)
6349 		if (adapter->port[i]) {
6350 			kfree(adap2pinfo(adapter, i)->rss);
6351 			free_netdev(adapter->port[i]);
6352 		}
6353 	if (adapter->flags & FW_OK)
6354 		t4_fw_bye(adapter, adapter->fn);
6355 }
6356 
6357 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
6358 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
6359 		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
6360 #define SEGMENT_SIZE 128
6361 
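/*
 * PCI probe routine: map the register BARs, contact (and possibly upgrade)
 * the firmware via adap_init0(), allocate the adapter and one net device
 * per port, configure queues and interrupts, and register the net devices.
 * Physical Functions other than the one we manage only get their SR-IOV
 * Virtual Functions instantiated.
 */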
6362 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6363 {
6364 	int func, i, err, s_qpp, qpp, num_seg;
6365 	struct port_info *pi;
6366 	bool highdma = false;
6367 	struct adapter *adapter = NULL;
6368 	void __iomem *regs;
6369 
6370 	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
6371 
6372 	err = pci_request_regions(pdev, KBUILD_MODNAME);
6373 	if (err) {
6374 		/* Just info, some other driver may have claimed the device. */
6375 		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6376 		return err;
6377 	}
6378 
6379 	err = pci_enable_device(pdev);
6380 	if (err) {
6381 		dev_err(&pdev->dev, "cannot enable PCI device\n");
6382 		goto out_release_regions;
6383 	}
6384 
6385 	regs = pci_ioremap_bar(pdev, 0);
6386 	if (!regs) {
6387 		dev_err(&pdev->dev, "cannot map device registers\n");
6388 		err = -ENOMEM;
6389 		goto out_disable_device;
6390 	}
6391 
6392 	err = t4_wait_dev_ready(regs);
6393 	if (err < 0)
6394 		goto out_unmap_bar0;
6395 
6396 	/* We control everything through one PF */
6397 	func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
6398 	if (func != ent->driver_data) {
6399 		iounmap(regs);
6400 		pci_disable_device(pdev);
6401 		pci_save_state(pdev);        /* to restore SR-IOV later */
6402 		goto sriov;
6403 	}
6404 
6405 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
6406 		highdma = true;
6407 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
6408 		if (err) {
6409 			dev_err(&pdev->dev,
6410 				"unable to obtain 64-bit DMA for coherent allocations\n");
6411 			goto out_unmap_bar0;
6412 		}
6413 	} else {
6414 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6415 		if (err) {
6416 			dev_err(&pdev->dev, "no usable DMA configuration\n");
6417 			goto out_unmap_bar0;
6418 		}
6419 	}
6420 
6421 	pci_enable_pcie_error_reporting(pdev);
6422 	enable_pcie_relaxed_ordering(pdev);
6423 	pci_set_master(pdev);
6424 	pci_save_state(pdev);
6425 
6426 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6427 	if (!adapter) {
6428 		err = -ENOMEM;
6429 		goto out_unmap_bar0;
6430 	}
6431 
6432 	adapter->workq = create_singlethread_workqueue("cxgb4");
6433 	if (!adapter->workq) {
6434 		err = -ENOMEM;
6435 		goto out_free_adapter;
6436 	}
6437 
6438 	/* PCI device has been enabled */
6439 	adapter->flags |= DEV_ENABLED;
6440 
6441 	adapter->regs = regs;
6442 	adapter->pdev = pdev;
6443 	adapter->pdev_dev = &pdev->dev;
6444 	adapter->mbox = func;
6445 	adapter->fn = func;
6446 	adapter->msg_enable = dflt_msg_enable;
6447 	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6448 
6449 	spin_lock_init(&adapter->stats_lock);
6450 	spin_lock_init(&adapter->tid_release_lock);
6451 	spin_lock_init(&adapter->win0_lock);
6452 
6453 	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6454 	INIT_WORK(&adapter->db_full_task, process_db_full);
6455 	INIT_WORK(&adapter->db_drop_task, process_db_drop);
6456 
6457 	err = t4_prep_adapter(adapter);
6458 	if (err)
6459 		goto out_free_adapter;
6460 
6462 	if (!is_t4(adapter->params.chip)) {
6463 		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
6464 		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
6465 		      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
6466 		num_seg = PAGE_SIZE / SEGMENT_SIZE;
6467 
6468 		/* Each segment size is 128B. Write coalescing is enabled only
6469 		 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for
6470 		 * the queue is less than the number of segments that can be
6471 		 * accommodated in a page.
6472 		 */
6473 		if (qpp > num_seg) {
6474 			dev_err(&pdev->dev,
6475 				"Incorrect number of egress queues per page\n");
6476 			err = -EINVAL;
6477 			goto out_free_adapter;
6478 		}
6479 		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6480 					   pci_resource_len(pdev, 2));
6481 		if (!adapter->bar2) {
6482 			dev_err(&pdev->dev, "cannot map device bar2 region\n");
6483 			err = -ENOMEM;
6484 			goto out_free_adapter;
6485 		}
6486 	}
6487 
6488 	setup_memwin(adapter);
6489 	err = adap_init0(adapter);
6490 	setup_memwin_rdma(adapter);
6491 	if (err)
6492 		goto out_unmap_bar;
6493 
6494 	for_each_port(adapter, i) {
6495 		struct net_device *netdev;
6496 
6497 		netdev = alloc_etherdev_mq(sizeof(struct port_info),
6498 					   MAX_ETH_QSETS);
6499 		if (!netdev) {
6500 			err = -ENOMEM;
6501 			goto out_free_dev;
6502 		}
6503 
6504 		SET_NETDEV_DEV(netdev, &pdev->dev);
6505 
6506 		adapter->port[i] = netdev;
6507 		pi = netdev_priv(netdev);
6508 		pi->adapter = adapter;
6509 		pi->xact_addr_filt = -1;
6510 		pi->port_id = i;
6511 		netdev->irq = pdev->irq;
6512 
6513 		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6514 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6515 			NETIF_F_RXCSUM | NETIF_F_RXHASH |
6516 			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
6517 		if (highdma)
6518 			netdev->hw_features |= NETIF_F_HIGHDMA;
6519 		netdev->features |= netdev->hw_features;
6520 		netdev->vlan_features = netdev->features & VLAN_FEAT;
6521 
6522 		netdev->priv_flags |= IFF_UNICAST_FLT;
6523 
6524 		netdev->netdev_ops = &cxgb4_netdev_ops;
6525 #ifdef CONFIG_CHELSIO_T4_DCB
6526 		netdev->dcbnl_ops = &cxgb4_dcb_ops;
6527 		cxgb4_dcb_state_init(netdev);
6528 #endif
6529 		netdev->ethtool_ops = &cxgb_ethtool_ops;
6530 	}
6531 
6532 	pci_set_drvdata(pdev, adapter);
6533 
6534 	if (adapter->flags & FW_OK) {
6535 		err = t4_port_init(adapter, func, func, 0);
6536 		if (err)
6537 			goto out_free_dev;
6538 	}
6539 
6540 	/*
6541 	 * Configure queues and allocate tables now, they can be needed as
6542 	 * soon as the first register_netdev completes.
6543 	 */
6544 	cfg_queues(adapter);
6545 
6546 	adapter->l2t = t4_init_l2t();
6547 	if (!adapter->l2t) {
6548 		/* We tolerate a lack of L2T, giving up some functionality */
6549 		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6550 		adapter->params.offload = 0;
6551 	}
6552 
6553 	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
6554 		dev_warn(&pdev->dev,
6555 			 "could not allocate TID table, continuing\n");
6556 		adapter->params.offload = 0;
6557 	}
6558 
6559 	/* See what interrupts we'll be using */
6560 	if (msi > 1 && enable_msix(adapter) == 0)
6561 		adapter->flags |= USING_MSIX;
6562 	else if (msi > 0 && pci_enable_msi(pdev) == 0)
6563 		adapter->flags |= USING_MSI;
6564 
6565 	err = init_rss(adapter);
6566 	if (err)
6567 		goto out_free_dev;
6568 
6569 	/*
6570 	 * The card is now ready to go.  If any errors occur during device
6571 	 * registration we do not fail the whole card but rather proceed only
6572 	 * with the ports we manage to register successfully.  However we must
6573 	 * register at least one net device.
6574 	 */
6575 	for_each_port(adapter, i) {
6576 		pi = adap2pinfo(adapter, i);
6577 		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
6578 		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
6579 
6580 		err = register_netdev(adapter->port[i]);
6581 		if (err)
6582 			break;
6583 		adapter->chan_map[pi->tx_chan] = i;
6584 		print_port_info(adapter->port[i]);
6585 	}
6586 	if (i == 0) {
6587 		dev_err(&pdev->dev, "could not register any net devices\n");
6588 		goto out_free_dev;
6589 	}
6590 	if (err) {
6591 		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
6592 		err = 0;
6593 	}
6594 
6595 	if (cxgb4_debugfs_root) {
6596 		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
6597 							   cxgb4_debugfs_root);
6598 		setup_debugfs(adapter);
6599 	}
6600 
6601 	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6602 	pdev->needs_freset = 1;
6603 
6604 	if (is_offload(adapter))
6605 		attach_ulds(adapter);
6606 
6607 sriov:
6608 #ifdef CONFIG_PCI_IOV
6609 	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
6610 		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
6611 			dev_info(&pdev->dev,
6612 				 "instantiated %u virtual functions\n",
6613 				 num_vf[func]);
6614 #endif
6615 	return 0;
6616 
6617  out_free_dev:
6618 	free_some_resources(adapter);
6619  out_unmap_bar:
6620 	if (!is_t4(adapter->params.chip))
6621 		iounmap(adapter->bar2);
6622  out_free_adapter:
6623 	if (adapter->workq)
6624 		destroy_workqueue(adapter->workq);
6625 
6626 	kfree(adapter);
6627  out_unmap_bar0:
6628 	iounmap(regs);
6629  out_disable_device:
6630 	pci_disable_pcie_error_reporting(pdev);
6631 	pci_disable_device(pdev);
6632  out_release_regions:
6633 	pci_release_regions(pdev);
6634 	return err;
6635 }
6636 
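/*
 * PCI remove/shutdown routine: undo everything done in init_one() in
 * reverse order -- detach ULDs, unregister net devices, release filters,
 * and free the adapter's resources.
 */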
6637 static void remove_one(struct pci_dev *pdev)
6638 {
6639 	struct adapter *adapter = pci_get_drvdata(pdev);
6640 
6641 #ifdef CONFIG_PCI_IOV
6642 	pci_disable_sriov(pdev);
6643 
6644 #endif
6645 
6646 	if (adapter) {
6647 		int i;
6648 
6649 		/* Tear down per-adapter Work Queue first since it can contain
6650 		 * references to our adapter data structure.
6651 		 */
6652 		destroy_workqueue(adapter->workq);
6653 
6654 		if (is_offload(adapter))
6655 			detach_ulds(adapter);
6656 
6657 		for_each_port(adapter, i)
6658 			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
6659 				unregister_netdev(adapter->port[i]);
6660 
6661 		debugfs_remove_recursive(adapter->debugfs_root);
6662 
6663 		/* If we allocated filters, free up state associated with any
6664 		 * valid filters ...
6665 		 */
6666 		if (adapter->tids.ftid_tab) {
6667 			struct filter_entry *f = &adapter->tids.ftid_tab[0];
6668 			for (i = 0; i < (adapter->tids.nftids +
6669 					adapter->tids.nsftids); i++, f++)
6670 				if (f->valid)
6671 					clear_filter(adapter, f);
6672 		}
6673 
6674 		if (adapter->flags & FULL_INIT_DONE)
6675 			cxgb_down(adapter);
6676 
6677 		free_some_resources(adapter);
6678 		iounmap(adapter->regs);
6679 		if (!is_t4(adapter->params.chip))
6680 			iounmap(adapter->bar2);
6681 		pci_disable_pcie_error_reporting(pdev);
6682 		if ((adapter->flags & DEV_ENABLED)) {
6683 			pci_disable_device(pdev);
6684 			adapter->flags &= ~DEV_ENABLED;
6685 		}
6686 		pci_release_regions(pdev);
6687 		synchronize_rcu();
6688 		kfree(adapter);
6689 	} else
6690 		pci_release_regions(pdev);
6691 }
6692 
6693 static struct pci_driver cxgb4_driver = {
6694 	.name     = KBUILD_MODNAME,
6695 	.id_table = cxgb4_pci_tbl,
6696 	.probe    = init_one,
6697 	.remove   = remove_one,
6698 	.shutdown = remove_one,
6699 	.err_handler = &cxgb4_eeh,
6700 };
6701 
6702 static int __init cxgb4_init_module(void)
6703 {
6704 	int ret;
6705 
6706 	/* Debugfs support is optional, just warn if this fails */
6707 	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6708 	if (!cxgb4_debugfs_root)
6709 		pr_warn("could not create debugfs entry, continuing\n");
6710 
6711 	ret = pci_register_driver(&cxgb4_driver);
6712 	if (ret < 0)
6713 		debugfs_remove(cxgb4_debugfs_root);
6714 
6715 #if IS_ENABLED(CONFIG_IPV6)
6716 	register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6717 #endif
6718 
6719 	return ret;
6720 }
6721 
6722 static void __exit cxgb4_cleanup_module(void)
6723 {
6724 #if IS_ENABLED(CONFIG_IPV6)
6725 	unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6726 #endif
6727 	pci_unregister_driver(&cxgb4_driver);
6728 	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
6729 }
6730 
6731 module_init(cxgb4_init_module);
6732 module_exit(cxgb4_cleanup_module);
6733