xref: /linux/drivers/infiniband/hw/qib/qib_iba7322.c (revision 148f9bb87745ed45f7a11b2cbd3bc0f017d5d257)
1 /*
2  * Copyright (c) 2012 Intel Corporation.  All rights reserved.
3  * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 /*
35  * This file contains all of the code that is specific to the
36  * InfiniPath 7322 chip
37  */
38 
39 #include <linux/interrupt.h>
40 #include <linux/pci.h>
41 #include <linux/delay.h>
42 #include <linux/io.h>
43 #include <linux/jiffies.h>
44 #include <linux/module.h>
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_smi.h>
47 #ifdef CONFIG_INFINIBAND_QIB_DCA
48 #include <linux/dca.h>
49 #endif
50 
51 #include "qib.h"
52 #include "qib_7322_regs.h"
53 #include "qib_qsfp.h"
54 
55 #include "qib_mad.h"
56 #include "qib_verbs.h"
57 
58 #undef pr_fmt
59 #define pr_fmt(fmt) QIB_DRV_NAME " " fmt
60 
61 static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
62 static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
63 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
64 static irqreturn_t qib_7322intr(int irq, void *data);
65 static irqreturn_t qib_7322bufavail(int irq, void *data);
66 static irqreturn_t sdma_intr(int irq, void *data);
67 static irqreturn_t sdma_idle_intr(int irq, void *data);
68 static irqreturn_t sdma_progress_intr(int irq, void *data);
69 static irqreturn_t sdma_cleanup_intr(int irq, void *data);
70 static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
71 				  struct qib_ctxtdata *rcd);
72 static u8 qib_7322_phys_portstate(u64);
73 static u32 qib_7322_iblink_state(u64);
74 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
75 				   u16 linitcmd);
76 static void force_h1(struct qib_pportdata *);
77 static void adj_tx_serdes(struct qib_pportdata *);
78 static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
79 static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
80 
81 static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
82 static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
83 static void serdes_7322_los_enable(struct qib_pportdata *, int);
84 static int serdes_7322_init_old(struct qib_pportdata *);
85 static int serdes_7322_init_new(struct qib_pportdata *);
86 static void dump_sdma_7322_state(struct qib_pportdata *);
87 
88 #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
89 
90 /* LE2 serdes values for different cases */
91 #define LE2_DEFAULT 5
92 #define LE2_5m 4
93 #define LE2_QME 0
94 
95 /* Below is special-purpose, so only really works for the IB SerDes blocks. */
96 #define IBSD(hw_pidx) (hw_pidx + 2)
97 
98 /* these are variables for documentation and experimentation purposes */
99 static const unsigned rcv_int_timeout = 375;
100 static const unsigned rcv_int_count = 16;
101 static const unsigned sdma_idle_cnt = 64;
102 
103 /* Time to stop altering Rx Equalization parameters, after link up. */
104 #define RXEQ_DISABLE_MSECS 2500
105 
106 /*
107  * Number of VLs we are configured to use (to allow for more
108  * credits per vl, etc.)
109  */
110 ushort qib_num_cfg_vls = 2;
111 module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
112 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
113 
114 static ushort qib_chase = 1;
115 module_param_named(chase, qib_chase, ushort, S_IRUGO);
116 MODULE_PARM_DESC(chase, "Enable state chase handling");
117 
118 static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
119 module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
120 MODULE_PARM_DESC(long_attenuation, \
121 		 "attenuation cutoff (dB) for long copper cable setup");
122 
123 static ushort qib_singleport;
124 module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
125 MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
126 
127 static ushort qib_krcvq01_no_msi;
128 module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
129 MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
130 
131 /*
132  * Receive header queue sizes
133  */
134 static unsigned qib_rcvhdrcnt;
135 module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
136 MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
137 
138 static unsigned qib_rcvhdrsize;
139 module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
140 MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
141 
142 static unsigned qib_rcvhdrentsize;
143 module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
144 MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
145 
146 #define MAX_ATTEN_LEN 64 /* plenty for any real system */
147 /* for read back, default index is ~5m copper cable */
148 static char txselect_list[MAX_ATTEN_LEN] = "10";
149 static struct kparam_string kp_txselect = {
150 	.string = txselect_list,
151 	.maxlen = MAX_ATTEN_LEN
152 };
153 static int  setup_txselect(const char *, struct kernel_param *);
154 module_param_call(txselect, setup_txselect, param_get_string,
155 		  &kp_txselect, S_IWUSR | S_IRUGO);
156 MODULE_PARM_DESC(txselect, \
157 		 "Tx serdes indices (for no QSFP or invalid QSFP data)");
158 
159 #define BOARD_QME7342 5
160 #define BOARD_QMH7342 6
161 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
162 		    BOARD_QMH7342)
163 #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
164 		    BOARD_QME7342)
165 
166 #define KREG_IDX(regname)     (QIB_7322_##regname##_OFFS / sizeof(u64))
167 
168 #define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
169 
170 #define MASK_ACROSS(lsb, msb) \
171 	(((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
172 
173 #define SYM_RMASK(regname, fldname) ((u64)              \
174 	QIB_7322_##regname##_##fldname##_RMASK)
175 
176 #define SYM_MASK(regname, fldname) ((u64)               \
177 	QIB_7322_##regname##_##fldname##_RMASK <<       \
178 	 QIB_7322_##regname##_##fldname##_LSB)
179 
180 #define SYM_FIELD(value, regname, fldname) ((u64)	\
181 	(((value) >> SYM_LSB(regname, fldname)) &	\
182 	 SYM_RMASK(regname, fldname)))
183 
184 /* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
185 #define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
186 	(((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
187 
188 #define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
189 #define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
190 #define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
191 #define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
192 #define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
193 /* Below because most, but not all, fields of IntMask have that full suffix */
194 #define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
195 
196 
197 #define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
198 
199 /*
200  * the size bits give us 2^N, in KB units.  0 marks as invalid,
201  * and 7 is reserved.  We currently use only 2KB and 4KB
202  */
203 #define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
204 #define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
205 #define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
206 #define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
207 
208 #define SendIBSLIDAssignMask \
209 	QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
210 #define SendIBSLMCMask \
211 	QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
212 
213 #define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
214 #define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
215 #define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
216 #define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
217 #define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
218 #define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
219 
220 #define _QIB_GPIO_SDA_NUM 1
221 #define _QIB_GPIO_SCL_NUM 0
222 #define QIB_EEPROM_WEN_NUM 14
223 #define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
224 
225 /* HW counter clock is at 4nsec */
226 #define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
227 
228 /* full speed IB port 1 only */
229 #define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
230 #define PORT_SPD_CAP_SHIFT 3
231 
232 /* full speed featuremask, both ports */
233 #define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
234 
235 /*
236  * This file contains almost all the chip-specific register information and
237  * access functions for the FAKED QLogic InfiniPath 7322 PCI-Express chip.
238  */
239 
240 /* Use defines to tie machine-generated names to lower-case names */
241 #define kr_contextcnt KREG_IDX(ContextCnt)
242 #define kr_control KREG_IDX(Control)
243 #define kr_counterregbase KREG_IDX(CntrRegBase)
244 #define kr_errclear KREG_IDX(ErrClear)
245 #define kr_errmask KREG_IDX(ErrMask)
246 #define kr_errstatus KREG_IDX(ErrStatus)
247 #define kr_extctrl KREG_IDX(EXTCtrl)
248 #define kr_extstatus KREG_IDX(EXTStatus)
249 #define kr_gpio_clear KREG_IDX(GPIOClear)
250 #define kr_gpio_mask KREG_IDX(GPIOMask)
251 #define kr_gpio_out KREG_IDX(GPIOOut)
252 #define kr_gpio_status KREG_IDX(GPIOStatus)
253 #define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
254 #define kr_debugportval KREG_IDX(DebugPortValueReg)
255 #define kr_fmask KREG_IDX(feature_mask)
256 #define kr_act_fmask KREG_IDX(active_feature_mask)
257 #define kr_hwerrclear KREG_IDX(HwErrClear)
258 #define kr_hwerrmask KREG_IDX(HwErrMask)
259 #define kr_hwerrstatus KREG_IDX(HwErrStatus)
260 #define kr_intclear KREG_IDX(IntClear)
261 #define kr_intmask KREG_IDX(IntMask)
262 #define kr_intredirect KREG_IDX(IntRedirect0)
263 #define kr_intstatus KREG_IDX(IntStatus)
264 #define kr_pagealign KREG_IDX(PageAlign)
265 #define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
266 #define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
267 #define kr_rcvegrbase KREG_IDX(RcvEgrBase)
268 #define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
269 #define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
270 #define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
271 #define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
272 #define kr_rcvtidbase KREG_IDX(RcvTIDBase)
273 #define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
274 #define kr_revision KREG_IDX(Revision)
275 #define kr_scratch KREG_IDX(Scratch)
276 #define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
277 #define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
278 #define kr_sendctrl KREG_IDX(SendCtrl)
279 #define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
280 #define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
281 #define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
282 #define kr_sendpiobufbase KREG_IDX(SendBufBase)
283 #define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
284 #define kr_sendpiosize KREG_IDX(SendBufSize)
285 #define kr_sendregbase KREG_IDX(SendRegBase)
286 #define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
287 #define kr_userregbase KREG_IDX(UserRegBase)
288 #define kr_intgranted KREG_IDX(Int_Granted)
289 #define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
290 #define kr_intblocked KREG_IDX(IntBlocked)
291 #define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
292 
293 /*
294  * per-port kernel registers.  Access only with qib_read_kreg_port()
295  * or qib_write_kreg_port()
296  */
297 #define krp_errclear KREG_IBPORT_IDX(ErrClear)
298 #define krp_errmask KREG_IBPORT_IDX(ErrMask)
299 #define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
300 #define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
301 #define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
302 #define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
303 #define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
304 #define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
305 #define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
306 #define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
307 #define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
308 #define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
309 #define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
310 #define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
311 #define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
312 #define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
313 #define krp_psinterval KREG_IBPORT_IDX(PSInterval)
314 #define krp_psstart KREG_IBPORT_IDX(PSStart)
315 #define krp_psstat KREG_IBPORT_IDX(PSStat)
316 #define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
317 #define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
318 #define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
319 #define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
320 #define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
321 #define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
322 #define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
323 #define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
324 #define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
325 #define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
326 #define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
327 #define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
328 #define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
329 #define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
330 #define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
331 #define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
332 #define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
333 #define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
334 #define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
335 #define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
336 #define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
337 #define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
338 #define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
339 #define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
340 #define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
341 #define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
342 #define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
343 #define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
344 #define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
345 #define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
346 #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
347 
348 /*
349  * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
350  * or qib_write_kreg_ctxt()
351  */
352 #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
353 #define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
354 
355 /*
356  * TID Flow table, per context.  Reduces
357  * number of hdrq updates to one per flow (or on errors).
358  * context 0 and 1 share same memory, but have distinct
359  * addresses.  Since for now, we never use expected sends
360  * on kernel contexts, we don't worry about that (we initialize
361  * those entries for ctxt 0/1 on driver load twice, for example).
362  */
363 #define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
364 #define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
365 
366 /* these are the error bits in the tid flows, and are W1C */
367 #define TIDFLOW_ERRBITS  ( \
368 	(SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
369 	SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
370 	(SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
371 	SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
372 
373 /* Most (not all) Counters are per-IBport.
374  * Requires LBIntCnt is at offset 0 in the group
375  */
376 #define CREG_IDX(regname) \
377 ((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
378 
379 #define crp_badformat CREG_IDX(RxVersionErrCnt)
380 #define crp_err_rlen CREG_IDX(RxLenErrCnt)
381 #define crp_erricrc CREG_IDX(RxICRCErrCnt)
382 #define crp_errlink CREG_IDX(RxLinkMalformCnt)
383 #define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
384 #define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
385 #define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
386 #define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
387 #define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
388 #define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
389 #define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
390 #define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
391 #define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
392 #define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
393 #define crp_pktrcv CREG_IDX(RxDataPktCnt)
394 #define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
395 #define crp_pktsend CREG_IDX(TxDataPktCnt)
396 #define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
397 #define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
398 #define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
399 #define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
400 #define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
401 #define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
402 #define crp_rcvebp CREG_IDX(RxEBPCnt)
403 #define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
404 #define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
405 #define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
406 #define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
407 #define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
408 #define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
409 #define crp_rxvlerr CREG_IDX(RxVlErrCnt)
410 #define crp_sendstall CREG_IDX(TxFlowStallCnt)
411 #define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
412 #define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
413 #define crp_txlenerr CREG_IDX(TxLenErrCnt)
414 #define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
415 #define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
416 #define crp_txunderrun CREG_IDX(TxUnderrunCnt)
417 #define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
418 #define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
419 #define crp_wordrcv CREG_IDX(RxDwordCnt)
420 #define crp_wordsend CREG_IDX(TxDwordCnt)
421 #define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
422 
423 /* these are the (few) counters that are not port-specific */
424 #define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
425 			QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
426 #define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
427 #define cr_lbint CREG_DEVIDX(LBIntCnt)
428 #define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
429 #define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
430 #define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
431 #define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
432 #define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
433 
434 /* no chip register for # of IB ports supported, so define */
435 #define NUM_IB_PORTS 2
436 
437 /* 1 VL15 buffer per hardware IB port, no register for this, so define */
438 #define NUM_VL15_BUFS NUM_IB_PORTS
439 
440 /*
441  * context 0 and 1 are special, and there is no chip register that
442  * defines this value, so we have to define it here.
443  * These are all allocated to either 0 or 1 for single port
444  * hardware configuration, otherwise each gets half
445  */
446 #define KCTXT0_EGRCNT 2048
447 
448 /* values for vl and port fields in PBC, 7322-specific */
449 #define PBC_PORT_SEL_LSB 26
450 #define PBC_PORT_SEL_RMASK 1
451 #define PBC_VL_NUM_LSB 27
452 #define PBC_VL_NUM_RMASK 7
453 #define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
454 #define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
455 
456 static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
457 	[IB_RATE_2_5_GBPS] = 16,
458 	[IB_RATE_5_GBPS] = 8,
459 	[IB_RATE_10_GBPS] = 4,
460 	[IB_RATE_20_GBPS] = 2,
461 	[IB_RATE_30_GBPS] = 2,
462 	[IB_RATE_40_GBPS] = 1
463 };
464 
465 #define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
466 #define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
467 
468 /* link training states, from IBC */
469 #define IB_7322_LT_STATE_DISABLED        0x00
470 #define IB_7322_LT_STATE_LINKUP          0x01
471 #define IB_7322_LT_STATE_POLLACTIVE      0x02
472 #define IB_7322_LT_STATE_POLLQUIET       0x03
473 #define IB_7322_LT_STATE_SLEEPDELAY      0x04
474 #define IB_7322_LT_STATE_SLEEPQUIET      0x05
475 #define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
476 #define IB_7322_LT_STATE_CFGRCVFCFG      0x09
477 #define IB_7322_LT_STATE_CFGWAITRMT      0x0a
478 #define IB_7322_LT_STATE_CFGIDLE         0x0b
479 #define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
480 #define IB_7322_LT_STATE_TXREVLANES      0x0d
481 #define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
482 #define IB_7322_LT_STATE_RECOVERIDLE     0x0f
483 #define IB_7322_LT_STATE_CFGENH          0x10
484 #define IB_7322_LT_STATE_CFGTEST         0x11
485 #define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
486 #define IB_7322_LT_STATE_CFGWAITENH      0x13
487 
488 /* link state machine states from IBC */
489 #define IB_7322_L_STATE_DOWN             0x0
490 #define IB_7322_L_STATE_INIT             0x1
491 #define IB_7322_L_STATE_ARM              0x2
492 #define IB_7322_L_STATE_ACTIVE           0x3
493 #define IB_7322_L_STATE_ACT_DEFER        0x4
494 
495 static const u8 qib_7322_physportstate[0x20] = {
496 	[IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
497 	[IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
498 	[IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
499 	[IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
500 	[IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
501 	[IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
502 	[IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
503 	[IB_7322_LT_STATE_CFGRCVFCFG] =
504 		IB_PHYSPORTSTATE_CFG_TRAIN,
505 	[IB_7322_LT_STATE_CFGWAITRMT] =
506 		IB_PHYSPORTSTATE_CFG_TRAIN,
507 	[IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
508 	[IB_7322_LT_STATE_RECOVERRETRAIN] =
509 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
510 	[IB_7322_LT_STATE_RECOVERWAITRMT] =
511 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
512 	[IB_7322_LT_STATE_RECOVERIDLE] =
513 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
514 	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
515 	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
516 	[IB_7322_LT_STATE_CFGWAITRMTTEST] =
517 		IB_PHYSPORTSTATE_CFG_TRAIN,
518 	[IB_7322_LT_STATE_CFGWAITENH] =
519 		IB_PHYSPORTSTATE_CFG_WAIT_ENH,
520 	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
521 	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
522 	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
523 	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
524 };
525 
526 #ifdef CONFIG_INFINIBAND_QIB_DCA
527 struct qib_irq_notify {
528 	int rcv;
529 	void *arg;
530 	struct irq_affinity_notify notify;
531 };
532 #endif
533 
534 struct qib_chip_specific {
535 	u64 __iomem *cregbase;
536 	u64 *cntrs;
537 	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
538 	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
539 	u64 main_int_mask;      /* clear bits which have dedicated handlers */
540 	u64 int_enable_mask;  /* for per port interrupts in single port mode */
541 	u64 errormask;
542 	u64 hwerrmask;
543 	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
544 	u64 gpio_mask; /* shadow the gpio mask register */
545 	u64 extctrl; /* shadow the gpio output enable, etc... */
546 	u32 ncntrs;
547 	u32 nportcntrs;
548 	u32 cntrnamelen;
549 	u32 portcntrnamelen;
550 	u32 numctxts;
551 	u32 rcvegrcnt;
552 	u32 updthresh; /* current AvailUpdThld */
553 	u32 updthresh_dflt; /* default AvailUpdThld */
554 	u32 r1;
555 	int irq;
556 	u32 num_msix_entries;
557 	u32 sdmabufcnt;
558 	u32 lastbuf_for_pio;
559 	u32 stay_in_freeze;
560 	u32 recovery_ports_initted;
561 #ifdef CONFIG_INFINIBAND_QIB_DCA
562 	u32 dca_ctrl;
563 	int rhdr_cpu[18];
564 	int sdma_cpu[2];
565 	u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
566 #endif
567 	struct qib_msix_entry *msix_entries;
568 	unsigned long *sendchkenable;
569 	unsigned long *sendgrhchk;
570 	unsigned long *sendibchk;
571 	u32 rcvavail_timeout[18];
572 	char emsgbuf[128]; /* for device error interrupt msg buffer */
573 };
574 
575 /* Table of entries in "human readable" form Tx Emphasis. */
576 struct txdds_ent {
577 	u8 amp;
578 	u8 pre;
579 	u8 main;
580 	u8 post;
581 };
582 
583 struct vendor_txdds_ent {
584 	u8 oui[QSFP_VOUI_LEN];
585 	u8 *partnum;
586 	struct txdds_ent sdr;
587 	struct txdds_ent ddr;
588 	struct txdds_ent qdr;
589 };
590 
591 static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
592 
593 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
594 #define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
595 #define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
596 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
597 
598 #define H1_FORCE_VAL 8
599 #define H1_FORCE_QME 1 /*  may be overridden via setup_txselect() */
600 #define H1_FORCE_QMH 7 /*  may be overridden via setup_txselect() */
601 
602 /* The static and dynamic registers are paired, and the pairs indexed by spd */
603 #define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
604 	+ ((spd) * 2))
605 
606 #define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
607 #define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
608 #define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
609 #define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
610 #define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
611 
612 struct qib_chippport_specific {
613 	u64 __iomem *kpregbase;
614 	u64 __iomem *cpregbase;
615 	u64 *portcntrs;
616 	struct qib_pportdata *ppd;
617 	wait_queue_head_t autoneg_wait;
618 	struct delayed_work autoneg_work;
619 	struct delayed_work ipg_work;
620 	struct timer_list chase_timer;
621 	/*
622 	 * these 5 fields are used to establish deltas for IB symbol
623 	 * errors and linkrecovery errors.  They can be reported on
624 	 * some chips during link negotiation prior to INIT, and with
625 	 * DDR when faking DDR negotiations with non-IBTA switches.
626 	 * The chip counters are adjusted at driver unload if there is
627 	 * a non-zero delta.
628 	 */
629 	u64 ibdeltainprog;
630 	u64 ibsymdelta;
631 	u64 ibsymsnap;
632 	u64 iblnkerrdelta;
633 	u64 iblnkerrsnap;
634 	u64 iblnkdownsnap;
635 	u64 iblnkdowndelta;
636 	u64 ibmalfdelta;
637 	u64 ibmalfsnap;
638 	u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
639 	u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
640 	unsigned long qdr_dfe_time;
641 	unsigned long chase_end;
642 	u32 autoneg_tries;
643 	u32 recovery_init;
644 	u32 qdr_dfe_on;
645 	u32 qdr_reforce;
646 	/*
647 	 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
648 	 * entry zero is unused, to simplify indexing
649 	 */
650 	u8 h1_val;
651 	u8 no_eep;  /* txselect table index to use if no qsfp info */
652 	u8 ipg_tries;
653 	u8 ibmalfusesnap;
654 	struct qib_qsfp_data qsfp_data;
655 	char epmsgbuf[192]; /* for port error interrupt msg buffer */
656 	char sdmamsgbuf[192]; /* for per-port sdma error messages */
657 };
658 
659 static struct {
660 	const char *name;
661 	irq_handler_t handler;
662 	int lsb;
663 	int port; /* 0 if not port-specific, else port # */
664 	int dca;
665 } irq_table[] = {
666 	{ "", qib_7322intr, -1, 0, 0 },
667 	{ " (buf avail)", qib_7322bufavail,
668 		SYM_LSB(IntStatus, SendBufAvail), 0, 0},
669 	{ " (sdma 0)", sdma_intr,
670 		SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
671 	{ " (sdma 1)", sdma_intr,
672 		SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
673 	{ " (sdmaI 0)", sdma_idle_intr,
674 		SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1},
675 	{ " (sdmaI 1)", sdma_idle_intr,
676 		SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1},
677 	{ " (sdmaP 0)", sdma_progress_intr,
678 		SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
679 	{ " (sdmaP 1)", sdma_progress_intr,
680 		SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
681 	{ " (sdmaC 0)", sdma_cleanup_intr,
682 		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
683 	{ " (sdmaC 1)", sdma_cleanup_intr,
684 		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 , 0},
685 };
686 
687 #ifdef CONFIG_INFINIBAND_QIB_DCA
688 
689 static const struct dca_reg_map {
690 	int     shadow_inx;
691 	int     lsb;
692 	u64     mask;
693 	u16     regno;
694 } dca_rcvhdr_reg_map[] = {
695 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
696 	   ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
697 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
698 	   ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
699 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
700 	   ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
701 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
702 	   ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
703 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
704 	   ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
705 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
706 	   ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
707 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
708 	   ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
709 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
710 	   ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
711 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
712 	   ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
713 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
714 	   ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
715 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
716 	   ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
717 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
718 	   ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
719 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
720 	   ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
721 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
722 	   ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
723 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
724 	   ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
725 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
726 	   ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
727 	{ 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
728 	   ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
729 	{ 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
730 	   ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
731 };
732 #endif
733 
734 /* ibcctrl bits */
735 #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
736 /* cycle through TS1/TS2 till OK */
737 #define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
738 /* wait for TS1, then go on */
739 #define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
740 #define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
741 
742 #define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
743 #define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
744 #define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
745 
746 #define BLOB_7322_IBCHG 0x101
747 
748 static inline void qib_write_kreg(const struct qib_devdata *dd,
749 				  const u32 regno, u64 value);
750 static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
751 static void write_7322_initregs(struct qib_devdata *);
752 static void write_7322_init_portregs(struct qib_pportdata *);
753 static void setup_7322_link_recovery(struct qib_pportdata *, u32);
754 static void check_7322_rxe_status(struct qib_pportdata *);
755 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
756 #ifdef CONFIG_INFINIBAND_QIB_DCA
757 static void qib_setup_dca(struct qib_devdata *dd);
758 static void setup_dca_notifier(struct qib_devdata *dd,
759 			       struct qib_msix_entry *m);
760 static void reset_dca_notifier(struct qib_devdata *dd,
761 			       struct qib_msix_entry *m);
762 #endif
763 
764 /**
765  * qib_read_ureg32 - read 32-bit virtualized per-context register
766  * @dd: device
767  * @regno: register number
768  * @ctxt: context number
769  *
770  * Return the contents of a register that is virtualized to be per context.
771  * Returns -1 on errors (not distinguishable from valid contents at
772  * runtime; we may add a separate error variable at some point).
773  */
774 static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
775 				  enum qib_ureg regno, int ctxt)
776 {
777 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
778 		return 0;
779 	return readl(regno + (u64 __iomem *)(
780 		(dd->ureg_align * ctxt) + (dd->userbase ?
781 		 (char __iomem *)dd->userbase :
782 		 (char __iomem *)dd->kregbase + dd->uregbase)));
783 }
784 
785 /**
786  * qib_read_ureg - read virtualized per-context register
787  * @dd: device
788  * @regno: register number
789  * @ctxt: context number
790  *
791  * Return the contents of a register that is virtualized to be per context.
792  * Returns -1 on errors (not distinguishable from valid contents at
793  * runtime; we may add a separate error variable at some point).
794  */
795 static inline u64 qib_read_ureg(const struct qib_devdata *dd,
796 				enum qib_ureg regno, int ctxt)
797 {
798 
799 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
800 		return 0;
801 	return readq(regno + (u64 __iomem *)(
802 		(dd->ureg_align * ctxt) + (dd->userbase ?
803 		 (char __iomem *)dd->userbase :
804 		 (char __iomem *)dd->kregbase + dd->uregbase)));
805 }
806 
807 /**
808  * qib_write_ureg - write virtualized per-context register
809  * @dd: device
810  * @regno: register number
811  * @value: value
812  * @ctxt: context
813  *
814  * Write the contents of a register that is virtualized to be per context.
815  */
816 static inline void qib_write_ureg(const struct qib_devdata *dd,
817 				  enum qib_ureg regno, u64 value, int ctxt)
818 {
819 	u64 __iomem *ubase;
820 	if (dd->userbase)
821 		ubase = (u64 __iomem *)
822 			((char __iomem *) dd->userbase +
823 			 dd->ureg_align * ctxt);
824 	else
825 		ubase = (u64 __iomem *)
826 			(dd->uregbase +
827 			 (char __iomem *) dd->kregbase +
828 			 dd->ureg_align * ctxt);
829 
830 	if (dd->kregbase && (dd->flags & QIB_PRESENT))
831 		writeq(value, &ubase[regno]);
832 }
833 
834 static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
835 				  const u32 regno)
836 {
837 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
838 		return -1;
839 	return readl((u32 __iomem *) &dd->kregbase[regno]);
840 }
841 
842 static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
843 				  const u32 regno)
844 {
845 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
846 		return -1;
847 	return readq(&dd->kregbase[regno]);
848 }
849 
850 static inline void qib_write_kreg(const struct qib_devdata *dd,
851 				  const u32 regno, u64 value)
852 {
853 	if (dd->kregbase && (dd->flags & QIB_PRESENT))
854 		writeq(value, &dd->kregbase[regno]);
855 }
856 
857 /*
858  * not many sanity checks for the port-specific kernel register routines,
859  * since they are only used when it's known to be safe.
860 */
861 static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
862 				     const u16 regno)
863 {
864 	if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
865 		return 0ULL;
866 	return readq(&ppd->cpspec->kpregbase[regno]);
867 }
868 
869 static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
870 				       const u16 regno, u64 value)
871 {
872 	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
873 	    (ppd->dd->flags & QIB_PRESENT))
874 		writeq(value, &ppd->cpspec->kpregbase[regno]);
875 }
876 
877 /**
878  * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
879  * @dd: the qlogic_ib device
880  * @regno: the register number to write
881  * @ctxt: the context containing the register
882  * @value: the value to write
883  */
884 static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
885 				       const u16 regno, unsigned ctxt,
886 				       u64 value)
887 {
888 	qib_write_kreg(dd, regno + ctxt, value);
889 }
890 
891 static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
892 {
893 	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
894 		return 0;
895 	return readq(&dd->cspec->cregbase[regno]);
896 
897 
898 }
899 
900 static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
901 {
902 	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
903 		return 0;
904 	return readl(&dd->cspec->cregbase[regno]);
905 
906 
907 }
908 
909 static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
910 					u16 regno, u64 value)
911 {
912 	if (ppd->cpspec && ppd->cpspec->cpregbase &&
913 	    (ppd->dd->flags & QIB_PRESENT))
914 		writeq(value, &ppd->cpspec->cpregbase[regno]);
915 }
916 
917 static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
918 				      u16 regno)
919 {
920 	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
921 	    !(ppd->dd->flags & QIB_PRESENT))
922 		return 0;
923 	return readq(&ppd->cpspec->cpregbase[regno]);
924 }
925 
926 static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
927 					u16 regno)
928 {
929 	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
930 	    !(ppd->dd->flags & QIB_PRESENT))
931 		return 0;
932 	return readl(&ppd->cpspec->cpregbase[regno]);
933 }
934 
935 /* bits in Control register */
936 #define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
937 #define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
938 
939 /* bits in general interrupt regs */
940 #define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
941 #define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
942 #define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
943 #define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
944 #define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
945 #define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
946 #define QIB_I_C_ERROR INT_MASK(Err)
947 
948 #define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
949 #define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
950 #define QIB_I_GPIO INT_MASK(AssertGPIO)
951 #define QIB_I_P_SDMAINT(pidx) \
952 	(INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
953 	 INT_MASK_P(SDmaProgress, pidx) | \
954 	 INT_MASK_PM(SDmaCleanupDone, pidx))
955 
956 /* Interrupt bits that are "per port" */
957 #define QIB_I_P_BITSEXTANT(pidx) \
958 	(INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
959 	INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
960 	INT_MASK_P(SDmaProgress, pidx) | \
961 	INT_MASK_PM(SDmaCleanupDone, pidx))
962 
963 /* Interrupt bits that are common to a device */
964 /* currently unused: QIB_I_SPIOSENT */
965 #define QIB_I_C_BITSEXTANT \
966 	(QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
967 	QIB_I_SPIOSENT | \
968 	QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
969 
970 #define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
971 	QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
972 
973 /*
974  * Error bits that are "per port".
975  */
976 #define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
977 #define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
978 #define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
979 #define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
980 #define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
981 #define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
982 #define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
983 #define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
984 #define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
985 #define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
986 #define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
987 #define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
988 #define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
989 #define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
990 #define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
991 #define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
992 #define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
993 #define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
994 #define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
995 #define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
996 #define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
997 #define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
998 #define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
999 #define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
1000 #define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
1001 #define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
1002 #define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
1003 #define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
1004 
1005 #define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
1006 #define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
1007 #define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
1008 #define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
1009 #define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
1010 #define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
1011 #define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
1012 #define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
1013 #define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
1014 #define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
1015 #define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
1016 
1017 /* Error bits that are common to a device */
1018 #define QIB_E_RESET ERR_MASK(ResetNegated)
1019 #define QIB_E_HARDWARE ERR_MASK(HardwareErr)
1020 #define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
1021 
1022 
1023 /*
1024  * Per chip (rather than per-port) errors.  Most either do
1025  * nothing but trigger a print (because they self-recover, or
1026  * always occur in tandem with other errors that handle the
1027  * issue), or because they indicate errors with no recovery,
1028  * but we want to know that they happened.
1029  */
1030 #define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
1031 #define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
1032 #define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
1033 #define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
1034 #define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
1035 #define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
1036 #define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
1037 #define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
1038 
1039 /* SDMA chip errors (not per port)
1040  * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
1041  * the SDMAHALT error immediately, so we just print the dup error via the
1042  * E_AUTO mechanism.  This is true of most of the per-port fatal errors
1043  * as well, but since this is port-independent, by definition, it's
1044  * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are per
1045  * packet send errors, and so are handled in the same manner as other
1046  * per-packet errors.
1047  */
1048 #define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
1049 #define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
1050 #define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
1051 
1052 /*
1053  * Below functionally equivalent to legacy QLOGIC_IB_E_PKTERRS
1054  * it is used to print "common" packet errors.
1055  */
1056 #define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
1057 	QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
1058 	QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
1059 	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1060 	QIB_E_P_REBP)
1061 
1062 /* Error Bits that Packet-related (Receive, per-port) */
1063 #define QIB_E_P_RPKTERRS (\
1064 	QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
1065 	QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
1066 	QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
1067 	QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
1068 	QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
1069 	QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
1070 
1071 /*
1072  * Error bits that are Send-related (per port)
1073  * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
1074  * All of these potentially need to have a buffer disarmed
1075  */
1076 #define QIB_E_P_SPKTERRS (\
1077 	QIB_E_P_SUNEXP_PKTNUM |\
1078 	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1079 	QIB_E_P_SMAXPKTLEN |\
1080 	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1081 	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
1082 	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
1083 
1084 #define QIB_E_SPKTERRS ( \
1085 		QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
1086 		ERR_MASK_N(SendUnsupportedVLErr) |			\
1087 		QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
1088 
1089 #define QIB_E_P_SDMAERRS ( \
1090 	QIB_E_P_SDMAHALT | \
1091 	QIB_E_P_SDMADESCADDRMISALIGN | \
1092 	QIB_E_P_SDMAUNEXPDATA | \
1093 	QIB_E_P_SDMAMISSINGDW | \
1094 	QIB_E_P_SDMADWEN | \
1095 	QIB_E_P_SDMARPYTAG | \
1096 	QIB_E_P_SDMA1STDESC | \
1097 	QIB_E_P_SDMABASE | \
1098 	QIB_E_P_SDMATAILOUTOFBOUND | \
1099 	QIB_E_P_SDMAOUTOFBOUND | \
1100 	QIB_E_P_SDMAGENMISMATCH)
1101 
1102 /*
1103  * This sets some bits more than once, but makes it more obvious which
1104  * bits are not handled under other categories, and the repeat definition
1105  * is not a problem.
1106  */
1107 #define QIB_E_P_BITSEXTANT ( \
1108 	QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
1109 	QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
1110 	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
1111 	QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
1112 	)
1113 
1114 /*
1115  * These are errors that can occur when the link
1116  * changes state while a packet is being sent or received.  This doesn't
1117  * cover things like EBP or VCRC that can be the result of a sending
1118  * having the link change state, so we receive a "known bad" packet.
1119  * All of these are "per port", so renamed:
1120  */
1121 #define QIB_E_P_LINK_PKTERRS (\
1122 	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1123 	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
1124 	QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
1125 	QIB_E_P_RUNEXPCHAR)
1126 
1127 /*
1128  * This sets some bits more than once, but makes it more obvious which
1129  * bits are not handled under other categories (such as QIB_E_SPKTERRS),
1130  * and the repeat definition is not a problem.
1131  */
1132 #define QIB_E_C_BITSEXTANT (\
1133 	QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
1134 	QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
1135 	QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
1136 
1137 /* Likewise Neuter E_SPKT_ERRS_IGNORE */
1138 #define E_SPKT_ERRS_IGNORE 0
1139 
1140 #define QIB_EXTS_MEMBIST_DISABLED \
1141 	SYM_MASK(EXTStatus, MemBISTDisabled)
1142 #define QIB_EXTS_MEMBIST_ENDTEST \
1143 	SYM_MASK(EXTStatus, MemBISTEndTest)
1144 
1145 #define QIB_E_SPIOARMLAUNCH \
1146 	ERR_MASK(SendArmLaunchErr)
1147 
1148 #define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
1149 #define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
1150 
1151 /*
1152  * IBTA_1_2 is set when multiple speeds are enabled (normal),
1153  * and also if forced QDR (only QDR enabled).  It's enabled for the
1154  * forced QDR case so that scrambling will be enabled by the TS3
1155  * exchange, when supported by both sides of the link.
1156  */
1157 #define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
1158 #define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
1159 #define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
1160 #define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
1161 #define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
1162 #define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
1163 	SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
1164 #define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
1165 
1166 #define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
1167 #define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
1168 
1169 #define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
1170 #define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1171 #define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1172 
1173 #define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1174 #define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1175 #define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
1176 	SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
1177 #define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
1178 	SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
1179 #define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
1180 
1181 #define IBA7322_REDIRECT_VEC_PER_REG 12
1182 
1183 #define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
1184 #define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
1185 #define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
1186 #define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
1187 #define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
1188 
1189 #define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
1190 
1191 #define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
1192 	.msg = #fldname , .sz = sizeof(#fldname) }
1193 #define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
1194 	fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
1195 static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
1196 	HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
1197 	HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
1198 	HWE_AUTO(PCIESerdesPClkNotDetect),
1199 	HWE_AUTO(PowerOnBISTFailed),
1200 	HWE_AUTO(TempsenseTholdReached),
1201 	HWE_AUTO(MemoryErr),
1202 	HWE_AUTO(PCIeBusParityErr),
1203 	HWE_AUTO(PcieCplTimeout),
1204 	HWE_AUTO(PciePoisonedTLP),
1205 	HWE_AUTO_P(SDmaMemReadErr, 1),
1206 	HWE_AUTO_P(SDmaMemReadErr, 0),
1207 	HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
1208 	HWE_AUTO_P(IBCBusToSPCParityErr, 1),
1209 	HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
1210 	HWE_AUTO(statusValidNoEop),
1211 	HWE_AUTO(LATriggered),
1212 	{ .mask = 0, .sz = 0 }
1213 };
1214 
1215 #define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1216 	.msg = #fldname, .sz = sizeof(#fldname) }
1217 #define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1218 	.msg = #fldname, .sz = sizeof(#fldname) }
1219 static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
1220 	E_AUTO(RcvEgrFullErr),
1221 	E_AUTO(RcvHdrFullErr),
1222 	E_AUTO(ResetNegated),
1223 	E_AUTO(HardwareErr),
1224 	E_AUTO(InvalidAddrErr),
1225 	E_AUTO(SDmaVL15Err),
1226 	E_AUTO(SBufVL15MisUseErr),
1227 	E_AUTO(InvalidEEPCmd),
1228 	E_AUTO(RcvContextShareErr),
1229 	E_AUTO(SendVLMismatchErr),
1230 	E_AUTO(SendArmLaunchErr),
1231 	E_AUTO(SendSpecialTriggerErr),
1232 	E_AUTO(SDmaWrongPortErr),
1233 	E_AUTO(SDmaBufMaskDuplicateErr),
1234 	{ .mask = 0, .sz = 0 }
1235 };
1236 
1237 static const struct  qib_hwerror_msgs qib_7322p_error_msgs[] = {
1238 	E_P_AUTO(IBStatusChanged),
1239 	E_P_AUTO(SHeadersErr),
1240 	E_P_AUTO(VL15BufMisuseErr),
1241 	/*
1242 	 * SDmaHaltErr is not really an error, make it clearer;
1243 	 */
1244 	{.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
1245 		.sz = 11},
1246 	E_P_AUTO(SDmaDescAddrMisalignErr),
1247 	E_P_AUTO(SDmaUnexpDataErr),
1248 	E_P_AUTO(SDmaMissingDwErr),
1249 	E_P_AUTO(SDmaDwEnErr),
1250 	E_P_AUTO(SDmaRpyTagErr),
1251 	E_P_AUTO(SDma1stDescErr),
1252 	E_P_AUTO(SDmaBaseErr),
1253 	E_P_AUTO(SDmaTailOutOfBoundErr),
1254 	E_P_AUTO(SDmaOutOfBoundErr),
1255 	E_P_AUTO(SDmaGenMismatchErr),
1256 	E_P_AUTO(SendBufMisuseErr),
1257 	E_P_AUTO(SendUnsupportedVLErr),
1258 	E_P_AUTO(SendUnexpectedPktNumErr),
1259 	E_P_AUTO(SendDroppedDataPktErr),
1260 	E_P_AUTO(SendDroppedSmpPktErr),
1261 	E_P_AUTO(SendPktLenErr),
1262 	E_P_AUTO(SendUnderRunErr),
1263 	E_P_AUTO(SendMaxPktLenErr),
1264 	E_P_AUTO(SendMinPktLenErr),
1265 	E_P_AUTO(RcvIBLostLinkErr),
1266 	E_P_AUTO(RcvHdrErr),
1267 	E_P_AUTO(RcvHdrLenErr),
1268 	E_P_AUTO(RcvBadTidErr),
1269 	E_P_AUTO(RcvBadVersionErr),
1270 	E_P_AUTO(RcvIBFlowErr),
1271 	E_P_AUTO(RcvEBPErr),
1272 	E_P_AUTO(RcvUnsupportedVLErr),
1273 	E_P_AUTO(RcvUnexpectedCharErr),
1274 	E_P_AUTO(RcvShortPktLenErr),
1275 	E_P_AUTO(RcvLongPktLenErr),
1276 	E_P_AUTO(RcvMaxPktLenErr),
1277 	E_P_AUTO(RcvMinPktLenErr),
1278 	E_P_AUTO(RcvICRCErr),
1279 	E_P_AUTO(RcvVCRCErr),
1280 	E_P_AUTO(RcvFormatErr),
1281 	{ .mask = 0, .sz = 0 }
1282 };
1283 
1284 /*
1285  * Below generates "auto-message" for interrupts not specific to any port or
1286  * context
1287  */
1288 #define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1289 	.msg = #fldname, .sz = sizeof(#fldname) }
1290 /* Below generates "auto-message" for interrupts specific to a port */
1291 #define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1292 	SYM_LSB(IntMask, fldname##Mask##_0), \
1293 	SYM_LSB(IntMask, fldname##Mask##_1)), \
1294 	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1295 /* For some reason, the SerDesTrimDone bits are reversed */
1296 #define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1297 	SYM_LSB(IntMask, fldname##Mask##_1), \
1298 	SYM_LSB(IntMask, fldname##Mask##_0)), \
1299 	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1300 /*
1301  * Below generates "auto-message" for interrupts specific to a context,
1302  * with ctxt-number appended
1303  */
1304 #define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1305 	SYM_LSB(IntMask, fldname##0IntMask), \
1306 	SYM_LSB(IntMask, fldname##17IntMask)), \
1307 	.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
1308 
1309 static const struct  qib_hwerror_msgs qib_7322_intr_msgs[] = {
1310 	INTR_AUTO_P(SDmaInt),
1311 	INTR_AUTO_P(SDmaProgressInt),
1312 	INTR_AUTO_P(SDmaIdleInt),
1313 	INTR_AUTO_P(SDmaCleanupDone),
1314 	INTR_AUTO_C(RcvUrg),
1315 	INTR_AUTO_P(ErrInt),
1316 	INTR_AUTO(ErrInt),      /* non-port-specific errs */
1317 	INTR_AUTO(AssertGPIOInt),
1318 	INTR_AUTO_P(SendDoneInt),
1319 	INTR_AUTO(SendBufAvailInt),
1320 	INTR_AUTO_C(RcvAvail),
1321 	{ .mask = 0, .sz = 0 }
1322 };
1323 
1324 #define TXSYMPTOM_AUTO_P(fldname) \
1325 	{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
1326 	.msg = #fldname, .sz = sizeof(#fldname) }
1327 static const struct  qib_hwerror_msgs hdrchk_msgs[] = {
1328 	TXSYMPTOM_AUTO_P(NonKeyPacket),
1329 	TXSYMPTOM_AUTO_P(GRHFail),
1330 	TXSYMPTOM_AUTO_P(PkeyFail),
1331 	TXSYMPTOM_AUTO_P(QPFail),
1332 	TXSYMPTOM_AUTO_P(SLIDFail),
1333 	TXSYMPTOM_AUTO_P(RawIPV6),
1334 	TXSYMPTOM_AUTO_P(PacketTooSmall),
1335 	{ .mask = 0, .sz = 0 }
1336 };
1337 
1338 #define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1339 
1340 /*
1341  * Called when we might have an error that is specific to a particular
1342  * PIO buffer, and may need to cancel that buffer, so it can be re-used,
1343  * because we don't need to force the update of pioavail
1344  */
1345 static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1346 {
1347 	struct qib_devdata *dd = ppd->dd;
1348 	u32 i;
1349 	int any;
1350 	u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1351 	u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1352 	unsigned long sbuf[4];
1353 
1354 	/*
1355 	 * It's possible that sendbuffererror could have bits set; might
1356 	 * have already done this as a result of hardware error handling.
1357 	 */
1358 	any = 0;
1359 	for (i = 0; i < regcnt; ++i) {
1360 		sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1361 		if (sbuf[i]) {
1362 			any = 1;
1363 			qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1364 		}
1365 	}
1366 
1367 	if (any)
1368 		qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1369 }
1370 
1371 /* No txe_recover yet, if ever */
1372 
1373 /* No decode__errors yet */
1374 static void err_decode(char *msg, size_t len, u64 errs,
1375 		       const struct qib_hwerror_msgs *msp)
1376 {
1377 	u64 these, lmask;
1378 	int took, multi, n = 0;
1379 
1380 	while (errs && msp && msp->mask) {
1381 		multi = (msp->mask & (msp->mask - 1));
1382 		while (errs & msp->mask) {
1383 			these = (errs & msp->mask);
1384 			lmask = (these & (these - 1)) ^ these;
1385 			if (len) {
1386 				if (n++) {
1387 					/* separate the strings */
1388 					*msg++ = ',';
1389 					len--;
1390 				}
1391 				BUG_ON(!msp->sz);
1392 				/* msp->sz counts the nul */
1393 				took = min_t(size_t, msp->sz - (size_t)1, len);
1394 				memcpy(msg,  msp->msg, took);
1395 				len -= took;
1396 				msg += took;
1397 				if (len)
1398 					*msg = '\0';
1399 			}
1400 			errs &= ~lmask;
1401 			if (len && multi) {
1402 				/* More than one bit this mask */
1403 				int idx = -1;
1404 
1405 				while (lmask & msp->mask) {
1406 					++idx;
1407 					lmask >>= 1;
1408 				}
1409 				took = scnprintf(msg, len, "_%d", idx);
1410 				len -= took;
1411 				msg += took;
1412 			}
1413 		}
1414 		++msp;
1415 	}
1416 	/* If some bits are left, show in hex. */
1417 	if (len && errs)
1418 		snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1419 			(unsigned long long) errs);
1420 }
1421 
1422 /* only called if r1 set */
1423 static void flush_fifo(struct qib_pportdata *ppd)
1424 {
1425 	struct qib_devdata *dd = ppd->dd;
1426 	u32 __iomem *piobuf;
1427 	u32 bufn;
1428 	u32 *hdr;
1429 	u64 pbc;
1430 	const unsigned hdrwords = 7;
1431 	static struct qib_ib_header ibhdr = {
1432 		.lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1433 		.lrh[1] = IB_LID_PERMISSIVE,
1434 		.lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1435 		.lrh[3] = IB_LID_PERMISSIVE,
1436 		.u.oth.bth[0] = cpu_to_be32(
1437 			(IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1438 		.u.oth.bth[1] = cpu_to_be32(0),
1439 		.u.oth.bth[2] = cpu_to_be32(0),
1440 		.u.oth.u.ud.deth[0] = cpu_to_be32(0),
1441 		.u.oth.u.ud.deth[1] = cpu_to_be32(0),
1442 	};
1443 
1444 	/*
1445 	 * Send a dummy VL15 packet to flush the launch FIFO.
1446 	 * This will not actually be sent since the TxeBypassIbc bit is set.
1447 	 */
1448 	pbc = PBC_7322_VL15_SEND |
1449 		(((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1450 		(hdrwords + SIZE_OF_CRC);
1451 	piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1452 	if (!piobuf)
1453 		return;
1454 	writeq(pbc, piobuf);
1455 	hdr = (u32 *) &ibhdr;
1456 	if (dd->flags & QIB_PIO_FLUSH_WC) {
1457 		qib_flush_wc();
1458 		qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1459 		qib_flush_wc();
1460 		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1461 		qib_flush_wc();
1462 	} else
1463 		qib_pio_copy(piobuf + 2, hdr, hdrwords);
1464 	qib_sendbuf_done(dd, bufn);
1465 }
1466 
1467 /*
1468  * This is called with interrupts disabled and sdma_lock held.
1469  */
1470 static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1471 {
1472 	struct qib_devdata *dd = ppd->dd;
1473 	u64 set_sendctrl = 0;
1474 	u64 clr_sendctrl = 0;
1475 
1476 	if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1477 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1478 	else
1479 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1480 
1481 	if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1482 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1483 	else
1484 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1485 
1486 	if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1487 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1488 	else
1489 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1490 
1491 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1492 		set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1493 				SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1494 				SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1495 	else
1496 		clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1497 				SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1498 				SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1499 
1500 	spin_lock(&dd->sendctrl_lock);
1501 
1502 	/* If we are draining everything, block sends first */
1503 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1504 		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1505 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1506 		qib_write_kreg(dd, kr_scratch, 0);
1507 	}
1508 
1509 	ppd->p_sendctrl |= set_sendctrl;
1510 	ppd->p_sendctrl &= ~clr_sendctrl;
1511 
1512 	if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1513 		qib_write_kreg_port(ppd, krp_sendctrl,
1514 				    ppd->p_sendctrl |
1515 				    SYM_MASK(SendCtrl_0, SDmaCleanup));
1516 	else
1517 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1518 	qib_write_kreg(dd, kr_scratch, 0);
1519 
1520 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1521 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1522 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1523 		qib_write_kreg(dd, kr_scratch, 0);
1524 	}
1525 
1526 	spin_unlock(&dd->sendctrl_lock);
1527 
1528 	if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1529 		flush_fifo(ppd);
1530 }
1531 
1532 static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1533 {
1534 	__qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1535 }
1536 
1537 static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1538 {
1539 	/*
1540 	 * Set SendDmaLenGen and clear and set
1541 	 * the MSB of the generation count to enable generation checking
1542 	 * and load the internal generation counter.
1543 	 */
1544 	qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1545 	qib_write_kreg_port(ppd, krp_senddmalengen,
1546 			    ppd->sdma_descq_cnt |
1547 			    (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1548 }
1549 
1550 /*
1551  * Must be called with sdma_lock held, or before init finished.
1552  */
1553 static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1554 {
1555 	/* Commit writes to memory and advance the tail on the chip */
1556 	wmb();
1557 	ppd->sdma_descq_tail = tail;
1558 	qib_write_kreg_port(ppd, krp_senddmatail, tail);
1559 }
1560 
1561 /*
1562  * This is called with interrupts disabled and sdma_lock held.
1563  */
1564 static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1565 {
1566 	/*
1567 	 * Drain all FIFOs.
1568 	 * The hardware doesn't require this but we do it so that verbs
1569 	 * and user applications don't wait for link active to send stale
1570 	 * data.
1571 	 */
1572 	sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1573 
1574 	qib_sdma_7322_setlengen(ppd);
1575 	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1576 	ppd->sdma_head_dma[0] = 0;
1577 	qib_7322_sdma_sendctrl(ppd,
1578 		ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1579 }
1580 
1581 #define DISABLES_SDMA ( \
1582 	QIB_E_P_SDMAHALT | \
1583 	QIB_E_P_SDMADESCADDRMISALIGN | \
1584 	QIB_E_P_SDMAMISSINGDW | \
1585 	QIB_E_P_SDMADWEN | \
1586 	QIB_E_P_SDMARPYTAG | \
1587 	QIB_E_P_SDMA1STDESC | \
1588 	QIB_E_P_SDMABASE | \
1589 	QIB_E_P_SDMATAILOUTOFBOUND | \
1590 	QIB_E_P_SDMAOUTOFBOUND | \
1591 	QIB_E_P_SDMAGENMISMATCH)
1592 
1593 static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1594 {
1595 	unsigned long flags;
1596 	struct qib_devdata *dd = ppd->dd;
1597 
1598 	errs &= QIB_E_P_SDMAERRS;
1599 
1600 	if (errs & QIB_E_P_SDMAUNEXPDATA)
1601 		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1602 			    ppd->port);
1603 
1604 	spin_lock_irqsave(&ppd->sdma_lock, flags);
1605 
1606 	if (errs != QIB_E_P_SDMAHALT) {
1607 		/* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
1608 		qib_dev_porterr(dd, ppd->port,
1609 			"SDMA %s 0x%016llx %s\n",
1610 			qib_sdma_state_names[ppd->sdma_state.current_state],
1611 			errs, ppd->cpspec->sdmamsgbuf);
1612 		dump_sdma_7322_state(ppd);
1613 	}
1614 
1615 	switch (ppd->sdma_state.current_state) {
1616 	case qib_sdma_state_s00_hw_down:
1617 		break;
1618 
1619 	case qib_sdma_state_s10_hw_start_up_wait:
1620 		if (errs & QIB_E_P_SDMAHALT)
1621 			__qib_sdma_process_event(ppd,
1622 				qib_sdma_event_e20_hw_started);
1623 		break;
1624 
1625 	case qib_sdma_state_s20_idle:
1626 		break;
1627 
1628 	case qib_sdma_state_s30_sw_clean_up_wait:
1629 		break;
1630 
1631 	case qib_sdma_state_s40_hw_clean_up_wait:
1632 		if (errs & QIB_E_P_SDMAHALT)
1633 			__qib_sdma_process_event(ppd,
1634 				qib_sdma_event_e50_hw_cleaned);
1635 		break;
1636 
1637 	case qib_sdma_state_s50_hw_halt_wait:
1638 		if (errs & QIB_E_P_SDMAHALT)
1639 			__qib_sdma_process_event(ppd,
1640 				qib_sdma_event_e60_hw_halted);
1641 		break;
1642 
1643 	case qib_sdma_state_s99_running:
1644 		__qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1645 		__qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1646 		break;
1647 	}
1648 
1649 	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1650 }
1651 
1652 /*
1653  * handle per-device errors (not per-port errors)
1654  */
1655 static noinline void handle_7322_errors(struct qib_devdata *dd)
1656 {
1657 	char *msg;
1658 	u64 iserr = 0;
1659 	u64 errs;
1660 	u64 mask;
1661 	int log_idx;
1662 
1663 	qib_stats.sps_errints++;
1664 	errs = qib_read_kreg64(dd, kr_errstatus);
1665 	if (!errs) {
1666 		qib_devinfo(dd->pcidev,
1667 			"device error interrupt, but no error bits set!\n");
1668 		goto done;
1669 	}
1670 
1671 	/* don't report errors that are masked */
1672 	errs &= dd->cspec->errormask;
1673 	msg = dd->cspec->emsgbuf;
1674 
1675 	/* do these first, they are most important */
1676 	if (errs & QIB_E_HARDWARE) {
1677 		*msg = '\0';
1678 		qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
1679 	} else
1680 		for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1681 			if (errs & dd->eep_st_masks[log_idx].errs_to_log)
1682 				qib_inc_eeprom_err(dd, log_idx, 1);
1683 
1684 	if (errs & QIB_E_SPKTERRS) {
1685 		qib_disarm_7322_senderrbufs(dd->pport);
1686 		qib_stats.sps_txerrs++;
1687 	} else if (errs & QIB_E_INVALIDADDR)
1688 		qib_stats.sps_txerrs++;
1689 	else if (errs & QIB_E_ARMLAUNCH) {
1690 		qib_stats.sps_txerrs++;
1691 		qib_disarm_7322_senderrbufs(dd->pport);
1692 	}
1693 	qib_write_kreg(dd, kr_errclear, errs);
1694 
1695 	/*
1696 	 * The ones we mask off are handled specially below
1697 	 * or above.  Also mask SDMADISABLED by default as it
1698 	 * is too chatty.
1699 	 */
1700 	mask = QIB_E_HARDWARE;
1701 	*msg = '\0';
1702 
1703 	err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
1704 		   qib_7322error_msgs);
1705 
1706 	/*
1707 	 * Getting reset is a tragedy for all ports. Mark the device
1708 	 * _and_ the ports as "offline" in way meaningful to each.
1709 	 */
1710 	if (errs & QIB_E_RESET) {
1711 		int pidx;
1712 
1713 		qib_dev_err(dd,
1714 			"Got reset, requires re-init (unload and reload driver)\n");
1715 		dd->flags &= ~QIB_INITTED;  /* needs re-init */
1716 		/* mark as having had error */
1717 		*dd->devstatusp |= QIB_STATUS_HWERROR;
1718 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
1719 			if (dd->pport[pidx].link_speed_supported)
1720 				*dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1721 	}
1722 
1723 	if (*msg && iserr)
1724 		qib_dev_err(dd, "%s error\n", msg);
1725 
1726 	/*
1727 	 * If there were hdrq or egrfull errors, wake up any processes
1728 	 * waiting in poll.  We used to try to check which contexts had
1729 	 * the overflow, but given the cost of that and the chip reads
1730 	 * to support it, it's better to just wake everybody up if we
1731 	 * get an overflow; waiters can poll again if it's not them.
1732 	 */
1733 	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1734 		qib_handle_urcv(dd, ~0U);
1735 		if (errs & ERR_MASK(RcvEgrFullErr))
1736 			qib_stats.sps_buffull++;
1737 		else
1738 			qib_stats.sps_hdrfull++;
1739 	}
1740 
1741 done:
1742 	return;
1743 }
1744 
1745 static void qib_error_tasklet(unsigned long data)
1746 {
1747 	struct qib_devdata *dd = (struct qib_devdata *)data;
1748 
1749 	handle_7322_errors(dd);
1750 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1751 }
1752 
1753 static void reenable_chase(unsigned long opaque)
1754 {
1755 	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
1756 
1757 	ppd->cpspec->chase_timer.expires = 0;
1758 	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1759 		QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1760 }
1761 
1762 static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
1763 		u8 ibclt)
1764 {
1765 	ppd->cpspec->chase_end = 0;
1766 
1767 	if (!qib_chase)
1768 		return;
1769 
1770 	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1771 		QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1772 	ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1773 	add_timer(&ppd->cpspec->chase_timer);
1774 }
1775 
1776 static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1777 {
1778 	u8 ibclt;
1779 	unsigned long tnow;
1780 
1781 	ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1782 
1783 	/*
1784 	 * Detect and handle the state chase issue, where we can
1785 	 * get stuck if we are unlucky on timing on both sides of
1786 	 * the link.   If we are, we disable, set a timer, and
1787 	 * then re-enable.
1788 	 */
1789 	switch (ibclt) {
1790 	case IB_7322_LT_STATE_CFGRCVFCFG:
1791 	case IB_7322_LT_STATE_CFGWAITRMT:
1792 	case IB_7322_LT_STATE_TXREVLANES:
1793 	case IB_7322_LT_STATE_CFGENH:
1794 		tnow = jiffies;
1795 		if (ppd->cpspec->chase_end &&
1796 		     time_after(tnow, ppd->cpspec->chase_end))
1797 			disable_chase(ppd, tnow, ibclt);
1798 		else if (!ppd->cpspec->chase_end)
1799 			ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1800 		break;
1801 	default:
1802 		ppd->cpspec->chase_end = 0;
1803 		break;
1804 	}
1805 
1806 	if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1807 	      ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1808 	     ibclt == IB_7322_LT_STATE_LINKUP) &&
1809 	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1810 		force_h1(ppd);
1811 		ppd->cpspec->qdr_reforce = 1;
1812 		if (!ppd->dd->cspec->r1)
1813 			serdes_7322_los_enable(ppd, 0);
1814 	} else if (ppd->cpspec->qdr_reforce &&
1815 		(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1816 		 (ibclt == IB_7322_LT_STATE_CFGENH ||
1817 		ibclt == IB_7322_LT_STATE_CFGIDLE ||
1818 		ibclt == IB_7322_LT_STATE_LINKUP))
1819 		force_h1(ppd);
1820 
1821 	if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1822 	    ppd->link_speed_enabled == QIB_IB_QDR &&
1823 	    (ibclt == IB_7322_LT_STATE_CFGTEST ||
1824 	     ibclt == IB_7322_LT_STATE_CFGENH ||
1825 	     (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1826 	      ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1827 		adj_tx_serdes(ppd);
1828 
1829 	if (ibclt != IB_7322_LT_STATE_LINKUP) {
1830 		u8 ltstate = qib_7322_phys_portstate(ibcst);
1831 		u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1832 					  LinkTrainingState);
1833 		if (!ppd->dd->cspec->r1 &&
1834 		    pibclt == IB_7322_LT_STATE_LINKUP &&
1835 		    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1836 		    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1837 		    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1838 		    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1839 			/* If the link went down (but no into recovery,
1840 			 * turn LOS back on */
1841 			serdes_7322_los_enable(ppd, 1);
1842 		if (!ppd->cpspec->qdr_dfe_on &&
1843 		    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1844 			ppd->cpspec->qdr_dfe_on = 1;
1845 			ppd->cpspec->qdr_dfe_time = 0;
1846 			/* On link down, reenable QDR adaptation */
1847 			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1848 					    ppd->dd->cspec->r1 ?
1849 					    QDR_STATIC_ADAPT_DOWN_R1 :
1850 					    QDR_STATIC_ADAPT_DOWN);
1851 			pr_info(
1852 				"IB%u:%u re-enabled QDR adaptation ibclt %x\n",
1853 				ppd->dd->unit, ppd->port, ibclt);
1854 		}
1855 	}
1856 }
1857 
1858 static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1859 
1860 /*
1861  * This is per-pport error handling.
1862  * will likely get it's own MSIx interrupt (one for each port,
1863  * although just a single handler).
1864  */
1865 static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1866 {
1867 	char *msg;
1868 	u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1869 	struct qib_devdata *dd = ppd->dd;
1870 
1871 	/* do this as soon as possible */
1872 	fmask = qib_read_kreg64(dd, kr_act_fmask);
1873 	if (!fmask)
1874 		check_7322_rxe_status(ppd);
1875 
1876 	errs = qib_read_kreg_port(ppd, krp_errstatus);
1877 	if (!errs)
1878 		qib_devinfo(dd->pcidev,
1879 			 "Port%d error interrupt, but no error bits set!\n",
1880 			 ppd->port);
1881 	if (!fmask)
1882 		errs &= ~QIB_E_P_IBSTATUSCHANGED;
1883 	if (!errs)
1884 		goto done;
1885 
1886 	msg = ppd->cpspec->epmsgbuf;
1887 	*msg = '\0';
1888 
1889 	if (errs & ~QIB_E_P_BITSEXTANT) {
1890 		err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1891 			   errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1892 		if (!*msg)
1893 			snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
1894 				 "no others");
1895 		qib_dev_porterr(dd, ppd->port,
1896 			"error interrupt with unknown errors 0x%016Lx set (and %s)\n",
1897 			(errs & ~QIB_E_P_BITSEXTANT), msg);
1898 		*msg = '\0';
1899 	}
1900 
1901 	if (errs & QIB_E_P_SHDR) {
1902 		u64 symptom;
1903 
1904 		/* determine cause, then write to clear */
1905 		symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1906 		qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1907 		err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
1908 			   hdrchk_msgs);
1909 		*msg = '\0';
1910 		/* senderrbuf cleared in SPKTERRS below */
1911 	}
1912 
1913 	if (errs & QIB_E_P_SPKTERRS) {
1914 		if ((errs & QIB_E_P_LINK_PKTERRS) &&
1915 		    !(ppd->lflags & QIBL_LINKACTIVE)) {
1916 			/*
1917 			 * This can happen when trying to bring the link
1918 			 * up, but the IB link changes state at the "wrong"
1919 			 * time. The IB logic then complains that the packet
1920 			 * isn't valid.  We don't want to confuse people, so
1921 			 * we just don't print them, except at debug
1922 			 */
1923 			err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1924 				   (errs & QIB_E_P_LINK_PKTERRS),
1925 				   qib_7322p_error_msgs);
1926 			*msg = '\0';
1927 			ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1928 		}
1929 		qib_disarm_7322_senderrbufs(ppd);
1930 	} else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1931 		   !(ppd->lflags & QIBL_LINKACTIVE)) {
1932 		/*
1933 		 * This can happen when SMA is trying to bring the link
1934 		 * up, but the IB link changes state at the "wrong" time.
1935 		 * The IB logic then complains that the packet isn't
1936 		 * valid.  We don't want to confuse people, so we just
1937 		 * don't print them, except at debug
1938 		 */
1939 		err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
1940 			   qib_7322p_error_msgs);
1941 		ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1942 		*msg = '\0';
1943 	}
1944 
1945 	qib_write_kreg_port(ppd, krp_errclear, errs);
1946 
1947 	errs &= ~ignore_this_time;
1948 	if (!errs)
1949 		goto done;
1950 
1951 	if (errs & QIB_E_P_RPKTERRS)
1952 		qib_stats.sps_rcverrs++;
1953 	if (errs & QIB_E_P_SPKTERRS)
1954 		qib_stats.sps_txerrs++;
1955 
1956 	iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1957 
1958 	if (errs & QIB_E_P_SDMAERRS)
1959 		sdma_7322_p_errors(ppd, errs);
1960 
1961 	if (errs & QIB_E_P_IBSTATUSCHANGED) {
1962 		u64 ibcs;
1963 		u8 ltstate;
1964 
1965 		ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1966 		ltstate = qib_7322_phys_portstate(ibcs);
1967 
1968 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1969 			handle_serdes_issues(ppd, ibcs);
1970 		if (!(ppd->cpspec->ibcctrl_a &
1971 		      SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1972 			/*
1973 			 * We got our interrupt, so init code should be
1974 			 * happy and not try alternatives. Now squelch
1975 			 * other "chatter" from link-negotiation (pre Init)
1976 			 */
1977 			ppd->cpspec->ibcctrl_a |=
1978 				SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1979 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
1980 					    ppd->cpspec->ibcctrl_a);
1981 		}
1982 
1983 		/* Update our picture of width and speed from chip */
1984 		ppd->link_width_active =
1985 			(ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1986 			    IB_WIDTH_4X : IB_WIDTH_1X;
1987 		ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1988 			LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1989 			  SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1990 				   QIB_IB_DDR : QIB_IB_SDR;
1991 
1992 		if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1993 		    IB_PHYSPORTSTATE_DISABLED)
1994 			qib_set_ib_7322_lstate(ppd, 0,
1995 			       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1996 		else
1997 			/*
1998 			 * Since going into a recovery state causes the link
1999 			 * state to go down and since recovery is transitory,
2000 			 * it is better if we "miss" ever seeing the link
2001 			 * training state go into recovery (i.e., ignore this
2002 			 * transition for link state special handling purposes)
2003 			 * without updating lastibcstat.
2004 			 */
2005 			if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
2006 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
2007 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
2008 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
2009 				qib_handle_e_ibstatuschanged(ppd, ibcs);
2010 	}
2011 	if (*msg && iserr)
2012 		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
2013 
2014 	if (ppd->state_wanted & ppd->lflags)
2015 		wake_up_interruptible(&ppd->state_wait);
2016 done:
2017 	return;
2018 }
2019 
2020 /* enable/disable chip from delivering interrupts */
2021 static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
2022 {
2023 	if (enable) {
2024 		if (dd->flags & QIB_BADINTR)
2025 			return;
2026 		qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
2027 		/* cause any pending enabled interrupts to be re-delivered */
2028 		qib_write_kreg(dd, kr_intclear, 0ULL);
2029 		if (dd->cspec->num_msix_entries) {
2030 			/* and same for MSIx */
2031 			u64 val = qib_read_kreg64(dd, kr_intgranted);
2032 			if (val)
2033 				qib_write_kreg(dd, kr_intgranted, val);
2034 		}
2035 	} else
2036 		qib_write_kreg(dd, kr_intmask, 0ULL);
2037 }
2038 
2039 /*
2040  * Try to cleanup as much as possible for anything that might have gone
2041  * wrong while in freeze mode, such as pio buffers being written by user
2042  * processes (causing armlaunch), send errors due to going into freeze mode,
2043  * etc., and try to avoid causing extra interrupts while doing so.
2044  * Forcibly update the in-memory pioavail register copies after cleanup
2045  * because the chip won't do it while in freeze mode (the register values
2046  * themselves are kept correct).
2047  * Make sure that we don't lose any important interrupts by using the chip
2048  * feature that says that writing 0 to a bit in *clear that is set in
2049  * *status will cause an interrupt to be generated again (if allowed by
2050  * the *mask value).
2051  * This is in chip-specific code because of all of the register accesses,
2052  * even though the details are similar on most chips.
2053  */
2054 static void qib_7322_clear_freeze(struct qib_devdata *dd)
2055 {
2056 	int pidx;
2057 
2058 	/* disable error interrupts, to avoid confusion */
2059 	qib_write_kreg(dd, kr_errmask, 0ULL);
2060 
2061 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
2062 		if (dd->pport[pidx].link_speed_supported)
2063 			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2064 					    0ULL);
2065 
2066 	/* also disable interrupts; errormask is sometimes overwriten */
2067 	qib_7322_set_intr_state(dd, 0);
2068 
2069 	/* clear the freeze, and be sure chip saw it */
2070 	qib_write_kreg(dd, kr_control, dd->control);
2071 	qib_read_kreg32(dd, kr_scratch);
2072 
2073 	/*
2074 	 * Force new interrupt if any hwerr, error or interrupt bits are
2075 	 * still set, and clear "safe" send packet errors related to freeze
2076 	 * and cancelling sends.  Re-enable error interrupts before possible
2077 	 * force of re-interrupt on pending interrupts.
2078 	 */
2079 	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2080 	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
2081 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2082 	/* We need to purge per-port errs and reset mask, too */
2083 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2084 		if (!dd->pport[pidx].link_speed_supported)
2085 			continue;
2086 		qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
2087 		qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
2088 	}
2089 	qib_7322_set_intr_state(dd, 1);
2090 }
2091 
2092 /* no error handling to speak of */
2093 /**
2094  * qib_7322_handle_hwerrors - display hardware errors.
2095  * @dd: the qlogic_ib device
2096  * @msg: the output buffer
2097  * @msgl: the size of the output buffer
2098  *
2099  * Use same msg buffer as regular errors to avoid excessive stack
2100  * use.  Most hardware errors are catastrophic, but for right now,
2101  * we'll print them and continue.  We reuse the same message buffer as
2102  * qib_handle_errors() to avoid excessive stack usage.
2103  */
2104 static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2105 				     size_t msgl)
2106 {
2107 	u64 hwerrs;
2108 	u32 ctrl;
2109 	int isfatal = 0;
2110 
2111 	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2112 	if (!hwerrs)
2113 		goto bail;
2114 	if (hwerrs == ~0ULL) {
2115 		qib_dev_err(dd,
2116 			"Read of hardware error status failed (all bits set); ignoring\n");
2117 		goto bail;
2118 	}
2119 	qib_stats.sps_hwerrs++;
2120 
2121 	/* Always clear the error status register, except BIST fail */
2122 	qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2123 		       ~HWE_MASK(PowerOnBISTFailed));
2124 
2125 	hwerrs &= dd->cspec->hwerrmask;
2126 
2127 	/* no EEPROM logging, yet */
2128 
2129 	if (hwerrs)
2130 		qib_devinfo(dd->pcidev,
2131 			"Hardware error: hwerr=0x%llx (cleared)\n",
2132 			(unsigned long long) hwerrs);
2133 
2134 	ctrl = qib_read_kreg32(dd, kr_control);
2135 	if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2136 		/*
2137 		 * No recovery yet...
2138 		 */
2139 		if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2140 		    dd->cspec->stay_in_freeze) {
2141 			/*
2142 			 * If any set that we aren't ignoring only make the
2143 			 * complaint once, in case it's stuck or recurring,
2144 			 * and we get here multiple times
2145 			 * Force link down, so switch knows, and
2146 			 * LEDs are turned off.
2147 			 */
2148 			if (dd->flags & QIB_INITTED)
2149 				isfatal = 1;
2150 		} else
2151 			qib_7322_clear_freeze(dd);
2152 	}
2153 
2154 	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2155 		isfatal = 1;
2156 		strlcpy(msg,
2157 			"[Memory BIST test failed, InfiniPath hardware unusable]",
2158 			msgl);
2159 		/* ignore from now on, so disable until driver reloaded */
2160 		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2161 		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2162 	}
2163 
2164 	err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2165 
2166 	/* Ignore esoteric PLL failures et al. */
2167 
2168 	qib_dev_err(dd, "%s hardware error\n", msg);
2169 
2170 	if (hwerrs &
2171 		   (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
2172 		    SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
2173 		int pidx = 0;
2174 		int err;
2175 		unsigned long flags;
2176 		struct qib_pportdata *ppd = dd->pport;
2177 		for (; pidx < dd->num_pports; ++pidx, ppd++) {
2178 			err = 0;
2179 			if (pidx == 0 && (hwerrs &
2180 				SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
2181 				err++;
2182 			if (pidx == 1 && (hwerrs &
2183 				SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
2184 				err++;
2185 			if (err) {
2186 				spin_lock_irqsave(&ppd->sdma_lock, flags);
2187 				dump_sdma_7322_state(ppd);
2188 				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
2189 			}
2190 		}
2191 	}
2192 
2193 	if (isfatal && !dd->diag_client) {
2194 		qib_dev_err(dd,
2195 			"Fatal Hardware Error, no longer usable, SN %.16s\n",
2196 			dd->serial);
2197 		/*
2198 		 * for /sys status file and user programs to print; if no
2199 		 * trailing brace is copied, we'll know it was truncated.
2200 		 */
2201 		if (dd->freezemsg)
2202 			snprintf(dd->freezemsg, dd->freezelen,
2203 				 "{%s}", msg);
2204 		qib_disable_after_error(dd);
2205 	}
2206 bail:;
2207 }
2208 
2209 /**
2210  * qib_7322_init_hwerrors - enable hardware errors
2211  * @dd: the qlogic_ib device
2212  *
2213  * now that we have finished initializing everything that might reasonably
2214  * cause a hardware error, and cleared those errors bits as they occur,
2215  * we can enable hardware errors in the mask (potentially enabling
2216  * freeze mode), and enable hardware errors as errors (along with
2217  * everything else) in errormask
2218  */
2219 static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2220 {
2221 	int pidx;
2222 	u64 extsval;
2223 
2224 	extsval = qib_read_kreg64(dd, kr_extstatus);
2225 	if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2226 			 QIB_EXTS_MEMBIST_ENDTEST)))
2227 		qib_dev_err(dd, "MemBIST did not complete!\n");
2228 
2229 	/* never clear BIST failure, so reported on each driver load */
2230 	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2231 	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2232 
2233 	/* clear all */
2234 	qib_write_kreg(dd, kr_errclear, ~0ULL);
2235 	/* enable errors that are masked, at least this first time. */
2236 	qib_write_kreg(dd, kr_errmask, ~0ULL);
2237 	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2238 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
2239 		if (dd->pport[pidx].link_speed_supported)
2240 			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2241 					    ~0ULL);
2242 }
2243 
2244 /*
2245  * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
2246  * on chips that are count-based, rather than trigger-based.  There is no
2247  * reference counting, but that's also fine, given the intended use.
2248  * Only chip-specific because it's all register accesses
2249  */
2250 static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2251 {
2252 	if (enable) {
2253 		qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2254 		dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2255 	} else
2256 		dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2257 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2258 }
2259 
2260 /*
2261  * Formerly took parameter <which> in pre-shifted,
2262  * pre-merged form with LinkCmd and LinkInitCmd
2263  * together, and assuming the zero was NOP.
2264  */
2265 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2266 				   u16 linitcmd)
2267 {
2268 	u64 mod_wd;
2269 	struct qib_devdata *dd = ppd->dd;
2270 	unsigned long flags;
2271 
2272 	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2273 		/*
2274 		 * If we are told to disable, note that so link-recovery
2275 		 * code does not attempt to bring us back up.
2276 		 * Also reset everything that we can, so we start
2277 		 * completely clean when re-enabled (before we
2278 		 * actually issue the disable to the IBC)
2279 		 */
2280 		qib_7322_mini_pcs_reset(ppd);
2281 		spin_lock_irqsave(&ppd->lflags_lock, flags);
2282 		ppd->lflags |= QIBL_IB_LINK_DISABLED;
2283 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2284 	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2285 		/*
2286 		 * Any other linkinitcmd will lead to LINKDOWN and then
2287 		 * to INIT (if all is well), so clear flag to let
2288 		 * link-recovery code attempt to bring us back up.
2289 		 */
2290 		spin_lock_irqsave(&ppd->lflags_lock, flags);
2291 		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2292 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2293 		/*
2294 		 * Clear status change interrupt reduction so the
2295 		 * new state is seen.
2296 		 */
2297 		ppd->cpspec->ibcctrl_a &=
2298 			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2299 	}
2300 
2301 	mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2302 		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2303 
2304 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2305 			    mod_wd);
2306 	/* write to chip to prevent back-to-back writes of ibc reg */
2307 	qib_write_kreg(dd, kr_scratch, 0);
2308 
2309 }
2310 
2311 /*
2312  * The total RCV buffer memory is 64KB, used for both ports, and is
2313  * in units of 64 bytes (same as IB flow control credit unit).
2314  * The consumedVL unit in the same registers are in 32 byte units!
2315  * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2316  * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2317  * in krp_rxcreditvl15, rather than 10.
2318  */
2319 #define RCV_BUF_UNITSZ 64
2320 #define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
2321 
2322 static void set_vls(struct qib_pportdata *ppd)
2323 {
2324 	int i, numvls, totcred, cred_vl, vl0extra;
2325 	struct qib_devdata *dd = ppd->dd;
2326 	u64 val;
2327 
2328 	numvls = qib_num_vls(ppd->vls_operational);
2329 
2330 	/*
2331 	 * Set up per-VL credits. Below is kluge based on these assumptions:
2332 	 * 1) port is disabled at the time early_init is called.
2333 	 * 2) give VL15 17 credits, for two max-plausible packets.
2334 	 * 3) Give VL0-N the rest, with any rounding excess used for VL0
2335 	 */
2336 	/* 2 VL15 packets @ 288 bytes each (including IB headers) */
2337 	totcred = NUM_RCV_BUF_UNITS(dd);
2338 	cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2339 	totcred -= cred_vl;
2340 	qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2341 	cred_vl = totcred / numvls;
2342 	vl0extra = totcred - cred_vl * numvls;
2343 	qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2344 	for (i = 1; i < numvls; i++)
2345 		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2346 	for (; i < 8; i++) /* no buffer space for other VLs */
2347 		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2348 
2349 	/* Notify IBC that credits need to be recalculated */
2350 	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2351 	val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2352 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2353 	qib_write_kreg(dd, kr_scratch, 0ULL);
2354 	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2355 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2356 
2357 	for (i = 0; i < numvls; i++)
2358 		val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2359 	val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2360 
2361 	/* Change the number of operational VLs */
2362 	ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2363 				~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2364 		((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2365 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2366 	qib_write_kreg(dd, kr_scratch, 0ULL);
2367 }
2368 
2369 /*
2370  * The code that deals with actual SerDes is in serdes_7322_init().
2371  * Compared to the code for iba7220, it is minimal.
2372  */
2373 static int serdes_7322_init(struct qib_pportdata *ppd);
2374 
2375 /**
2376  * qib_7322_bringup_serdes - bring up the serdes
2377  * @ppd: physical port on the qlogic_ib device
2378  */
2379 static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2380 {
2381 	struct qib_devdata *dd = ppd->dd;
2382 	u64 val, guid, ibc;
2383 	unsigned long flags;
2384 	int ret = 0;
2385 
2386 	/*
2387 	 * SerDes model not in Pd, but still need to
2388 	 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2389 	 * eventually.
2390 	 */
2391 	/* Put IBC in reset, sends disabled (should be in reset already) */
2392 	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2393 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2394 	qib_write_kreg(dd, kr_scratch, 0ULL);
2395 
2396 	if (qib_compat_ddr_negotiate) {
2397 		ppd->cpspec->ibdeltainprog = 1;
2398 		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2399 						crp_ibsymbolerr);
2400 		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2401 						crp_iblinkerrrecov);
2402 	}
2403 
2404 	/* flowcontrolwatermark is in units of KBytes */
2405 	ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2406 	/*
2407 	 * Flow control is sent this often, even if no changes in
2408 	 * buffer space occur.  Units are 128ns for this chip.
2409 	 * Set to 3usec.
2410 	 */
2411 	ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
2412 	/* max error tolerance */
2413 	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2414 	/* IB credit flow control. */
2415 	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2416 	/*
2417 	 * set initial max size pkt IBC will send, including ICRC; it's the
2418 	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2419 	 */
2420 	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2421 		SYM_LSB(IBCCtrlA_0, MaxPktLen);
2422 	ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2423 
2424 	/*
2425 	 * Reset the PCS interface to the serdes (and also ibc, which is still
2426 	 * in reset from above).  Writes new value of ibcctrl_a as last step.
2427 	 */
2428 	qib_7322_mini_pcs_reset(ppd);
2429 
2430 	if (!ppd->cpspec->ibcctrl_b) {
2431 		unsigned lse = ppd->link_speed_enabled;
2432 
2433 		/*
2434 		 * Not on re-init after reset, establish shadow
2435 		 * and force initial config.
2436 		 */
2437 		ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2438 							     krp_ibcctrl_b);
2439 		ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2440 				IBA7322_IBC_SPEED_DDR |
2441 				IBA7322_IBC_SPEED_SDR |
2442 				IBA7322_IBC_WIDTH_AUTONEG |
2443 				SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2444 		if (lse & (lse - 1)) /* Muliple speeds enabled */
2445 			ppd->cpspec->ibcctrl_b |=
2446 				(lse << IBA7322_IBC_SPEED_LSB) |
2447 				IBA7322_IBC_IBTA_1_2_MASK |
2448 				IBA7322_IBC_MAX_SPEED_MASK;
2449 		else
2450 			ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2451 				IBA7322_IBC_SPEED_QDR |
2452 				 IBA7322_IBC_IBTA_1_2_MASK :
2453 				(lse == QIB_IB_DDR) ?
2454 					IBA7322_IBC_SPEED_DDR :
2455 					IBA7322_IBC_SPEED_SDR;
2456 		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2457 		    (IB_WIDTH_1X | IB_WIDTH_4X))
2458 			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2459 		else
2460 			ppd->cpspec->ibcctrl_b |=
2461 				ppd->link_width_enabled == IB_WIDTH_4X ?
2462 				IBA7322_IBC_WIDTH_4X_ONLY :
2463 				IBA7322_IBC_WIDTH_1X_ONLY;
2464 
2465 		/* always enable these on driver reload, not sticky */
2466 		ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2467 			IBA7322_IBC_HRTBT_MASK);
2468 	}
2469 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2470 
2471 	/* setup so we have more time at CFGTEST to change H1 */
2472 	val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2473 	val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2474 	val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2475 	qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2476 
2477 	serdes_7322_init(ppd);
2478 
2479 	guid = be64_to_cpu(ppd->guid);
2480 	if (!guid) {
2481 		if (dd->base_guid)
2482 			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2483 		ppd->guid = cpu_to_be64(guid);
2484 	}
2485 
2486 	qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2487 	/* write to chip to prevent back-to-back writes of ibc reg */
2488 	qib_write_kreg(dd, kr_scratch, 0);
2489 
2490 	/* Enable port */
2491 	ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2492 	set_vls(ppd);
2493 
2494 	/* initially come up DISABLED, without sending anything. */
2495 	val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2496 					QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2497 	qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2498 	qib_write_kreg(dd, kr_scratch, 0ULL);
2499 	/* clear the linkinit cmds */
2500 	ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2501 
2502 	/* be paranoid against later code motion, etc. */
2503 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2504 	ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2505 	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2506 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2507 
2508 	/* Also enable IBSTATUSCHG interrupt.  */
2509 	val = qib_read_kreg_port(ppd, krp_errmask);
2510 	qib_write_kreg_port(ppd, krp_errmask,
2511 		val | ERR_MASK_N(IBStatusChanged));
2512 
2513 	/* Always zero until we start messing with SerDes for real */
2514 	return ret;
2515 }
2516 
2517 /**
2518  * qib_7322_quiet_serdes - set serdes to txidle
2519  * @dd: the qlogic_ib device
2520  * Called when driver is being unloaded
2521  */
2522 static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2523 {
2524 	u64 val;
2525 	unsigned long flags;
2526 
2527 	qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2528 
2529 	spin_lock_irqsave(&ppd->lflags_lock, flags);
2530 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2531 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2532 	wake_up(&ppd->cpspec->autoneg_wait);
2533 	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2534 	if (ppd->dd->cspec->r1)
2535 		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2536 
2537 	ppd->cpspec->chase_end = 0;
2538 	if (ppd->cpspec->chase_timer.data) /* if initted */
2539 		del_timer_sync(&ppd->cpspec->chase_timer);
2540 
2541 	/*
2542 	 * Despite the name, actually disables IBC as well. Do it when
2543 	 * we are as sure as possible that no more packets can be
2544 	 * received, following the down and the PCS reset.
2545 	 * The actual disabling happens in qib_7322_mini_pci_reset(),
2546 	 * along with the PCS being reset.
2547 	 */
2548 	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2549 	qib_7322_mini_pcs_reset(ppd);
2550 
2551 	/*
2552 	 * Update the adjusted counters so the adjustment persists
2553 	 * across driver reload.
2554 	 */
2555 	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2556 	    ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2557 		struct qib_devdata *dd = ppd->dd;
2558 		u64 diagc;
2559 
2560 		/* enable counter writes */
2561 		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2562 		qib_write_kreg(dd, kr_hwdiagctrl,
2563 			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2564 
2565 		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2566 			val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
2567 			if (ppd->cpspec->ibdeltainprog)
2568 				val -= val - ppd->cpspec->ibsymsnap;
2569 			val -= ppd->cpspec->ibsymdelta;
2570 			write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2571 		}
2572 		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2573 			val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2574 			if (ppd->cpspec->ibdeltainprog)
2575 				val -= val - ppd->cpspec->iblnkerrsnap;
2576 			val -= ppd->cpspec->iblnkerrdelta;
2577 			write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2578 		}
2579 		if (ppd->cpspec->iblnkdowndelta) {
2580 			val = read_7322_creg32_port(ppd, crp_iblinkdown);
2581 			val += ppd->cpspec->iblnkdowndelta;
2582 			write_7322_creg_port(ppd, crp_iblinkdown, val);
2583 		}
2584 		/*
2585 		 * No need to save ibmalfdelta since IB perfcounters
2586 		 * are cleared on driver reload.
2587 		 */
2588 
2589 		/* and disable counter writes */
2590 		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2591 	}
2592 }
2593 
2594 /**
2595  * qib_setup_7322_setextled - set the state of the two external LEDs
2596  * @ppd: physical port on the qlogic_ib device
2597  * @on: whether the link is up or not
2598  *
2599  * The exact combo of LEDs if on is true is determined by looking
2600  * at the ibcstatus.
2601  *
2602  * These LEDs indicate the physical and logical state of IB link.
2603  * For this chip (at least with recommended board pinouts), LED1
2604  * is Yellow (logical state) and LED2 is Green (physical state),
2605  *
2606  * Note:  We try to match the Mellanox HCA LED behavior as best
2607  * we can.  Green indicates physical link state is OK (something is
2608  * plugged in, and we can train).
2609  * Amber indicates the link is logically up (ACTIVE).
2610  * Mellanox further blinks the amber LED to indicate data packet
2611  * activity, but we have no hardware support for that, so it would
2612  * require waking up every 10-20 msecs and checking the counters
2613  * on the chip, and then turning the LED off if appropriate.  That's
2614  * visible overhead, so not something we will do.
2615  */
2616 static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2617 {
2618 	struct qib_devdata *dd = ppd->dd;
2619 	u64 extctl, ledblink = 0, val;
2620 	unsigned long flags;
2621 	int yel, grn;
2622 
2623 	/*
2624 	 * The diags use the LED to indicate diag info, so we leave
2625 	 * the external LED alone when the diags are running.
2626 	 */
2627 	if (dd->diag_client)
2628 		return;
2629 
2630 	/* Allow override of LED display for, e.g. Locating system in rack */
2631 	if (ppd->led_override) {
2632 		grn = (ppd->led_override & QIB_LED_PHYS);
2633 		yel = (ppd->led_override & QIB_LED_LOG);
2634 	} else if (on) {
2635 		val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2636 		grn = qib_7322_phys_portstate(val) ==
2637 			IB_PHYSPORTSTATE_LINKUP;
2638 		yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2639 	} else {
2640 		grn = 0;
2641 		yel = 0;
2642 	}
2643 
2644 	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2645 	extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2646 		~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2647 	if (grn) {
2648 		extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2649 		/*
2650 		 * Counts are in chip clock (4ns) periods.
2651 		 * This is 1/16 sec (66.6ms) on,
2652 		 * 3/16 sec (187.5 ms) off, with packets rcvd.
2653 		 */
2654 		ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2655 			((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
2656 	}
2657 	if (yel)
2658 		extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2659 	dd->cspec->extctrl = extctl;
2660 	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2661 	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2662 
2663 	if (ledblink) /* blink the LED on packet receive */
2664 		qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2665 }
2666 
2667 #ifdef CONFIG_INFINIBAND_QIB_DCA
2668 
2669 static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
2670 {
2671 	switch (event) {
2672 	case DCA_PROVIDER_ADD:
2673 		if (dd->flags & QIB_DCA_ENABLED)
2674 			break;
2675 		if (!dca_add_requester(&dd->pcidev->dev)) {
2676 			qib_devinfo(dd->pcidev, "DCA enabled\n");
2677 			dd->flags |= QIB_DCA_ENABLED;
2678 			qib_setup_dca(dd);
2679 		}
2680 		break;
2681 	case DCA_PROVIDER_REMOVE:
2682 		if (dd->flags & QIB_DCA_ENABLED) {
2683 			dca_remove_requester(&dd->pcidev->dev);
2684 			dd->flags &= ~QIB_DCA_ENABLED;
2685 			dd->cspec->dca_ctrl = 0;
2686 			qib_write_kreg(dd, KREG_IDX(DCACtrlA),
2687 				dd->cspec->dca_ctrl);
2688 		}
2689 		break;
2690 	}
2691 	return 0;
2692 }
2693 
2694 static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
2695 {
2696 	struct qib_devdata *dd = rcd->dd;
2697 	struct qib_chip_specific *cspec = dd->cspec;
2698 
2699 	if (!(dd->flags & QIB_DCA_ENABLED))
2700 		return;
2701 	if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
2702 		const struct dca_reg_map *rmp;
2703 
2704 		cspec->rhdr_cpu[rcd->ctxt] = cpu;
2705 		rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
2706 		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
2707 		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
2708 			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
2709 		qib_devinfo(dd->pcidev,
2710 			"Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
2711 			(long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2712 		qib_write_kreg(dd, rmp->regno,
2713 			       cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2714 		cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
2715 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2716 	}
2717 }
2718 
2719 static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
2720 {
2721 	struct qib_devdata *dd = ppd->dd;
2722 	struct qib_chip_specific *cspec = dd->cspec;
2723 	unsigned pidx = ppd->port - 1;
2724 
2725 	if (!(dd->flags & QIB_DCA_ENABLED))
2726 		return;
2727 	if (cspec->sdma_cpu[pidx] != cpu) {
2728 		cspec->sdma_cpu[pidx] = cpu;
2729 		cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2730 			SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
2731 			SYM_MASK(DCACtrlF, SendDma0DCAOPH));
2732 		cspec->dca_rcvhdr_ctrl[4] |=
2733 			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
2734 				(ppd->hw_pidx ?
2735 					SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
2736 					SYM_LSB(DCACtrlF, SendDma0DCAOPH));
2737 		qib_devinfo(dd->pcidev,
2738 			"sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
2739 			(long long) cspec->dca_rcvhdr_ctrl[4]);
2740 		qib_write_kreg(dd, KREG_IDX(DCACtrlF),
2741 			       cspec->dca_rcvhdr_ctrl[4]);
2742 		cspec->dca_ctrl |= ppd->hw_pidx ?
2743 			SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
2744 			SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
2745 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2746 	}
2747 }
2748 
2749 static void qib_setup_dca(struct qib_devdata *dd)
2750 {
2751 	struct qib_chip_specific *cspec = dd->cspec;
2752 	int i;
2753 
2754 	for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
2755 		cspec->rhdr_cpu[i] = -1;
2756 	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2757 		cspec->sdma_cpu[i] = -1;
2758 	cspec->dca_rcvhdr_ctrl[0] =
2759 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
2760 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
2761 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
2762 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
2763 	cspec->dca_rcvhdr_ctrl[1] =
2764 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
2765 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
2766 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
2767 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
2768 	cspec->dca_rcvhdr_ctrl[2] =
2769 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
2770 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
2771 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
2772 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
2773 	cspec->dca_rcvhdr_ctrl[3] =
2774 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
2775 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
2776 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
2777 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
2778 	cspec->dca_rcvhdr_ctrl[4] =
2779 		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
2780 		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
2781 	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2782 		qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
2783 			       cspec->dca_rcvhdr_ctrl[i]);
2784 	for (i = 0; i < cspec->num_msix_entries; i++)
2785 		setup_dca_notifier(dd, &cspec->msix_entries[i]);
2786 }
2787 
2788 static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
2789 			     const cpumask_t *mask)
2790 {
2791 	struct qib_irq_notify *n =
2792 		container_of(notify, struct qib_irq_notify, notify);
2793 	int cpu = cpumask_first(mask);
2794 
2795 	if (n->rcv) {
2796 		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2797 		qib_update_rhdrq_dca(rcd, cpu);
2798 	} else {
2799 		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2800 		qib_update_sdma_dca(ppd, cpu);
2801 	}
2802 }
2803 
2804 static void qib_irq_notifier_release(struct kref *ref)
2805 {
2806 	struct qib_irq_notify *n =
2807 		container_of(ref, struct qib_irq_notify, notify.kref);
2808 	struct qib_devdata *dd;
2809 
2810 	if (n->rcv) {
2811 		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2812 		dd = rcd->dd;
2813 	} else {
2814 		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2815 		dd = ppd->dd;
2816 	}
2817 	qib_devinfo(dd->pcidev,
2818 		"release on HCA notify 0x%p n 0x%p\n", ref, n);
2819 	kfree(n);
2820 }
2821 #endif
2822 
2823 /*
2824  * Disable MSIx interrupt if enabled, call generic MSIx code
2825  * to cleanup, and clear pending MSIx interrupts.
2826  * Used for fallback to INTx, after reset, and when MSIx setup fails.
2827  */
2828 static void qib_7322_nomsix(struct qib_devdata *dd)
2829 {
2830 	u64 intgranted;
2831 	int n;
2832 
2833 	dd->cspec->main_int_mask = ~0ULL;
2834 	n = dd->cspec->num_msix_entries;
2835 	if (n) {
2836 		int i;
2837 
2838 		dd->cspec->num_msix_entries = 0;
2839 		for (i = 0; i < n; i++) {
2840 #ifdef CONFIG_INFINIBAND_QIB_DCA
2841 			reset_dca_notifier(dd, &dd->cspec->msix_entries[i]);
2842 #endif
2843 			irq_set_affinity_hint(
2844 			  dd->cspec->msix_entries[i].msix.vector, NULL);
2845 			free_cpumask_var(dd->cspec->msix_entries[i].mask);
2846 			free_irq(dd->cspec->msix_entries[i].msix.vector,
2847 			   dd->cspec->msix_entries[i].arg);
2848 		}
2849 		qib_nomsix(dd);
2850 	}
2851 	/* make sure no MSIx interrupts are left pending */
2852 	intgranted = qib_read_kreg64(dd, kr_intgranted);
2853 	if (intgranted)
2854 		qib_write_kreg(dd, kr_intgranted, intgranted);
2855 }
2856 
2857 static void qib_7322_free_irq(struct qib_devdata *dd)
2858 {
2859 	if (dd->cspec->irq) {
2860 		free_irq(dd->cspec->irq, dd);
2861 		dd->cspec->irq = 0;
2862 	}
2863 	qib_7322_nomsix(dd);
2864 }
2865 
2866 static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2867 {
2868 	int i;
2869 
2870 #ifdef CONFIG_INFINIBAND_QIB_DCA
2871 	if (dd->flags & QIB_DCA_ENABLED) {
2872 		dca_remove_requester(&dd->pcidev->dev);
2873 		dd->flags &= ~QIB_DCA_ENABLED;
2874 		dd->cspec->dca_ctrl = 0;
2875 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
2876 	}
2877 #endif
2878 
2879 	qib_7322_free_irq(dd);
2880 	kfree(dd->cspec->cntrs);
2881 	kfree(dd->cspec->sendchkenable);
2882 	kfree(dd->cspec->sendgrhchk);
2883 	kfree(dd->cspec->sendibchk);
2884 	kfree(dd->cspec->msix_entries);
2885 	for (i = 0; i < dd->num_pports; i++) {
2886 		unsigned long flags;
2887 		u32 mask = QSFP_GPIO_MOD_PRS_N |
2888 			(QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2889 
2890 		kfree(dd->pport[i].cpspec->portcntrs);
2891 		if (dd->flags & QIB_HAS_QSFP) {
2892 			spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2893 			dd->cspec->gpio_mask &= ~mask;
2894 			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2895 			spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2896 			qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
2897 		}
2898 		if (dd->pport[i].ibport_data.smi_ah)
2899 			ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
2900 	}
2901 }
2902 
2903 /* handle SDMA interrupts */
2904 static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2905 {
2906 	struct qib_pportdata *ppd0 = &dd->pport[0];
2907 	struct qib_pportdata *ppd1 = &dd->pport[1];
2908 	u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2909 		INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2910 	u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2911 		INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2912 
2913 	if (intr0)
2914 		qib_sdma_intr(ppd0);
2915 	if (intr1)
2916 		qib_sdma_intr(ppd1);
2917 
2918 	if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2919 		qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2920 	if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2921 		qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2922 }
2923 
2924 /*
2925  * Set or clear the Send buffer available interrupt enable bit.
2926  */
2927 static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2928 {
2929 	unsigned long flags;
2930 
2931 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
2932 	if (needint)
2933 		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2934 	else
2935 		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2936 	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2937 	qib_write_kreg(dd, kr_scratch, 0ULL);
2938 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2939 }
2940 
2941 /*
2942  * Somehow got an interrupt with reserved bits set in interrupt status.
2943  * Print a message so we know it happened, then clear them.
2944  * keep mainline interrupt handler cache-friendly
2945  */
2946 static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2947 {
2948 	u64 kills;
2949 	char msg[128];
2950 
2951 	kills = istat & ~QIB_I_BITSEXTANT;
2952 	qib_dev_err(dd,
2953 		"Clearing reserved interrupt(s) 0x%016llx: %s\n",
2954 		(unsigned long long) kills, msg);
2955 	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2956 }
2957 
2958 /* keep mainline interrupt handler cache-friendly */
2959 static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2960 {
2961 	u32 gpiostatus;
2962 	int handled = 0;
2963 	int pidx;
2964 
2965 	/*
2966 	 * Boards for this chip currently don't use GPIO interrupts,
2967 	 * so clear by writing GPIOstatus to GPIOclear, and complain
2968 	 * to developer.  To avoid endless repeats, clear
2969 	 * the bits in the mask, since there is some kind of
2970 	 * programming error or chip problem.
2971 	 */
2972 	gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2973 	/*
2974 	 * In theory, writing GPIOstatus to GPIOclear could
2975 	 * have a bad side-effect on some diagnostic that wanted
2976 	 * to poll for a status-change, but the various shadows
2977 	 * make that problematic at best. Diags will just suppress
2978 	 * all GPIO interrupts during such tests.
2979 	 */
2980 	qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2981 	/*
2982 	 * Check for QSFP MOD_PRS changes
2983 	 * only works for single port if IB1 != pidx1
2984 	 */
2985 	for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2986 	     ++pidx) {
2987 		struct qib_pportdata *ppd;
2988 		struct qib_qsfp_data *qd;
2989 		u32 mask;
2990 		if (!dd->pport[pidx].link_speed_supported)
2991 			continue;
2992 		mask = QSFP_GPIO_MOD_PRS_N;
2993 		ppd = dd->pport + pidx;
2994 		mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2995 		if (gpiostatus & dd->cspec->gpio_mask & mask) {
2996 			u64 pins;
2997 			qd = &ppd->cpspec->qsfp_data;
2998 			gpiostatus &= ~mask;
2999 			pins = qib_read_kreg64(dd, kr_extstatus);
3000 			pins >>= SYM_LSB(EXTStatus, GPIOIn);
3001 			if (!(pins & mask)) {
3002 				++handled;
3003 				qd->t_insert = jiffies;
3004 				queue_work(ib_wq, &qd->work);
3005 			}
3006 		}
3007 	}
3008 
3009 	if (gpiostatus && !handled) {
3010 		const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
3011 		u32 gpio_irq = mask & gpiostatus;
3012 
3013 		/*
3014 		 * Clear any troublemakers, and update chip from shadow
3015 		 */
3016 		dd->cspec->gpio_mask &= ~gpio_irq;
3017 		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
3018 	}
3019 }
3020 
3021 /*
3022  * Handle errors and unusual events first, separate function
3023  * to improve cache hits for fast path interrupt handling.
3024  */
3025 static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
3026 {
3027 	if (istat & ~QIB_I_BITSEXTANT)
3028 		unknown_7322_ibits(dd, istat);
3029 	if (istat & QIB_I_GPIO)
3030 		unknown_7322_gpio_intr(dd);
3031 	if (istat & QIB_I_C_ERROR) {
3032 		qib_write_kreg(dd, kr_errmask, 0ULL);
3033 		tasklet_schedule(&dd->error_tasklet);
3034 	}
3035 	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
3036 		handle_7322_p_errors(dd->rcd[0]->ppd);
3037 	if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
3038 		handle_7322_p_errors(dd->rcd[1]->ppd);
3039 }
3040 
3041 /*
3042  * Dynamically adjust the rcv int timeout for a context based on incoming
3043  * packet rate.
3044  */
3045 static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
3046 {
3047 	struct qib_devdata *dd = rcd->dd;
3048 	u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
3049 
3050 	/*
3051 	 * Dynamically adjust idle timeout on chip
3052 	 * based on number of packets processed.
3053 	 */
3054 	if (npkts < rcv_int_count && timeout > 2)
3055 		timeout >>= 1;
3056 	else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
3057 		timeout = min(timeout << 1, rcv_int_timeout);
3058 	else
3059 		return;
3060 
3061 	dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
3062 	qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
3063 }
3064 
3065 /*
3066  * This is the main interrupt handler.
3067  * It will normally only be used for low frequency interrupts but may
3068  * have to handle all interrupts if INTx is enabled or fewer than normal
3069  * MSIx interrupts were allocated.
3070  * This routine should ignore the interrupt bits for any of the
3071  * dedicated MSIx handlers.
3072  */
3073 static irqreturn_t qib_7322intr(int irq, void *data)
3074 {
3075 	struct qib_devdata *dd = data;
3076 	irqreturn_t ret;
3077 	u64 istat;
3078 	u64 ctxtrbits;
3079 	u64 rmask;
3080 	unsigned i;
3081 	u32 npkts;
3082 
3083 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
3084 		/*
3085 		 * This return value is not great, but we do not want the
3086 		 * interrupt core code to remove our interrupt handler
3087 		 * because we don't appear to be handling an interrupt
3088 		 * during a chip reset.
3089 		 */
3090 		ret = IRQ_HANDLED;
3091 		goto bail;
3092 	}
3093 
3094 	istat = qib_read_kreg64(dd, kr_intstatus);
3095 
3096 	if (unlikely(istat == ~0ULL)) {
3097 		qib_bad_intrstatus(dd);
3098 		qib_dev_err(dd, "Interrupt status all f's, skipping\n");
3099 		/* don't know if it was our interrupt or not */
3100 		ret = IRQ_NONE;
3101 		goto bail;
3102 	}
3103 
3104 	istat &= dd->cspec->main_int_mask;
3105 	if (unlikely(!istat)) {
3106 		/* already handled, or shared and not us */
3107 		ret = IRQ_NONE;
3108 		goto bail;
3109 	}
3110 
3111 	qib_stats.sps_ints++;
3112 	if (dd->int_counter != (u32) -1)
3113 		dd->int_counter++;
3114 
3115 	/* handle "errors" of various kinds first, device ahead of port */
3116 	if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
3117 			      QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
3118 			      INT_MASK_P(Err, 1))))
3119 		unlikely_7322_intr(dd, istat);
3120 
3121 	/*
3122 	 * Clear the interrupt bits we found set, relatively early, so we
3123 	 * "know" know the chip will have seen this by the time we process
3124 	 * the queue, and will re-interrupt if necessary.  The processor
3125 	 * itself won't take the interrupt again until we return.
3126 	 */
3127 	qib_write_kreg(dd, kr_intclear, istat);
3128 
3129 	/*
3130 	 * Handle kernel receive queues before checking for pio buffers
3131 	 * available since receives can overflow; piobuf waiters can afford
3132 	 * a few extra cycles, since they were waiting anyway.
3133 	 */
3134 	ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
3135 	if (ctxtrbits) {
3136 		rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
3137 			(1ULL << QIB_I_RCVURG_LSB);
3138 		for (i = 0; i < dd->first_user_ctxt; i++) {
3139 			if (ctxtrbits & rmask) {
3140 				ctxtrbits &= ~rmask;
3141 				if (dd->rcd[i])
3142 					qib_kreceive(dd->rcd[i], NULL, &npkts);
3143 			}
3144 			rmask <<= 1;
3145 		}
3146 		if (ctxtrbits) {
3147 			ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
3148 				(ctxtrbits >> QIB_I_RCVURG_LSB);
3149 			qib_handle_urcv(dd, ctxtrbits);
3150 		}
3151 	}
3152 
3153 	if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
3154 		sdma_7322_intr(dd, istat);
3155 
3156 	if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
3157 		qib_ib_piobufavail(dd);
3158 
3159 	ret = IRQ_HANDLED;
3160 bail:
3161 	return ret;
3162 }
3163 
3164 /*
3165  * Dedicated receive packet available interrupt handler.
3166  */
3167 static irqreturn_t qib_7322pintr(int irq, void *data)
3168 {
3169 	struct qib_ctxtdata *rcd = data;
3170 	struct qib_devdata *dd = rcd->dd;
3171 	u32 npkts;
3172 
3173 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3174 		/*
3175 		 * This return value is not great, but we do not want the
3176 		 * interrupt core code to remove our interrupt handler
3177 		 * because we don't appear to be handling an interrupt
3178 		 * during a chip reset.
3179 		 */
3180 		return IRQ_HANDLED;
3181 
3182 	qib_stats.sps_ints++;
3183 	if (dd->int_counter != (u32) -1)
3184 		dd->int_counter++;
3185 
3186 	/* Clear the interrupt bit we expect to be set. */
3187 	qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
3188 		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
3189 
3190 	qib_kreceive(rcd, NULL, &npkts);
3191 
3192 	return IRQ_HANDLED;
3193 }
3194 
3195 /*
3196  * Dedicated Send buffer available interrupt handler.
3197  */
3198 static irqreturn_t qib_7322bufavail(int irq, void *data)
3199 {
3200 	struct qib_devdata *dd = data;
3201 
3202 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3203 		/*
3204 		 * This return value is not great, but we do not want the
3205 		 * interrupt core code to remove our interrupt handler
3206 		 * because we don't appear to be handling an interrupt
3207 		 * during a chip reset.
3208 		 */
3209 		return IRQ_HANDLED;
3210 
3211 	qib_stats.sps_ints++;
3212 	if (dd->int_counter != (u32) -1)
3213 		dd->int_counter++;
3214 
3215 	/* Clear the interrupt bit we expect to be set. */
3216 	qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
3217 
3218 	/* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
3219 	if (dd->flags & QIB_INITTED)
3220 		qib_ib_piobufavail(dd);
3221 	else
3222 		qib_wantpiobuf_7322_intr(dd, 0);
3223 
3224 	return IRQ_HANDLED;
3225 }
3226 
3227 /*
3228  * Dedicated Send DMA interrupt handler.
3229  */
3230 static irqreturn_t sdma_intr(int irq, void *data)
3231 {
3232 	struct qib_pportdata *ppd = data;
3233 	struct qib_devdata *dd = ppd->dd;
3234 
3235 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3236 		/*
3237 		 * This return value is not great, but we do not want the
3238 		 * interrupt core code to remove our interrupt handler
3239 		 * because we don't appear to be handling an interrupt
3240 		 * during a chip reset.
3241 		 */
3242 		return IRQ_HANDLED;
3243 
3244 	qib_stats.sps_ints++;
3245 	if (dd->int_counter != (u32) -1)
3246 		dd->int_counter++;
3247 
3248 	/* Clear the interrupt bit we expect to be set. */
3249 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3250 		       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
3251 	qib_sdma_intr(ppd);
3252 
3253 	return IRQ_HANDLED;
3254 }
3255 
3256 /*
3257  * Dedicated Send DMA idle interrupt handler.
3258  */
3259 static irqreturn_t sdma_idle_intr(int irq, void *data)
3260 {
3261 	struct qib_pportdata *ppd = data;
3262 	struct qib_devdata *dd = ppd->dd;
3263 
3264 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3265 		/*
3266 		 * This return value is not great, but we do not want the
3267 		 * interrupt core code to remove our interrupt handler
3268 		 * because we don't appear to be handling an interrupt
3269 		 * during a chip reset.
3270 		 */
3271 		return IRQ_HANDLED;
3272 
3273 	qib_stats.sps_ints++;
3274 	if (dd->int_counter != (u32) -1)
3275 		dd->int_counter++;
3276 
3277 	/* Clear the interrupt bit we expect to be set. */
3278 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3279 		       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
3280 	qib_sdma_intr(ppd);
3281 
3282 	return IRQ_HANDLED;
3283 }
3284 
3285 /*
3286  * Dedicated Send DMA progress interrupt handler.
3287  */
3288 static irqreturn_t sdma_progress_intr(int irq, void *data)
3289 {
3290 	struct qib_pportdata *ppd = data;
3291 	struct qib_devdata *dd = ppd->dd;
3292 
3293 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3294 		/*
3295 		 * This return value is not great, but we do not want the
3296 		 * interrupt core code to remove our interrupt handler
3297 		 * because we don't appear to be handling an interrupt
3298 		 * during a chip reset.
3299 		 */
3300 		return IRQ_HANDLED;
3301 
3302 	qib_stats.sps_ints++;
3303 	if (dd->int_counter != (u32) -1)
3304 		dd->int_counter++;
3305 
3306 	/* Clear the interrupt bit we expect to be set. */
3307 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3308 		       INT_MASK_P(SDmaProgress, 1) :
3309 		       INT_MASK_P(SDmaProgress, 0));
3310 	qib_sdma_intr(ppd);
3311 
3312 	return IRQ_HANDLED;
3313 }
3314 
3315 /*
3316  * Dedicated Send DMA cleanup interrupt handler.
3317  */
3318 static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3319 {
3320 	struct qib_pportdata *ppd = data;
3321 	struct qib_devdata *dd = ppd->dd;
3322 
3323 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3324 		/*
3325 		 * This return value is not great, but we do not want the
3326 		 * interrupt core code to remove our interrupt handler
3327 		 * because we don't appear to be handling an interrupt
3328 		 * during a chip reset.
3329 		 */
3330 		return IRQ_HANDLED;
3331 
3332 	qib_stats.sps_ints++;
3333 	if (dd->int_counter != (u32) -1)
3334 		dd->int_counter++;
3335 
3336 	/* Clear the interrupt bit we expect to be set. */
3337 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3338 		       INT_MASK_PM(SDmaCleanupDone, 1) :
3339 		       INT_MASK_PM(SDmaCleanupDone, 0));
3340 	qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3341 
3342 	return IRQ_HANDLED;
3343 }
3344 
3345 #ifdef CONFIG_INFINIBAND_QIB_DCA
3346 
3347 static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
3348 {
3349 	if (!m->dca)
3350 		return;
3351 	qib_devinfo(dd->pcidev,
3352 		"Disabling notifier on HCA %d irq %d\n",
3353 		dd->unit,
3354 		m->msix.vector);
3355 	irq_set_affinity_notifier(
3356 		m->msix.vector,
3357 		NULL);
3358 	m->notifier = NULL;
3359 }
3360 
3361 static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
3362 {
3363 	struct qib_irq_notify *n;
3364 
3365 	if (!m->dca)
3366 		return;
3367 	n = kzalloc(sizeof(*n), GFP_KERNEL);
3368 	if (n) {
3369 		int ret;
3370 
3371 		m->notifier = n;
3372 		n->notify.irq = m->msix.vector;
3373 		n->notify.notify = qib_irq_notifier_notify;
3374 		n->notify.release = qib_irq_notifier_release;
3375 		n->arg = m->arg;
3376 		n->rcv = m->rcv;
3377 		qib_devinfo(dd->pcidev,
3378 			"set notifier irq %d rcv %d notify %p\n",
3379 			n->notify.irq, n->rcv, &n->notify);
3380 		ret = irq_set_affinity_notifier(
3381 				n->notify.irq,
3382 				&n->notify);
3383 		if (ret) {
3384 			m->notifier = NULL;
3385 			kfree(n);
3386 		}
3387 	}
3388 }
3389 
3390 #endif
3391 
3392 /*
3393  * Set up our chip-specific interrupt handler.
3394  * The interrupt type has already been setup, so
3395  * we just need to do the registration and error checking.
3396  * If we are using MSIx interrupts, we may fall back to
3397  * INTx later, if the interrupt handler doesn't get called
3398  * within 1/2 second (see verify_interrupt()).
3399  */
3400 static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3401 {
3402 	int ret, i, msixnum;
3403 	u64 redirect[6];
3404 	u64 mask;
3405 	const struct cpumask *local_mask;
3406 	int firstcpu, secondcpu = 0, currrcvcpu = 0;
3407 
3408 	if (!dd->num_pports)
3409 		return;
3410 
3411 	if (clearpend) {
3412 		/*
3413 		 * if not switching interrupt types, be sure interrupts are
3414 		 * disabled, and then clear anything pending at this point,
3415 		 * because we are starting clean.
3416 		 */
3417 		qib_7322_set_intr_state(dd, 0);
3418 
3419 		/* clear the reset error, init error/hwerror mask */
3420 		qib_7322_init_hwerrors(dd);
3421 
3422 		/* clear any interrupt bits that might be set */
3423 		qib_write_kreg(dd, kr_intclear, ~0ULL);
3424 
3425 		/* make sure no pending MSIx intr, and clear diag reg */
3426 		qib_write_kreg(dd, kr_intgranted, ~0ULL);
3427 		qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3428 	}
3429 
3430 	if (!dd->cspec->num_msix_entries) {
3431 		/* Try to get INTx interrupt */
3432 try_intx:
3433 		if (!dd->pcidev->irq) {
3434 			qib_dev_err(dd,
3435 				"irq is 0, BIOS error?  Interrupts won't work\n");
3436 			goto bail;
3437 		}
3438 		ret = request_irq(dd->pcidev->irq, qib_7322intr,
3439 				  IRQF_SHARED, QIB_DRV_NAME, dd);
3440 		if (ret) {
3441 			qib_dev_err(dd,
3442 				"Couldn't setup INTx interrupt (irq=%d): %d\n",
3443 				dd->pcidev->irq, ret);
3444 			goto bail;
3445 		}
3446 		dd->cspec->irq = dd->pcidev->irq;
3447 		dd->cspec->main_int_mask = ~0ULL;
3448 		goto bail;
3449 	}
3450 
3451 	/* Try to get MSIx interrupts */
3452 	memset(redirect, 0, sizeof redirect);
3453 	mask = ~0ULL;
3454 	msixnum = 0;
3455 	local_mask = cpumask_of_pcibus(dd->pcidev->bus);
3456 	firstcpu = cpumask_first(local_mask);
3457 	if (firstcpu >= nr_cpu_ids ||
3458 			cpumask_weight(local_mask) == num_online_cpus()) {
3459 		local_mask = topology_core_cpumask(0);
3460 		firstcpu = cpumask_first(local_mask);
3461 	}
3462 	if (firstcpu < nr_cpu_ids) {
3463 		secondcpu = cpumask_next(firstcpu, local_mask);
3464 		if (secondcpu >= nr_cpu_ids)
3465 			secondcpu = firstcpu;
3466 		currrcvcpu = secondcpu;
3467 	}
3468 	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3469 		irq_handler_t handler;
3470 		void *arg;
3471 		u64 val;
3472 		int lsb, reg, sh;
3473 #ifdef CONFIG_INFINIBAND_QIB_DCA
3474 		int dca = 0;
3475 #endif
3476 
3477 		dd->cspec->msix_entries[msixnum].
3478 			name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
3479 			= '\0';
3480 		if (i < ARRAY_SIZE(irq_table)) {
3481 			if (irq_table[i].port) {
3482 				/* skip if for a non-configured port */
3483 				if (irq_table[i].port > dd->num_pports)
3484 					continue;
3485 				arg = dd->pport + irq_table[i].port - 1;
3486 			} else
3487 				arg = dd;
3488 #ifdef CONFIG_INFINIBAND_QIB_DCA
3489 			dca = irq_table[i].dca;
3490 #endif
3491 			lsb = irq_table[i].lsb;
3492 			handler = irq_table[i].handler;
3493 			snprintf(dd->cspec->msix_entries[msixnum].name,
3494 				sizeof(dd->cspec->msix_entries[msixnum].name)
3495 				 - 1,
3496 				QIB_DRV_NAME "%d%s", dd->unit,
3497 				irq_table[i].name);
3498 		} else {
3499 			unsigned ctxt;
3500 
3501 			ctxt = i - ARRAY_SIZE(irq_table);
3502 			/* per krcvq context receive interrupt */
3503 			arg = dd->rcd[ctxt];
3504 			if (!arg)
3505 				continue;
3506 			if (qib_krcvq01_no_msi && ctxt < 2)
3507 				continue;
3508 #ifdef CONFIG_INFINIBAND_QIB_DCA
3509 			dca = 1;
3510 #endif
3511 			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3512 			handler = qib_7322pintr;
3513 			snprintf(dd->cspec->msix_entries[msixnum].name,
3514 				sizeof(dd->cspec->msix_entries[msixnum].name)
3515 				 - 1,
3516 				QIB_DRV_NAME "%d (kctx)", dd->unit);
3517 		}
3518 		ret = request_irq(
3519 			dd->cspec->msix_entries[msixnum].msix.vector,
3520 			handler, 0, dd->cspec->msix_entries[msixnum].name,
3521 			arg);
3522 		if (ret) {
3523 			/*
3524 			 * Shouldn't happen since the enable said we could
3525 			 * have as many as we are trying to setup here.
3526 			 */
3527 			qib_dev_err(dd,
3528 				"Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
3529 				msixnum,
3530 				dd->cspec->msix_entries[msixnum].msix.vector,
3531 				ret);
3532 			qib_7322_nomsix(dd);
3533 			goto try_intx;
3534 		}
3535 		dd->cspec->msix_entries[msixnum].arg = arg;
3536 #ifdef CONFIG_INFINIBAND_QIB_DCA
3537 		dd->cspec->msix_entries[msixnum].dca = dca;
3538 		dd->cspec->msix_entries[msixnum].rcv =
3539 			handler == qib_7322pintr;
3540 #endif
3541 		if (lsb >= 0) {
3542 			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3543 			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3544 				SYM_LSB(IntRedirect0, vec1);
3545 			mask &= ~(1ULL << lsb);
3546 			redirect[reg] |= ((u64) msixnum) << sh;
3547 		}
3548 		val = qib_read_kreg64(dd, 2 * msixnum + 1 +
3549 			(QIB_7322_MsixTable_OFFS / sizeof(u64)));
3550 		if (firstcpu < nr_cpu_ids &&
3551 			zalloc_cpumask_var(
3552 				&dd->cspec->msix_entries[msixnum].mask,
3553 				GFP_KERNEL)) {
3554 			if (handler == qib_7322pintr) {
3555 				cpumask_set_cpu(currrcvcpu,
3556 					dd->cspec->msix_entries[msixnum].mask);
3557 				currrcvcpu = cpumask_next(currrcvcpu,
3558 					local_mask);
3559 				if (currrcvcpu >= nr_cpu_ids)
3560 					currrcvcpu = secondcpu;
3561 			} else {
3562 				cpumask_set_cpu(firstcpu,
3563 					dd->cspec->msix_entries[msixnum].mask);
3564 			}
3565 			irq_set_affinity_hint(
3566 				dd->cspec->msix_entries[msixnum].msix.vector,
3567 				dd->cspec->msix_entries[msixnum].mask);
3568 		}
3569 		msixnum++;
3570 	}
3571 	/* Initialize the vector mapping */
3572 	for (i = 0; i < ARRAY_SIZE(redirect); i++)
3573 		qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3574 	dd->cspec->main_int_mask = mask;
3575 	tasklet_init(&dd->error_tasklet, qib_error_tasklet,
3576 		(unsigned long)dd);
3577 bail:;
3578 }
3579 
3580 /**
3581  * qib_7322_boardname - fill in the board name and note features
3582  * @dd: the qlogic_ib device
3583  *
3584  * info will be based on the board revision register
3585  */
3586 static unsigned qib_7322_boardname(struct qib_devdata *dd)
3587 {
3588 	/* Will need enumeration of board-types here */
3589 	char *n;
3590 	u32 boardid, namelen;
3591 	unsigned features = DUAL_PORT_CAP;
3592 
3593 	boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3594 
3595 	switch (boardid) {
3596 	case 0:
3597 		n = "InfiniPath_QLE7342_Emulation";
3598 		break;
3599 	case 1:
3600 		n = "InfiniPath_QLE7340";
3601 		dd->flags |= QIB_HAS_QSFP;
3602 		features = PORT_SPD_CAP;
3603 		break;
3604 	case 2:
3605 		n = "InfiniPath_QLE7342";
3606 		dd->flags |= QIB_HAS_QSFP;
3607 		break;
3608 	case 3:
3609 		n = "InfiniPath_QMI7342";
3610 		break;
3611 	case 4:
3612 		n = "InfiniPath_Unsupported7342";
3613 		qib_dev_err(dd, "Unsupported version of QMH7342\n");
3614 		features = 0;
3615 		break;
3616 	case BOARD_QMH7342:
3617 		n = "InfiniPath_QMH7342";
3618 		features = 0x24;
3619 		break;
3620 	case BOARD_QME7342:
3621 		n = "InfiniPath_QME7342";
3622 		break;
3623 	case 8:
3624 		n = "InfiniPath_QME7362";
3625 		dd->flags |= QIB_HAS_QSFP;
3626 		break;
3627 	case 15:
3628 		n = "InfiniPath_QLE7342_TEST";
3629 		dd->flags |= QIB_HAS_QSFP;
3630 		break;
3631 	default:
3632 		n = "InfiniPath_QLE73xy_UNKNOWN";
3633 		qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3634 		break;
3635 	}
3636 	dd->board_atten = 1; /* index into txdds_Xdr */
3637 
3638 	namelen = strlen(n) + 1;
3639 	dd->boardname = kmalloc(namelen, GFP_KERNEL);
3640 	if (!dd->boardname)
3641 		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
3642 	else
3643 		snprintf(dd->boardname, namelen, "%s", n);
3644 
3645 	snprintf(dd->boardversion, sizeof(dd->boardversion),
3646 		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3647 		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3648 		 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
3649 		 dd->majrev, dd->minrev,
3650 		 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
3651 
3652 	if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3653 		qib_devinfo(dd->pcidev,
3654 			"IB%u: Forced to single port mode by module parameter\n",
3655 			dd->unit);
3656 		features &= PORT_SPD_CAP;
3657 	}
3658 
3659 	return features;
3660 }
3661 
3662 /*
3663  * This routine sleeps, so it can only be called from user context, not
3664  * from interrupt context.
3665  */
3666 static int qib_do_7322_reset(struct qib_devdata *dd)
3667 {
3668 	u64 val;
3669 	u64 *msix_vecsave;
3670 	int i, msix_entries, ret = 1;
3671 	u16 cmdval;
3672 	u8 int_line, clinesz;
3673 	unsigned long flags;
3674 
3675 	/* Use dev_err so it shows up in logs, etc. */
3676 	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3677 
3678 	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3679 
3680 	msix_entries = dd->cspec->num_msix_entries;
3681 
3682 	/* no interrupts till re-initted */
3683 	qib_7322_set_intr_state(dd, 0);
3684 
3685 	if (msix_entries) {
3686 		qib_7322_nomsix(dd);
3687 		/* can be up to 512 bytes, too big for stack */
3688 		msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
3689 			sizeof(u64), GFP_KERNEL);
3690 		if (!msix_vecsave)
3691 			qib_dev_err(dd, "No mem to save MSIx data\n");
3692 	} else
3693 		msix_vecsave = NULL;
3694 
3695 	/*
3696 	 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3697 	 * info that is set up by the BIOS, so we have to save and restore
3698 	 * it ourselves.   There is some risk something could change it,
3699 	 * after we save it, but since we have disabled the MSIx, it
3700 	 * shouldn't be touched...
3701 	 */
3702 	for (i = 0; i < msix_entries; i++) {
3703 		u64 vecaddr, vecdata;
3704 		vecaddr = qib_read_kreg64(dd, 2 * i +
3705 				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3706 		vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3707 				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3708 		if (msix_vecsave) {
3709 			msix_vecsave[2 * i] = vecaddr;
3710 			/* save it without the masked bit set */
3711 			msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3712 		}
3713 	}
3714 
3715 	dd->pport->cpspec->ibdeltainprog = 0;
3716 	dd->pport->cpspec->ibsymdelta = 0;
3717 	dd->pport->cpspec->iblnkerrdelta = 0;
3718 	dd->pport->cpspec->ibmalfdelta = 0;
3719 	dd->int_counter = 0; /* so we check interrupts work again */
3720 
3721 	/*
3722 	 * Keep chip from being accessed until we are ready.  Use
3723 	 * writeq() directly, to allow the write even though QIB_PRESENT
3724 	 * isn't set.
3725 	 */
3726 	dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3727 	dd->flags |= QIB_DOING_RESET;
3728 	val = dd->control | QLOGIC_IB_C_RESET;
3729 	writeq(val, &dd->kregbase[kr_control]);
3730 
3731 	for (i = 1; i <= 5; i++) {
3732 		/*
3733 		 * Allow MBIST, etc. to complete; longer on each retry.
3734 		 * We sometimes get machine checks from bus timeout if no
3735 		 * response, so for now, make it *really* long.
3736 		 */
3737 		msleep(1000 + (1 + i) * 3000);
3738 
3739 		qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3740 
3741 		/*
3742 		 * Use readq directly, so we don't need to mark it as PRESENT
3743 		 * until we get a successful indication that all is well.
3744 		 */
3745 		val = readq(&dd->kregbase[kr_revision]);
3746 		if (val == dd->revision)
3747 			break;
3748 		if (i == 5) {
3749 			qib_dev_err(dd,
3750 				"Failed to initialize after reset, unusable\n");
3751 			ret = 0;
3752 			goto  bail;
3753 		}
3754 	}
3755 
3756 	dd->flags |= QIB_PRESENT; /* it's back */
3757 
3758 	if (msix_entries) {
3759 		/* restore the MSIx vector address and data if saved above */
3760 		for (i = 0; i < msix_entries; i++) {
3761 			dd->cspec->msix_entries[i].msix.entry = i;
3762 			if (!msix_vecsave || !msix_vecsave[2 * i])
3763 				continue;
3764 			qib_write_kreg(dd, 2 * i +
3765 				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
3766 				msix_vecsave[2 * i]);
3767 			qib_write_kreg(dd, 1 + 2 * i +
3768 				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
3769 				msix_vecsave[1 + 2 * i]);
3770 		}
3771 	}
3772 
3773 	/* initialize the remaining registers.  */
3774 	for (i = 0; i < dd->num_pports; ++i)
3775 		write_7322_init_portregs(&dd->pport[i]);
3776 	write_7322_initregs(dd);
3777 
3778 	if (qib_pcie_params(dd, dd->lbus_width,
3779 			    &dd->cspec->num_msix_entries,
3780 			    dd->cspec->msix_entries))
3781 		qib_dev_err(dd,
3782 			"Reset failed to setup PCIe or interrupts; continuing anyway\n");
3783 
3784 	qib_setup_7322_interrupt(dd, 1);
3785 
3786 	for (i = 0; i < dd->num_pports; ++i) {
3787 		struct qib_pportdata *ppd = &dd->pport[i];
3788 
3789 		spin_lock_irqsave(&ppd->lflags_lock, flags);
3790 		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3791 		ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3792 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3793 	}
3794 
3795 bail:
3796 	dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3797 	kfree(msix_vecsave);
3798 	return ret;
3799 }
3800 
3801 /**
3802  * qib_7322_put_tid - write a TID to the chip
3803  * @dd: the qlogic_ib device
3804  * @tidptr: pointer to the expected TID (in chip) to update
3805  * @tidtype: 0 for eager, 1 for expected
3806  * @pa: physical address of in memory buffer; tidinvalid if freeing
3807  */
3808 static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3809 			     u32 type, unsigned long pa)
3810 {
3811 	if (!(dd->flags & QIB_PRESENT))
3812 		return;
3813 	if (pa != dd->tidinvalid) {
3814 		u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3815 
3816 		/* paranoia checks */
3817 		if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3818 			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3819 				    pa);
3820 			return;
3821 		}
3822 		if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3823 			qib_dev_err(dd,
3824 				"Physical page address 0x%lx larger than supported\n",
3825 				pa);
3826 			return;
3827 		}
3828 
3829 		if (type == RCVHQ_RCV_TYPE_EAGER)
3830 			chippa |= dd->tidtemplate;
3831 		else /* for now, always full 4KB page */
3832 			chippa |= IBA7322_TID_SZ_4K;
3833 		pa = chippa;
3834 	}
3835 	writeq(pa, tidptr);
3836 	mmiowb();
3837 }
3838 
3839 /**
3840  * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3841  * @dd: the qlogic_ib device
3842  * @ctxt: the ctxt
3843  *
3844  * clear all TID entries for a ctxt, expected and eager.
3845  * Used from qib_close().
3846  */
3847 static void qib_7322_clear_tids(struct qib_devdata *dd,
3848 				struct qib_ctxtdata *rcd)
3849 {
3850 	u64 __iomem *tidbase;
3851 	unsigned long tidinv;
3852 	u32 ctxt;
3853 	int i;
3854 
3855 	if (!dd->kregbase || !rcd)
3856 		return;
3857 
3858 	ctxt = rcd->ctxt;
3859 
3860 	tidinv = dd->tidinvalid;
3861 	tidbase = (u64 __iomem *)
3862 		((char __iomem *) dd->kregbase +
3863 		 dd->rcvtidbase +
3864 		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3865 
3866 	for (i = 0; i < dd->rcvtidcnt; i++)
3867 		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3868 				 tidinv);
3869 
3870 	tidbase = (u64 __iomem *)
3871 		((char __iomem *) dd->kregbase +
3872 		 dd->rcvegrbase +
3873 		 rcd->rcvegr_tid_base * sizeof(*tidbase));
3874 
3875 	for (i = 0; i < rcd->rcvegrcnt; i++)
3876 		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3877 				 tidinv);
3878 }
3879 
3880 /**
3881  * qib_7322_tidtemplate - setup constants for TID updates
3882  * @dd: the qlogic_ib device
3883  *
3884  * We setup stuff that we use a lot, to avoid calculating each time
3885  */
3886 static void qib_7322_tidtemplate(struct qib_devdata *dd)
3887 {
3888 	/*
3889 	 * For now, we always allocate 4KB buffers (at init) so we can
3890 	 * receive max size packets.  We may want a module parameter to
3891 	 * specify 2KB or 4KB and/or make it per port instead of per device
3892 	 * for those who want to reduce memory footprint.  Note that the
3893 	 * rcvhdrentsize size must be large enough to hold the largest
3894 	 * IB header (currently 96 bytes) that we expect to handle (plus of
3895 	 * course the 2 dwords of RHF).
3896 	 */
3897 	if (dd->rcvegrbufsize == 2048)
3898 		dd->tidtemplate = IBA7322_TID_SZ_2K;
3899 	else if (dd->rcvegrbufsize == 4096)
3900 		dd->tidtemplate = IBA7322_TID_SZ_4K;
3901 	dd->tidinvalid = 0;
3902 }
3903 
3904 /**
3905  * qib_init_7322_get_base_info - set chip-specific flags for user code
3906  * @rcd: the qlogic_ib ctxt
3907  * @kbase: qib_base_info pointer
3908  *
3909  * We set the PCIE flag because the lower bandwidth on PCIe vs
3910  * HyperTransport can affect some user packet algorithims.
3911  */
3912 
3913 static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3914 				  struct qib_base_info *kinfo)
3915 {
3916 	kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3917 		QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3918 		QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3919 	if (rcd->dd->cspec->r1)
3920 		kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3921 	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3922 		kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3923 
3924 	return 0;
3925 }
3926 
3927 static struct qib_message_header *
3928 qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3929 {
3930 	u32 offset = qib_hdrget_offset(rhf_addr);
3931 
3932 	return (struct qib_message_header *)
3933 		(rhf_addr - dd->rhf_offset + offset);
3934 }
3935 
3936 /*
3937  * Configure number of contexts.
3938  */
3939 static void qib_7322_config_ctxts(struct qib_devdata *dd)
3940 {
3941 	unsigned long flags;
3942 	u32 nchipctxts;
3943 
3944 	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3945 	dd->cspec->numctxts = nchipctxts;
3946 	if (qib_n_krcv_queues > 1 && dd->num_pports) {
3947 		dd->first_user_ctxt = NUM_IB_PORTS +
3948 			(qib_n_krcv_queues - 1) * dd->num_pports;
3949 		if (dd->first_user_ctxt > nchipctxts)
3950 			dd->first_user_ctxt = nchipctxts;
3951 		dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3952 	} else {
3953 		dd->first_user_ctxt = NUM_IB_PORTS;
3954 		dd->n_krcv_queues = 1;
3955 	}
3956 
3957 	if (!qib_cfgctxts) {
3958 		int nctxts = dd->first_user_ctxt + num_online_cpus();
3959 
3960 		if (nctxts <= 6)
3961 			dd->ctxtcnt = 6;
3962 		else if (nctxts <= 10)
3963 			dd->ctxtcnt = 10;
3964 		else if (nctxts <= nchipctxts)
3965 			dd->ctxtcnt = nchipctxts;
3966 	} else if (qib_cfgctxts < dd->num_pports)
3967 		dd->ctxtcnt = dd->num_pports;
3968 	else if (qib_cfgctxts <= nchipctxts)
3969 		dd->ctxtcnt = qib_cfgctxts;
3970 	if (!dd->ctxtcnt) /* none of the above, set to max */
3971 		dd->ctxtcnt = nchipctxts;
3972 
3973 	/*
3974 	 * Chip can be configured for 6, 10, or 18 ctxts, and choice
3975 	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
3976 	 * Lock to be paranoid about later motion, etc.
3977 	 */
3978 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3979 	if (dd->ctxtcnt > 10)
3980 		dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3981 	else if (dd->ctxtcnt > 6)
3982 		dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3983 	/* else configure for default 6 receive ctxts */
3984 
3985 	/* The XRC opcode is 5. */
3986 	dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3987 
3988 	/*
3989 	 * RcvCtrl *must* be written here so that the
3990 	 * chip understands how to change rcvegrcnt below.
3991 	 */
3992 	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3993 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3994 
3995 	/* kr_rcvegrcnt changes based on the number of contexts enabled */
3996 	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3997 	if (qib_rcvhdrcnt)
3998 		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
3999 	else
4000 		dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
4001 				    dd->num_pports > 1 ? 1024U : 2048U);
4002 }
4003 
4004 static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
4005 {
4006 
4007 	int lsb, ret = 0;
4008 	u64 maskr; /* right-justified mask */
4009 
4010 	switch (which) {
4011 
4012 	case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
4013 		ret = ppd->link_width_enabled;
4014 		goto done;
4015 
4016 	case QIB_IB_CFG_LWID: /* Get currently active Link-width */
4017 		ret = ppd->link_width_active;
4018 		goto done;
4019 
4020 	case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
4021 		ret = ppd->link_speed_enabled;
4022 		goto done;
4023 
4024 	case QIB_IB_CFG_SPD: /* Get current Link spd */
4025 		ret = ppd->link_speed_active;
4026 		goto done;
4027 
4028 	case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
4029 		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4030 		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4031 		break;
4032 
4033 	case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
4034 		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4035 		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4036 		break;
4037 
4038 	case QIB_IB_CFG_LINKLATENCY:
4039 		ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
4040 			SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
4041 		goto done;
4042 
4043 	case QIB_IB_CFG_OP_VLS:
4044 		ret = ppd->vls_operational;
4045 		goto done;
4046 
4047 	case QIB_IB_CFG_VL_HIGH_CAP:
4048 		ret = 16;
4049 		goto done;
4050 
4051 	case QIB_IB_CFG_VL_LOW_CAP:
4052 		ret = 16;
4053 		goto done;
4054 
4055 	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4056 		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4057 				OverrunThreshold);
4058 		goto done;
4059 
4060 	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4061 		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4062 				PhyerrThreshold);
4063 		goto done;
4064 
4065 	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4066 		/* will only take effect when the link state changes */
4067 		ret = (ppd->cpspec->ibcctrl_a &
4068 		       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
4069 			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
4070 		goto done;
4071 
4072 	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
4073 		lsb = IBA7322_IBC_HRTBT_LSB;
4074 		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4075 		break;
4076 
4077 	case QIB_IB_CFG_PMA_TICKS:
4078 		/*
4079 		 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
4080 		 * Since the clock is always 250MHz, the value is 3, 1 or 0.
4081 		 */
4082 		if (ppd->link_speed_active == QIB_IB_QDR)
4083 			ret = 3;
4084 		else if (ppd->link_speed_active == QIB_IB_DDR)
4085 			ret = 1;
4086 		else
4087 			ret = 0;
4088 		goto done;
4089 
4090 	default:
4091 		ret = -EINVAL;
4092 		goto done;
4093 	}
4094 	ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
4095 done:
4096 	return ret;
4097 }
4098 
4099 /*
4100  * Below again cribbed liberally from older version. Do not lean
4101  * heavily on it.
4102  */
4103 #define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
4104 #define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
4105 	| (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
4106 
4107 static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
4108 {
4109 	struct qib_devdata *dd = ppd->dd;
4110 	u64 maskr; /* right-justified mask */
4111 	int lsb, ret = 0;
4112 	u16 lcmd, licmd;
4113 	unsigned long flags;
4114 
4115 	switch (which) {
4116 	case QIB_IB_CFG_LIDLMC:
4117 		/*
4118 		 * Set LID and LMC. Combined to avoid possible hazard
4119 		 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
4120 		 */
4121 		lsb = IBA7322_IBC_DLIDLMC_SHIFT;
4122 		maskr = IBA7322_IBC_DLIDLMC_MASK;
4123 		/*
4124 		 * For header-checking, the SLID in the packet will
4125 		 * be masked with SendIBSLMCMask, and compared
4126 		 * with SendIBSLIDAssignMask. Make sure we do not
4127 		 * set any bits not covered by the mask, or we get
4128 		 * false-positives.
4129 		 */
4130 		qib_write_kreg_port(ppd, krp_sendslid,
4131 				    val & (val >> 16) & SendIBSLIDAssignMask);
4132 		qib_write_kreg_port(ppd, krp_sendslidmask,
4133 				    (val >> 16) & SendIBSLMCMask);
4134 		break;
4135 
4136 	case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
4137 		ppd->link_width_enabled = val;
4138 		/* convert IB value to chip register value */
4139 		if (val == IB_WIDTH_1X)
4140 			val = 0;
4141 		else if (val == IB_WIDTH_4X)
4142 			val = 1;
4143 		else
4144 			val = 3;
4145 		maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
4146 		lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
4147 		break;
4148 
4149 	case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
4150 		/*
4151 		 * As with width, only write the actual register if the
4152 		 * link is currently down, otherwise takes effect on next
4153 		 * link change.  Since setting is being explicitly requested
4154 		 * (via MAD or sysfs), clear autoneg failure status if speed
4155 		 * autoneg is enabled.
4156 		 */
4157 		ppd->link_speed_enabled = val;
4158 		val <<= IBA7322_IBC_SPEED_LSB;
4159 		maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
4160 			IBA7322_IBC_MAX_SPEED_MASK;
4161 		if (val & (val - 1)) {
4162 			/* Muliple speeds enabled */
4163 			val |= IBA7322_IBC_IBTA_1_2_MASK |
4164 				IBA7322_IBC_MAX_SPEED_MASK;
4165 			spin_lock_irqsave(&ppd->lflags_lock, flags);
4166 			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
4167 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4168 		} else if (val & IBA7322_IBC_SPEED_QDR)
4169 			val |= IBA7322_IBC_IBTA_1_2_MASK;
4170 		/* IBTA 1.2 mode + min/max + speed bits are contiguous */
4171 		lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
4172 		break;
4173 
4174 	case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
4175 		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4176 		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4177 		break;
4178 
4179 	case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
4180 		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4181 		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4182 		break;
4183 
4184 	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4185 		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4186 				  OverrunThreshold);
4187 		if (maskr != val) {
4188 			ppd->cpspec->ibcctrl_a &=
4189 				~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
4190 			ppd->cpspec->ibcctrl_a |= (u64) val <<
4191 				SYM_LSB(IBCCtrlA_0, OverrunThreshold);
4192 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
4193 					    ppd->cpspec->ibcctrl_a);
4194 			qib_write_kreg(dd, kr_scratch, 0ULL);
4195 		}
4196 		goto bail;
4197 
4198 	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4199 		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4200 				  PhyerrThreshold);
4201 		if (maskr != val) {
4202 			ppd->cpspec->ibcctrl_a &=
4203 				~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
4204 			ppd->cpspec->ibcctrl_a |= (u64) val <<
4205 				SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
4206 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
4207 					    ppd->cpspec->ibcctrl_a);
4208 			qib_write_kreg(dd, kr_scratch, 0ULL);
4209 		}
4210 		goto bail;
4211 
4212 	case QIB_IB_CFG_PKEYS: /* update pkeys */
4213 		maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
4214 			((u64) ppd->pkeys[2] << 32) |
4215 			((u64) ppd->pkeys[3] << 48);
4216 		qib_write_kreg_port(ppd, krp_partitionkey, maskr);
4217 		goto bail;
4218 
4219 	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4220 		/* will only take effect when the link state changes */
4221 		if (val == IB_LINKINITCMD_POLL)
4222 			ppd->cpspec->ibcctrl_a &=
4223 				~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4224 		else /* SLEEP */
4225 			ppd->cpspec->ibcctrl_a |=
4226 				SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4227 		qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4228 		qib_write_kreg(dd, kr_scratch, 0ULL);
4229 		goto bail;
4230 
4231 	case QIB_IB_CFG_MTU: /* update the MTU in IBC */
4232 		/*
4233 		 * Update our housekeeping variables, and set IBC max
4234 		 * size, same as init code; max IBC is max we allow in
4235 		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
4236 		 * Set even if it's unchanged, print debug message only
4237 		 * on changes.
4238 		 */
4239 		val = (ppd->ibmaxlen >> 2) + 1;
4240 		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
4241 		ppd->cpspec->ibcctrl_a |= (u64)val <<
4242 			SYM_LSB(IBCCtrlA_0, MaxPktLen);
4243 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
4244 				    ppd->cpspec->ibcctrl_a);
4245 		qib_write_kreg(dd, kr_scratch, 0ULL);
4246 		goto bail;
4247 
4248 	case QIB_IB_CFG_LSTATE: /* set the IB link state */
4249 		switch (val & 0xffff0000) {
4250 		case IB_LINKCMD_DOWN:
4251 			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
4252 			ppd->cpspec->ibmalfusesnap = 1;
4253 			ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
4254 				crp_errlink);
4255 			if (!ppd->cpspec->ibdeltainprog &&
4256 			    qib_compat_ddr_negotiate) {
4257 				ppd->cpspec->ibdeltainprog = 1;
4258 				ppd->cpspec->ibsymsnap =
4259 					read_7322_creg32_port(ppd,
4260 							      crp_ibsymbolerr);
4261 				ppd->cpspec->iblnkerrsnap =
4262 					read_7322_creg32_port(ppd,
4263 						      crp_iblinkerrrecov);
4264 			}
4265 			break;
4266 
4267 		case IB_LINKCMD_ARMED:
4268 			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
4269 			if (ppd->cpspec->ibmalfusesnap) {
4270 				ppd->cpspec->ibmalfusesnap = 0;
4271 				ppd->cpspec->ibmalfdelta +=
4272 					read_7322_creg32_port(ppd,
4273 							      crp_errlink) -
4274 					ppd->cpspec->ibmalfsnap;
4275 			}
4276 			break;
4277 
4278 		case IB_LINKCMD_ACTIVE:
4279 			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
4280 			break;
4281 
4282 		default:
4283 			ret = -EINVAL;
4284 			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
4285 			goto bail;
4286 		}
4287 		switch (val & 0xffff) {
4288 		case IB_LINKINITCMD_NOP:
4289 			licmd = 0;
4290 			break;
4291 
4292 		case IB_LINKINITCMD_POLL:
4293 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
4294 			break;
4295 
4296 		case IB_LINKINITCMD_SLEEP:
4297 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
4298 			break;
4299 
4300 		case IB_LINKINITCMD_DISABLE:
4301 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
4302 			ppd->cpspec->chase_end = 0;
4303 			/*
4304 			 * stop state chase counter and timer, if running.
4305 			 * wait forpending timer, but don't clear .data (ppd)!
4306 			 */
4307 			if (ppd->cpspec->chase_timer.expires) {
4308 				del_timer_sync(&ppd->cpspec->chase_timer);
4309 				ppd->cpspec->chase_timer.expires = 0;
4310 			}
4311 			break;
4312 
4313 		default:
4314 			ret = -EINVAL;
4315 			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
4316 				    val & 0xffff);
4317 			goto bail;
4318 		}
4319 		qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4320 		goto bail;
4321 
4322 	case QIB_IB_CFG_OP_VLS:
4323 		if (ppd->vls_operational != val) {
4324 			ppd->vls_operational = val;
4325 			set_vls(ppd);
4326 		}
4327 		goto bail;
4328 
4329 	case QIB_IB_CFG_VL_HIGH_LIMIT:
4330 		qib_write_kreg_port(ppd, krp_highprio_limit, val);
4331 		goto bail;
4332 
4333 	case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
4334 		if (val > 3) {
4335 			ret = -EINVAL;
4336 			goto bail;
4337 		}
4338 		lsb = IBA7322_IBC_HRTBT_LSB;
4339 		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4340 		break;
4341 
4342 	case QIB_IB_CFG_PORT:
4343 		/* val is the port number of the switch we are connected to. */
4344 		if (ppd->dd->cspec->r1) {
4345 			cancel_delayed_work(&ppd->cpspec->ipg_work);
4346 			ppd->cpspec->ipg_tries = 0;
4347 		}
4348 		goto bail;
4349 
4350 	default:
4351 		ret = -EINVAL;
4352 		goto bail;
4353 	}
4354 	ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4355 	ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4356 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4357 	qib_write_kreg(dd, kr_scratch, 0);
4358 bail:
4359 	return ret;
4360 }
4361 
4362 static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4363 {
4364 	int ret = 0;
4365 	u64 val, ctrlb;
4366 
4367 	/* only IBC loopback, may add serdes and xgxs loopbacks later */
4368 	if (!strncmp(what, "ibc", 3)) {
4369 		ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4370 						       Loopback);
4371 		val = 0; /* disable heart beat, so link will come up */
4372 		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4373 			 ppd->dd->unit, ppd->port);
4374 	} else if (!strncmp(what, "off", 3)) {
4375 		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4376 							Loopback);
4377 		/* enable heart beat again */
4378 		val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
4379 		qib_devinfo(ppd->dd->pcidev,
4380 			"Disabling IB%u:%u IBC loopback (normal)\n",
4381 			ppd->dd->unit, ppd->port);
4382 	} else
4383 		ret = -EINVAL;
4384 	if (!ret) {
4385 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
4386 				    ppd->cpspec->ibcctrl_a);
4387 		ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4388 					     << IBA7322_IBC_HRTBT_LSB);
4389 		ppd->cpspec->ibcctrl_b = ctrlb | val;
4390 		qib_write_kreg_port(ppd, krp_ibcctrl_b,
4391 				    ppd->cpspec->ibcctrl_b);
4392 		qib_write_kreg(ppd->dd, kr_scratch, 0);
4393 	}
4394 	return ret;
4395 }
4396 
4397 static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4398 			   struct ib_vl_weight_elem *vl)
4399 {
4400 	unsigned i;
4401 
4402 	for (i = 0; i < 16; i++, regno++, vl++) {
4403 		u32 val = qib_read_kreg_port(ppd, regno);
4404 
4405 		vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
4406 			SYM_RMASK(LowPriority0_0, VirtualLane);
4407 		vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
4408 			SYM_RMASK(LowPriority0_0, Weight);
4409 	}
4410 }
4411 
4412 static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4413 			   struct ib_vl_weight_elem *vl)
4414 {
4415 	unsigned i;
4416 
4417 	for (i = 0; i < 16; i++, regno++, vl++) {
4418 		u64 val;
4419 
4420 		val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4421 			SYM_LSB(LowPriority0_0, VirtualLane)) |
4422 		      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4423 			SYM_LSB(LowPriority0_0, Weight));
4424 		qib_write_kreg_port(ppd, regno, val);
4425 	}
4426 	if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4427 		struct qib_devdata *dd = ppd->dd;
4428 		unsigned long flags;
4429 
4430 		spin_lock_irqsave(&dd->sendctrl_lock, flags);
4431 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4432 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4433 		qib_write_kreg(dd, kr_scratch, 0);
4434 		spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4435 	}
4436 }
4437 
4438 static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4439 {
4440 	switch (which) {
4441 	case QIB_IB_TBL_VL_HIGH_ARB:
4442 		get_vl_weights(ppd, krp_highprio_0, t);
4443 		break;
4444 
4445 	case QIB_IB_TBL_VL_LOW_ARB:
4446 		get_vl_weights(ppd, krp_lowprio_0, t);
4447 		break;
4448 
4449 	default:
4450 		return -EINVAL;
4451 	}
4452 	return 0;
4453 }
4454 
4455 static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4456 {
4457 	switch (which) {
4458 	case QIB_IB_TBL_VL_HIGH_ARB:
4459 		set_vl_weights(ppd, krp_highprio_0, t);
4460 		break;
4461 
4462 	case QIB_IB_TBL_VL_LOW_ARB:
4463 		set_vl_weights(ppd, krp_lowprio_0, t);
4464 		break;
4465 
4466 	default:
4467 		return -EINVAL;
4468 	}
4469 	return 0;
4470 }
4471 
4472 static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4473 				    u32 updegr, u32 egrhd, u32 npkts)
4474 {
4475 	/*
4476 	 * Need to write timeout register before updating rcvhdrhead to ensure
4477 	 * that the timer is enabled on reception of a packet.
4478 	 */
4479 	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4480 		adjust_rcv_timeout(rcd, npkts);
4481 	if (updegr)
4482 		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4483 	mmiowb();
4484 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4485 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4486 	mmiowb();
4487 }
4488 
4489 static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4490 {
4491 	u32 head, tail;
4492 
4493 	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4494 	if (rcd->rcvhdrtail_kvaddr)
4495 		tail = qib_get_rcvhdrtail(rcd);
4496 	else
4497 		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4498 	return head == tail;
4499 }
4500 
4501 #define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4502 	QIB_RCVCTRL_CTXT_DIS | \
4503 	QIB_RCVCTRL_TIDFLOW_ENB | \
4504 	QIB_RCVCTRL_TIDFLOW_DIS | \
4505 	QIB_RCVCTRL_TAILUPD_ENB | \
4506 	QIB_RCVCTRL_TAILUPD_DIS | \
4507 	QIB_RCVCTRL_INTRAVAIL_ENB | \
4508 	QIB_RCVCTRL_INTRAVAIL_DIS | \
4509 	QIB_RCVCTRL_BP_ENB | \
4510 	QIB_RCVCTRL_BP_DIS)
4511 
4512 #define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4513 	QIB_RCVCTRL_CTXT_DIS | \
4514 	QIB_RCVCTRL_PKEY_DIS | \
4515 	QIB_RCVCTRL_PKEY_ENB)
4516 
4517 /*
4518  * Modify the RCVCTRL register in chip-specific way. This
4519  * is a function because bit positions and (future) register
4520  * location is chip-specifc, but the needed operations are
4521  * generic. <op> is a bit-mask because we often want to
4522  * do multiple modifications.
4523  */
4524 static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4525 			     int ctxt)
4526 {
4527 	struct qib_devdata *dd = ppd->dd;
4528 	struct qib_ctxtdata *rcd;
4529 	u64 mask, val;
4530 	unsigned long flags;
4531 
4532 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4533 
4534 	if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4535 		dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4536 	if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4537 		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4538 	if (op & QIB_RCVCTRL_TAILUPD_ENB)
4539 		dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4540 	if (op & QIB_RCVCTRL_TAILUPD_DIS)
4541 		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4542 	if (op & QIB_RCVCTRL_PKEY_ENB)
4543 		ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4544 	if (op & QIB_RCVCTRL_PKEY_DIS)
4545 		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4546 	if (ctxt < 0) {
4547 		mask = (1ULL << dd->ctxtcnt) - 1;
4548 		rcd = NULL;
4549 	} else {
4550 		mask = (1ULL << ctxt);
4551 		rcd = dd->rcd[ctxt];
4552 	}
4553 	if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4554 		ppd->p_rcvctrl |=
4555 			(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4556 		if (!(dd->flags & QIB_NODMA_RTAIL)) {
4557 			op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4558 			dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4559 		}
4560 		/* Write these registers before the context is enabled. */
4561 		qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4562 				    rcd->rcvhdrqtailaddr_phys);
4563 		qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4564 				    rcd->rcvhdrq_phys);
4565 		rcd->seq_cnt = 1;
4566 	}
4567 	if (op & QIB_RCVCTRL_CTXT_DIS)
4568 		ppd->p_rcvctrl &=
4569 			~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4570 	if (op & QIB_RCVCTRL_BP_ENB)
4571 		dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4572 	if (op & QIB_RCVCTRL_BP_DIS)
4573 		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4574 	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4575 		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4576 	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4577 		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4578 	/*
4579 	 * Decide which registers to write depending on the ops enabled.
4580 	 * Special case is "flush" (no bits set at all)
4581 	 * which needs to write both.
4582 	 */
4583 	if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4584 		qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4585 	if (op == 0 || (op & RCVCTRL_PORT_MODS))
4586 		qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4587 	if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4588 		/*
4589 		 * Init the context registers also; if we were
4590 		 * disabled, tail and head should both be zero
4591 		 * already from the enable, but since we don't
4592 		 * know, we have to do it explicitly.
4593 		 */
4594 		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4595 		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4596 
4597 		/* be sure enabling write seen; hd/tl should be 0 */
4598 		(void) qib_read_kreg32(dd, kr_scratch);
4599 		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4600 		dd->rcd[ctxt]->head = val;
4601 		/* If kctxt, interrupt on next receive. */
4602 		if (ctxt < dd->first_user_ctxt)
4603 			val |= dd->rhdrhead_intr_off;
4604 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4605 	} else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4606 		dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4607 		/* arm rcv interrupt */
4608 		val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4609 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4610 	}
4611 	if (op & QIB_RCVCTRL_CTXT_DIS) {
4612 		unsigned f;
4613 
4614 		/* Now that the context is disabled, clear these registers. */
4615 		if (ctxt >= 0) {
4616 			qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4617 			qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4618 			for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4619 				qib_write_ureg(dd, ur_rcvflowtable + f,
4620 					       TIDFLOW_ERRBITS, ctxt);
4621 		} else {
4622 			unsigned i;
4623 
4624 			for (i = 0; i < dd->cfgctxts; i++) {
4625 				qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4626 						    i, 0);
4627 				qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4628 				for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4629 					qib_write_ureg(dd, ur_rcvflowtable + f,
4630 						       TIDFLOW_ERRBITS, i);
4631 			}
4632 		}
4633 	}
4634 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4635 }
4636 
4637 /*
4638  * Modify the SENDCTRL register in chip-specific way. This
4639  * is a function where there are multiple such registers with
4640  * slightly different layouts.
4641  * The chip doesn't allow back-to-back sendctrl writes, so write
4642  * the scratch register after writing sendctrl.
4643  *
4644  * Which register is written depends on the operation.
4645  * Most operate on the common register, while
4646  * SEND_ENB and SEND_DIS operate on the per-port ones.
4647  * SEND_ENB is included in common because it can change SPCL_TRIG
4648  */
4649 #define SENDCTRL_COMMON_MODS (\
4650 	QIB_SENDCTRL_CLEAR | \
4651 	QIB_SENDCTRL_AVAIL_DIS | \
4652 	QIB_SENDCTRL_AVAIL_ENB | \
4653 	QIB_SENDCTRL_AVAIL_BLIP | \
4654 	QIB_SENDCTRL_DISARM | \
4655 	QIB_SENDCTRL_DISARM_ALL | \
4656 	QIB_SENDCTRL_SEND_ENB)
4657 
4658 #define SENDCTRL_PORT_MODS (\
4659 	QIB_SENDCTRL_CLEAR | \
4660 	QIB_SENDCTRL_SEND_ENB | \
4661 	QIB_SENDCTRL_SEND_DIS | \
4662 	QIB_SENDCTRL_FLUSH)
4663 
4664 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4665 {
4666 	struct qib_devdata *dd = ppd->dd;
4667 	u64 tmp_dd_sendctrl;
4668 	unsigned long flags;
4669 
4670 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
4671 
4672 	/* First the dd ones that are "sticky", saved in shadow */
4673 	if (op & QIB_SENDCTRL_CLEAR)
4674 		dd->sendctrl = 0;
4675 	if (op & QIB_SENDCTRL_AVAIL_DIS)
4676 		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4677 	else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4678 		dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4679 		if (dd->flags & QIB_USE_SPCL_TRIG)
4680 			dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4681 	}
4682 
4683 	/* Then the ppd ones that are "sticky", saved in shadow */
4684 	if (op & QIB_SENDCTRL_SEND_DIS)
4685 		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4686 	else if (op & QIB_SENDCTRL_SEND_ENB)
4687 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4688 
4689 	if (op & QIB_SENDCTRL_DISARM_ALL) {
4690 		u32 i, last;
4691 
4692 		tmp_dd_sendctrl = dd->sendctrl;
4693 		last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4694 		/*
4695 		 * Disarm any buffers that are not yet launched,
4696 		 * disabling updates until done.
4697 		 */
4698 		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4699 		for (i = 0; i < last; i++) {
4700 			qib_write_kreg(dd, kr_sendctrl,
4701 				       tmp_dd_sendctrl |
4702 				       SYM_MASK(SendCtrl, Disarm) | i);
4703 			qib_write_kreg(dd, kr_scratch, 0);
4704 		}
4705 	}
4706 
4707 	if (op & QIB_SENDCTRL_FLUSH) {
4708 		u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4709 
4710 		/*
4711 		 * Now drain all the fifos.  The Abort bit should never be
4712 		 * needed, so for now, at least, we don't use it.
4713 		 */
4714 		tmp_ppd_sendctrl |=
4715 			SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4716 			SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4717 			SYM_MASK(SendCtrl_0, TxeBypassIbc);
4718 		qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4719 		qib_write_kreg(dd, kr_scratch, 0);
4720 	}
4721 
4722 	tmp_dd_sendctrl = dd->sendctrl;
4723 
4724 	if (op & QIB_SENDCTRL_DISARM)
4725 		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4726 			((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4727 			 SYM_LSB(SendCtrl, DisarmSendBuf));
4728 	if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4729 	    (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4730 		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4731 
4732 	if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4733 		qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4734 		qib_write_kreg(dd, kr_scratch, 0);
4735 	}
4736 
4737 	if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4738 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4739 		qib_write_kreg(dd, kr_scratch, 0);
4740 	}
4741 
4742 	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4743 		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4744 		qib_write_kreg(dd, kr_scratch, 0);
4745 	}
4746 
4747 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4748 
4749 	if (op & QIB_SENDCTRL_FLUSH) {
4750 		u32 v;
4751 		/*
4752 		 * ensure writes have hit chip, then do a few
4753 		 * more reads, to allow DMA of pioavail registers
4754 		 * to occur, so in-memory copy is in sync with
4755 		 * the chip.  Not always safe to sleep.
4756 		 */
4757 		v = qib_read_kreg32(dd, kr_scratch);
4758 		qib_write_kreg(dd, kr_scratch, v);
4759 		v = qib_read_kreg32(dd, kr_scratch);
4760 		qib_write_kreg(dd, kr_scratch, v);
4761 		qib_read_kreg32(dd, kr_scratch);
4762 	}
4763 }
4764 
4765 #define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4766 #define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4767 #define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
4768 
4769 /**
4770  * qib_portcntr_7322 - read a per-port chip counter
4771  * @ppd: the qlogic_ib pport
4772  * @creg: the counter to read (not a chip offset)
4773  */
4774 static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4775 {
4776 	struct qib_devdata *dd = ppd->dd;
4777 	u64 ret = 0ULL;
4778 	u16 creg;
4779 	/* 0xffff for unimplemented or synthesized counters */
4780 	static const u32 xlator[] = {
4781 		[QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4782 		[QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4783 		[QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4784 		[QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4785 		[QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4786 		[QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4787 		[QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4788 		[QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4789 		[QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4790 		[QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4791 		[QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4792 		[QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4793 		[QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed  for 7322 */
4794 		[QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4795 		[QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4796 		[QIBPORTCNTR_ERRICRC] = crp_erricrc,
4797 		[QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4798 		[QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4799 		[QIBPORTCNTR_BADFORMAT] = crp_badformat,
4800 		[QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4801 		[QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4802 		[QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4803 		[QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4804 		[QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4805 		[QIBPORTCNTR_ERRLINK] = crp_errlink,
4806 		[QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4807 		[QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4808 		[QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4809 		[QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4810 		[QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4811 		/*
4812 		 * the next 3 aren't really counters, but were implemented
4813 		 * as counters in older chips, so still get accessed as
4814 		 * though they were counters from this code.
4815 		 */
4816 		[QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4817 		[QIBPORTCNTR_PSSTART] = krp_psstart,
4818 		[QIBPORTCNTR_PSSTAT] = krp_psstat,
4819 		/* pseudo-counter, summed for all ports */
4820 		[QIBPORTCNTR_KHDROVFL] = 0xffff,
4821 	};
4822 
4823 	if (reg >= ARRAY_SIZE(xlator)) {
4824 		qib_devinfo(ppd->dd->pcidev,
4825 			 "Unimplemented portcounter %u\n", reg);
4826 		goto done;
4827 	}
4828 	creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4829 
4830 	/* handle non-counters and special cases first */
4831 	if (reg == QIBPORTCNTR_KHDROVFL) {
4832 		int i;
4833 
4834 		/* sum over all kernel contexts (skip if mini_init) */
4835 		for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4836 			struct qib_ctxtdata *rcd = dd->rcd[i];
4837 
4838 			if (!rcd || rcd->ppd != ppd)
4839 				continue;
4840 			ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4841 		}
4842 		goto done;
4843 	} else if (reg == QIBPORTCNTR_RXDROPPKT) {
4844 		/*
4845 		 * Used as part of the synthesis of port_rcv_errors
4846 		 * in the verbs code for IBTA counters.  Not needed for 7322,
4847 		 * because all the errors are already counted by other cntrs.
4848 		 */
4849 		goto done;
4850 	} else if (reg == QIBPORTCNTR_PSINTERVAL ||
4851 		   reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4852 		/* were counters in older chips, now per-port kernel regs */
4853 		ret = qib_read_kreg_port(ppd, creg);
4854 		goto done;
4855 	}
4856 
4857 	/*
4858 	 * Only fast increment counters are 64 bits; use 32 bit reads to
4859 	 * avoid two independent reads when on Opteron.
4860 	 */
4861 	if (xlator[reg] & _PORT_64BIT_FLAG)
4862 		ret = read_7322_creg_port(ppd, creg);
4863 	else
4864 		ret = read_7322_creg32_port(ppd, creg);
4865 	if (creg == crp_ibsymbolerr) {
4866 		if (ppd->cpspec->ibdeltainprog)
4867 			ret -= ret - ppd->cpspec->ibsymsnap;
4868 		ret -= ppd->cpspec->ibsymdelta;
4869 	} else if (creg == crp_iblinkerrrecov) {
4870 		if (ppd->cpspec->ibdeltainprog)
4871 			ret -= ret - ppd->cpspec->iblnkerrsnap;
4872 		ret -= ppd->cpspec->iblnkerrdelta;
4873 	} else if (creg == crp_errlink)
4874 		ret -= ppd->cpspec->ibmalfdelta;
4875 	else if (creg == crp_iblinkdown)
4876 		ret += ppd->cpspec->iblnkdowndelta;
4877 done:
4878 	return ret;
4879 }
4880 
4881 /*
4882  * Device counter names (not port-specific), one line per stat,
4883  * single string.  Used by utilities like ipathstats to print the stats
4884  * in a way which works for different versions of drivers, without changing
4885  * the utility.  Names need to be 12 chars or less (w/o newline), for proper
4886  * display by utility.
4887  * Non-error counters are first.
4888  * Start of "error" conters is indicated by a leading "E " on the first
4889  * "error" counter, and doesn't count in label length.
4890  * The EgrOvfl list needs to be last so we truncate them at the configured
4891  * context count for the device.
4892  * cntr7322indices contains the corresponding register indices.
4893  */
4894 static const char cntr7322names[] =
4895 	"Interrupts\n"
4896 	"HostBusStall\n"
4897 	"E RxTIDFull\n"
4898 	"RxTIDInvalid\n"
4899 	"RxTIDFloDrop\n" /* 7322 only */
4900 	"Ctxt0EgrOvfl\n"
4901 	"Ctxt1EgrOvfl\n"
4902 	"Ctxt2EgrOvfl\n"
4903 	"Ctxt3EgrOvfl\n"
4904 	"Ctxt4EgrOvfl\n"
4905 	"Ctxt5EgrOvfl\n"
4906 	"Ctxt6EgrOvfl\n"
4907 	"Ctxt7EgrOvfl\n"
4908 	"Ctxt8EgrOvfl\n"
4909 	"Ctxt9EgrOvfl\n"
4910 	"Ctx10EgrOvfl\n"
4911 	"Ctx11EgrOvfl\n"
4912 	"Ctx12EgrOvfl\n"
4913 	"Ctx13EgrOvfl\n"
4914 	"Ctx14EgrOvfl\n"
4915 	"Ctx15EgrOvfl\n"
4916 	"Ctx16EgrOvfl\n"
4917 	"Ctx17EgrOvfl\n"
4918 	;
4919 
4920 static const u32 cntr7322indices[] = {
4921 	cr_lbint | _PORT_64BIT_FLAG,
4922 	cr_lbstall | _PORT_64BIT_FLAG,
4923 	cr_tidfull,
4924 	cr_tidinvalid,
4925 	cr_rxtidflowdrop,
4926 	cr_base_egrovfl + 0,
4927 	cr_base_egrovfl + 1,
4928 	cr_base_egrovfl + 2,
4929 	cr_base_egrovfl + 3,
4930 	cr_base_egrovfl + 4,
4931 	cr_base_egrovfl + 5,
4932 	cr_base_egrovfl + 6,
4933 	cr_base_egrovfl + 7,
4934 	cr_base_egrovfl + 8,
4935 	cr_base_egrovfl + 9,
4936 	cr_base_egrovfl + 10,
4937 	cr_base_egrovfl + 11,
4938 	cr_base_egrovfl + 12,
4939 	cr_base_egrovfl + 13,
4940 	cr_base_egrovfl + 14,
4941 	cr_base_egrovfl + 15,
4942 	cr_base_egrovfl + 16,
4943 	cr_base_egrovfl + 17,
4944 };
4945 
4946 /*
4947  * same as cntr7322names and cntr7322indices, but for port-specific counters.
4948  * portcntr7322indices is somewhat complicated by some registers needing
4949  * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4950  */
4951 static const char portcntr7322names[] =
4952 	"TxPkt\n"
4953 	"TxFlowPkt\n"
4954 	"TxWords\n"
4955 	"RxPkt\n"
4956 	"RxFlowPkt\n"
4957 	"RxWords\n"
4958 	"TxFlowStall\n"
4959 	"TxDmaDesc\n"  /* 7220 and 7322-only */
4960 	"E RxDlidFltr\n"  /* 7220 and 7322-only */
4961 	"IBStatusChng\n"
4962 	"IBLinkDown\n"
4963 	"IBLnkRecov\n"
4964 	"IBRxLinkErr\n"
4965 	"IBSymbolErr\n"
4966 	"RxLLIErr\n"
4967 	"RxBadFormat\n"
4968 	"RxBadLen\n"
4969 	"RxBufOvrfl\n"
4970 	"RxEBP\n"
4971 	"RxFlowCtlErr\n"
4972 	"RxICRCerr\n"
4973 	"RxLPCRCerr\n"
4974 	"RxVCRCerr\n"
4975 	"RxInvalLen\n"
4976 	"RxInvalPKey\n"
4977 	"RxPktDropped\n"
4978 	"TxBadLength\n"
4979 	"TxDropped\n"
4980 	"TxInvalLen\n"
4981 	"TxUnderrun\n"
4982 	"TxUnsupVL\n"
4983 	"RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4984 	"RxVL15Drop\n"
4985 	"RxVlErr\n"
4986 	"XcessBufOvfl\n"
4987 	"RxQPBadCtxt\n" /* 7322-only from here down */
4988 	"TXBadHeader\n"
4989 	;
4990 
4991 static const u32 portcntr7322indices[] = {
4992 	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4993 	crp_pktsendflow,
4994 	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4995 	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4996 	crp_pktrcvflowctrl,
4997 	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4998 	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4999 	crp_txsdmadesc | _PORT_64BIT_FLAG,
5000 	crp_rxdlidfltr,
5001 	crp_ibstatuschange,
5002 	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
5003 	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
5004 	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
5005 	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
5006 	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
5007 	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
5008 	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
5009 	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
5010 	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
5011 	crp_rcvflowctrlviol,
5012 	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
5013 	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
5014 	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
5015 	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
5016 	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
5017 	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
5018 	crp_txminmaxlenerr,
5019 	crp_txdroppedpkt,
5020 	crp_txlenerr,
5021 	crp_txunderrun,
5022 	crp_txunsupvl,
5023 	QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
5024 	QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
5025 	QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
5026 	QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
5027 	crp_rxqpinvalidctxt,
5028 	crp_txhdrerr,
5029 };
5030 
5031 /* do all the setup to make the counter reads efficient later */
5032 static void init_7322_cntrnames(struct qib_devdata *dd)
5033 {
5034 	int i, j = 0;
5035 	char *s;
5036 
5037 	for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
5038 	     i++) {
5039 		/* we always have at least one counter before the egrovfl */
5040 		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
5041 			j = 1;
5042 		s = strchr(s + 1, '\n');
5043 		if (s && j)
5044 			j++;
5045 	}
5046 	dd->cspec->ncntrs = i;
5047 	if (!s)
5048 		/* full list; size is without terminating null */
5049 		dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
5050 	else
5051 		dd->cspec->cntrnamelen = 1 + s - cntr7322names;
5052 	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
5053 		* sizeof(u64), GFP_KERNEL);
5054 	if (!dd->cspec->cntrs)
5055 		qib_dev_err(dd, "Failed allocation for counters\n");
5056 
5057 	for (i = 0, s = (char *)portcntr7322names; s; i++)
5058 		s = strchr(s + 1, '\n');
5059 	dd->cspec->nportcntrs = i - 1;
5060 	dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
5061 	for (i = 0; i < dd->num_pports; ++i) {
5062 		dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
5063 			* sizeof(u64), GFP_KERNEL);
5064 		if (!dd->pport[i].cpspec->portcntrs)
5065 			qib_dev_err(dd,
5066 				"Failed allocation for portcounters\n");
5067 	}
5068 }
5069 
5070 static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
5071 			      u64 **cntrp)
5072 {
5073 	u32 ret;
5074 
5075 	if (namep) {
5076 		ret = dd->cspec->cntrnamelen;
5077 		if (pos >= ret)
5078 			ret = 0; /* final read after getting everything */
5079 		else
5080 			*namep = (char *) cntr7322names;
5081 	} else {
5082 		u64 *cntr = dd->cspec->cntrs;
5083 		int i;
5084 
5085 		ret = dd->cspec->ncntrs * sizeof(u64);
5086 		if (!cntr || pos >= ret) {
5087 			/* everything read, or couldn't get memory */
5088 			ret = 0;
5089 			goto done;
5090 		}
5091 		*cntrp = cntr;
5092 		for (i = 0; i < dd->cspec->ncntrs; i++)
5093 			if (cntr7322indices[i] & _PORT_64BIT_FLAG)
5094 				*cntr++ = read_7322_creg(dd,
5095 							 cntr7322indices[i] &
5096 							 _PORT_CNTR_IDXMASK);
5097 			else
5098 				*cntr++ = read_7322_creg32(dd,
5099 							   cntr7322indices[i]);
5100 	}
5101 done:
5102 	return ret;
5103 }
5104 
5105 static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
5106 				  char **namep, u64 **cntrp)
5107 {
5108 	u32 ret;
5109 
5110 	if (namep) {
5111 		ret = dd->cspec->portcntrnamelen;
5112 		if (pos >= ret)
5113 			ret = 0; /* final read after getting everything */
5114 		else
5115 			*namep = (char *)portcntr7322names;
5116 	} else {
5117 		struct qib_pportdata *ppd = &dd->pport[port];
5118 		u64 *cntr = ppd->cpspec->portcntrs;
5119 		int i;
5120 
5121 		ret = dd->cspec->nportcntrs * sizeof(u64);
5122 		if (!cntr || pos >= ret) {
5123 			/* everything read, or couldn't get memory */
5124 			ret = 0;
5125 			goto done;
5126 		}
5127 		*cntrp = cntr;
5128 		for (i = 0; i < dd->cspec->nportcntrs; i++) {
5129 			if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
5130 				*cntr++ = qib_portcntr_7322(ppd,
5131 					portcntr7322indices[i] &
5132 					_PORT_CNTR_IDXMASK);
5133 			else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
5134 				*cntr++ = read_7322_creg_port(ppd,
5135 					   portcntr7322indices[i] &
5136 					    _PORT_CNTR_IDXMASK);
5137 			else
5138 				*cntr++ = read_7322_creg32_port(ppd,
5139 					   portcntr7322indices[i]);
5140 		}
5141 	}
5142 done:
5143 	return ret;
5144 }
5145 
5146 /**
5147  * qib_get_7322_faststats - get word counters from chip before they overflow
5148  * @opaque - contains a pointer to the qlogic_ib device qib_devdata
5149  *
5150  * VESTIGIAL IBA7322 has no "small fast counters", so the only
5151  * real purpose of this function is to maintain the notion of
5152  * "active time", which in turn is only logged into the eeprom,
5153  * which we don;t have, yet, for 7322-based boards.
5154  *
5155  * called from add_timer
5156  */
5157 static void qib_get_7322_faststats(unsigned long opaque)
5158 {
5159 	struct qib_devdata *dd = (struct qib_devdata *) opaque;
5160 	struct qib_pportdata *ppd;
5161 	unsigned long flags;
5162 	u64 traffic_wds;
5163 	int pidx;
5164 
5165 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5166 		ppd = dd->pport + pidx;
5167 
5168 		/*
5169 		 * If port isn't enabled or not operational ports, or
5170 		 * diags is running (can cause memory diags to fail)
5171 		 * skip this port this time.
5172 		 */
5173 		if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
5174 		    || dd->diag_client)
5175 			continue;
5176 
5177 		/*
5178 		 * Maintain an activity timer, based on traffic
5179 		 * exceeding a threshold, so we need to check the word-counts
5180 		 * even if they are 64-bit.
5181 		 */
5182 		traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
5183 			qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
5184 		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
5185 		traffic_wds -= ppd->dd->traffic_wds;
5186 		ppd->dd->traffic_wds += traffic_wds;
5187 		if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
5188 			atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
5189 		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
5190 		if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
5191 						QIB_IB_QDR) &&
5192 		    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
5193 				    QIBL_LINKACTIVE)) &&
5194 		    ppd->cpspec->qdr_dfe_time &&
5195 		    time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
5196 			ppd->cpspec->qdr_dfe_on = 0;
5197 
5198 			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
5199 					    ppd->dd->cspec->r1 ?
5200 					    QDR_STATIC_ADAPT_INIT_R1 :
5201 					    QDR_STATIC_ADAPT_INIT);
5202 			force_h1(ppd);
5203 		}
5204 	}
5205 	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
5206 }
5207 
5208 /*
5209  * If we were using MSIx, try to fallback to INTx.
5210  */
5211 static int qib_7322_intr_fallback(struct qib_devdata *dd)
5212 {
5213 	if (!dd->cspec->num_msix_entries)
5214 		return 0; /* already using INTx */
5215 
5216 	qib_devinfo(dd->pcidev,
5217 		"MSIx interrupt not detected, trying INTx interrupts\n");
5218 	qib_7322_nomsix(dd);
5219 	qib_enable_intx(dd->pcidev);
5220 	qib_setup_7322_interrupt(dd, 0);
5221 	return 1;
5222 }
5223 
5224 /*
5225  * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
5226  * than resetting the IBC or external link state, and useful in some
5227  * cases to cause some retraining.  To do this right, we reset IBC
5228  * as well, then return to previous state (which may be still in reset)
5229  * NOTE: some callers of this "know" this writes the current value
5230  * of cpspec->ibcctrl_a as part of it's operation, so if that changes,
5231  * check all callers.
5232  */
5233 static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
5234 {
5235 	u64 val;
5236 	struct qib_devdata *dd = ppd->dd;
5237 	const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
5238 		SYM_MASK(IBPCSConfig_0, xcv_treset) |
5239 		SYM_MASK(IBPCSConfig_0, tx_rx_reset);
5240 
5241 	val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
5242 	qib_write_kreg(dd, kr_hwerrmask,
5243 		       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
5244 	qib_write_kreg_port(ppd, krp_ibcctrl_a,
5245 			    ppd->cpspec->ibcctrl_a &
5246 			    ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
5247 
5248 	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
5249 	qib_read_kreg32(dd, kr_scratch);
5250 	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
5251 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
5252 	qib_write_kreg(dd, kr_scratch, 0ULL);
5253 	qib_write_kreg(dd, kr_hwerrclear,
5254 		       SYM_MASK(HwErrClear, statusValidNoEopClear));
5255 	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
5256 }
5257 
5258 /*
5259  * This code for non-IBTA-compliant IB speed negotiation is only known to
5260  * work for the SDR to DDR transition, and only between an HCA and a switch
5261  * with recent firmware.  It is based on observed heuristics, rather than
5262  * actual knowledge of the non-compliant speed negotiation.
5263  * It has a number of hard-coded fields, since the hope is to rewrite this
5264  * when a spec is available on how the negoation is intended to work.
5265  */
5266 static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
5267 				 u32 dcnt, u32 *data)
5268 {
5269 	int i;
5270 	u64 pbc;
5271 	u32 __iomem *piobuf;
5272 	u32 pnum, control, len;
5273 	struct qib_devdata *dd = ppd->dd;
5274 
5275 	i = 0;
5276 	len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
5277 	control = qib_7322_setpbc_control(ppd, len, 0, 15);
5278 	pbc = ((u64) control << 32) | len;
5279 	while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
5280 		if (i++ > 15)
5281 			return;
5282 		udelay(2);
5283 	}
5284 	/* disable header check on this packet, since it can't be valid */
5285 	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
5286 	writeq(pbc, piobuf);
5287 	qib_flush_wc();
5288 	qib_pio_copy(piobuf + 2, hdr, 7);
5289 	qib_pio_copy(piobuf + 9, data, dcnt);
5290 	if (dd->flags & QIB_USE_SPCL_TRIG) {
5291 		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
5292 
5293 		qib_flush_wc();
5294 		__raw_writel(0xaebecede, piobuf + spcl_off);
5295 	}
5296 	qib_flush_wc();
5297 	qib_sendbuf_done(dd, pnum);
5298 	/* and re-enable hdr check */
5299 	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
5300 }
5301 
5302 /*
5303  * _start packet gets sent twice at start, _done gets sent twice at end
5304  */
5305 static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5306 {
5307 	struct qib_devdata *dd = ppd->dd;
5308 	static u32 swapped;
5309 	u32 dw, i, hcnt, dcnt, *data;
5310 	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
5311 	static u32 madpayload_start[0x40] = {
5312 		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5313 		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5314 		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
5315 		};
5316 	static u32 madpayload_done[0x40] = {
5317 		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5318 		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5319 		0x40000001, 0x1388, 0x15e, /* rest 0's */
5320 		};
5321 
5322 	dcnt = ARRAY_SIZE(madpayload_start);
5323 	hcnt = ARRAY_SIZE(hdr);
5324 	if (!swapped) {
5325 		/* for maintainability, do it at runtime */
5326 		for (i = 0; i < hcnt; i++) {
5327 			dw = (__force u32) cpu_to_be32(hdr[i]);
5328 			hdr[i] = dw;
5329 		}
5330 		for (i = 0; i < dcnt; i++) {
5331 			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
5332 			madpayload_start[i] = dw;
5333 			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
5334 			madpayload_done[i] = dw;
5335 		}
5336 		swapped = 1;
5337 	}
5338 
5339 	data = which ? madpayload_done : madpayload_start;
5340 
5341 	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5342 	qib_read_kreg64(dd, kr_scratch);
5343 	udelay(2);
5344 	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5345 	qib_read_kreg64(dd, kr_scratch);
5346 	udelay(2);
5347 }
5348 
5349 /*
5350  * Do the absolute minimum to cause an IB speed change, and make it
5351  * ready, but don't actually trigger the change.   The caller will
5352  * do that when ready (if link is in Polling training state, it will
5353  * happen immediately, otherwise when link next goes down)
5354  *
5355  * This routine should only be used as part of the DDR autonegotation
5356  * code for devices that are not compliant with IB 1.2 (or code that
5357  * fixes things up for same).
5358  *
5359  * When link has gone down, and autoneg enabled, or autoneg has
5360  * failed and we give up until next time we set both speeds, and
5361  * then we want IBTA enabled as well as "use max enabled speed.
5362  */
5363 static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5364 {
5365 	u64 newctrlb;
5366 	newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5367 				    IBA7322_IBC_IBTA_1_2_MASK |
5368 				    IBA7322_IBC_MAX_SPEED_MASK);
5369 
5370 	if (speed & (speed - 1)) /* multiple speeds */
5371 		newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
5372 				    IBA7322_IBC_IBTA_1_2_MASK |
5373 				    IBA7322_IBC_MAX_SPEED_MASK;
5374 	else
5375 		newctrlb |= speed == QIB_IB_QDR ?
5376 			IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
5377 			((speed == QIB_IB_DDR ?
5378 			  IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
5379 
5380 	if (newctrlb == ppd->cpspec->ibcctrl_b)
5381 		return;
5382 
5383 	ppd->cpspec->ibcctrl_b = newctrlb;
5384 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5385 	qib_write_kreg(ppd->dd, kr_scratch, 0);
5386 }
5387 
5388 /*
5389  * This routine is only used when we are not talking to another
5390  * IB 1.2-compliant device that we think can do DDR.
5391  * (This includes all existing switch chips as of Oct 2007.)
5392  * 1.2-compliant devices go directly to DDR prior to reaching INIT
5393  */
5394 static void try_7322_autoneg(struct qib_pportdata *ppd)
5395 {
5396 	unsigned long flags;
5397 
5398 	spin_lock_irqsave(&ppd->lflags_lock, flags);
5399 	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5400 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5401 	qib_autoneg_7322_send(ppd, 0);
5402 	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5403 	qib_7322_mini_pcs_reset(ppd);
5404 	/* 2 msec is minimum length of a poll cycle */
5405 	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
5406 			   msecs_to_jiffies(2));
5407 }
5408 
5409 /*
5410  * Handle the empirically determined mechanism for auto-negotiation
5411  * of DDR speed with switches.
5412  */
5413 static void autoneg_7322_work(struct work_struct *work)
5414 {
5415 	struct qib_pportdata *ppd;
5416 	struct qib_devdata *dd;
5417 	u64 startms;
5418 	u32 i;
5419 	unsigned long flags;
5420 
5421 	ppd = container_of(work, struct qib_chippport_specific,
5422 			    autoneg_work.work)->ppd;
5423 	dd = ppd->dd;
5424 
5425 	startms = jiffies_to_msecs(jiffies);
5426 
5427 	/*
5428 	 * Busy wait for this first part, it should be at most a
5429 	 * few hundred usec, since we scheduled ourselves for 2msec.
5430 	 */
5431 	for (i = 0; i < 25; i++) {
5432 		if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5433 		     == IB_7322_LT_STATE_POLLQUIET) {
5434 			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5435 			break;
5436 		}
5437 		udelay(100);
5438 	}
5439 
5440 	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5441 		goto done; /* we got there early or told to stop */
5442 
5443 	/* we expect this to timeout */
5444 	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5445 			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5446 			       msecs_to_jiffies(90)))
5447 		goto done;
5448 	qib_7322_mini_pcs_reset(ppd);
5449 
5450 	/* we expect this to timeout */
5451 	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5452 			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5453 			       msecs_to_jiffies(1700)))
5454 		goto done;
5455 	qib_7322_mini_pcs_reset(ppd);
5456 
5457 	set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5458 
5459 	/*
5460 	 * Wait up to 250 msec for link to train and get to INIT at DDR;
5461 	 * this should terminate early.
5462 	 */
5463 	wait_event_timeout(ppd->cpspec->autoneg_wait,
5464 		!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5465 		msecs_to_jiffies(250));
5466 done:
5467 	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5468 		spin_lock_irqsave(&ppd->lflags_lock, flags);
5469 		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5470 		if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5471 			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5472 			ppd->cpspec->autoneg_tries = 0;
5473 		}
5474 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5475 		set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5476 	}
5477 }
5478 
5479 /*
5480  * This routine is used to request IPG set in the QLogic switch.
5481  * Only called if r1.
5482  */
5483 static void try_7322_ipg(struct qib_pportdata *ppd)
5484 {
5485 	struct qib_ibport *ibp = &ppd->ibport_data;
5486 	struct ib_mad_send_buf *send_buf;
5487 	struct ib_mad_agent *agent;
5488 	struct ib_smp *smp;
5489 	unsigned delay;
5490 	int ret;
5491 
5492 	agent = ibp->send_agent;
5493 	if (!agent)
5494 		goto retry;
5495 
5496 	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5497 				      IB_MGMT_MAD_DATA, GFP_ATOMIC);
5498 	if (IS_ERR(send_buf))
5499 		goto retry;
5500 
5501 	if (!ibp->smi_ah) {
5502 		struct ib_ah *ah;
5503 
5504 		ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
5505 		if (IS_ERR(ah))
5506 			ret = PTR_ERR(ah);
5507 		else {
5508 			send_buf->ah = ah;
5509 			ibp->smi_ah = to_iah(ah);
5510 			ret = 0;
5511 		}
5512 	} else {
5513 		send_buf->ah = &ibp->smi_ah->ibah;
5514 		ret = 0;
5515 	}
5516 
5517 	smp = send_buf->mad;
5518 	smp->base_version = IB_MGMT_BASE_VERSION;
5519 	smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5520 	smp->class_version = 1;
5521 	smp->method = IB_MGMT_METHOD_SEND;
5522 	smp->hop_cnt = 1;
5523 	smp->attr_id = QIB_VENDOR_IPG;
5524 	smp->attr_mod = 0;
5525 
5526 	if (!ret)
5527 		ret = ib_post_send_mad(send_buf, NULL);
5528 	if (ret)
5529 		ib_free_send_mad(send_buf);
5530 retry:
5531 	delay = 2 << ppd->cpspec->ipg_tries;
5532 	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5533 			   msecs_to_jiffies(delay));
5534 }
5535 
5536 /*
5537  * Timeout handler for setting IPG.
5538  * Only called if r1.
5539  */
5540 static void ipg_7322_work(struct work_struct *work)
5541 {
5542 	struct qib_pportdata *ppd;
5543 
5544 	ppd = container_of(work, struct qib_chippport_specific,
5545 			   ipg_work.work)->ppd;
5546 	if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5547 	    && ++ppd->cpspec->ipg_tries <= 10)
5548 		try_7322_ipg(ppd);
5549 }
5550 
5551 static u32 qib_7322_iblink_state(u64 ibcs)
5552 {
5553 	u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5554 
5555 	switch (state) {
5556 	case IB_7322_L_STATE_INIT:
5557 		state = IB_PORT_INIT;
5558 		break;
5559 	case IB_7322_L_STATE_ARM:
5560 		state = IB_PORT_ARMED;
5561 		break;
5562 	case IB_7322_L_STATE_ACTIVE:
5563 		/* fall through */
5564 	case IB_7322_L_STATE_ACT_DEFER:
5565 		state = IB_PORT_ACTIVE;
5566 		break;
5567 	default: /* fall through */
5568 	case IB_7322_L_STATE_DOWN:
5569 		state = IB_PORT_DOWN;
5570 		break;
5571 	}
5572 	return state;
5573 }
5574 
5575 /* returns the IBTA port state, rather than the IBC link training state */
5576 static u8 qib_7322_phys_portstate(u64 ibcs)
5577 {
5578 	u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5579 	return qib_7322_physportstate[state];
5580 }
5581 
5582 static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5583 {
5584 	int ret = 0, symadj = 0;
5585 	unsigned long flags;
5586 	int mult;
5587 
5588 	spin_lock_irqsave(&ppd->lflags_lock, flags);
5589 	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5590 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5591 
5592 	/* Update our picture of width and speed from chip */
5593 	if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5594 		ppd->link_speed_active = QIB_IB_QDR;
5595 		mult = 4;
5596 	} else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5597 		ppd->link_speed_active = QIB_IB_DDR;
5598 		mult = 2;
5599 	} else {
5600 		ppd->link_speed_active = QIB_IB_SDR;
5601 		mult = 1;
5602 	}
5603 	if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5604 		ppd->link_width_active = IB_WIDTH_4X;
5605 		mult *= 4;
5606 	} else
5607 		ppd->link_width_active = IB_WIDTH_1X;
5608 	ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5609 
5610 	if (!ibup) {
5611 		u64 clr;
5612 
5613 		/* Link went down. */
5614 		/* do IPG MAD again after linkdown, even if last time failed */
5615 		ppd->cpspec->ipg_tries = 0;
5616 		clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5617 			(SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5618 			 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5619 		if (clr)
5620 			qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5621 		if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5622 				     QIBL_IB_AUTONEG_INPROG)))
5623 			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5624 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5625 			struct qib_qsfp_data *qd =
5626 				&ppd->cpspec->qsfp_data;
5627 			/* unlock the Tx settings, speed may change */
5628 			qib_write_kreg_port(ppd, krp_tx_deemph_override,
5629 				SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5630 				reset_tx_deemphasis_override));
5631 			qib_cancel_sends(ppd);
5632 			/* on link down, ensure sane pcs state */
5633 			qib_7322_mini_pcs_reset(ppd);
5634 			/* schedule the qsfp refresh which should turn the link
5635 			   off */
5636 			if (ppd->dd->flags & QIB_HAS_QSFP) {
5637 				qd->t_insert = jiffies;
5638 				queue_work(ib_wq, &qd->work);
5639 			}
5640 			spin_lock_irqsave(&ppd->sdma_lock, flags);
5641 			if (__qib_sdma_running(ppd))
5642 				__qib_sdma_process_event(ppd,
5643 					qib_sdma_event_e70_go_idle);
5644 			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5645 		}
5646 		clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5647 		if (clr == ppd->cpspec->iblnkdownsnap)
5648 			ppd->cpspec->iblnkdowndelta++;
5649 	} else {
5650 		if (qib_compat_ddr_negotiate &&
5651 		    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5652 				     QIBL_IB_AUTONEG_INPROG)) &&
5653 		    ppd->link_speed_active == QIB_IB_SDR &&
5654 		    (ppd->link_speed_enabled & QIB_IB_DDR)
5655 		    && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5656 			/* we are SDR, and auto-negotiation enabled */
5657 			++ppd->cpspec->autoneg_tries;
5658 			if (!ppd->cpspec->ibdeltainprog) {
5659 				ppd->cpspec->ibdeltainprog = 1;
5660 				ppd->cpspec->ibsymdelta +=
5661 					read_7322_creg32_port(ppd,
5662 						crp_ibsymbolerr) -
5663 						ppd->cpspec->ibsymsnap;
5664 				ppd->cpspec->iblnkerrdelta +=
5665 					read_7322_creg32_port(ppd,
5666 						crp_iblinkerrrecov) -
5667 						ppd->cpspec->iblnkerrsnap;
5668 			}
5669 			try_7322_autoneg(ppd);
5670 			ret = 1; /* no other IB status change processing */
5671 		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5672 			   ppd->link_speed_active == QIB_IB_SDR) {
5673 			qib_autoneg_7322_send(ppd, 1);
5674 			set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5675 			qib_7322_mini_pcs_reset(ppd);
5676 			udelay(2);
5677 			ret = 1; /* no other IB status change processing */
5678 		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5679 			   (ppd->link_speed_active & QIB_IB_DDR)) {
5680 			spin_lock_irqsave(&ppd->lflags_lock, flags);
5681 			ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5682 					 QIBL_IB_AUTONEG_FAILED);
5683 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5684 			ppd->cpspec->autoneg_tries = 0;
5685 			/* re-enable SDR, for next link down */
5686 			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5687 			wake_up(&ppd->cpspec->autoneg_wait);
5688 			symadj = 1;
5689 		} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5690 			/*
5691 			 * Clear autoneg failure flag, and do setup
5692 			 * so we'll try next time link goes down and
5693 			 * back to INIT (possibly connected to a
5694 			 * different device).
5695 			 */
5696 			spin_lock_irqsave(&ppd->lflags_lock, flags);
5697 			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5698 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5699 			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5700 			symadj = 1;
5701 		}
5702 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5703 			symadj = 1;
5704 			if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5705 				try_7322_ipg(ppd);
5706 			if (!ppd->cpspec->recovery_init)
5707 				setup_7322_link_recovery(ppd, 0);
5708 			ppd->cpspec->qdr_dfe_time = jiffies +
5709 				msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5710 		}
5711 		ppd->cpspec->ibmalfusesnap = 0;
5712 		ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5713 			crp_errlink);
5714 	}
5715 	if (symadj) {
5716 		ppd->cpspec->iblnkdownsnap =
5717 			read_7322_creg32_port(ppd, crp_iblinkdown);
5718 		if (ppd->cpspec->ibdeltainprog) {
5719 			ppd->cpspec->ibdeltainprog = 0;
5720 			ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5721 				crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5722 			ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5723 				crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5724 		}
5725 	} else if (!ibup && qib_compat_ddr_negotiate &&
5726 		   !ppd->cpspec->ibdeltainprog &&
5727 			!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5728 		ppd->cpspec->ibdeltainprog = 1;
5729 		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5730 			crp_ibsymbolerr);
5731 		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5732 			crp_iblinkerrrecov);
5733 	}
5734 
5735 	if (!ret)
5736 		qib_setup_7322_setextled(ppd, ibup);
5737 	return ret;
5738 }
5739 
5740 /*
5741  * Does read/modify/write to appropriate registers to
5742  * set output and direction bits selected by mask.
5743  * these are in their canonical postions (e.g. lsb of
5744  * dir will end up in D48 of extctrl on existing chips).
5745  * returns contents of GP Inputs.
5746  */
5747 static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5748 {
5749 	u64 read_val, new_out;
5750 	unsigned long flags;
5751 
5752 	if (mask) {
5753 		/* some bits being written, lock access to GPIO */
5754 		dir &= mask;
5755 		out &= mask;
5756 		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5757 		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5758 		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5759 		new_out = (dd->cspec->gpio_out & ~mask) | out;
5760 
5761 		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5762 		qib_write_kreg(dd, kr_gpio_out, new_out);
5763 		dd->cspec->gpio_out = new_out;
5764 		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5765 	}
5766 	/*
5767 	 * It is unlikely that a read at this time would get valid
5768 	 * data on a pin whose direction line was set in the same
5769 	 * call to this function. We include the read here because
5770 	 * that allows us to potentially combine a change on one pin with
5771 	 * a read on another, and because the old code did something like
5772 	 * this.
5773 	 */
5774 	read_val = qib_read_kreg64(dd, kr_extstatus);
5775 	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5776 }
5777 
5778 /* Enable writes to config EEPROM, if possible. Returns previous state */
5779 static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5780 {
5781 	int prev_wen;
5782 	u32 mask;
5783 
5784 	mask = 1 << QIB_EEPROM_WEN_NUM;
5785 	prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5786 	gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5787 
5788 	return prev_wen & 1;
5789 }
5790 
5791 /*
5792  * Read fundamental info we need to use the chip.  These are
5793  * the registers that describe chip capabilities, and are
5794  * saved in shadow registers.
5795  */
5796 static void get_7322_chip_params(struct qib_devdata *dd)
5797 {
5798 	u64 val;
5799 	u32 piobufs;
5800 	int mtu;
5801 
5802 	dd->palign = qib_read_kreg32(dd, kr_pagealign);
5803 
5804 	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5805 
5806 	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5807 	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5808 	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5809 	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5810 	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5811 
5812 	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5813 	dd->piobcnt2k = val & ~0U;
5814 	dd->piobcnt4k = val >> 32;
5815 	val = qib_read_kreg64(dd, kr_sendpiosize);
5816 	dd->piosize2k = val & ~0U;
5817 	dd->piosize4k = val >> 32;
5818 
5819 	mtu = ib_mtu_enum_to_int(qib_ibmtu);
5820 	if (mtu == -1)
5821 		mtu = QIB_DEFAULT_MTU;
5822 	dd->pport[0].ibmtu = (u32)mtu;
5823 	dd->pport[1].ibmtu = (u32)mtu;
5824 
5825 	/* these may be adjusted in init_chip_wc_pat() */
5826 	dd->pio2kbase = (u32 __iomem *)
5827 		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5828 	dd->pio4kbase = (u32 __iomem *)
5829 		((char __iomem *) dd->kregbase +
5830 		 (dd->piobufbase >> 32));
5831 	/*
5832 	 * 4K buffers take 2 pages; we use roundup just to be
5833 	 * paranoid; we calculate it once here, rather than on
5834 	 * ever buf allocate
5835 	 */
5836 	dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5837 
5838 	piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5839 
5840 	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5841 		(sizeof(u64) * BITS_PER_BYTE / 2);
5842 }
5843 
5844 /*
5845  * The chip base addresses in cspec and cpspec have to be set
5846  * after possible init_chip_wc_pat(), rather than in
5847  * get_7322_chip_params(), so split out as separate function
5848  */
5849 static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5850 {
5851 	u32 cregbase;
5852 	cregbase = qib_read_kreg32(dd, kr_counterregbase);
5853 
5854 	dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5855 		(char __iomem *)dd->kregbase);
5856 
5857 	dd->egrtidbase = (u64 __iomem *)
5858 		((char __iomem *) dd->kregbase + dd->rcvegrbase);
5859 
5860 	/* port registers are defined as relative to base of chip */
5861 	dd->pport[0].cpspec->kpregbase =
5862 		(u64 __iomem *)((char __iomem *)dd->kregbase);
5863 	dd->pport[1].cpspec->kpregbase =
5864 		(u64 __iomem *)(dd->palign +
5865 		(char __iomem *)dd->kregbase);
5866 	dd->pport[0].cpspec->cpregbase =
5867 		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5868 		kr_counterregbase) + (char __iomem *)dd->kregbase);
5869 	dd->pport[1].cpspec->cpregbase =
5870 		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5871 		kr_counterregbase) + (char __iomem *)dd->kregbase);
5872 }
5873 
5874 /*
5875  * This is a fairly special-purpose observer, so we only support
5876  * the port-specific parts of SendCtrl
5877  */
5878 
5879 #define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |		\
5880 			   SYM_MASK(SendCtrl_0, SDmaEnable) |		\
5881 			   SYM_MASK(SendCtrl_0, SDmaIntEnable) |	\
5882 			   SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5883 			   SYM_MASK(SendCtrl_0, SDmaHalt) |		\
5884 			   SYM_MASK(SendCtrl_0, IBVLArbiterEn) |	\
5885 			   SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5886 
5887 static int sendctrl_hook(struct qib_devdata *dd,
5888 			 const struct diag_observer *op, u32 offs,
5889 			 u64 *data, u64 mask, int only_32)
5890 {
5891 	unsigned long flags;
5892 	unsigned idx;
5893 	unsigned pidx;
5894 	struct qib_pportdata *ppd = NULL;
5895 	u64 local_data, all_bits;
5896 
5897 	/*
5898 	 * The fixed correspondence between Physical ports and pports is
5899 	 * severed. We need to hunt for the ppd that corresponds
5900 	 * to the offset we got. And we have to do that without admitting
5901 	 * we know the stride, apparently.
5902 	 */
5903 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5904 		u64 __iomem *psptr;
5905 		u32 psoffs;
5906 
5907 		ppd = dd->pport + pidx;
5908 		if (!ppd->cpspec->kpregbase)
5909 			continue;
5910 
5911 		psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5912 		psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5913 		if (psoffs == offs)
5914 			break;
5915 	}
5916 
5917 	/* If pport is not being managed by driver, just avoid shadows. */
5918 	if (pidx >= dd->num_pports)
5919 		ppd = NULL;
5920 
5921 	/* In any case, "idx" is flat index in kreg space */
5922 	idx = offs / sizeof(u64);
5923 
5924 	all_bits = ~0ULL;
5925 	if (only_32)
5926 		all_bits >>= 32;
5927 
5928 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
5929 	if (!ppd || (mask & all_bits) != all_bits) {
5930 		/*
5931 		 * At least some mask bits are zero, so we need
5932 		 * to read. The judgement call is whether from
5933 		 * reg or shadow. First-cut: read reg, and complain
5934 		 * if any bits which should be shadowed are different
5935 		 * from their shadowed value.
5936 		 */
5937 		if (only_32)
5938 			local_data = (u64)qib_read_kreg32(dd, idx);
5939 		else
5940 			local_data = qib_read_kreg64(dd, idx);
5941 		*data = (local_data & ~mask) | (*data & mask);
5942 	}
5943 	if (mask) {
5944 		/*
5945 		 * At least some mask bits are one, so we need
5946 		 * to write, but only shadow some bits.
5947 		 */
5948 		u64 sval, tval; /* Shadowed, transient */
5949 
5950 		/*
5951 		 * New shadow val is bits we don't want to touch,
5952 		 * ORed with bits we do, that are intended for shadow.
5953 		 */
5954 		if (ppd) {
5955 			sval = ppd->p_sendctrl & ~mask;
5956 			sval |= *data & SENDCTRL_SHADOWED & mask;
5957 			ppd->p_sendctrl = sval;
5958 		} else
5959 			sval = *data & SENDCTRL_SHADOWED & mask;
5960 		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5961 		qib_write_kreg(dd, idx, tval);
5962 		qib_write_kreg(dd, kr_scratch, 0Ull);
5963 	}
5964 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5965 	return only_32 ? 4 : 8;
5966 }
5967 
5968 static const struct diag_observer sendctrl_0_observer = {
5969 	sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5970 	KREG_IDX(SendCtrl_0) * sizeof(u64)
5971 };
5972 
5973 static const struct diag_observer sendctrl_1_observer = {
5974 	sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5975 	KREG_IDX(SendCtrl_1) * sizeof(u64)
5976 };
5977 
5978 static ushort sdma_fetch_prio = 8;
5979 module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5980 MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5981 
5982 /* Besides logging QSFP events, we set appropriate TxDDS values */
5983 static void init_txdds_table(struct qib_pportdata *ppd, int override);
5984 
5985 static void qsfp_7322_event(struct work_struct *work)
5986 {
5987 	struct qib_qsfp_data *qd;
5988 	struct qib_pportdata *ppd;
5989 	unsigned long pwrup;
5990 	unsigned long flags;
5991 	int ret;
5992 	u32 le2;
5993 
5994 	qd = container_of(work, struct qib_qsfp_data, work);
5995 	ppd = qd->ppd;
5996 	pwrup = qd->t_insert +
5997 		msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
5998 
5999 	/* Delay for 20 msecs to allow ModPrs resistor to setup */
6000 	mdelay(QSFP_MODPRS_LAG_MSEC);
6001 
6002 	if (!qib_qsfp_mod_present(ppd)) {
6003 		ppd->cpspec->qsfp_data.modpresent = 0;
6004 		/* Set the physical link to disabled */
6005 		qib_set_ib_7322_lstate(ppd, 0,
6006 				       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
6007 		spin_lock_irqsave(&ppd->lflags_lock, flags);
6008 		ppd->lflags &= ~QIBL_LINKV;
6009 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
6010 	} else {
6011 		/*
6012 		 * Some QSFP's not only do not respond until the full power-up
6013 		 * time, but may behave badly if we try. So hold off responding
6014 		 * to insertion.
6015 		 */
6016 		while (1) {
6017 			if (time_is_before_jiffies(pwrup))
6018 				break;
6019 			msleep(20);
6020 		}
6021 
6022 		ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
6023 
6024 		/*
6025 		 * Need to change LE2 back to defaults if we couldn't
6026 		 * read the cable type (to handle cable swaps), so do this
6027 		 * even on failure to read cable information.  We don't
6028 		 * get here for QME, so IS_QME check not needed here.
6029 		 */
6030 		if (!ret && !ppd->dd->cspec->r1) {
6031 			if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
6032 				le2 = LE2_QME;
6033 			else if (qd->cache.atten[1] >= qib_long_atten &&
6034 				 QSFP_IS_CU(qd->cache.tech))
6035 				le2 = LE2_5m;
6036 			else
6037 				le2 = LE2_DEFAULT;
6038 		} else
6039 			le2 = LE2_DEFAULT;
6040 		ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
6041 		/*
6042 		 * We always change parameteters, since we can choose
6043 		 * values for cables without eeproms, and the cable may have
6044 		 * changed from a cable with full or partial eeprom content
6045 		 * to one with partial or no content.
6046 		 */
6047 		init_txdds_table(ppd, 0);
6048 		/* The physical link is being re-enabled only when the
6049 		 * previous state was DISABLED and the VALID bit is not
6050 		 * set. This should only happen when  the cable has been
6051 		 * physically pulled. */
6052 		if (!ppd->cpspec->qsfp_data.modpresent &&
6053 		    (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
6054 			ppd->cpspec->qsfp_data.modpresent = 1;
6055 			qib_set_ib_7322_lstate(ppd, 0,
6056 				QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6057 			spin_lock_irqsave(&ppd->lflags_lock, flags);
6058 			ppd->lflags |= QIBL_LINKV;
6059 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
6060 		}
6061 	}
6062 }
6063 
6064 /*
6065  * There is little we can do but complain to the user if QSFP
6066  * initialization fails.
6067  */
6068 static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
6069 {
6070 	unsigned long flags;
6071 	struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
6072 	struct qib_devdata *dd = ppd->dd;
6073 	u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
6074 
6075 	mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
6076 	qd->ppd = ppd;
6077 	qib_qsfp_init(qd, qsfp_7322_event);
6078 	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
6079 	dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
6080 	dd->cspec->gpio_mask |= mod_prs_bit;
6081 	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
6082 	qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
6083 	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
6084 }
6085 
6086 /*
6087  * called at device initialization time, and also if the txselect
6088  * module parameter is changed.  This is used for cables that don't
6089  * have valid QSFP EEPROMs (not present, or attenuation is zero).
6090  * We initialize to the default, then if there is a specific
6091  * unit,port match, we use that (and set it immediately, for the
6092  * current speed, if the link is at INIT or better).
6093  * String format is "default# unit#,port#=# ... u,p=#", separators must
6094  * be a SPACE character.  A newline terminates.  The u,p=# tuples may
6095  * optionally have "u,p=#,#", where the final # is the H1 value
6096  * The last specific match is used (actually, all are used, but last
6097  * one is the one that winds up set); if none at all, fall back on default.
6098  */
6099 static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
6100 {
6101 	char *nxt, *str;
6102 	u32 pidx, unit, port, deflt, h1;
6103 	unsigned long val;
6104 	int any = 0, seth1;
6105 	int txdds_size;
6106 
6107 	str = txselect_list;
6108 
6109 	/* default number is validated in setup_txselect() */
6110 	deflt = simple_strtoul(str, &nxt, 0);
6111 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
6112 		dd->pport[pidx].cpspec->no_eep = deflt;
6113 
6114 	txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
6115 	if (IS_QME(dd) || IS_QMH(dd))
6116 		txdds_size += TXDDS_MFG_SZ;
6117 
6118 	while (*nxt && nxt[1]) {
6119 		str = ++nxt;
6120 		unit = simple_strtoul(str, &nxt, 0);
6121 		if (nxt == str || !*nxt || *nxt != ',') {
6122 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6123 				;
6124 			continue;
6125 		}
6126 		str = ++nxt;
6127 		port = simple_strtoul(str, &nxt, 0);
6128 		if (nxt == str || *nxt != '=') {
6129 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6130 				;
6131 			continue;
6132 		}
6133 		str = ++nxt;
6134 		val = simple_strtoul(str, &nxt, 0);
6135 		if (nxt == str) {
6136 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6137 				;
6138 			continue;
6139 		}
6140 		if (val >= txdds_size)
6141 			continue;
6142 		seth1 = 0;
6143 		h1 = 0; /* gcc thinks it might be used uninitted */
6144 		if (*nxt == ',' && nxt[1]) {
6145 			str = ++nxt;
6146 			h1 = (u32)simple_strtoul(str, &nxt, 0);
6147 			if (nxt == str)
6148 				while (*nxt && *nxt++ != ' ') /* skip */
6149 					;
6150 			else
6151 				seth1 = 1;
6152 		}
6153 		for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
6154 		     ++pidx) {
6155 			struct qib_pportdata *ppd = &dd->pport[pidx];
6156 
6157 			if (ppd->port != port || !ppd->link_speed_supported)
6158 				continue;
6159 			ppd->cpspec->no_eep = val;
6160 			if (seth1)
6161 				ppd->cpspec->h1_val = h1;
6162 			/* now change the IBC and serdes, overriding generic */
6163 			init_txdds_table(ppd, 1);
6164 			/* Re-enable the physical state machine on mezz boards
6165 			 * now that the correct settings have been set.
6166 			 * QSFP boards are handles by the QSFP event handler */
6167 			if (IS_QMH(dd) || IS_QME(dd))
6168 				qib_set_ib_7322_lstate(ppd, 0,
6169 					    QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6170 			any++;
6171 		}
6172 		if (*nxt == '\n')
6173 			break; /* done */
6174 	}
6175 	if (change && !any) {
6176 		/* no specific setting, use the default.
6177 		 * Change the IBC and serdes, but since it's
6178 		 * general, don't override specific settings.
6179 		 */
6180 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
6181 			if (dd->pport[pidx].link_speed_supported)
6182 				init_txdds_table(&dd->pport[pidx], 0);
6183 	}
6184 }
6185 
6186 /* handle the txselect parameter changing */
6187 static int setup_txselect(const char *str, struct kernel_param *kp)
6188 {
6189 	struct qib_devdata *dd;
6190 	unsigned long val;
6191 	int ret;
6192 
6193 	if (strlen(str) >= MAX_ATTEN_LEN) {
6194 		pr_info("txselect_values string too long\n");
6195 		return -ENOSPC;
6196 	}
6197 	ret = kstrtoul(str, 0, &val);
6198 	if (ret || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
6199 				TXDDS_MFG_SZ)) {
6200 		pr_info("txselect_values must start with a number < %d\n",
6201 			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
6202 		return ret ? ret : -EINVAL;
6203 	}
6204 
6205 	strcpy(txselect_list, str);
6206 	list_for_each_entry(dd, &qib_dev_list, list)
6207 		if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
6208 			set_no_qsfp_atten(dd, 1);
6209 	return 0;
6210 }
6211 
6212 /*
6213  * Write the final few registers that depend on some of the
6214  * init setup.  Done late in init, just before bringing up
6215  * the serdes.
6216  */
6217 static int qib_late_7322_initreg(struct qib_devdata *dd)
6218 {
6219 	int ret = 0, n;
6220 	u64 val;
6221 
6222 	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
6223 	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
6224 	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
6225 	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
6226 	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
6227 	if (val != dd->pioavailregs_phys) {
6228 		qib_dev_err(dd,
6229 			"Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
6230 			(unsigned long) dd->pioavailregs_phys,
6231 			(unsigned long long) val);
6232 		ret = -EINVAL;
6233 	}
6234 
6235 	n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
6236 	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
6237 	/* driver sends get pkey, lid, etc. checking also, to catch bugs */
6238 	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
6239 
6240 	qib_register_observer(dd, &sendctrl_0_observer);
6241 	qib_register_observer(dd, &sendctrl_1_observer);
6242 
6243 	dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
6244 	qib_write_kreg(dd, kr_control, dd->control);
6245 	/*
6246 	 * Set SendDmaFetchPriority and init Tx params, including
6247 	 * QSFP handler on boards that have QSFP.
6248 	 * First set our default attenuation entry for cables that
6249 	 * don't have valid attenuation.
6250 	 */
6251 	set_no_qsfp_atten(dd, 0);
6252 	for (n = 0; n < dd->num_pports; ++n) {
6253 		struct qib_pportdata *ppd = dd->pport + n;
6254 
6255 		qib_write_kreg_port(ppd, krp_senddmaprioritythld,
6256 				    sdma_fetch_prio & 0xf);
6257 		/* Initialize qsfp if present on board. */
6258 		if (dd->flags & QIB_HAS_QSFP)
6259 			qib_init_7322_qsfp(ppd);
6260 	}
6261 	dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
6262 	qib_write_kreg(dd, kr_control, dd->control);
6263 
6264 	return ret;
6265 }
6266 
6267 /* per IB port errors.  */
6268 #define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
6269 	MASK_ACROSS(8, 15))
6270 #define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
6271 #define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
6272 	MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
6273 	MASK_ACROSS(0, 11))
6274 
6275 /*
6276  * Write the initialization per-port registers that need to be done at
6277  * driver load and after reset completes (i.e., that aren't done as part
6278  * of other init procedures called from qib_init.c).
6279  * Some of these should be redundant on reset, but play safe.
6280  */
6281 static void write_7322_init_portregs(struct qib_pportdata *ppd)
6282 {
6283 	u64 val;
6284 	int i;
6285 
6286 	if (!ppd->link_speed_supported) {
6287 		/* no buffer credits for this port */
6288 		for (i = 1; i < 8; i++)
6289 			qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
6290 		qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
6291 		qib_write_kreg(ppd->dd, kr_scratch, 0);
6292 		return;
6293 	}
6294 
6295 	/*
6296 	 * Set the number of supported virtual lanes in IBC,
6297 	 * for flow control packet handling on unsupported VLs
6298 	 */
6299 	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
6300 	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
6301 	val |= (u64)(ppd->vls_supported - 1) <<
6302 		SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
6303 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
6304 
6305 	qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
6306 
6307 	/* enable tx header checking */
6308 	qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
6309 			    IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
6310 			    IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
6311 
6312 	qib_write_kreg_port(ppd, krp_ncmodectrl,
6313 		SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
6314 
6315 	/*
6316 	 * Unconditionally clear the bufmask bits.  If SDMA is
6317 	 * enabled, we'll set them appropriately later.
6318 	 */
6319 	qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
6320 	qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
6321 	qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
6322 	if (ppd->dd->cspec->r1)
6323 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
6324 }
6325 
6326 /*
6327  * Write the initialization per-device registers that need to be done at
6328  * driver load and after reset completes (i.e., that aren't done as part
6329  * of other init procedures called from qib_init.c).  Also write per-port
6330  * registers that are affected by overall device config, such as QP mapping
6331  * Some of these should be redundant on reset, but play safe.
6332  */
6333 static void write_7322_initregs(struct qib_devdata *dd)
6334 {
6335 	struct qib_pportdata *ppd;
6336 	int i, pidx;
6337 	u64 val;
6338 
6339 	/* Set Multicast QPs received by port 2 to map to context one. */
6340 	qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
6341 
6342 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
6343 		unsigned n, regno;
6344 		unsigned long flags;
6345 
6346 		if (dd->n_krcv_queues < 2 ||
6347 			!dd->pport[pidx].link_speed_supported)
6348 			continue;
6349 
6350 		ppd = &dd->pport[pidx];
6351 
6352 		/* be paranoid against later code motion, etc. */
6353 		spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
6354 		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
6355 		spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
6356 
6357 		/* Initialize QP to context mapping */
6358 		regno = krp_rcvqpmaptable;
6359 		val = 0;
6360 		if (dd->num_pports > 1)
6361 			n = dd->first_user_ctxt / dd->num_pports;
6362 		else
6363 			n = dd->first_user_ctxt - 1;
6364 		for (i = 0; i < 32; ) {
6365 			unsigned ctxt;
6366 
6367 			if (dd->num_pports > 1)
6368 				ctxt = (i % n) * dd->num_pports + pidx;
6369 			else if (i % n)
6370 				ctxt = (i % n) + 1;
6371 			else
6372 				ctxt = ppd->hw_pidx;
6373 			val |= ctxt << (5 * (i % 6));
6374 			i++;
6375 			if (i % 6 == 0) {
6376 				qib_write_kreg_port(ppd, regno, val);
6377 				val = 0;
6378 				regno++;
6379 			}
6380 		}
6381 		qib_write_kreg_port(ppd, regno, val);
6382 	}
6383 
6384 	/*
6385 	 * Setup up interrupt mitigation for kernel contexts, but
6386 	 * not user contexts (user contexts use interrupts when
6387 	 * stalled waiting for any packet, so want those interrupts
6388 	 * right away).
6389 	 */
6390 	for (i = 0; i < dd->first_user_ctxt; i++) {
6391 		dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
6392 		qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
6393 	}
6394 
6395 	/*
6396 	 * Initialize  as (disabled) rcvflow tables.  Application code
6397 	 * will setup each flow as it uses the flow.
6398 	 * Doesn't clear any of the error bits that might be set.
6399 	 */
6400 	val = TIDFLOW_ERRBITS; /* these are W1C */
6401 	for (i = 0; i < dd->cfgctxts; i++) {
6402 		int flow;
6403 		for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6404 			qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6405 	}
6406 
6407 	/*
6408 	 * dual cards init to dual port recovery, single port cards to
6409 	 * the one port.  Dual port cards may later adjust to 1 port,
6410 	 * and then back to dual port if both ports are connected
6411 	 * */
6412 	if (dd->num_pports)
6413 		setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
6414 }
6415 
6416 static int qib_init_7322_variables(struct qib_devdata *dd)
6417 {
6418 	struct qib_pportdata *ppd;
6419 	unsigned features, pidx, sbufcnt;
6420 	int ret, mtu;
6421 	u32 sbufs, updthresh;
6422 
6423 	/* pport structs are contiguous, allocated after devdata */
6424 	ppd = (struct qib_pportdata *)(dd + 1);
6425 	dd->pport = ppd;
6426 	ppd[0].dd = dd;
6427 	ppd[1].dd = dd;
6428 
6429 	dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6430 
6431 	ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6432 	ppd[1].cpspec = &ppd[0].cpspec[1];
6433 	ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
6434 	ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
6435 
6436 	spin_lock_init(&dd->cspec->rcvmod_lock);
6437 	spin_lock_init(&dd->cspec->gpio_lock);
6438 
6439 	/* we haven't yet set QIB_PRESENT, so use read directly */
6440 	dd->revision = readq(&dd->kregbase[kr_revision]);
6441 
6442 	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
6443 		qib_dev_err(dd,
6444 			"Revision register read failure, giving up initialization\n");
6445 		ret = -ENODEV;
6446 		goto bail;
6447 	}
6448 	dd->flags |= QIB_PRESENT;  /* now register routines work */
6449 
6450 	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
6451 	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
6452 	dd->cspec->r1 = dd->minrev == 1;
6453 
6454 	get_7322_chip_params(dd);
6455 	features = qib_7322_boardname(dd);
6456 
6457 	/* now that piobcnt2k and 4k set, we can allocate these */
6458 	sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
6459 		NUM_VL15_BUFS + BITS_PER_LONG - 1;
6460 	sbufcnt /= BITS_PER_LONG;
6461 	dd->cspec->sendchkenable = kmalloc(sbufcnt *
6462 		sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
6463 	dd->cspec->sendgrhchk = kmalloc(sbufcnt *
6464 		sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
6465 	dd->cspec->sendibchk = kmalloc(sbufcnt *
6466 		sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
6467 	if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
6468 		!dd->cspec->sendibchk) {
6469 		qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
6470 		ret = -ENOMEM;
6471 		goto bail;
6472 	}
6473 
6474 	ppd = dd->pport;
6475 
6476 	/*
6477 	 * GPIO bits for TWSI data and clock,
6478 	 * used for serial EEPROM.
6479 	 */
6480 	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
6481 	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
6482 	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
6483 
6484 	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
6485 		QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
6486 		QIB_HAS_THRESH_UPDATE |
6487 		(sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
6488 	dd->flags |= qib_special_trigger ?
6489 		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
6490 
6491 	/*
6492 	 * Setup initial values.  These may change when PAT is enabled, but
6493 	 * we need these to do initial chip register accesses.
6494 	 */
6495 	qib_7322_set_baseaddrs(dd);
6496 
6497 	mtu = ib_mtu_enum_to_int(qib_ibmtu);
6498 	if (mtu == -1)
6499 		mtu = QIB_DEFAULT_MTU;
6500 
6501 	dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
6502 	/* all hwerrors become interrupts, unless special purposed */
6503 	dd->cspec->hwerrmask = ~0ULL;
6504 	/*  link_recovery setup causes these errors, so ignore them,
6505 	 *  other than clearing them when they occur */
6506 	dd->cspec->hwerrmask &=
6507 		~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6508 		  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6509 		  HWE_MASK(LATriggered));
6510 
6511 	for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6512 		struct qib_chippport_specific *cp = ppd->cpspec;
6513 		ppd->link_speed_supported = features & PORT_SPD_CAP;
6514 		features >>=  PORT_SPD_CAP_SHIFT;
6515 		if (!ppd->link_speed_supported) {
6516 			/* single port mode (7340, or configured) */
6517 			dd->skip_kctxt_mask |= 1 << pidx;
6518 			if (pidx == 0) {
6519 				/* Make sure port is disabled. */
6520 				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6521 				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6522 				ppd[0] = ppd[1];
6523 				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6524 						  IBSerdesPClkNotDetectMask_0)
6525 						  | SYM_MASK(HwErrMask,
6526 						  SDmaMemReadErrMask_0));
6527 				dd->cspec->int_enable_mask &= ~(
6528 				     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6529 				     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6530 				     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6531 				     SYM_MASK(IntMask, SDmaIntMask_0) |
6532 				     SYM_MASK(IntMask, ErrIntMask_0) |
6533 				     SYM_MASK(IntMask, SendDoneIntMask_0));
6534 			} else {
6535 				/* Make sure port is disabled. */
6536 				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6537 				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6538 				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6539 						  IBSerdesPClkNotDetectMask_1)
6540 						  | SYM_MASK(HwErrMask,
6541 						  SDmaMemReadErrMask_1));
6542 				dd->cspec->int_enable_mask &= ~(
6543 				     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6544 				     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6545 				     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6546 				     SYM_MASK(IntMask, SDmaIntMask_1) |
6547 				     SYM_MASK(IntMask, ErrIntMask_1) |
6548 				     SYM_MASK(IntMask, SendDoneIntMask_1));
6549 			}
6550 			continue;
6551 		}
6552 
6553 		dd->num_pports++;
6554 		qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6555 
6556 		ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6557 		ppd->link_width_enabled = IB_WIDTH_4X;
6558 		ppd->link_speed_enabled = ppd->link_speed_supported;
6559 		/*
6560 		 * Set the initial values to reasonable default, will be set
6561 		 * for real when link is up.
6562 		 */
6563 		ppd->link_width_active = IB_WIDTH_4X;
6564 		ppd->link_speed_active = QIB_IB_SDR;
6565 		ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6566 		switch (qib_num_cfg_vls) {
6567 		case 1:
6568 			ppd->vls_supported = IB_VL_VL0;
6569 			break;
6570 		case 2:
6571 			ppd->vls_supported = IB_VL_VL0_1;
6572 			break;
6573 		default:
6574 			qib_devinfo(dd->pcidev,
6575 				    "Invalid num_vls %u, using 4 VLs\n",
6576 				    qib_num_cfg_vls);
6577 			qib_num_cfg_vls = 4;
6578 			/* fall through */
6579 		case 4:
6580 			ppd->vls_supported = IB_VL_VL0_3;
6581 			break;
6582 		case 8:
6583 			if (mtu <= 2048)
6584 				ppd->vls_supported = IB_VL_VL0_7;
6585 			else {
6586 				qib_devinfo(dd->pcidev,
6587 					    "Invalid num_vls %u for MTU %d "
6588 					    ", using 4 VLs\n",
6589 					    qib_num_cfg_vls, mtu);
6590 				ppd->vls_supported = IB_VL_VL0_3;
6591 				qib_num_cfg_vls = 4;
6592 			}
6593 			break;
6594 		}
6595 		ppd->vls_operational = ppd->vls_supported;
6596 
6597 		init_waitqueue_head(&cp->autoneg_wait);
6598 		INIT_DELAYED_WORK(&cp->autoneg_work,
6599 				  autoneg_7322_work);
6600 		if (ppd->dd->cspec->r1)
6601 			INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6602 
6603 		/*
6604 		 * For Mez and similar cards, no qsfp info, so do
6605 		 * the "cable info" setup here.  Can be overridden
6606 		 * in adapter-specific routines.
6607 		 */
6608 		if (!(dd->flags & QIB_HAS_QSFP)) {
6609 			if (!IS_QMH(dd) && !IS_QME(dd))
6610 				qib_devinfo(dd->pcidev,
6611 					"IB%u:%u: Unknown mezzanine card type\n",
6612 					dd->unit, ppd->port);
6613 			cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
6614 			/*
6615 			 * Choose center value as default tx serdes setting
6616 			 * until changed through module parameter.
6617 			 */
6618 			ppd->cpspec->no_eep = IS_QMH(dd) ?
6619 				TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6620 		} else
6621 			cp->h1_val = H1_FORCE_VAL;
6622 
6623 		/* Avoid writes to chip for mini_init */
6624 		if (!qib_mini_init)
6625 			write_7322_init_portregs(ppd);
6626 
6627 		init_timer(&cp->chase_timer);
6628 		cp->chase_timer.function = reenable_chase;
6629 		cp->chase_timer.data = (unsigned long)ppd;
6630 
6631 		ppd++;
6632 	}
6633 
6634 	dd->rcvhdrentsize = qib_rcvhdrentsize ?
6635 		qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6636 	dd->rcvhdrsize = qib_rcvhdrsize ?
6637 		qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
6638 	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6639 
6640 	/* we always allocate at least 2048 bytes for eager buffers */
6641 	dd->rcvegrbufsize = max(mtu, 2048);
6642 	BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
6643 	dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
6644 
6645 	qib_7322_tidtemplate(dd);
6646 
6647 	/*
6648 	 * We can request a receive interrupt for 1 or
6649 	 * more packets from current offset.
6650 	 */
6651 	dd->rhdrhead_intr_off =
6652 		(u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6653 
6654 	/* setup the stats timer; the add_timer is done at end of init */
6655 	init_timer(&dd->stats_timer);
6656 	dd->stats_timer.function = qib_get_7322_faststats;
6657 	dd->stats_timer.data = (unsigned long) dd;
6658 
6659 	dd->ureg_align = 0x10000;  /* 64KB alignment */
6660 
6661 	dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6662 
6663 	qib_7322_config_ctxts(dd);
6664 	qib_set_ctxtcnt(dd);
6665 
6666 	if (qib_wc_pat) {
6667 		resource_size_t vl15off;
6668 		/*
6669 		 * We do not set WC on the VL15 buffers to avoid
6670 		 * a rare problem with unaligned writes from
6671 		 * interrupt-flushed store buffers, so we need
6672 		 * to map those separately here.  We can't solve
6673 		 * this for the rarely used mtrr case.
6674 		 */
6675 		ret = init_chip_wc_pat(dd, 0);
6676 		if (ret)
6677 			goto bail;
6678 
6679 		/* vl15 buffers start just after the 4k buffers */
6680 		vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6681 			dd->piobcnt4k * dd->align4k;
6682 		dd->piovl15base	= ioremap_nocache(vl15off,
6683 						  NUM_VL15_BUFS * dd->align4k);
6684 		if (!dd->piovl15base) {
6685 			ret = -ENOMEM;
6686 			goto bail;
6687 		}
6688 	}
6689 	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6690 
6691 	ret = 0;
6692 	if (qib_mini_init)
6693 		goto bail;
6694 	if (!dd->num_pports) {
6695 		qib_dev_err(dd, "No ports enabled, giving up initialization\n");
6696 		goto bail; /* no error, so can still figure out why err */
6697 	}
6698 
6699 	write_7322_initregs(dd);
6700 	ret = qib_create_ctxts(dd);
6701 	init_7322_cntrnames(dd);
6702 
6703 	updthresh = 8U; /* update threshold */
6704 
6705 	/* use all of 4KB buffers for the kernel SDMA, zero if !SDMA.
6706 	 * reserve the update threshold amount for other kernel use, such
6707 	 * as sending SMI, MAD, and ACKs, or 3, whichever is greater,
6708 	 * unless we aren't enabling SDMA, in which case we want to use
6709 	 * all the 4k bufs for the kernel.
6710 	 * if this was less than the update threshold, we could wait
6711 	 * a long time for an update.  Coded this way because we
6712 	 * sometimes change the update threshold for various reasons,
6713 	 * and we want this to remain robust.
6714 	 */
6715 	if (dd->flags & QIB_HAS_SEND_DMA) {
6716 		dd->cspec->sdmabufcnt = dd->piobcnt4k;
6717 		sbufs = updthresh > 3 ? updthresh : 3;
6718 	} else {
6719 		dd->cspec->sdmabufcnt = 0;
6720 		sbufs = dd->piobcnt4k;
6721 	}
6722 	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6723 		dd->cspec->sdmabufcnt;
6724 	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6725 	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
6726 	dd->last_pio = dd->cspec->lastbuf_for_pio;
6727 	dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6728 		dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
6729 
6730 	/*
6731 	 * If we have 16 user contexts, we will have 7 sbufs
6732 	 * per context, so reduce the update threshold to match.  We
6733 	 * want to update before we actually run out, at low pbufs/ctxt
6734 	 * so give ourselves some margin.
6735 	 */
6736 	if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6737 		updthresh = dd->pbufsctxt - 2;
6738 	dd->cspec->updthresh_dflt = updthresh;
6739 	dd->cspec->updthresh = updthresh;
6740 
6741 	/* before full enable, no interrupts, no locking needed */
6742 	dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6743 			     << SYM_LSB(SendCtrl, AvailUpdThld)) |
6744 			SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6745 
6746 	dd->psxmitwait_supported = 1;
6747 	dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6748 bail:
6749 	if (!dd->ctxtcnt)
6750 		dd->ctxtcnt = 1; /* for other initialization code */
6751 
6752 	return ret;
6753 }
6754 
6755 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6756 					u32 *pbufnum)
6757 {
6758 	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6759 	struct qib_devdata *dd = ppd->dd;
6760 
6761 	/* last is same for 2k and 4k, because we use 4k if all 2k busy */
6762 	if (pbc & PBC_7322_VL15_SEND) {
6763 		first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6764 		last = first;
6765 	} else {
6766 		if ((plen + 1) > dd->piosize2kmax_dwords)
6767 			first = dd->piobcnt2k;
6768 		else
6769 			first = 0;
6770 		last = dd->cspec->lastbuf_for_pio;
6771 	}
6772 	return qib_getsendbuf_range(dd, pbufnum, first, last);
6773 }
6774 
6775 static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6776 				     u32 start)
6777 {
6778 	qib_write_kreg_port(ppd, krp_psinterval, intv);
6779 	qib_write_kreg_port(ppd, krp_psstart, start);
6780 }
6781 
6782 /*
6783  * Must be called with sdma_lock held, or before init finished.
6784  */
6785 static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6786 {
6787 	qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6788 }
6789 
6790 /*
6791  * sdma_lock should be acquired before calling this routine
6792  */
6793 static void dump_sdma_7322_state(struct qib_pportdata *ppd)
6794 {
6795 	u64 reg, reg1, reg2;
6796 
6797 	reg = qib_read_kreg_port(ppd, krp_senddmastatus);
6798 	qib_dev_porterr(ppd->dd, ppd->port,
6799 		"SDMA senddmastatus: 0x%016llx\n", reg);
6800 
6801 	reg = qib_read_kreg_port(ppd, krp_sendctrl);
6802 	qib_dev_porterr(ppd->dd, ppd->port,
6803 		"SDMA sendctrl: 0x%016llx\n", reg);
6804 
6805 	reg = qib_read_kreg_port(ppd, krp_senddmabase);
6806 	qib_dev_porterr(ppd->dd, ppd->port,
6807 		"SDMA senddmabase: 0x%016llx\n", reg);
6808 
6809 	reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
6810 	reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
6811 	reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
6812 	qib_dev_porterr(ppd->dd, ppd->port,
6813 		"SDMA senddmabufmask 0:%llx  1:%llx  2:%llx\n",
6814 		 reg, reg1, reg2);
6815 
6816 	/* get bufuse bits, clear them, and print them again if non-zero */
6817 	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6818 	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
6819 	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6820 	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg1);
6821 	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6822 	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg2);
6823 	/* 0 and 1 should always be zero, so print as short form */
6824 	qib_dev_porterr(ppd->dd, ppd->port,
6825 		 "SDMA current senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6826 		 reg, reg1, reg2);
6827 	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6828 	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6829 	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6830 	/* 0 and 1 should always be zero, so print as short form */
6831 	qib_dev_porterr(ppd->dd, ppd->port,
6832 		 "SDMA cleared senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6833 		 reg, reg1, reg2);
6834 
6835 	reg = qib_read_kreg_port(ppd, krp_senddmatail);
6836 	qib_dev_porterr(ppd->dd, ppd->port,
6837 		"SDMA senddmatail: 0x%016llx\n", reg);
6838 
6839 	reg = qib_read_kreg_port(ppd, krp_senddmahead);
6840 	qib_dev_porterr(ppd->dd, ppd->port,
6841 		"SDMA senddmahead: 0x%016llx\n", reg);
6842 
6843 	reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
6844 	qib_dev_porterr(ppd->dd, ppd->port,
6845 		"SDMA senddmaheadaddr: 0x%016llx\n", reg);
6846 
6847 	reg = qib_read_kreg_port(ppd, krp_senddmalengen);
6848 	qib_dev_porterr(ppd->dd, ppd->port,
6849 		"SDMA senddmalengen: 0x%016llx\n", reg);
6850 
6851 	reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
6852 	qib_dev_porterr(ppd->dd, ppd->port,
6853 		"SDMA senddmadesccnt: 0x%016llx\n", reg);
6854 
6855 	reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
6856 	qib_dev_porterr(ppd->dd, ppd->port,
6857 		"SDMA senddmaidlecnt: 0x%016llx\n", reg);
6858 
6859 	reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
6860 	qib_dev_porterr(ppd->dd, ppd->port,
6861 		"SDMA senddmapriorityhld: 0x%016llx\n", reg);
6862 
6863 	reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
6864 	qib_dev_porterr(ppd->dd, ppd->port,
6865 		"SDMA senddmareloadcnt: 0x%016llx\n", reg);
6866 
6867 	dump_sdma_state(ppd);
6868 }
6869 
6870 static struct sdma_set_state_action sdma_7322_action_table[] = {
6871 	[qib_sdma_state_s00_hw_down] = {
6872 		.go_s99_running_tofalse = 1,
6873 		.op_enable = 0,
6874 		.op_intenable = 0,
6875 		.op_halt = 0,
6876 		.op_drain = 0,
6877 	},
6878 	[qib_sdma_state_s10_hw_start_up_wait] = {
6879 		.op_enable = 0,
6880 		.op_intenable = 1,
6881 		.op_halt = 1,
6882 		.op_drain = 0,
6883 	},
6884 	[qib_sdma_state_s20_idle] = {
6885 		.op_enable = 1,
6886 		.op_intenable = 1,
6887 		.op_halt = 1,
6888 		.op_drain = 0,
6889 	},
6890 	[qib_sdma_state_s30_sw_clean_up_wait] = {
6891 		.op_enable = 0,
6892 		.op_intenable = 1,
6893 		.op_halt = 1,
6894 		.op_drain = 0,
6895 	},
6896 	[qib_sdma_state_s40_hw_clean_up_wait] = {
6897 		.op_enable = 1,
6898 		.op_intenable = 1,
6899 		.op_halt = 1,
6900 		.op_drain = 0,
6901 	},
6902 	[qib_sdma_state_s50_hw_halt_wait] = {
6903 		.op_enable = 1,
6904 		.op_intenable = 1,
6905 		.op_halt = 1,
6906 		.op_drain = 1,
6907 	},
6908 	[qib_sdma_state_s99_running] = {
6909 		.op_enable = 1,
6910 		.op_intenable = 1,
6911 		.op_halt = 0,
6912 		.op_drain = 0,
6913 		.go_s99_running_totrue = 1,
6914 	},
6915 };
6916 
6917 static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6918 {
6919 	ppd->sdma_state.set_state_action = sdma_7322_action_table;
6920 }
6921 
6922 static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6923 {
6924 	struct qib_devdata *dd = ppd->dd;
6925 	unsigned lastbuf, erstbuf;
6926 	u64 senddmabufmask[3] = { 0 };
6927 	int n, ret = 0;
6928 
6929 	qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6930 	qib_sdma_7322_setlengen(ppd);
6931 	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6932 	qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6933 	qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6934 	qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6935 
6936 	if (dd->num_pports)
6937 		n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6938 	else
6939 		n = dd->cspec->sdmabufcnt; /* failsafe for init */
6940 	erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6941 		((dd->num_pports == 1 || ppd->port == 2) ? n :
6942 		dd->cspec->sdmabufcnt);
6943 	lastbuf = erstbuf + n;
6944 
6945 	ppd->sdma_state.first_sendbuf = erstbuf;
6946 	ppd->sdma_state.last_sendbuf = lastbuf;
6947 	for (; erstbuf < lastbuf; ++erstbuf) {
6948 		unsigned word = erstbuf / BITS_PER_LONG;
6949 		unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6950 
6951 		BUG_ON(word >= 3);
6952 		senddmabufmask[word] |= 1ULL << bit;
6953 	}
6954 	qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6955 	qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6956 	qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6957 	return ret;
6958 }
6959 
6960 /* sdma_lock must be held */
6961 static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6962 {
6963 	struct qib_devdata *dd = ppd->dd;
6964 	int sane;
6965 	int use_dmahead;
6966 	u16 swhead;
6967 	u16 swtail;
6968 	u16 cnt;
6969 	u16 hwhead;
6970 
6971 	use_dmahead = __qib_sdma_running(ppd) &&
6972 		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
6973 retry:
6974 	hwhead = use_dmahead ?
6975 		(u16) le64_to_cpu(*ppd->sdma_head_dma) :
6976 		(u16) qib_read_kreg_port(ppd, krp_senddmahead);
6977 
6978 	swhead = ppd->sdma_descq_head;
6979 	swtail = ppd->sdma_descq_tail;
6980 	cnt = ppd->sdma_descq_cnt;
6981 
6982 	if (swhead < swtail)
6983 		/* not wrapped */
6984 		sane = (hwhead >= swhead) & (hwhead <= swtail);
6985 	else if (swhead > swtail)
6986 		/* wrapped around */
6987 		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6988 			(hwhead <= swtail);
6989 	else
6990 		/* empty */
6991 		sane = (hwhead == swhead);
6992 
6993 	if (unlikely(!sane)) {
6994 		if (use_dmahead) {
6995 			/* try one more time, directly from the register */
6996 			use_dmahead = 0;
6997 			goto retry;
6998 		}
6999 		/* proceed as if no progress */
7000 		hwhead = swhead;
7001 	}
7002 
7003 	return hwhead;
7004 }
7005 
7006 static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
7007 {
7008 	u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
7009 
7010 	return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
7011 	       (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
7012 	       !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
7013 	       !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
7014 }
7015 
7016 /*
7017  * Compute the amount of delay before sending the next packet if the
7018  * port's send rate differs from the static rate set for the QP.
7019  * The delay affects the next packet and the amount of the delay is
7020  * based on the length of the this packet.
7021  */
7022 static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
7023 				   u8 srate, u8 vl)
7024 {
7025 	u8 snd_mult = ppd->delay_mult;
7026 	u8 rcv_mult = ib_rate_to_delay[srate];
7027 	u32 ret;
7028 
7029 	ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
7030 
7031 	/* Indicate VL15, else set the VL in the control word */
7032 	if (vl == 15)
7033 		ret |= PBC_7322_VL15_SEND_CTRL;
7034 	else
7035 		ret |= vl << PBC_VL_NUM_LSB;
7036 	ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
7037 
7038 	return ret;
7039 }
7040 
7041 /*
7042  * Enable the per-port VL15 send buffers for use.
7043  * They follow the rest of the buffers, without a config parameter.
7044  * This was in initregs, but that is done before the shadow
7045  * is set up, and this has to be done after the shadow is
7046  * set up.
7047  */
7048 static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
7049 {
7050 	unsigned vl15bufs;
7051 
7052 	vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
7053 	qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
7054 			       TXCHK_CHG_TYPE_KERN, NULL);
7055 }
7056 
7057 static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
7058 {
7059 	if (rcd->ctxt < NUM_IB_PORTS) {
7060 		if (rcd->dd->num_pports > 1) {
7061 			rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
7062 			rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
7063 		} else {
7064 			rcd->rcvegrcnt = KCTXT0_EGRCNT;
7065 			rcd->rcvegr_tid_base = 0;
7066 		}
7067 	} else {
7068 		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
7069 		rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
7070 			(rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
7071 	}
7072 }
7073 
7074 #define QTXSLEEPS 5000
7075 static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
7076 				  u32 len, u32 which, struct qib_ctxtdata *rcd)
7077 {
7078 	int i;
7079 	const int last = start + len - 1;
7080 	const int lastr = last / BITS_PER_LONG;
7081 	u32 sleeps = 0;
7082 	int wait = rcd != NULL;
7083 	unsigned long flags;
7084 
7085 	while (wait) {
7086 		unsigned long shadow;
7087 		int cstart, previ = -1;
7088 
7089 		/*
7090 		 * when flipping from kernel to user, we can't change
7091 		 * the checking type if the buffer is allocated to the
7092 		 * driver.   It's OK the other direction, because it's
7093 		 * from close, and we have just disarm'ed all the
7094 		 * buffers.  All the kernel to kernel changes are also
7095 		 * OK.
7096 		 */
7097 		for (cstart = start; cstart <= last; cstart++) {
7098 			i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7099 				/ BITS_PER_LONG;
7100 			if (i != previ) {
7101 				shadow = (unsigned long)
7102 					le64_to_cpu(dd->pioavailregs_dma[i]);
7103 				previ = i;
7104 			}
7105 			if (test_bit(((2 * cstart) +
7106 				      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7107 				     % BITS_PER_LONG, &shadow))
7108 				break;
7109 		}
7110 
7111 		if (cstart > last)
7112 			break;
7113 
7114 		if (sleeps == QTXSLEEPS)
7115 			break;
7116 		/* make sure we see an updated copy next time around */
7117 		sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7118 		sleeps++;
7119 		msleep(20);
7120 	}
7121 
7122 	switch (which) {
7123 	case TXCHK_CHG_TYPE_DIS1:
7124 		/*
7125 		 * disable checking on a range; used by diags; just
7126 		 * one buffer, but still written generically
7127 		 */
7128 		for (i = start; i <= last; i++)
7129 			clear_bit(i, dd->cspec->sendchkenable);
7130 		break;
7131 
7132 	case TXCHK_CHG_TYPE_ENAB1:
7133 		/*
7134 		 * (re)enable checking on a range; used by diags; just
7135 		 * one buffer, but still written generically; read
7136 		 * scratch to be sure buffer actually triggered, not
7137 		 * just flushed from processor.
7138 		 */
7139 		qib_read_kreg32(dd, kr_scratch);
7140 		for (i = start; i <= last; i++)
7141 			set_bit(i, dd->cspec->sendchkenable);
7142 		break;
7143 
7144 	case TXCHK_CHG_TYPE_KERN:
7145 		/* usable by kernel */
7146 		for (i = start; i <= last; i++) {
7147 			set_bit(i, dd->cspec->sendibchk);
7148 			clear_bit(i, dd->cspec->sendgrhchk);
7149 		}
7150 		spin_lock_irqsave(&dd->uctxt_lock, flags);
7151 		/* see if we need to raise avail update threshold */
7152 		for (i = dd->first_user_ctxt;
7153 		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
7154 		     && i < dd->cfgctxts; i++)
7155 			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
7156 			   ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
7157 			   < dd->cspec->updthresh_dflt)
7158 				break;
7159 		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
7160 		if (i == dd->cfgctxts) {
7161 			spin_lock_irqsave(&dd->sendctrl_lock, flags);
7162 			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
7163 			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7164 			dd->sendctrl |= (dd->cspec->updthresh &
7165 					 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
7166 					   SYM_LSB(SendCtrl, AvailUpdThld);
7167 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7168 			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7169 		}
7170 		break;
7171 
7172 	case TXCHK_CHG_TYPE_USER:
7173 		/* for user process */
7174 		for (i = start; i <= last; i++) {
7175 			clear_bit(i, dd->cspec->sendibchk);
7176 			set_bit(i, dd->cspec->sendgrhchk);
7177 		}
7178 		spin_lock_irqsave(&dd->sendctrl_lock, flags);
7179 		if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
7180 			/ rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
7181 			dd->cspec->updthresh = (rcd->piocnt /
7182 						rcd->subctxt_cnt) - 1;
7183 			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7184 			dd->sendctrl |= (dd->cspec->updthresh &
7185 					SYM_RMASK(SendCtrl, AvailUpdThld))
7186 					<< SYM_LSB(SendCtrl, AvailUpdThld);
7187 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7188 			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7189 		} else
7190 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7191 		break;
7192 
7193 	default:
7194 		break;
7195 	}
7196 
7197 	for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
7198 		qib_write_kreg(dd, kr_sendcheckmask + i,
7199 			       dd->cspec->sendchkenable[i]);
7200 
7201 	for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
7202 		qib_write_kreg(dd, kr_sendgrhcheckmask + i,
7203 			       dd->cspec->sendgrhchk[i]);
7204 		qib_write_kreg(dd, kr_sendibpktmask + i,
7205 			       dd->cspec->sendibchk[i]);
7206 	}
7207 
7208 	/*
7209 	 * Be sure whatever we did was seen by the chip and acted upon,
7210 	 * before we return.  Mostly important for which >= 2.
7211 	 */
7212 	qib_read_kreg32(dd, kr_scratch);
7213 }
7214 
7215 
7216 /* useful for trigger analyzers, etc. */
7217 static void writescratch(struct qib_devdata *dd, u32 val)
7218 {
7219 	qib_write_kreg(dd, kr_scratch, val);
7220 }
7221 
7222 /* Dummy for now, use chip regs soon */
7223 static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
7224 {
7225 	return -ENXIO;
7226 }
7227 
7228 /**
7229  * qib_init_iba7322_funcs - set up the chip-specific function pointers
7230  * @dev: the pci_dev for qlogic_ib device
7231  * @ent: pci_device_id struct for this dev
7232  *
7233  * Also allocates, inits, and returns the devdata struct for this
7234  * device instance
7235  *
7236  * This is global, and is called directly at init to set up the
7237  * chip-specific function pointers for later use.
7238  */
7239 struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
7240 					   const struct pci_device_id *ent)
7241 {
7242 	struct qib_devdata *dd;
7243 	int ret, i;
7244 	u32 tabsize, actual_cnt = 0;
7245 
7246 	dd = qib_alloc_devdata(pdev,
7247 		NUM_IB_PORTS * sizeof(struct qib_pportdata) +
7248 		sizeof(struct qib_chip_specific) +
7249 		NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
7250 	if (IS_ERR(dd))
7251 		goto bail;
7252 
7253 	dd->f_bringup_serdes    = qib_7322_bringup_serdes;
7254 	dd->f_cleanup           = qib_setup_7322_cleanup;
7255 	dd->f_clear_tids        = qib_7322_clear_tids;
7256 	dd->f_free_irq          = qib_7322_free_irq;
7257 	dd->f_get_base_info     = qib_7322_get_base_info;
7258 	dd->f_get_msgheader     = qib_7322_get_msgheader;
7259 	dd->f_getsendbuf        = qib_7322_getsendbuf;
7260 	dd->f_gpio_mod          = gpio_7322_mod;
7261 	dd->f_eeprom_wen        = qib_7322_eeprom_wen;
7262 	dd->f_hdrqempty         = qib_7322_hdrqempty;
7263 	dd->f_ib_updown         = qib_7322_ib_updown;
7264 	dd->f_init_ctxt         = qib_7322_init_ctxt;
7265 	dd->f_initvl15_bufs     = qib_7322_initvl15_bufs;
7266 	dd->f_intr_fallback     = qib_7322_intr_fallback;
7267 	dd->f_late_initreg      = qib_late_7322_initreg;
7268 	dd->f_setpbc_control    = qib_7322_setpbc_control;
7269 	dd->f_portcntr          = qib_portcntr_7322;
7270 	dd->f_put_tid           = qib_7322_put_tid;
7271 	dd->f_quiet_serdes      = qib_7322_mini_quiet_serdes;
7272 	dd->f_rcvctrl           = rcvctrl_7322_mod;
7273 	dd->f_read_cntrs        = qib_read_7322cntrs;
7274 	dd->f_read_portcntrs    = qib_read_7322portcntrs;
7275 	dd->f_reset             = qib_do_7322_reset;
7276 	dd->f_init_sdma_regs    = init_sdma_7322_regs;
7277 	dd->f_sdma_busy         = qib_sdma_7322_busy;
7278 	dd->f_sdma_gethead      = qib_sdma_7322_gethead;
7279 	dd->f_sdma_sendctrl     = qib_7322_sdma_sendctrl;
7280 	dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
7281 	dd->f_sdma_update_tail  = qib_sdma_update_7322_tail;
7282 	dd->f_sendctrl          = sendctrl_7322_mod;
7283 	dd->f_set_armlaunch     = qib_set_7322_armlaunch;
7284 	dd->f_set_cntr_sample   = qib_set_cntr_7322_sample;
7285 	dd->f_iblink_state      = qib_7322_iblink_state;
7286 	dd->f_ibphys_portstate  = qib_7322_phys_portstate;
7287 	dd->f_get_ib_cfg        = qib_7322_get_ib_cfg;
7288 	dd->f_set_ib_cfg        = qib_7322_set_ib_cfg;
7289 	dd->f_set_ib_loopback   = qib_7322_set_loopback;
7290 	dd->f_get_ib_table      = qib_7322_get_ib_table;
7291 	dd->f_set_ib_table      = qib_7322_set_ib_table;
7292 	dd->f_set_intr_state    = qib_7322_set_intr_state;
7293 	dd->f_setextled         = qib_setup_7322_setextled;
7294 	dd->f_txchk_change      = qib_7322_txchk_change;
7295 	dd->f_update_usrhead    = qib_update_7322_usrhead;
7296 	dd->f_wantpiobuf_intr   = qib_wantpiobuf_7322_intr;
7297 	dd->f_xgxs_reset        = qib_7322_mini_pcs_reset;
7298 	dd->f_sdma_hw_clean_up  = qib_7322_sdma_hw_clean_up;
7299 	dd->f_sdma_hw_start_up  = qib_7322_sdma_hw_start_up;
7300 	dd->f_sdma_init_early   = qib_7322_sdma_init_early;
7301 	dd->f_writescratch      = writescratch;
7302 	dd->f_tempsense_rd	= qib_7322_tempsense_rd;
7303 #ifdef CONFIG_INFINIBAND_QIB_DCA
7304 	dd->f_notify_dca	= qib_7322_notify_dca;
7305 #endif
7306 	/*
7307 	 * Do remaining PCIe setup and save PCIe values in dd.
7308 	 * Any error printing is already done by the init code.
7309 	 * On return, we have the chip mapped, but chip registers
7310 	 * are not set up until start of qib_init_7322_variables.
7311 	 */
7312 	ret = qib_pcie_ddinit(dd, pdev, ent);
7313 	if (ret < 0)
7314 		goto bail_free;
7315 
7316 	/* initialize chip-specific variables */
7317 	ret = qib_init_7322_variables(dd);
7318 	if (ret)
7319 		goto bail_cleanup;
7320 
7321 	if (qib_mini_init || !dd->num_pports)
7322 		goto bail;
7323 
7324 	/*
7325 	 * Determine number of vectors we want; depends on port count
7326 	 * and number of configured kernel receive queues actually used.
7327 	 * Should also depend on whether sdma is enabled or not, but
7328 	 * that's such a rare testing case it's not worth worrying about.
7329 	 */
7330 	tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
7331 	for (i = 0; i < tabsize; i++)
7332 		if ((i < ARRAY_SIZE(irq_table) &&
7333 		     irq_table[i].port <= dd->num_pports) ||
7334 		    (i >= ARRAY_SIZE(irq_table) &&
7335 		     dd->rcd[i - ARRAY_SIZE(irq_table)]))
7336 			actual_cnt++;
7337 	/* reduce by ctxt's < 2 */
7338 	if (qib_krcvq01_no_msi)
7339 		actual_cnt -= dd->num_pports;
7340 
7341 	tabsize = actual_cnt;
7342 	dd->cspec->msix_entries = kzalloc(tabsize *
7343 			sizeof(struct qib_msix_entry), GFP_KERNEL);
7344 	if (!dd->cspec->msix_entries) {
7345 		qib_dev_err(dd, "No memory for MSIx table\n");
7346 		tabsize = 0;
7347 	}
7348 	for (i = 0; i < tabsize; i++)
7349 		dd->cspec->msix_entries[i].msix.entry = i;
7350 
7351 	if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
7352 		qib_dev_err(dd,
7353 			"Failed to setup PCIe or interrupts; continuing anyway\n");
7354 	/* may be less than we wanted, if not enough available */
7355 	dd->cspec->num_msix_entries = tabsize;
7356 
7357 	/* setup interrupt handler */
7358 	qib_setup_7322_interrupt(dd, 1);
7359 
7360 	/* clear diagctrl register, in case diags were running and crashed */
7361 	qib_write_kreg(dd, kr_hwdiagctrl, 0);
7362 #ifdef CONFIG_INFINIBAND_QIB_DCA
7363 	if (!dca_add_requester(&pdev->dev)) {
7364 		qib_devinfo(dd->pcidev, "DCA enabled\n");
7365 		dd->flags |= QIB_DCA_ENABLED;
7366 		qib_setup_dca(dd);
7367 	}
7368 #endif
7369 	goto bail;
7370 
7371 bail_cleanup:
7372 	qib_pcie_ddcleanup(dd);
7373 bail_free:
7374 	qib_free_devdata(dd);
7375 	dd = ERR_PTR(ret);
7376 bail:
7377 	return dd;
7378 }
7379 
7380 /*
7381  * Set the table entry at the specified index from the table specifed.
7382  * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
7383  * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
7384  * 'idx' below addresses the correct entry, while its 4 LSBs select the
7385  * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
7386  */
7387 #define DDS_ENT_AMP_LSB 14
7388 #define DDS_ENT_MAIN_LSB 9
7389 #define DDS_ENT_POST_LSB 5
7390 #define DDS_ENT_PRE_XTRA_LSB 3
7391 #define DDS_ENT_PRE_LSB 0
7392 
7393 /*
7394  * Set one entry in the TxDDS table for spec'd port
7395  * ridx picks one of the entries, while tp points
7396  * to the appropriate table entry.
7397  */
7398 static void set_txdds(struct qib_pportdata *ppd, int ridx,
7399 		      const struct txdds_ent *tp)
7400 {
7401 	struct qib_devdata *dd = ppd->dd;
7402 	u32 pack_ent;
7403 	int regidx;
7404 
7405 	/* Get correct offset in chip-space, and in source table */
7406 	regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
7407 	/*
7408 	 * We do not use qib_write_kreg_port() because it was intended
7409 	 * only for registers in the lower "port specific" pages.
7410 	 * So do index calculation  by hand.
7411 	 */
7412 	if (ppd->hw_pidx)
7413 		regidx += (dd->palign / sizeof(u64));
7414 
7415 	pack_ent = tp->amp << DDS_ENT_AMP_LSB;
7416 	pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
7417 	pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
7418 	pack_ent |= tp->post << DDS_ENT_POST_LSB;
7419 	qib_write_kreg(dd, regidx, pack_ent);
7420 	/* Prevent back-to-back writes by hitting scratch */
7421 	qib_write_kreg(ppd->dd, kr_scratch, 0);
7422 }
7423 
7424 static const struct vendor_txdds_ent vendor_txdds[] = {
7425 	{ /* Amphenol 1m 30awg NoEq */
7426 		{ 0x41, 0x50, 0x48 }, "584470002       ",
7427 		{ 10,  0,  0,  5 }, { 10,  0,  0,  9 }, {  7,  1,  0, 13 },
7428 	},
7429 	{ /* Amphenol 3m 28awg NoEq */
7430 		{ 0x41, 0x50, 0x48 }, "584470004       ",
7431 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  7, 15 },
7432 	},
7433 	{ /* Finisar 3m OM2 Optical */
7434 		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
7435 		{  0,  0,  0,  3 }, {  0,  0,  0,  4 }, {  0,  0,  0, 13 },
7436 	},
7437 	{ /* Finisar 30m OM2 Optical */
7438 		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
7439 		{  0,  0,  0,  1 }, {  0,  0,  0,  5 }, {  0,  0,  0, 11 },
7440 	},
7441 	{ /* Finisar Default OM2 Optical */
7442 		{ 0x00, 0x90, 0x65 }, NULL,
7443 		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  0,  0, 12 },
7444 	},
7445 	{ /* Gore 1m 30awg NoEq */
7446 		{ 0x00, 0x21, 0x77 }, "QSN3300-1       ",
7447 		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  0, 15 },
7448 	},
7449 	{ /* Gore 2m 30awg NoEq */
7450 		{ 0x00, 0x21, 0x77 }, "QSN3300-2       ",
7451 		{  0,  0,  0,  8 }, {  0,  0,  0, 10 }, {  0,  1,  7, 15 },
7452 	},
7453 	{ /* Gore 1m 28awg NoEq */
7454 		{ 0x00, 0x21, 0x77 }, "QSN3800-1       ",
7455 		{  0,  0,  0,  6 }, {  0,  0,  0,  8 }, {  0,  1,  0, 15 },
7456 	},
7457 	{ /* Gore 3m 28awg NoEq */
7458 		{ 0x00, 0x21, 0x77 }, "QSN3800-3       ",
7459 		{  0,  0,  0,  9 }, {  0,  0,  0, 13 }, {  0,  1,  7, 15 },
7460 	},
7461 	{ /* Gore 5m 24awg Eq */
7462 		{ 0x00, 0x21, 0x77 }, "QSN7000-5       ",
7463 		{  0,  0,  0,  7 }, {  0,  0,  0,  9 }, {  0,  1,  3, 15 },
7464 	},
7465 	{ /* Gore 7m 24awg Eq */
7466 		{ 0x00, 0x21, 0x77 }, "QSN7000-7       ",
7467 		{  0,  0,  0,  9 }, {  0,  0,  0, 11 }, {  0,  2,  6, 15 },
7468 	},
7469 	{ /* Gore 5m 26awg Eq */
7470 		{ 0x00, 0x21, 0x77 }, "QSN7600-5       ",
7471 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  9, 13 },
7472 	},
7473 	{ /* Gore 7m 26awg Eq */
7474 		{ 0x00, 0x21, 0x77 }, "QSN7600-7       ",
7475 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  10,  1,  8, 15 },
7476 	},
7477 	{ /* Intersil 12m 24awg Active */
7478 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
7479 		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  3,  0,  9 },
7480 	},
7481 	{ /* Intersil 10m 28awg Active */
7482 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
7483 		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  2,  0,  2 },
7484 	},
7485 	{ /* Intersil 7m 30awg Active */
7486 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
7487 		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  1,  0,  3 },
7488 	},
7489 	{ /* Intersil 5m 32awg Active */
7490 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
7491 		{  0,  0,  0,  6 }, {  0,  0,  0,  6 }, {  0,  2,  0,  8 },
7492 	},
7493 	{ /* Intersil Default Active */
7494 		{ 0x00, 0x30, 0xB4 }, NULL,
7495 		{  0,  0,  0,  6 }, {  0,  0,  0,  5 }, {  0,  2,  0,  5 },
7496 	},
7497 	{ /* Luxtera 20m Active Optical */
7498 		{ 0x00, 0x25, 0x63 }, NULL,
7499 		{  0,  0,  0,  5 }, {  0,  0,  0,  8 }, {  0,  2,  0,  12 },
7500 	},
7501 	{ /* Molex 1M Cu loopback */
7502 		{ 0x00, 0x09, 0x3A }, "74763-0025      ",
7503 		{  2,  2,  6, 15 }, {  2,  2,  6, 15 }, {  2,  2,  6, 15 },
7504 	},
7505 	{ /* Molex 2m 28awg NoEq */
7506 		{ 0x00, 0x09, 0x3A }, "74757-2201      ",
7507 		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  1, 15 },
7508 	},
7509 };
7510 
7511 static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
7512 	/* amp, pre, main, post */
7513 	{  2, 2, 15,  6 },	/* Loopback */
7514 	{  0, 0,  0,  1 },	/*  2 dB */
7515 	{  0, 0,  0,  2 },	/*  3 dB */
7516 	{  0, 0,  0,  3 },	/*  4 dB */
7517 	{  0, 0,  0,  4 },	/*  5 dB */
7518 	{  0, 0,  0,  5 },	/*  6 dB */
7519 	{  0, 0,  0,  6 },	/*  7 dB */
7520 	{  0, 0,  0,  7 },	/*  8 dB */
7521 	{  0, 0,  0,  8 },	/*  9 dB */
7522 	{  0, 0,  0,  9 },	/* 10 dB */
7523 	{  0, 0,  0, 10 },	/* 11 dB */
7524 	{  0, 0,  0, 11 },	/* 12 dB */
7525 	{  0, 0,  0, 12 },	/* 13 dB */
7526 	{  0, 0,  0, 13 },	/* 14 dB */
7527 	{  0, 0,  0, 14 },	/* 15 dB */
7528 	{  0, 0,  0, 15 },	/* 16 dB */
7529 };
7530 
7531 static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
7532 	/* amp, pre, main, post */
7533 	{  2, 2, 15,  6 },	/* Loopback */
7534 	{  0, 0,  0,  8 },	/*  2 dB */
7535 	{  0, 0,  0,  8 },	/*  3 dB */
7536 	{  0, 0,  0,  9 },	/*  4 dB */
7537 	{  0, 0,  0,  9 },	/*  5 dB */
7538 	{  0, 0,  0, 10 },	/*  6 dB */
7539 	{  0, 0,  0, 10 },	/*  7 dB */
7540 	{  0, 0,  0, 11 },	/*  8 dB */
7541 	{  0, 0,  0, 11 },	/*  9 dB */
7542 	{  0, 0,  0, 12 },	/* 10 dB */
7543 	{  0, 0,  0, 12 },	/* 11 dB */
7544 	{  0, 0,  0, 13 },	/* 12 dB */
7545 	{  0, 0,  0, 13 },	/* 13 dB */
7546 	{  0, 0,  0, 14 },	/* 14 dB */
7547 	{  0, 0,  0, 14 },	/* 15 dB */
7548 	{  0, 0,  0, 15 },	/* 16 dB */
7549 };
7550 
7551 static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7552 	/* amp, pre, main, post */
7553 	{  2, 2, 15,  6 },	/* Loopback */
7554 	{  0, 1,  0,  7 },	/*  2 dB (also QMH7342) */
7555 	{  0, 1,  0,  9 },	/*  3 dB (also QMH7342) */
7556 	{  0, 1,  0, 11 },	/*  4 dB */
7557 	{  0, 1,  0, 13 },	/*  5 dB */
7558 	{  0, 1,  0, 15 },	/*  6 dB */
7559 	{  0, 1,  3, 15 },	/*  7 dB */
7560 	{  0, 1,  7, 15 },	/*  8 dB */
7561 	{  0, 1,  7, 15 },	/*  9 dB */
7562 	{  0, 1,  8, 15 },	/* 10 dB */
7563 	{  0, 1,  9, 15 },	/* 11 dB */
7564 	{  0, 1, 10, 15 },	/* 12 dB */
7565 	{  0, 2,  6, 15 },	/* 13 dB */
7566 	{  0, 2,  7, 15 },	/* 14 dB */
7567 	{  0, 2,  8, 15 },	/* 15 dB */
7568 	{  0, 2,  9, 15 },	/* 16 dB */
7569 };
7570 
7571 /*
7572  * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
7573  * These are mostly used for mez cards going through connectors
7574  * and backplane traces, but can be used to add other "unusual"
7575  * table values as well.
7576  */
7577 static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
7578 	/* amp, pre, main, post */
7579 	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */
7580 	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */
7581 	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
7582 	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
7583 	{  0, 0, 0,  3 },	/* QMH7342 backplane settings */
7584 	{  0, 0, 0,  4 },	/* QMH7342 backplane settings */
7585 	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
7586 	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
7587 	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
7588 	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
7589 	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */
7590 	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
7591 	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
7592 	{  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7593 	{  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7594 	{  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7595 	{  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7596 	{  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7597 };
7598 
7599 static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
7600 	/* amp, pre, main, post */
7601 	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */
7602 	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */
7603 	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
7604 	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
7605 	{  0, 0, 0,  9 },	/* QMH7342 backplane settings */
7606 	{  0, 0, 0, 10 },	/* QMH7342 backplane settings */
7607 	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
7608 	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
7609 	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
7610 	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
7611 	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */
7612 	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
7613 	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
7614 	{  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7615 	{  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7616 	{  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7617 	{  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7618 	{  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7619 };
7620 
7621 static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
7622 	/* amp, pre, main, post */
7623 	{  0, 1,  0,  4 },	/* QMH7342 backplane settings */
7624 	{  0, 1,  0,  5 },	/* QMH7342 backplane settings */
7625 	{  0, 1,  0,  6 },	/* QMH7342 backplane settings */
7626 	{  0, 1,  0,  8 },	/* QMH7342 backplane settings */
7627 	{  0, 1,  0, 10 },	/* QMH7342 backplane settings */
7628 	{  0, 1,  0, 12 },	/* QMH7342 backplane settings */
7629 	{  0, 1,  4, 15 },	/* QME7342 backplane settings 1.0 */
7630 	{  0, 1,  3, 15 },	/* QME7342 backplane settings 1.0 */
7631 	{  0, 1,  0, 12 },	/* QME7342 backplane settings 1.0 */
7632 	{  0, 1,  0, 11 },	/* QME7342 backplane settings 1.0 */
7633 	{  0, 1,  0,  9 },	/* QME7342 backplane settings 1.0 */
7634 	{  0, 1,  0, 14 },	/* QME7342 backplane settings 1.0 */
7635 	{  0, 1,  2, 15 },	/* QME7342 backplane settings 1.0 */
7636 	{  0, 1,  0, 11 },      /* QME7342 backplane settings 1.1 */
7637 	{  0, 1,  0,  7 },      /* QME7342 backplane settings 1.1 */
7638 	{  0, 1,  0,  9 },      /* QME7342 backplane settings 1.1 */
7639 	{  0, 1,  0,  6 },      /* QME7342 backplane settings 1.1 */
7640 	{  0, 1,  0,  8 },      /* QME7342 backplane settings 1.1 */
7641 };
7642 
7643 static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7644 	/* amp, pre, main, post */
7645 	{ 0, 0, 0, 0 },         /* QME7342 mfg settings */
7646 	{ 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
7647 };
7648 
7649 static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7650 					       unsigned atten)
7651 {
7652 	/*
7653 	 * The attenuation table starts at 2dB for entry 1,
7654 	 * with entry 0 being the loopback entry.
7655 	 */
7656 	if (atten <= 2)
7657 		atten = 1;
7658 	else if (atten > TXDDS_TABLE_SZ)
7659 		atten = TXDDS_TABLE_SZ - 1;
7660 	else
7661 		atten--;
7662 	return txdds + atten;
7663 }
7664 
7665 /*
7666  * if override is set, the module parameter txselect has a value
7667  * for this specific port, so use it, rather than our normal mechanism.
7668  */
7669 static void find_best_ent(struct qib_pportdata *ppd,
7670 			  const struct txdds_ent **sdr_dds,
7671 			  const struct txdds_ent **ddr_dds,
7672 			  const struct txdds_ent **qdr_dds, int override)
7673 {
7674 	struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7675 	int idx;
7676 
7677 	/* Search table of known cables */
7678 	for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7679 		const struct vendor_txdds_ent *v = vendor_txdds + idx;
7680 
7681 		if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7682 		    (!v->partnum ||
7683 		     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7684 			*sdr_dds = &v->sdr;
7685 			*ddr_dds = &v->ddr;
7686 			*qdr_dds = &v->qdr;
7687 			return;
7688 		}
7689 	}
7690 
7691 	/* Active cables don't have attenuation so we only set SERDES
7692 	 * settings to account for the attenuation of the board traces. */
7693 	if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7694 		*sdr_dds = txdds_sdr + ppd->dd->board_atten;
7695 		*ddr_dds = txdds_ddr + ppd->dd->board_atten;
7696 		*qdr_dds = txdds_qdr + ppd->dd->board_atten;
7697 		return;
7698 	}
7699 
7700 	if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7701 						      qd->atten[1])) {
7702 		*sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7703 		*ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7704 		*qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7705 		return;
7706 	} else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7707 		/*
7708 		 * If we have no (or incomplete) data from the cable
7709 		 * EEPROM, or no QSFP, or override is set, use the
7710 		 * module parameter value to index into the attentuation
7711 		 * table.
7712 		 */
7713 		idx = ppd->cpspec->no_eep;
7714 		*sdr_dds = &txdds_sdr[idx];
7715 		*ddr_dds = &txdds_ddr[idx];
7716 		*qdr_dds = &txdds_qdr[idx];
7717 	} else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7718 		/* similar to above, but index into the "extra" table. */
7719 		idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7720 		*sdr_dds = &txdds_extra_sdr[idx];
7721 		*ddr_dds = &txdds_extra_ddr[idx];
7722 		*qdr_dds = &txdds_extra_qdr[idx];
7723 	} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7724 		   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7725 					  TXDDS_MFG_SZ)) {
7726 		idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7727 		pr_info("IB%u:%u use idx %u into txdds_mfg\n",
7728 			ppd->dd->unit, ppd->port, idx);
7729 		*sdr_dds = &txdds_extra_mfg[idx];
7730 		*ddr_dds = &txdds_extra_mfg[idx];
7731 		*qdr_dds = &txdds_extra_mfg[idx];
7732 	} else {
7733 		/* this shouldn't happen, it's range checked */
7734 		*sdr_dds = txdds_sdr + qib_long_atten;
7735 		*ddr_dds = txdds_ddr + qib_long_atten;
7736 		*qdr_dds = txdds_qdr + qib_long_atten;
7737 	}
7738 }
7739 
7740 static void init_txdds_table(struct qib_pportdata *ppd, int override)
7741 {
7742 	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7743 	struct txdds_ent *dds;
7744 	int idx;
7745 	int single_ent = 0;
7746 
7747 	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7748 
7749 	/* for mez cards or override, use the selected value for all entries */
7750 	if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7751 		single_ent = 1;
7752 
7753 	/* Fill in the first entry with the best entry found. */
7754 	set_txdds(ppd, 0, sdr_dds);
7755 	set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7756 	set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7757 	if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7758 		QIBL_LINKACTIVE)) {
7759 		dds = (struct txdds_ent *)(ppd->link_speed_active ==
7760 					   QIB_IB_QDR ?  qdr_dds :
7761 					   (ppd->link_speed_active ==
7762 					    QIB_IB_DDR ? ddr_dds : sdr_dds));
7763 		write_tx_serdes_param(ppd, dds);
7764 	}
7765 
7766 	/* Fill in the remaining entries with the default table values. */
7767 	for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7768 		set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7769 		set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7770 			  single_ent ? ddr_dds : txdds_ddr + idx);
7771 		set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7772 			  single_ent ? qdr_dds : txdds_qdr + idx);
7773 	}
7774 }
7775 
7776 #define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7777 #define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7778 #define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7779 #define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7780 #define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7781 #define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7782 #define AHB_TRANS_TRIES 10
7783 
7784 /*
7785  * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan4,
7786  * 5=subsystem which is why most calls have "chan + chan >> 1"
7787  * for the channel argument.
7788  */
7789 static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7790 		    u32 data, u32 mask)
7791 {
7792 	u32 rd_data, wr_data, sz_mask;
7793 	u64 trans, acc, prev_acc;
7794 	u32 ret = 0xBAD0BAD;
7795 	int tries;
7796 
7797 	prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7798 	/* From this point on, make sure we return access */
7799 	acc = (quad << 1) | 1;
7800 	qib_write_kreg(dd, KR_AHB_ACC, acc);
7801 
7802 	for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7803 		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7804 		if (trans & AHB_TRANS_RDY)
7805 			break;
7806 	}
7807 	if (tries >= AHB_TRANS_TRIES) {
7808 		qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7809 		goto bail;
7810 	}
7811 
7812 	/* If mask is not all 1s, we need to read, but different SerDes
7813 	 * entities have different sizes
7814 	 */
7815 	sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7816 	wr_data = data & mask & sz_mask;
7817 	if ((~mask & sz_mask) != 0) {
7818 		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7819 		qib_write_kreg(dd, KR_AHB_TRANS, trans);
7820 
7821 		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7822 			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7823 			if (trans & AHB_TRANS_RDY)
7824 				break;
7825 		}
7826 		if (tries >= AHB_TRANS_TRIES) {
7827 			qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7828 				    AHB_TRANS_TRIES);
7829 			goto bail;
7830 		}
7831 		/* Re-read in case host split reads and read data first */
7832 		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7833 		rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
7834 		wr_data |= (rd_data & ~mask & sz_mask);
7835 	}
7836 
7837 	/* If mask is not zero, we need to write. */
7838 	if (mask & sz_mask) {
7839 		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7840 		trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
7841 		trans |= AHB_WR;
7842 		qib_write_kreg(dd, KR_AHB_TRANS, trans);
7843 
7844 		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7845 			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7846 			if (trans & AHB_TRANS_RDY)
7847 				break;
7848 		}
7849 		if (tries >= AHB_TRANS_TRIES) {
7850 			qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7851 				    AHB_TRANS_TRIES);
7852 			goto bail;
7853 		}
7854 	}
7855 	ret = wr_data;
7856 bail:
7857 	qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7858 	return ret;
7859 }
7860 
7861 static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7862 			     unsigned mask)
7863 {
7864 	struct qib_devdata *dd = ppd->dd;
7865 	int chan;
7866 	u32 rbc;
7867 
7868 	for (chan = 0; chan < SERDES_CHANS; ++chan) {
7869 		ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7870 			data, mask);
7871 		rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7872 			      addr, 0, 0);
7873 	}
7874 }
7875 
7876 static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7877 {
7878 	u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7879 	u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7880 
7881 	if (enable && !state) {
7882 		pr_info("IB%u:%u Turning LOS on\n",
7883 			ppd->dd->unit, ppd->port);
7884 		data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7885 	} else if (!enable && state) {
7886 		pr_info("IB%u:%u Turning LOS off\n",
7887 			ppd->dd->unit, ppd->port);
7888 		data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7889 	}
7890 	qib_write_kreg_port(ppd, krp_serdesctrl, data);
7891 }
7892 
7893 static int serdes_7322_init(struct qib_pportdata *ppd)
7894 {
7895 	int ret = 0;
7896 	if (ppd->dd->cspec->r1)
7897 		ret = serdes_7322_init_old(ppd);
7898 	else
7899 		ret = serdes_7322_init_new(ppd);
7900 	return ret;
7901 }
7902 
7903 static int serdes_7322_init_old(struct qib_pportdata *ppd)
7904 {
7905 	u32 le_val;
7906 
7907 	/*
7908 	 * Initialize the Tx DDS tables.  Also done every QSFP event,
7909 	 * for adapters with QSFP
7910 	 */
7911 	init_txdds_table(ppd, 0);
7912 
7913 	/* ensure no tx overrides from earlier driver loads */
7914 	qib_write_kreg_port(ppd, krp_tx_deemph_override,
7915 		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7916 		reset_tx_deemphasis_override));
7917 
7918 	/* Patch some SerDes defaults to "Better for IB" */
7919 	/* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7920 	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7921 
7922 	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7923 	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7924 	/* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7925 	ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7926 
7927 	/* May be overridden in qsfp_7322_event */
7928 	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7929 	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7930 
7931 	/* enable LE1 adaptation for all but QME, which is disabled */
7932 	le_val = IS_QME(ppd->dd) ? 0 : 1;
7933 	ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7934 
7935 	/* Clear cmode-override, may be set from older driver */
7936 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7937 
7938 	/* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7939 	ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7940 
7941 	/* setup LoS params; these are subsystem, so chan == 5 */
7942 	/* LoS filter threshold_count on, ch 0-3, set to 8 */
7943 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7944 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7945 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7946 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7947 
7948 	/* LoS filter threshold_count off, ch 0-3, set to 4 */
7949 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7950 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7951 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7952 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7953 
7954 	/* LoS filter select enabled */
7955 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7956 
7957 	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
7958 	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7959 	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7960 	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7961 
7962 	serdes_7322_los_enable(ppd, 1);
7963 
7964 	/* rxbistena; set 0 to avoid effects of it switch later */
7965 	ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7966 
7967 	/* Configure 4 DFE taps, and only they adapt */
7968 	ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7969 
7970 	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7971 	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7972 	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7973 
7974 	/*
7975 	 * Set receive adaptation mode.  SDR and DDR adaptation are
7976 	 * always on, and QDR is initially enabled; later disabled.
7977 	 */
7978 	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7979 	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7980 	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7981 			    ppd->dd->cspec->r1 ?
7982 			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7983 	ppd->cpspec->qdr_dfe_on = 1;
7984 
7985 	/* FLoop LOS gate: PPM filter  enabled */
7986 	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7987 
7988 	/* rx offset center enabled */
7989 	ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7990 
7991 	if (!ppd->dd->cspec->r1) {
7992 		ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7993 		ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7994 	}
7995 
7996 	/* Set the frequency loop bandwidth to 15 */
7997 	ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7998 
7999 	return 0;
8000 }
8001 
8002 static int serdes_7322_init_new(struct qib_pportdata *ppd)
8003 {
8004 	unsigned long tend;
8005 	u32 le_val, rxcaldone;
8006 	int chan, chan_done = (1 << SERDES_CHANS) - 1;
8007 
8008 	/* Clear cmode-override, may be set from older driver */
8009 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
8010 
8011 	/* ensure no tx overrides from earlier driver loads */
8012 	qib_write_kreg_port(ppd, krp_tx_deemph_override,
8013 		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8014 		reset_tx_deemphasis_override));
8015 
8016 	/* START OF LSI SUGGESTED SERDES BRINGUP */
8017 	/* Reset - Calibration Setup */
8018 	/*       Stop DFE adaptaion */
8019 	ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
8020 	/*       Disable LE1 */
8021 	ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
8022 	/*       Disable autoadapt for LE1 */
8023 	ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
8024 	/*       Disable LE2 */
8025 	ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
8026 	/*       Disable VGA */
8027 	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8028 	/*       Disable AFE Offset Cancel */
8029 	ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
8030 	/*       Disable Timing Loop */
8031 	ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
8032 	/*       Disable Frequency Loop */
8033 	ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
8034 	/*       Disable Baseline Wander Correction */
8035 	ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
8036 	/*       Disable RX Calibration */
8037 	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8038 	/*       Disable RX Offset Calibration */
8039 	ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
8040 	/*       Select BB CDR */
8041 	ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
8042 	/*       CDR Step Size */
8043 	ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
8044 	/*       Enable phase Calibration */
8045 	ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
8046 	/*       DFE Bandwidth [2:14-12] */
8047 	ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
8048 	/*       DFE Config (4 taps only) */
8049 	ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
8050 	/*       Gain Loop Bandwidth */
8051 	if (!ppd->dd->cspec->r1) {
8052 		ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
8053 		ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
8054 	} else {
8055 		ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
8056 	}
8057 	/*       Baseline Wander Correction Gain [13:4-0] (leave as default) */
8058 	/*       Baseline Wander Correction Gain [3:7-5] (leave as default) */
8059 	/*       Data Rate Select [5:7-6] (leave as default) */
8060 	/*       RX Parallel Word Width [3:10-8] (leave as default) */
8061 
8062 	/* RX REST */
8063 	/*       Single- or Multi-channel reset */
8064 	/*       RX Analog reset */
8065 	/*       RX Digital reset */
8066 	ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
8067 	msleep(20);
8068 	/*       RX Analog reset */
8069 	ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
8070 	msleep(20);
8071 	/*       RX Digital reset */
8072 	ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
8073 	msleep(20);
8074 
8075 	/* setup LoS params; these are subsystem, so chan == 5 */
8076 	/* LoS filter threshold_count on, ch 0-3, set to 8 */
8077 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
8078 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
8079 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
8080 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
8081 
8082 	/* LoS filter threshold_count off, ch 0-3, set to 4 */
8083 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
8084 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
8085 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
8086 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
8087 
8088 	/* LoS filter select enabled */
8089 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
8090 
8091 	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
8092 	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
8093 	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
8094 	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
8095 
8096 	/* Turn on LOS on initial SERDES init */
8097 	serdes_7322_los_enable(ppd, 1);
8098 	/* FLoop LOS gate: PPM filter  enabled */
8099 	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
8100 
8101 	/* RX LATCH CALIBRATION */
8102 	/*       Enable Eyefinder Phase Calibration latch */
8103 	ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
8104 	/*       Enable RX Offset Calibration latch */
8105 	ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
8106 	msleep(20);
8107 	/*       Start Calibration */
8108 	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
8109 	tend = jiffies + msecs_to_jiffies(500);
8110 	while (chan_done && !time_is_before_jiffies(tend)) {
8111 		msleep(20);
8112 		for (chan = 0; chan < SERDES_CHANS; ++chan) {
8113 			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8114 					    (chan + (chan >> 1)),
8115 					    25, 0, 0);
8116 			if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
8117 			    (~chan_done & (1 << chan)) == 0)
8118 				chan_done &= ~(1 << chan);
8119 		}
8120 	}
8121 	if (chan_done) {
8122 		pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
8123 			 IBSD(ppd->hw_pidx), chan_done);
8124 	} else {
8125 		for (chan = 0; chan < SERDES_CHANS; ++chan) {
8126 			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8127 					    (chan + (chan >> 1)),
8128 					    25, 0, 0);
8129 			if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
8130 				pr_info("Serdes %d chan %d calibration failed\n",
8131 					IBSD(ppd->hw_pidx), chan);
8132 		}
8133 	}
8134 
8135 	/*       Turn off Calibration */
8136 	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8137 	msleep(20);
8138 
8139 	/* BRING RX UP */
8140 	/*       Set LE2 value (May be overridden in qsfp_7322_event) */
8141 	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
8142 	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
8143 	/*       Set LE2 Loop bandwidth */
8144 	ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
8145 	/*       Enable LE2 */
8146 	ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
8147 	msleep(20);
8148 	/*       Enable H0 only */
8149 	ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
8150 	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
8151 	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
8152 	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
8153 	/*       Enable VGA */
8154 	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8155 	msleep(20);
8156 	/*       Set Frequency Loop Bandwidth */
8157 	ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
8158 	/*       Enable Frequency Loop */
8159 	ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
8160 	/*       Set Timing Loop Bandwidth */
8161 	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
8162 	/*       Enable Timing Loop */
8163 	ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
8164 	msleep(50);
8165 	/*       Enable DFE
8166 	 *       Set receive adaptation mode.  SDR and DDR adaptation are
8167 	 *       always on, and QDR is initially enabled; later disabled.
8168 	 */
8169 	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
8170 	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
8171 	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
8172 			    ppd->dd->cspec->r1 ?
8173 			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
8174 	ppd->cpspec->qdr_dfe_on = 1;
8175 	/*       Disable LE1  */
8176 	ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
8177 	/*       Disable auto adapt for LE1 */
8178 	ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
8179 	msleep(20);
8180 	/*       Enable AFE Offset Cancel */
8181 	ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
8182 	/*       Enable Baseline Wander Correction */
8183 	ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
8184 	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
8185 	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
8186 	/* VGA output common mode */
8187 	ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
8188 
8189 	/*
8190 	 * Initialize the Tx DDS tables.  Also done every QSFP event,
8191 	 * for adapters with QSFP
8192 	 */
8193 	init_txdds_table(ppd, 0);
8194 
8195 	return 0;
8196 }
8197 
8198 /* start adjust QMH serdes parameters */
8199 
8200 static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
8201 {
8202 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8203 		9, code << 9, 0x3f << 9);
8204 }
8205 
8206 static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
8207 	int enable, u32 tapenable)
8208 {
8209 	if (enable)
8210 		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8211 			1, 3 << 10, 0x1f << 10);
8212 	else
8213 		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8214 			1, 0, 0x1f << 10);
8215 }
8216 
8217 /* Set clock to 1, 0, 1, 0 */
8218 static void clock_man(struct qib_pportdata *ppd, int chan)
8219 {
8220 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8221 		4, 0x4000, 0x4000);
8222 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8223 		4, 0, 0x4000);
8224 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8225 		4, 0x4000, 0x4000);
8226 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8227 		4, 0, 0x4000);
8228 }
8229 
8230 /*
8231  * write the current Tx serdes pre,post,main,amp settings into the serdes.
8232  * The caller must pass the settings appropriate for the current speed,
8233  * or not care if they are correct for the current speed.
8234  */
8235 static void write_tx_serdes_param(struct qib_pportdata *ppd,
8236 				  struct txdds_ent *txdds)
8237 {
8238 	u64 deemph;
8239 
8240 	deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
8241 	/* field names for amp, main, post, pre, respectively */
8242 	deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
8243 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
8244 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
8245 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
8246 
8247 	deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8248 			   tx_override_deemphasis_select);
8249 	deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8250 		    txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8251 				       txampcntl_d2a);
8252 	deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8253 		     txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8254 				   txc0_ena);
8255 	deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8256 		     txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8257 				    txcp1_ena);
8258 	deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8259 		     txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8260 				    txcn1_ena);
8261 	qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
8262 }
8263 
8264 /*
8265  * Set the parameters for mez cards on link bounce, so they are
8266  * always exactly what was requested.  Similar logic to init_txdds
8267  * but does just the serdes.
8268  */
8269 static void adj_tx_serdes(struct qib_pportdata *ppd)
8270 {
8271 	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
8272 	struct txdds_ent *dds;
8273 
8274 	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
8275 	dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
8276 		qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
8277 				ddr_dds : sdr_dds));
8278 	write_tx_serdes_param(ppd, dds);
8279 }
8280 
8281 /* set QDR forced value for H1, if needed */
8282 static void force_h1(struct qib_pportdata *ppd)
8283 {
8284 	int chan;
8285 
8286 	ppd->cpspec->qdr_reforce = 0;
8287 	if (!ppd->dd->cspec->r1)
8288 		return;
8289 
8290 	for (chan = 0; chan < SERDES_CHANS; chan++) {
8291 		set_man_mode_h1(ppd, chan, 1, 0);
8292 		set_man_code(ppd, chan, ppd->cpspec->h1_val);
8293 		clock_man(ppd, chan);
8294 		set_man_mode_h1(ppd, chan, 0, 0);
8295 	}
8296 }
8297 
8298 #define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
8299 #define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
8300 
8301 #define R_OPCODE_LSB 3
8302 #define R_OP_NOP 0
8303 #define R_OP_SHIFT 2
8304 #define R_OP_UPDATE 3
8305 #define R_TDI_LSB 2
8306 #define R_TDO_LSB 1
8307 #define R_RDY 1
8308 
8309 static int qib_r_grab(struct qib_devdata *dd)
8310 {
8311 	u64 val;
8312 	val = SJA_EN;
8313 	qib_write_kreg(dd, kr_r_access, val);
8314 	qib_read_kreg32(dd, kr_scratch);
8315 	return 0;
8316 }
8317 
8318 /* qib_r_wait_for_rdy() not only waits for the ready bit, it
8319  * returns the current state of R_TDO
8320  */
8321 static int qib_r_wait_for_rdy(struct qib_devdata *dd)
8322 {
8323 	u64 val;
8324 	int timeout;
8325 	for (timeout = 0; timeout < 100 ; ++timeout) {
8326 		val = qib_read_kreg32(dd, kr_r_access);
8327 		if (val & R_RDY)
8328 			return (val >> R_TDO_LSB) & 1;
8329 	}
8330 	return -1;
8331 }
8332 
8333 static int qib_r_shift(struct qib_devdata *dd, int bisten,
8334 		       int len, u8 *inp, u8 *outp)
8335 {
8336 	u64 valbase, val;
8337 	int ret, pos;
8338 
8339 	valbase = SJA_EN | (bisten << BISTEN_LSB) |
8340 		(R_OP_SHIFT << R_OPCODE_LSB);
8341 	ret = qib_r_wait_for_rdy(dd);
8342 	if (ret < 0)
8343 		goto bail;
8344 	for (pos = 0; pos < len; ++pos) {
8345 		val = valbase;
8346 		if (outp) {
8347 			outp[pos >> 3] &= ~(1 << (pos & 7));
8348 			outp[pos >> 3] |= (ret << (pos & 7));
8349 		}
8350 		if (inp) {
8351 			int tdi = inp[pos >> 3] >> (pos & 7);
8352 			val |= ((tdi & 1) << R_TDI_LSB);
8353 		}
8354 		qib_write_kreg(dd, kr_r_access, val);
8355 		qib_read_kreg32(dd, kr_scratch);
8356 		ret = qib_r_wait_for_rdy(dd);
8357 		if (ret < 0)
8358 			break;
8359 	}
8360 	/* Restore to NOP between operations. */
8361 	val =  SJA_EN | (bisten << BISTEN_LSB);
8362 	qib_write_kreg(dd, kr_r_access, val);
8363 	qib_read_kreg32(dd, kr_scratch);
8364 	ret = qib_r_wait_for_rdy(dd);
8365 
8366 	if (ret >= 0)
8367 		ret = pos;
8368 bail:
8369 	return ret;
8370 }
8371 
8372 static int qib_r_update(struct qib_devdata *dd, int bisten)
8373 {
8374 	u64 val;
8375 	int ret;
8376 
8377 	val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
8378 	ret = qib_r_wait_for_rdy(dd);
8379 	if (ret >= 0) {
8380 		qib_write_kreg(dd, kr_r_access, val);
8381 		qib_read_kreg32(dd, kr_scratch);
8382 	}
8383 	return ret;
8384 }
8385 
8386 #define BISTEN_PORT_SEL 15
8387 #define LEN_PORT_SEL 625
8388 #define BISTEN_AT 17
8389 #define LEN_AT 156
8390 #define BISTEN_ETM 16
8391 #define LEN_ETM 632
8392 
8393 #define BIT2BYTE(x) (((x) +  BITS_PER_BYTE - 1) / BITS_PER_BYTE)
8394 
8395 /* these are common for all IB port use cases. */
8396 static u8 reset_at[BIT2BYTE(LEN_AT)] = {
8397 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8398 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8399 };
8400 static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
8401 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8402 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8403 	0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
8404 	0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
8405 	0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
8406 	0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
8407 	0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8408 	0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
8409 };
8410 static u8 at[BIT2BYTE(LEN_AT)] = {
8411 	0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
8412 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8413 };
8414 
8415 /* used for IB1 or IB2, only one in use */
8416 static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
8417 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8418 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8419 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8420 	0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
8421 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8422 	0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
8423 	0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
8424 	0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
8425 };
8426 
8427 /* used when both IB1 and IB2 are in use */
8428 static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
8429 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8430 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
8431 	0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8432 	0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
8433 	0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
8434 	0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
8435 	0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
8436 	0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
8437 };
8438 
8439 /* used when only IB1 is in use */
8440 static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
8441 	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8442 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8443 	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8444 	0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8445 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8446 	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8447 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8448 	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8449 };
8450 
8451 /* used when only IB2 is in use */
8452 static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
8453 	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
8454 	0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
8455 	0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8456 	0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8457 	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
8458 	0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8459 	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8460 	0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
8461 };
8462 
8463 /* used when both IB1 and IB2 are in use */
8464 static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
8465 	0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8466 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8467 	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8468 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8469 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8470 	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
8471 	0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8472 	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8473 };
8474 
8475 /*
8476  * Do setup to properly handle IB link recovery; if port is zero, we
8477  * are initializing to cover both ports; otherwise we are initializing
8478  * to cover a single port card, or the port has reached INIT and we may
8479  * need to switch coverage types.
8480  */
8481 static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
8482 {
8483 	u8 *portsel, *etm;
8484 	struct qib_devdata *dd = ppd->dd;
8485 
8486 	if (!ppd->dd->cspec->r1)
8487 		return;
8488 	if (!both) {
8489 		dd->cspec->recovery_ports_initted++;
8490 		ppd->cpspec->recovery_init = 1;
8491 	}
8492 	if (!both && dd->cspec->recovery_ports_initted == 1) {
8493 		portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
8494 		etm = atetm_1port;
8495 	} else {
8496 		portsel = portsel_2port;
8497 		etm = atetm_2port;
8498 	}
8499 
8500 	if (qib_r_grab(dd) < 0 ||
8501 		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
8502 		qib_r_update(dd, BISTEN_ETM) < 0 ||
8503 		qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
8504 		qib_r_update(dd, BISTEN_AT) < 0 ||
8505 		qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
8506 			    portsel, NULL) < 0 ||
8507 		qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
8508 		qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
8509 		qib_r_update(dd, BISTEN_AT) < 0 ||
8510 		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
8511 		qib_r_update(dd, BISTEN_ETM) < 0)
8512 		qib_dev_err(dd, "Failed IB link recovery setup\n");
8513 }
8514 
8515 static void check_7322_rxe_status(struct qib_pportdata *ppd)
8516 {
8517 	struct qib_devdata *dd = ppd->dd;
8518 	u64 fmask;
8519 
8520 	if (dd->cspec->recovery_ports_initted != 1)
8521 		return; /* rest doesn't apply to dualport */
8522 	qib_write_kreg(dd, kr_control, dd->control |
8523 		       SYM_MASK(Control, FreezeMode));
8524 	(void)qib_read_kreg64(dd, kr_scratch);
8525 	udelay(3); /* ibcreset asserted 400ns, be sure that's over */
8526 	fmask = qib_read_kreg64(dd, kr_act_fmask);
8527 	if (!fmask) {
8528 		/*
8529 		 * require a powercycle before we'll work again, and make
8530 		 * sure we get no more interrupts, and don't turn off
8531 		 * freeze.
8532 		 */
8533 		ppd->dd->cspec->stay_in_freeze = 1;
8534 		qib_7322_set_intr_state(ppd->dd, 0);
8535 		qib_write_kreg(dd, kr_fmask, 0ULL);
8536 		qib_dev_err(dd, "HCA unusable until powercycled\n");
8537 		return; /* eventually reset */
8538 	}
8539 
8540 	qib_write_kreg(ppd->dd, kr_hwerrclear,
8541 	    SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
8542 
8543 	/* don't do the full clear_freeze(), not needed for this */
8544 	qib_write_kreg(dd, kr_control, dd->control);
8545 	qib_read_kreg32(dd, kr_scratch);
8546 	/* take IBC out of reset */
8547 	if (ppd->link_speed_supported) {
8548 		ppd->cpspec->ibcctrl_a &=
8549 			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
8550 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
8551 				    ppd->cpspec->ibcctrl_a);
8552 		qib_read_kreg32(dd, kr_scratch);
8553 		if (ppd->lflags & QIBL_IB_LINK_DISABLED)
8554 			qib_set_ib_7322_lstate(ppd, 0,
8555 				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
8556 	}
8557 }
8558