xref: /linux/drivers/infiniband/hw/qib/qib_iba7322.c (revision 32786fdc9506aeba98278c1844d4bfb766863832)
1 /*
2  * Copyright (c) 2012 Intel Corporation.  All rights reserved.
3  * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 /*
35  * This file contains all of the code that is specific to the
36  * InfiniPath 7322 chip
37  */
38 
39 #include <linux/interrupt.h>
40 #include <linux/pci.h>
41 #include <linux/delay.h>
42 #include <linux/io.h>
43 #include <linux/jiffies.h>
44 #include <linux/module.h>
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_smi.h>
47 #ifdef CONFIG_INFINIBAND_QIB_DCA
48 #include <linux/dca.h>
49 #endif
50 
51 #include "qib.h"
52 #include "qib_7322_regs.h"
53 #include "qib_qsfp.h"
54 
55 #include "qib_mad.h"
56 #include "qib_verbs.h"
57 
58 #undef pr_fmt
59 #define pr_fmt(fmt) QIB_DRV_NAME " " fmt
60 
61 static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
62 static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
63 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
64 static irqreturn_t qib_7322intr(int irq, void *data);
65 static irqreturn_t qib_7322bufavail(int irq, void *data);
66 static irqreturn_t sdma_intr(int irq, void *data);
67 static irqreturn_t sdma_idle_intr(int irq, void *data);
68 static irqreturn_t sdma_progress_intr(int irq, void *data);
69 static irqreturn_t sdma_cleanup_intr(int irq, void *data);
70 static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
71 				  struct qib_ctxtdata *rcd);
72 static u8 qib_7322_phys_portstate(u64);
73 static u32 qib_7322_iblink_state(u64);
74 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
75 				   u16 linitcmd);
76 static void force_h1(struct qib_pportdata *);
77 static void adj_tx_serdes(struct qib_pportdata *);
78 static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
79 static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
80 
81 static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
82 static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
83 static void serdes_7322_los_enable(struct qib_pportdata *, int);
84 static int serdes_7322_init_old(struct qib_pportdata *);
85 static int serdes_7322_init_new(struct qib_pportdata *);
86 static void dump_sdma_7322_state(struct qib_pportdata *);
87 
88 #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
89 
90 /* LE2 serdes values for different cases */
91 #define LE2_DEFAULT 5
92 #define LE2_5m 4
93 #define LE2_QME 0
94 
95 /* Below is special-purpose, so only really works for the IB SerDes blocks. */
96 #define IBSD(hw_pidx) (hw_pidx + 2)
97 
98 /* these are variables for documentation and experimentation purposes */
99 static const unsigned rcv_int_timeout = 375;
100 static const unsigned rcv_int_count = 16;
101 static const unsigned sdma_idle_cnt = 64;
102 
103 /* Time to stop altering Rx Equalization parameters, after link up. */
104 #define RXEQ_DISABLE_MSECS 2500
105 
106 /*
107  * Number of VLs we are configured to use (to allow for more
108  * credits per vl, etc.)
109  */
110 ushort qib_num_cfg_vls = 2;
111 module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
112 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
113 
114 static ushort qib_chase = 1;
115 module_param_named(chase, qib_chase, ushort, S_IRUGO);
116 MODULE_PARM_DESC(chase, "Enable state chase handling");
117 
118 static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
119 module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
120 MODULE_PARM_DESC(long_attenuation,
121 		 "attenuation cutoff (dB) for long copper cable setup");
122 
123 static ushort qib_singleport;
124 module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
125 MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
126 
127 static ushort qib_krcvq01_no_msi;
128 module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
129 MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
130 
131 /*
132  * Receive header queue sizes
133  */
134 static unsigned qib_rcvhdrcnt;
135 module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
136 MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
137 
138 static unsigned qib_rcvhdrsize;
139 module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
140 MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
141 
142 static unsigned qib_rcvhdrentsize;
143 module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
144 MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
145 
146 #define MAX_ATTEN_LEN 64 /* plenty for any real system */
147 /* for read back, default index is ~5m copper cable */
148 static char txselect_list[MAX_ATTEN_LEN] = "10";
149 static struct kparam_string kp_txselect = {
150 	.string = txselect_list,
151 	.maxlen = MAX_ATTEN_LEN
152 };
153 static int  setup_txselect(const char *, struct kernel_param *);
154 module_param_call(txselect, setup_txselect, param_get_string,
155 		  &kp_txselect, S_IWUSR | S_IRUGO);
156 MODULE_PARM_DESC(txselect,
157 		 "Tx serdes indices (for no QSFP or invalid QSFP data)");
158 
159 #define BOARD_QME7342 5
160 #define BOARD_QMH7342 6
161 #define BOARD_QMH7360 9
162 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
163 		    BOARD_QMH7342)
164 #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
165 		    BOARD_QME7342)
166 
167 #define KREG_IDX(regname)     (QIB_7322_##regname##_OFFS / sizeof(u64))
168 
169 #define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
170 
171 #define MASK_ACROSS(lsb, msb) \
172 	(((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
173 
174 #define SYM_RMASK(regname, fldname) ((u64)              \
175 	QIB_7322_##regname##_##fldname##_RMASK)
176 
177 #define SYM_MASK(regname, fldname) ((u64)               \
178 	QIB_7322_##regname##_##fldname##_RMASK <<       \
179 	 QIB_7322_##regname##_##fldname##_LSB)
180 
181 #define SYM_FIELD(value, regname, fldname) ((u64)	\
182 	(((value) >> SYM_LSB(regname, fldname)) &	\
183 	 SYM_RMASK(regname, fldname)))
184 
185 /* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
186 #define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
187 	(((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
188 
189 #define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
190 #define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
191 #define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
192 #define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
193 #define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
194 /* Below because most, but not all, fields of IntMask have that full suffix */
195 #define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
196 
197 
198 #define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
199 
200 /*
201  * the size bits give us 2^N, in KB units.  0 marks as invalid,
202  * and 7 is reserved.  We currently use only 2KB and 4KB
203  */
204 #define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
205 #define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
206 #define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
207 #define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
208 
209 #define SendIBSLIDAssignMask \
210 	QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
211 #define SendIBSLMCMask \
212 	QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
213 
214 #define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
215 #define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
216 #define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
217 #define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
218 #define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
219 #define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
220 
221 #define _QIB_GPIO_SDA_NUM 1
222 #define _QIB_GPIO_SCL_NUM 0
223 #define QIB_EEPROM_WEN_NUM 14
224 #define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
225 
226 /* HW counter clock is at 4nsec */
227 #define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
228 
229 /* full speed IB port 1 only */
230 #define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
231 #define PORT_SPD_CAP_SHIFT 3
232 
233 /* full speed featuremask, both ports */
234 #define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
235 
236 /*
237  * This file contains almost all the chip-specific register information and
238  * access functions for the FAKED QLogic InfiniPath 7322 PCI-Express chip.
239  */
240 
241 /* Use defines to tie machine-generated names to lower-case names */
242 #define kr_contextcnt KREG_IDX(ContextCnt)
243 #define kr_control KREG_IDX(Control)
244 #define kr_counterregbase KREG_IDX(CntrRegBase)
245 #define kr_errclear KREG_IDX(ErrClear)
246 #define kr_errmask KREG_IDX(ErrMask)
247 #define kr_errstatus KREG_IDX(ErrStatus)
248 #define kr_extctrl KREG_IDX(EXTCtrl)
249 #define kr_extstatus KREG_IDX(EXTStatus)
250 #define kr_gpio_clear KREG_IDX(GPIOClear)
251 #define kr_gpio_mask KREG_IDX(GPIOMask)
252 #define kr_gpio_out KREG_IDX(GPIOOut)
253 #define kr_gpio_status KREG_IDX(GPIOStatus)
254 #define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
255 #define kr_debugportval KREG_IDX(DebugPortValueReg)
256 #define kr_fmask KREG_IDX(feature_mask)
257 #define kr_act_fmask KREG_IDX(active_feature_mask)
258 #define kr_hwerrclear KREG_IDX(HwErrClear)
259 #define kr_hwerrmask KREG_IDX(HwErrMask)
260 #define kr_hwerrstatus KREG_IDX(HwErrStatus)
261 #define kr_intclear KREG_IDX(IntClear)
262 #define kr_intmask KREG_IDX(IntMask)
263 #define kr_intredirect KREG_IDX(IntRedirect0)
264 #define kr_intstatus KREG_IDX(IntStatus)
265 #define kr_pagealign KREG_IDX(PageAlign)
266 #define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
267 #define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
268 #define kr_rcvegrbase KREG_IDX(RcvEgrBase)
269 #define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
270 #define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
271 #define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
272 #define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
273 #define kr_rcvtidbase KREG_IDX(RcvTIDBase)
274 #define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
275 #define kr_revision KREG_IDX(Revision)
276 #define kr_scratch KREG_IDX(Scratch)
277 #define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
278 #define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
279 #define kr_sendctrl KREG_IDX(SendCtrl)
280 #define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
281 #define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
282 #define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
283 #define kr_sendpiobufbase KREG_IDX(SendBufBase)
284 #define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
285 #define kr_sendpiosize KREG_IDX(SendBufSize)
286 #define kr_sendregbase KREG_IDX(SendRegBase)
287 #define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
288 #define kr_userregbase KREG_IDX(UserRegBase)
289 #define kr_intgranted KREG_IDX(Int_Granted)
290 #define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
291 #define kr_intblocked KREG_IDX(IntBlocked)
292 #define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
293 
294 /*
295  * per-port kernel registers.  Access only with qib_read_kreg_port()
296  * or qib_write_kreg_port()
297  */
298 #define krp_errclear KREG_IBPORT_IDX(ErrClear)
299 #define krp_errmask KREG_IBPORT_IDX(ErrMask)
300 #define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
301 #define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
302 #define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
303 #define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
304 #define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
305 #define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
306 #define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
307 #define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
308 #define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
309 #define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
310 #define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
311 #define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
312 #define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
313 #define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
314 #define krp_psinterval KREG_IBPORT_IDX(PSInterval)
315 #define krp_psstart KREG_IBPORT_IDX(PSStart)
316 #define krp_psstat KREG_IBPORT_IDX(PSStat)
317 #define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
318 #define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
319 #define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
320 #define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
321 #define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
322 #define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
323 #define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
324 #define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
325 #define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
326 #define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
327 #define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
328 #define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
329 #define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
330 #define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
331 #define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
332 #define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
333 #define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
334 #define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
335 #define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
336 #define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
337 #define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
338 #define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
339 #define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
340 #define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
341 #define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
342 #define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
343 #define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
344 #define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
345 #define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
346 #define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
347 #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
348 
349 /*
350  * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
351  * or qib_write_kreg_ctxt()
352  */
353 #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
354 #define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
355 
356 /*
357  * TID Flow table, per context.  Reduces
358  * number of hdrq updates to one per flow (or on errors).
359  * context 0 and 1 share same memory, but have distinct
360  * addresses.  Since for now, we never use expected sends
361  * on kernel contexts, we don't worry about that (we initialize
362  * those entries for ctxt 0/1 on driver load twice, for example).
363  */
364 #define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
365 #define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
366 
367 /* these are the error bits in the tid flows, and are W1C */
368 #define TIDFLOW_ERRBITS  ( \
369 	(SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
370 	SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
371 	(SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
372 	SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
373 
374 /* Most (not all) Counters are per-IBport.
375  * Requires LBIntCnt is at offset 0 in the group
376  */
377 #define CREG_IDX(regname) \
378 ((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
379 
380 #define crp_badformat CREG_IDX(RxVersionErrCnt)
381 #define crp_err_rlen CREG_IDX(RxLenErrCnt)
382 #define crp_erricrc CREG_IDX(RxICRCErrCnt)
383 #define crp_errlink CREG_IDX(RxLinkMalformCnt)
384 #define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
385 #define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
386 #define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
387 #define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
388 #define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
389 #define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
390 #define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
391 #define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
392 #define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
393 #define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
394 #define crp_pktrcv CREG_IDX(RxDataPktCnt)
395 #define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
396 #define crp_pktsend CREG_IDX(TxDataPktCnt)
397 #define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
398 #define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
399 #define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
400 #define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
401 #define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
402 #define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
403 #define crp_rcvebp CREG_IDX(RxEBPCnt)
404 #define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
405 #define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
406 #define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
407 #define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
408 #define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
409 #define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
410 #define crp_rxvlerr CREG_IDX(RxVlErrCnt)
411 #define crp_sendstall CREG_IDX(TxFlowStallCnt)
412 #define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
413 #define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
414 #define crp_txlenerr CREG_IDX(TxLenErrCnt)
415 #define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
416 #define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
417 #define crp_txunderrun CREG_IDX(TxUnderrunCnt)
418 #define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
419 #define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
420 #define crp_wordrcv CREG_IDX(RxDwordCnt)
421 #define crp_wordsend CREG_IDX(TxDwordCnt)
422 #define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
423 
424 /* these are the (few) counters that are not port-specific */
425 #define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
426 			QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
427 #define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
428 #define cr_lbint CREG_DEVIDX(LBIntCnt)
429 #define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
430 #define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
431 #define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
432 #define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
433 #define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
434 
435 /* no chip register for # of IB ports supported, so define */
436 #define NUM_IB_PORTS 2
437 
438 /* 1 VL15 buffer per hardware IB port, no register for this, so define */
439 #define NUM_VL15_BUFS NUM_IB_PORTS
440 
441 /*
442  * context 0 and 1 are special, and there is no chip register that
443  * defines this value, so we have to define it here.
444  * These are all allocated to either 0 or 1 for single port
445  * hardware configuration, otherwise each gets half
446  */
447 #define KCTXT0_EGRCNT 2048
448 
449 /* values for vl and port fields in PBC, 7322-specific */
450 #define PBC_PORT_SEL_LSB 26
451 #define PBC_PORT_SEL_RMASK 1
452 #define PBC_VL_NUM_LSB 27
453 #define PBC_VL_NUM_RMASK 7
454 #define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
455 #define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
456 
457 static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
458 	[IB_RATE_2_5_GBPS] = 16,
459 	[IB_RATE_5_GBPS] = 8,
460 	[IB_RATE_10_GBPS] = 4,
461 	[IB_RATE_20_GBPS] = 2,
462 	[IB_RATE_30_GBPS] = 2,
463 	[IB_RATE_40_GBPS] = 1
464 };
465 
466 #define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
467 #define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
468 
469 /* link training states, from IBC */
470 #define IB_7322_LT_STATE_DISABLED        0x00
471 #define IB_7322_LT_STATE_LINKUP          0x01
472 #define IB_7322_LT_STATE_POLLACTIVE      0x02
473 #define IB_7322_LT_STATE_POLLQUIET       0x03
474 #define IB_7322_LT_STATE_SLEEPDELAY      0x04
475 #define IB_7322_LT_STATE_SLEEPQUIET      0x05
476 #define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
477 #define IB_7322_LT_STATE_CFGRCVFCFG      0x09
478 #define IB_7322_LT_STATE_CFGWAITRMT      0x0a
479 #define IB_7322_LT_STATE_CFGIDLE         0x0b
480 #define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
481 #define IB_7322_LT_STATE_TXREVLANES      0x0d
482 #define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
483 #define IB_7322_LT_STATE_RECOVERIDLE     0x0f
484 #define IB_7322_LT_STATE_CFGENH          0x10
485 #define IB_7322_LT_STATE_CFGTEST         0x11
486 #define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
487 #define IB_7322_LT_STATE_CFGWAITENH      0x13
488 
489 /* link state machine states from IBC */
490 #define IB_7322_L_STATE_DOWN             0x0
491 #define IB_7322_L_STATE_INIT             0x1
492 #define IB_7322_L_STATE_ARM              0x2
493 #define IB_7322_L_STATE_ACTIVE           0x3
494 #define IB_7322_L_STATE_ACT_DEFER        0x4
495 
496 static const u8 qib_7322_physportstate[0x20] = {
497 	[IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
498 	[IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
499 	[IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
500 	[IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
501 	[IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
502 	[IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
503 	[IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
504 	[IB_7322_LT_STATE_CFGRCVFCFG] =
505 		IB_PHYSPORTSTATE_CFG_TRAIN,
506 	[IB_7322_LT_STATE_CFGWAITRMT] =
507 		IB_PHYSPORTSTATE_CFG_TRAIN,
508 	[IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
509 	[IB_7322_LT_STATE_RECOVERRETRAIN] =
510 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
511 	[IB_7322_LT_STATE_RECOVERWAITRMT] =
512 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
513 	[IB_7322_LT_STATE_RECOVERIDLE] =
514 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
515 	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
516 	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
517 	[IB_7322_LT_STATE_CFGWAITRMTTEST] =
518 		IB_PHYSPORTSTATE_CFG_TRAIN,
519 	[IB_7322_LT_STATE_CFGWAITENH] =
520 		IB_PHYSPORTSTATE_CFG_WAIT_ENH,
521 	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
522 	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
523 	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
524 	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
525 };
526 
527 #ifdef CONFIG_INFINIBAND_QIB_DCA
528 struct qib_irq_notify {
529 	int rcv;
530 	void *arg;
531 	struct irq_affinity_notify notify;
532 };
533 #endif
534 
535 struct qib_chip_specific {
536 	u64 __iomem *cregbase;
537 	u64 *cntrs;
538 	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
539 	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
540 	u64 main_int_mask;      /* clear bits which have dedicated handlers */
541 	u64 int_enable_mask;  /* for per port interrupts in single port mode */
542 	u64 errormask;
543 	u64 hwerrmask;
544 	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
545 	u64 gpio_mask; /* shadow the gpio mask register */
546 	u64 extctrl; /* shadow the gpio output enable, etc... */
547 	u32 ncntrs;
548 	u32 nportcntrs;
549 	u32 cntrnamelen;
550 	u32 portcntrnamelen;
551 	u32 numctxts;
552 	u32 rcvegrcnt;
553 	u32 updthresh; /* current AvailUpdThld */
554 	u32 updthresh_dflt; /* default AvailUpdThld */
555 	u32 r1;
556 	int irq;
557 	u32 num_msix_entries;
558 	u32 sdmabufcnt;
559 	u32 lastbuf_for_pio;
560 	u32 stay_in_freeze;
561 	u32 recovery_ports_initted;
562 #ifdef CONFIG_INFINIBAND_QIB_DCA
563 	u32 dca_ctrl;
564 	int rhdr_cpu[18];
565 	int sdma_cpu[2];
566 	u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
567 #endif
568 	struct qib_msix_entry *msix_entries;
569 	unsigned long *sendchkenable;
570 	unsigned long *sendgrhchk;
571 	unsigned long *sendibchk;
572 	u32 rcvavail_timeout[18];
573 	char emsgbuf[128]; /* for device error interrupt msg buffer */
574 };
575 
576 /* Table of entries in "human readable" form Tx Emphasis. */
577 struct txdds_ent {
578 	u8 amp;
579 	u8 pre;
580 	u8 main;
581 	u8 post;
582 };
583 
584 struct vendor_txdds_ent {
585 	u8 oui[QSFP_VOUI_LEN];
586 	u8 *partnum;
587 	struct txdds_ent sdr;
588 	struct txdds_ent ddr;
589 	struct txdds_ent qdr;
590 };
591 
592 static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
593 
594 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
595 #define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
596 #define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
597 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
598 
599 #define H1_FORCE_VAL 8
600 #define H1_FORCE_QME 1 /*  may be overridden via setup_txselect() */
601 #define H1_FORCE_QMH 7 /*  may be overridden via setup_txselect() */
602 
603 /* The static and dynamic registers are paired, and the pairs indexed by spd */
604 #define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
605 	+ ((spd) * 2))
606 
607 #define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
608 #define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
609 #define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
610 #define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
611 #define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
612 
613 struct qib_chippport_specific {
614 	u64 __iomem *kpregbase;
615 	u64 __iomem *cpregbase;
616 	u64 *portcntrs;
617 	struct qib_pportdata *ppd;
618 	wait_queue_head_t autoneg_wait;
619 	struct delayed_work autoneg_work;
620 	struct delayed_work ipg_work;
621 	struct timer_list chase_timer;
622 	/*
623 	 * these 5 fields are used to establish deltas for IB symbol
624 	 * errors and linkrecovery errors.  They can be reported on
625 	 * some chips during link negotiation prior to INIT, and with
626 	 * DDR when faking DDR negotiations with non-IBTA switches.
627 	 * The chip counters are adjusted at driver unload if there is
628 	 * a non-zero delta.
629 	 */
630 	u64 ibdeltainprog;
631 	u64 ibsymdelta;
632 	u64 ibsymsnap;
633 	u64 iblnkerrdelta;
634 	u64 iblnkerrsnap;
635 	u64 iblnkdownsnap;
636 	u64 iblnkdowndelta;
637 	u64 ibmalfdelta;
638 	u64 ibmalfsnap;
639 	u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
640 	u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
641 	unsigned long qdr_dfe_time;
642 	unsigned long chase_end;
643 	u32 autoneg_tries;
644 	u32 recovery_init;
645 	u32 qdr_dfe_on;
646 	u32 qdr_reforce;
647 	/*
648 	 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
649 	 * entry zero is unused, to simplify indexing
650 	 */
651 	u8 h1_val;
652 	u8 no_eep;  /* txselect table index to use if no qsfp info */
653 	u8 ipg_tries;
654 	u8 ibmalfusesnap;
655 	struct qib_qsfp_data qsfp_data;
656 	char epmsgbuf[192]; /* for port error interrupt msg buffer */
657 	char sdmamsgbuf[192]; /* for per-port sdma error messages */
658 };
659 
660 static struct {
661 	const char *name;
662 	irq_handler_t handler;
663 	int lsb;
664 	int port; /* 0 if not port-specific, else port # */
665 	int dca;
666 } irq_table[] = {
667 	{ "", qib_7322intr, -1, 0, 0 },
668 	{ " (buf avail)", qib_7322bufavail,
669 		SYM_LSB(IntStatus, SendBufAvail), 0, 0},
670 	{ " (sdma 0)", sdma_intr,
671 		SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
672 	{ " (sdma 1)", sdma_intr,
673 		SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
674 	{ " (sdmaI 0)", sdma_idle_intr,
675 		SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1},
676 	{ " (sdmaI 1)", sdma_idle_intr,
677 		SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1},
678 	{ " (sdmaP 0)", sdma_progress_intr,
679 		SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
680 	{ " (sdmaP 1)", sdma_progress_intr,
681 		SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
682 	{ " (sdmaC 0)", sdma_cleanup_intr,
683 		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
684 	{ " (sdmaC 1)", sdma_cleanup_intr,
685 		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 , 0},
686 };
687 
688 #ifdef CONFIG_INFINIBAND_QIB_DCA
689 
690 static const struct dca_reg_map {
691 	int     shadow_inx;
692 	int     lsb;
693 	u64     mask;
694 	u16     regno;
695 } dca_rcvhdr_reg_map[] = {
696 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
697 	   ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
698 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
699 	   ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
700 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
701 	   ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
702 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
703 	   ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
704 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
705 	   ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
706 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
707 	   ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
708 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
709 	   ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
710 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
711 	   ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
712 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
713 	   ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
714 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
715 	   ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
716 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
717 	   ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
718 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
719 	   ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
720 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
721 	   ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
722 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
723 	   ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
724 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
725 	   ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
726 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
727 	   ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
728 	{ 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
729 	   ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
730 	{ 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
731 	   ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
732 };
733 #endif
734 
735 /* ibcctrl bits */
736 #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
737 /* cycle through TS1/TS2 till OK */
738 #define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
739 /* wait for TS1, then go on */
740 #define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
741 #define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
742 
743 #define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
744 #define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
745 #define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
746 
747 #define BLOB_7322_IBCHG 0x101
748 
749 static inline void qib_write_kreg(const struct qib_devdata *dd,
750 				  const u32 regno, u64 value);
751 static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
752 static void write_7322_initregs(struct qib_devdata *);
753 static void write_7322_init_portregs(struct qib_pportdata *);
754 static void setup_7322_link_recovery(struct qib_pportdata *, u32);
755 static void check_7322_rxe_status(struct qib_pportdata *);
756 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
757 #ifdef CONFIG_INFINIBAND_QIB_DCA
758 static void qib_setup_dca(struct qib_devdata *dd);
759 static void setup_dca_notifier(struct qib_devdata *dd,
760 			       struct qib_msix_entry *m);
761 static void reset_dca_notifier(struct qib_devdata *dd,
762 			       struct qib_msix_entry *m);
763 #endif
764 
765 /**
766  * qib_read_ureg32 - read 32-bit virtualized per-context register
767  * @dd: device
768  * @regno: register number
769  * @ctxt: context number
770  *
771  * Return the contents of a register that is virtualized to be per context.
772  * Returns -1 on errors (not distinguishable from valid contents at
773  * runtime; we may add a separate error variable at some point).
774  */
775 static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
776 				  enum qib_ureg regno, int ctxt)
777 {
778 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
779 		return 0;
780 	return readl(regno + (u64 __iomem *)(
781 		(dd->ureg_align * ctxt) + (dd->userbase ?
782 		 (char __iomem *)dd->userbase :
783 		 (char __iomem *)dd->kregbase + dd->uregbase)));
784 }
785 
786 /**
787  * qib_read_ureg - read virtualized per-context register
788  * @dd: device
789  * @regno: register number
790  * @ctxt: context number
791  *
792  * Return the contents of a register that is virtualized to be per context.
793  * Returns -1 on errors (not distinguishable from valid contents at
794  * runtime; we may add a separate error variable at some point).
795  */
796 static inline u64 qib_read_ureg(const struct qib_devdata *dd,
797 				enum qib_ureg regno, int ctxt)
798 {
799 
800 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
801 		return 0;
802 	return readq(regno + (u64 __iomem *)(
803 		(dd->ureg_align * ctxt) + (dd->userbase ?
804 		 (char __iomem *)dd->userbase :
805 		 (char __iomem *)dd->kregbase + dd->uregbase)));
806 }
807 
808 /**
809  * qib_write_ureg - write virtualized per-context register
810  * @dd: device
811  * @regno: register number
812  * @value: value
813  * @ctxt: context
814  *
815  * Write the contents of a register that is virtualized to be per context.
816  */
817 static inline void qib_write_ureg(const struct qib_devdata *dd,
818 				  enum qib_ureg regno, u64 value, int ctxt)
819 {
820 	u64 __iomem *ubase;
821 
822 	if (dd->userbase)
823 		ubase = (u64 __iomem *)
824 			((char __iomem *) dd->userbase +
825 			 dd->ureg_align * ctxt);
826 	else
827 		ubase = (u64 __iomem *)
828 			(dd->uregbase +
829 			 (char __iomem *) dd->kregbase +
830 			 dd->ureg_align * ctxt);
831 
832 	if (dd->kregbase && (dd->flags & QIB_PRESENT))
833 		writeq(value, &ubase[regno]);
834 }
835 
836 static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
837 				  const u32 regno)
838 {
839 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
840 		return -1;
841 	return readl((u32 __iomem *) &dd->kregbase[regno]);
842 }
843 
844 static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
845 				  const u32 regno)
846 {
847 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
848 		return -1;
849 	return readq(&dd->kregbase[regno]);
850 }
851 
852 static inline void qib_write_kreg(const struct qib_devdata *dd,
853 				  const u32 regno, u64 value)
854 {
855 	if (dd->kregbase && (dd->flags & QIB_PRESENT))
856 		writeq(value, &dd->kregbase[regno]);
857 }
858 
859 /*
860  * not many sanity checks for the port-specific kernel register routines,
861  * since they are only used when it's known to be safe.
862 */
863 static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
864 				     const u16 regno)
865 {
866 	if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
867 		return 0ULL;
868 	return readq(&ppd->cpspec->kpregbase[regno]);
869 }
870 
871 static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
872 				       const u16 regno, u64 value)
873 {
874 	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
875 	    (ppd->dd->flags & QIB_PRESENT))
876 		writeq(value, &ppd->cpspec->kpregbase[regno]);
877 }
878 
879 /**
880  * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
881  * @dd: the qlogic_ib device
882  * @regno: the register number to write
883  * @ctxt: the context containing the register
884  * @value: the value to write
885  */
886 static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
887 				       const u16 regno, unsigned ctxt,
888 				       u64 value)
889 {
890 	qib_write_kreg(dd, regno + ctxt, value);
891 }
892 
893 static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
894 {
895 	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
896 		return 0;
897 	return readq(&dd->cspec->cregbase[regno]);
898 
899 
900 }
901 
902 static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
903 {
904 	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
905 		return 0;
906 	return readl(&dd->cspec->cregbase[regno]);
907 
908 
909 }
910 
911 static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
912 					u16 regno, u64 value)
913 {
914 	if (ppd->cpspec && ppd->cpspec->cpregbase &&
915 	    (ppd->dd->flags & QIB_PRESENT))
916 		writeq(value, &ppd->cpspec->cpregbase[regno]);
917 }
918 
919 static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
920 				      u16 regno)
921 {
922 	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
923 	    !(ppd->dd->flags & QIB_PRESENT))
924 		return 0;
925 	return readq(&ppd->cpspec->cpregbase[regno]);
926 }
927 
928 static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
929 					u16 regno)
930 {
931 	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
932 	    !(ppd->dd->flags & QIB_PRESENT))
933 		return 0;
934 	return readl(&ppd->cpspec->cpregbase[regno]);
935 }
936 
937 /* bits in Control register */
938 #define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
939 #define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
940 
941 /* bits in general interrupt regs */
942 #define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
943 #define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
944 #define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
945 #define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
946 #define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
947 #define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
948 #define QIB_I_C_ERROR INT_MASK(Err)
949 
950 #define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
951 #define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
952 #define QIB_I_GPIO INT_MASK(AssertGPIO)
953 #define QIB_I_P_SDMAINT(pidx) \
954 	(INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
955 	 INT_MASK_P(SDmaProgress, pidx) | \
956 	 INT_MASK_PM(SDmaCleanupDone, pidx))
957 
958 /* Interrupt bits that are "per port" */
959 #define QIB_I_P_BITSEXTANT(pidx) \
960 	(INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
961 	INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
962 	INT_MASK_P(SDmaProgress, pidx) | \
963 	INT_MASK_PM(SDmaCleanupDone, pidx))
964 
965 /* Interrupt bits that are common to a device */
966 /* currently unused: QIB_I_SPIOSENT */
967 #define QIB_I_C_BITSEXTANT \
968 	(QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
969 	QIB_I_SPIOSENT | \
970 	QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
971 
972 #define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
973 	QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
974 
975 /*
976  * Error bits that are "per port".
977  */
978 #define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
979 #define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
980 #define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
981 #define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
982 #define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
983 #define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
984 #define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
985 #define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
986 #define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
987 #define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
988 #define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
989 #define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
990 #define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
991 #define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
992 #define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
993 #define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
994 #define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
995 #define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
996 #define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
997 #define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
998 #define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
999 #define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
1000 #define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
1001 #define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
1002 #define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
1003 #define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
1004 #define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
1005 #define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
1006 
1007 #define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
1008 #define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
1009 #define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
1010 #define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
1011 #define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
1012 #define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
1013 #define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
1014 #define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
1015 #define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
1016 #define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
1017 #define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
1018 
1019 /* Error bits that are common to a device */
1020 #define QIB_E_RESET ERR_MASK(ResetNegated)
1021 #define QIB_E_HARDWARE ERR_MASK(HardwareErr)
1022 #define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
1023 
1024 
1025 /*
1026  * Per chip (rather than per-port) errors.  Most either do
1027  * nothing but trigger a print (because they self-recover, or
1028  * always occur in tandem with other errors that handle the
1029  * issue), or because they indicate errors with no recovery,
1030  * but we want to know that they happened.
1031  */
1032 #define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
1033 #define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
1034 #define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
1035 #define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
1036 #define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
1037 #define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
1038 #define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
1039 #define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
1040 
1041 /* SDMA chip errors (not per port)
1042  * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
1043  * the SDMAHALT error immediately, so we just print the dup error via the
1044  * E_AUTO mechanism.  This is true of most of the per-port fatal errors
1045  * as well, but since this is port-independent, by definition, it's
1046  * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are per
1047  * packet send errors, and so are handled in the same manner as other
1048  * per-packet errors.
1049  */
1050 #define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
1051 #define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
1052 #define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
1053 
1054 /*
1055  * Below functionally equivalent to legacy QLOGIC_IB_E_PKTERRS
1056  * it is used to print "common" packet errors.
1057  */
1058 #define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
1059 	QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
1060 	QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
1061 	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1062 	QIB_E_P_REBP)
1063 
1064 /* Error Bits that Packet-related (Receive, per-port) */
1065 #define QIB_E_P_RPKTERRS (\
1066 	QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
1067 	QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
1068 	QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
1069 	QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
1070 	QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
1071 	QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
1072 
1073 /*
1074  * Error bits that are Send-related (per port)
1075  * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
1076  * All of these potentially need to have a buffer disarmed
1077  */
1078 #define QIB_E_P_SPKTERRS (\
1079 	QIB_E_P_SUNEXP_PKTNUM |\
1080 	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1081 	QIB_E_P_SMAXPKTLEN |\
1082 	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1083 	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
1084 	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
1085 
1086 #define QIB_E_SPKTERRS ( \
1087 		QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
1088 		ERR_MASK_N(SendUnsupportedVLErr) |			\
1089 		QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
1090 
1091 #define QIB_E_P_SDMAERRS ( \
1092 	QIB_E_P_SDMAHALT | \
1093 	QIB_E_P_SDMADESCADDRMISALIGN | \
1094 	QIB_E_P_SDMAUNEXPDATA | \
1095 	QIB_E_P_SDMAMISSINGDW | \
1096 	QIB_E_P_SDMADWEN | \
1097 	QIB_E_P_SDMARPYTAG | \
1098 	QIB_E_P_SDMA1STDESC | \
1099 	QIB_E_P_SDMABASE | \
1100 	QIB_E_P_SDMATAILOUTOFBOUND | \
1101 	QIB_E_P_SDMAOUTOFBOUND | \
1102 	QIB_E_P_SDMAGENMISMATCH)
1103 
1104 /*
1105  * This sets some bits more than once, but makes it more obvious which
1106  * bits are not handled under other categories, and the repeat definition
1107  * is not a problem.
1108  */
1109 #define QIB_E_P_BITSEXTANT ( \
1110 	QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
1111 	QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
1112 	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
1113 	QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
1114 	)
1115 
1116 /*
1117  * These are errors that can occur when the link
1118  * changes state while a packet is being sent or received.  This doesn't
1119  * cover things like EBP or VCRC that can be the result of a sending
1120  * having the link change state, so we receive a "known bad" packet.
1121  * All of these are "per port", so renamed:
1122  */
1123 #define QIB_E_P_LINK_PKTERRS (\
1124 	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1125 	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
1126 	QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
1127 	QIB_E_P_RUNEXPCHAR)
1128 
1129 /*
1130  * This sets some bits more than once, but makes it more obvious which
1131  * bits are not handled under other categories (such as QIB_E_SPKTERRS),
1132  * and the repeat definition is not a problem.
1133  */
1134 #define QIB_E_C_BITSEXTANT (\
1135 	QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
1136 	QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
1137 	QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
1138 
1139 /* Likewise Neuter E_SPKT_ERRS_IGNORE */
1140 #define E_SPKT_ERRS_IGNORE 0
1141 
1142 #define QIB_EXTS_MEMBIST_DISABLED \
1143 	SYM_MASK(EXTStatus, MemBISTDisabled)
1144 #define QIB_EXTS_MEMBIST_ENDTEST \
1145 	SYM_MASK(EXTStatus, MemBISTEndTest)
1146 
1147 #define QIB_E_SPIOARMLAUNCH \
1148 	ERR_MASK(SendArmLaunchErr)
1149 
1150 #define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
1151 #define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
1152 
1153 /*
1154  * IBTA_1_2 is set when multiple speeds are enabled (normal),
1155  * and also if forced QDR (only QDR enabled).  It's enabled for the
1156  * forced QDR case so that scrambling will be enabled by the TS3
1157  * exchange, when supported by both sides of the link.
1158  */
1159 #define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
1160 #define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
1161 #define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
1162 #define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
1163 #define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
1164 #define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
1165 	SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
1166 #define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
1167 
1168 #define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
1169 #define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
1170 
1171 #define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
1172 #define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1173 #define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1174 
1175 #define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1176 #define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1177 #define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
1178 	SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
1179 #define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
1180 	SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
1181 #define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
1182 
1183 #define IBA7322_REDIRECT_VEC_PER_REG 12
1184 
1185 #define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
1186 #define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
1187 #define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
1188 #define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
1189 #define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
1190 
1191 #define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
1192 
1193 #define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
1194 	.msg = #fldname , .sz = sizeof(#fldname) }
1195 #define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
1196 	fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
1197 static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
1198 	HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
1199 	HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
1200 	HWE_AUTO(PCIESerdesPClkNotDetect),
1201 	HWE_AUTO(PowerOnBISTFailed),
1202 	HWE_AUTO(TempsenseTholdReached),
1203 	HWE_AUTO(MemoryErr),
1204 	HWE_AUTO(PCIeBusParityErr),
1205 	HWE_AUTO(PcieCplTimeout),
1206 	HWE_AUTO(PciePoisonedTLP),
1207 	HWE_AUTO_P(SDmaMemReadErr, 1),
1208 	HWE_AUTO_P(SDmaMemReadErr, 0),
1209 	HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
1210 	HWE_AUTO_P(IBCBusToSPCParityErr, 1),
1211 	HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
1212 	HWE_AUTO(statusValidNoEop),
1213 	HWE_AUTO(LATriggered),
1214 	{ .mask = 0, .sz = 0 }
1215 };
1216 
1217 #define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1218 	.msg = #fldname, .sz = sizeof(#fldname) }
1219 #define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1220 	.msg = #fldname, .sz = sizeof(#fldname) }
1221 static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
1222 	E_AUTO(RcvEgrFullErr),
1223 	E_AUTO(RcvHdrFullErr),
1224 	E_AUTO(ResetNegated),
1225 	E_AUTO(HardwareErr),
1226 	E_AUTO(InvalidAddrErr),
1227 	E_AUTO(SDmaVL15Err),
1228 	E_AUTO(SBufVL15MisUseErr),
1229 	E_AUTO(InvalidEEPCmd),
1230 	E_AUTO(RcvContextShareErr),
1231 	E_AUTO(SendVLMismatchErr),
1232 	E_AUTO(SendArmLaunchErr),
1233 	E_AUTO(SendSpecialTriggerErr),
1234 	E_AUTO(SDmaWrongPortErr),
1235 	E_AUTO(SDmaBufMaskDuplicateErr),
1236 	{ .mask = 0, .sz = 0 }
1237 };
1238 
1239 static const struct  qib_hwerror_msgs qib_7322p_error_msgs[] = {
1240 	E_P_AUTO(IBStatusChanged),
1241 	E_P_AUTO(SHeadersErr),
1242 	E_P_AUTO(VL15BufMisuseErr),
1243 	/*
1244 	 * SDmaHaltErr is not really an error, make it clearer;
1245 	 */
1246 	{.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
1247 		.sz = 11},
1248 	E_P_AUTO(SDmaDescAddrMisalignErr),
1249 	E_P_AUTO(SDmaUnexpDataErr),
1250 	E_P_AUTO(SDmaMissingDwErr),
1251 	E_P_AUTO(SDmaDwEnErr),
1252 	E_P_AUTO(SDmaRpyTagErr),
1253 	E_P_AUTO(SDma1stDescErr),
1254 	E_P_AUTO(SDmaBaseErr),
1255 	E_P_AUTO(SDmaTailOutOfBoundErr),
1256 	E_P_AUTO(SDmaOutOfBoundErr),
1257 	E_P_AUTO(SDmaGenMismatchErr),
1258 	E_P_AUTO(SendBufMisuseErr),
1259 	E_P_AUTO(SendUnsupportedVLErr),
1260 	E_P_AUTO(SendUnexpectedPktNumErr),
1261 	E_P_AUTO(SendDroppedDataPktErr),
1262 	E_P_AUTO(SendDroppedSmpPktErr),
1263 	E_P_AUTO(SendPktLenErr),
1264 	E_P_AUTO(SendUnderRunErr),
1265 	E_P_AUTO(SendMaxPktLenErr),
1266 	E_P_AUTO(SendMinPktLenErr),
1267 	E_P_AUTO(RcvIBLostLinkErr),
1268 	E_P_AUTO(RcvHdrErr),
1269 	E_P_AUTO(RcvHdrLenErr),
1270 	E_P_AUTO(RcvBadTidErr),
1271 	E_P_AUTO(RcvBadVersionErr),
1272 	E_P_AUTO(RcvIBFlowErr),
1273 	E_P_AUTO(RcvEBPErr),
1274 	E_P_AUTO(RcvUnsupportedVLErr),
1275 	E_P_AUTO(RcvUnexpectedCharErr),
1276 	E_P_AUTO(RcvShortPktLenErr),
1277 	E_P_AUTO(RcvLongPktLenErr),
1278 	E_P_AUTO(RcvMaxPktLenErr),
1279 	E_P_AUTO(RcvMinPktLenErr),
1280 	E_P_AUTO(RcvICRCErr),
1281 	E_P_AUTO(RcvVCRCErr),
1282 	E_P_AUTO(RcvFormatErr),
1283 	{ .mask = 0, .sz = 0 }
1284 };
1285 
1286 /*
1287  * Below generates "auto-message" for interrupts not specific to any port or
1288  * context
1289  */
1290 #define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1291 	.msg = #fldname, .sz = sizeof(#fldname) }
1292 /* Below generates "auto-message" for interrupts specific to a port */
1293 #define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1294 	SYM_LSB(IntMask, fldname##Mask##_0), \
1295 	SYM_LSB(IntMask, fldname##Mask##_1)), \
1296 	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1297 /* For some reason, the SerDesTrimDone bits are reversed */
1298 #define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1299 	SYM_LSB(IntMask, fldname##Mask##_1), \
1300 	SYM_LSB(IntMask, fldname##Mask##_0)), \
1301 	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1302 /*
1303  * Below generates "auto-message" for interrupts specific to a context,
1304  * with ctxt-number appended
1305  */
1306 #define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1307 	SYM_LSB(IntMask, fldname##0IntMask), \
1308 	SYM_LSB(IntMask, fldname##17IntMask)), \
1309 	.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
1310 
1311 #define TXSYMPTOM_AUTO_P(fldname) \
1312 	{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
1313 	.msg = #fldname, .sz = sizeof(#fldname) }
1314 static const struct  qib_hwerror_msgs hdrchk_msgs[] = {
1315 	TXSYMPTOM_AUTO_P(NonKeyPacket),
1316 	TXSYMPTOM_AUTO_P(GRHFail),
1317 	TXSYMPTOM_AUTO_P(PkeyFail),
1318 	TXSYMPTOM_AUTO_P(QPFail),
1319 	TXSYMPTOM_AUTO_P(SLIDFail),
1320 	TXSYMPTOM_AUTO_P(RawIPV6),
1321 	TXSYMPTOM_AUTO_P(PacketTooSmall),
1322 	{ .mask = 0, .sz = 0 }
1323 };
1324 
1325 #define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1326 
1327 /*
1328  * Called when we might have an error that is specific to a particular
1329  * PIO buffer, and may need to cancel that buffer, so it can be re-used,
1330  * because we don't need to force the update of pioavail
1331  */
1332 static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1333 {
1334 	struct qib_devdata *dd = ppd->dd;
1335 	u32 i;
1336 	int any;
1337 	u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1338 	u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1339 	unsigned long sbuf[4];
1340 
1341 	/*
1342 	 * It's possible that sendbuffererror could have bits set; might
1343 	 * have already done this as a result of hardware error handling.
1344 	 */
1345 	any = 0;
1346 	for (i = 0; i < regcnt; ++i) {
1347 		sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1348 		if (sbuf[i]) {
1349 			any = 1;
1350 			qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1351 		}
1352 	}
1353 
1354 	if (any)
1355 		qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1356 }
1357 
1358 /* No txe_recover yet, if ever */
1359 
1360 /* No decode__errors yet */
1361 static void err_decode(char *msg, size_t len, u64 errs,
1362 		       const struct qib_hwerror_msgs *msp)
1363 {
1364 	u64 these, lmask;
1365 	int took, multi, n = 0;
1366 
1367 	while (errs && msp && msp->mask) {
1368 		multi = (msp->mask & (msp->mask - 1));
1369 		while (errs & msp->mask) {
1370 			these = (errs & msp->mask);
1371 			lmask = (these & (these - 1)) ^ these;
1372 			if (len) {
1373 				if (n++) {
1374 					/* separate the strings */
1375 					*msg++ = ',';
1376 					len--;
1377 				}
1378 				BUG_ON(!msp->sz);
1379 				/* msp->sz counts the nul */
1380 				took = min_t(size_t, msp->sz - (size_t)1, len);
1381 				memcpy(msg,  msp->msg, took);
1382 				len -= took;
1383 				msg += took;
1384 				if (len)
1385 					*msg = '\0';
1386 			}
1387 			errs &= ~lmask;
1388 			if (len && multi) {
1389 				/* More than one bit this mask */
1390 				int idx = -1;
1391 
1392 				while (lmask & msp->mask) {
1393 					++idx;
1394 					lmask >>= 1;
1395 				}
1396 				took = scnprintf(msg, len, "_%d", idx);
1397 				len -= took;
1398 				msg += took;
1399 			}
1400 		}
1401 		++msp;
1402 	}
1403 	/* If some bits are left, show in hex. */
1404 	if (len && errs)
1405 		snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1406 			(unsigned long long) errs);
1407 }
1408 
1409 /* only called if r1 set */
1410 static void flush_fifo(struct qib_pportdata *ppd)
1411 {
1412 	struct qib_devdata *dd = ppd->dd;
1413 	u32 __iomem *piobuf;
1414 	u32 bufn;
1415 	u32 *hdr;
1416 	u64 pbc;
1417 	const unsigned hdrwords = 7;
1418 	static struct ib_header ibhdr = {
1419 		.lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1420 		.lrh[1] = IB_LID_PERMISSIVE,
1421 		.lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1422 		.lrh[3] = IB_LID_PERMISSIVE,
1423 		.u.oth.bth[0] = cpu_to_be32(
1424 			(IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1425 		.u.oth.bth[1] = cpu_to_be32(0),
1426 		.u.oth.bth[2] = cpu_to_be32(0),
1427 		.u.oth.u.ud.deth[0] = cpu_to_be32(0),
1428 		.u.oth.u.ud.deth[1] = cpu_to_be32(0),
1429 	};
1430 
1431 	/*
1432 	 * Send a dummy VL15 packet to flush the launch FIFO.
1433 	 * This will not actually be sent since the TxeBypassIbc bit is set.
1434 	 */
1435 	pbc = PBC_7322_VL15_SEND |
1436 		(((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1437 		(hdrwords + SIZE_OF_CRC);
1438 	piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1439 	if (!piobuf)
1440 		return;
1441 	writeq(pbc, piobuf);
1442 	hdr = (u32 *) &ibhdr;
1443 	if (dd->flags & QIB_PIO_FLUSH_WC) {
1444 		qib_flush_wc();
1445 		qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1446 		qib_flush_wc();
1447 		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1448 		qib_flush_wc();
1449 	} else
1450 		qib_pio_copy(piobuf + 2, hdr, hdrwords);
1451 	qib_sendbuf_done(dd, bufn);
1452 }
1453 
1454 /*
1455  * This is called with interrupts disabled and sdma_lock held.
1456  */
1457 static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1458 {
1459 	struct qib_devdata *dd = ppd->dd;
1460 	u64 set_sendctrl = 0;
1461 	u64 clr_sendctrl = 0;
1462 
1463 	if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1464 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1465 	else
1466 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1467 
1468 	if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1469 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1470 	else
1471 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1472 
1473 	if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1474 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1475 	else
1476 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1477 
1478 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1479 		set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1480 				SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1481 				SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1482 	else
1483 		clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1484 				SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1485 				SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1486 
1487 	spin_lock(&dd->sendctrl_lock);
1488 
1489 	/* If we are draining everything, block sends first */
1490 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1491 		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1492 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1493 		qib_write_kreg(dd, kr_scratch, 0);
1494 	}
1495 
1496 	ppd->p_sendctrl |= set_sendctrl;
1497 	ppd->p_sendctrl &= ~clr_sendctrl;
1498 
1499 	if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1500 		qib_write_kreg_port(ppd, krp_sendctrl,
1501 				    ppd->p_sendctrl |
1502 				    SYM_MASK(SendCtrl_0, SDmaCleanup));
1503 	else
1504 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1505 	qib_write_kreg(dd, kr_scratch, 0);
1506 
1507 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1508 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1509 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1510 		qib_write_kreg(dd, kr_scratch, 0);
1511 	}
1512 
1513 	spin_unlock(&dd->sendctrl_lock);
1514 
1515 	if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1516 		flush_fifo(ppd);
1517 }
1518 
1519 static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1520 {
1521 	__qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1522 }
1523 
1524 static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1525 {
1526 	/*
1527 	 * Set SendDmaLenGen and clear and set
1528 	 * the MSB of the generation count to enable generation checking
1529 	 * and load the internal generation counter.
1530 	 */
1531 	qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1532 	qib_write_kreg_port(ppd, krp_senddmalengen,
1533 			    ppd->sdma_descq_cnt |
1534 			    (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1535 }
1536 
1537 /*
1538  * Must be called with sdma_lock held, or before init finished.
1539  */
1540 static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1541 {
1542 	/* Commit writes to memory and advance the tail on the chip */
1543 	wmb();
1544 	ppd->sdma_descq_tail = tail;
1545 	qib_write_kreg_port(ppd, krp_senddmatail, tail);
1546 }
1547 
1548 /*
1549  * This is called with interrupts disabled and sdma_lock held.
1550  */
1551 static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1552 {
1553 	/*
1554 	 * Drain all FIFOs.
1555 	 * The hardware doesn't require this but we do it so that verbs
1556 	 * and user applications don't wait for link active to send stale
1557 	 * data.
1558 	 */
1559 	sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1560 
1561 	qib_sdma_7322_setlengen(ppd);
1562 	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1563 	ppd->sdma_head_dma[0] = 0;
1564 	qib_7322_sdma_sendctrl(ppd,
1565 		ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1566 }
1567 
1568 #define DISABLES_SDMA ( \
1569 	QIB_E_P_SDMAHALT | \
1570 	QIB_E_P_SDMADESCADDRMISALIGN | \
1571 	QIB_E_P_SDMAMISSINGDW | \
1572 	QIB_E_P_SDMADWEN | \
1573 	QIB_E_P_SDMARPYTAG | \
1574 	QIB_E_P_SDMA1STDESC | \
1575 	QIB_E_P_SDMABASE | \
1576 	QIB_E_P_SDMATAILOUTOFBOUND | \
1577 	QIB_E_P_SDMAOUTOFBOUND | \
1578 	QIB_E_P_SDMAGENMISMATCH)
1579 
1580 static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1581 {
1582 	unsigned long flags;
1583 	struct qib_devdata *dd = ppd->dd;
1584 
1585 	errs &= QIB_E_P_SDMAERRS;
1586 	err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
1587 		   errs, qib_7322p_error_msgs);
1588 
1589 	if (errs & QIB_E_P_SDMAUNEXPDATA)
1590 		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1591 			    ppd->port);
1592 
1593 	spin_lock_irqsave(&ppd->sdma_lock, flags);
1594 
1595 	if (errs != QIB_E_P_SDMAHALT) {
1596 		/* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
1597 		qib_dev_porterr(dd, ppd->port,
1598 			"SDMA %s 0x%016llx %s\n",
1599 			qib_sdma_state_names[ppd->sdma_state.current_state],
1600 			errs, ppd->cpspec->sdmamsgbuf);
1601 		dump_sdma_7322_state(ppd);
1602 	}
1603 
1604 	switch (ppd->sdma_state.current_state) {
1605 	case qib_sdma_state_s00_hw_down:
1606 		break;
1607 
1608 	case qib_sdma_state_s10_hw_start_up_wait:
1609 		if (errs & QIB_E_P_SDMAHALT)
1610 			__qib_sdma_process_event(ppd,
1611 				qib_sdma_event_e20_hw_started);
1612 		break;
1613 
1614 	case qib_sdma_state_s20_idle:
1615 		break;
1616 
1617 	case qib_sdma_state_s30_sw_clean_up_wait:
1618 		break;
1619 
1620 	case qib_sdma_state_s40_hw_clean_up_wait:
1621 		if (errs & QIB_E_P_SDMAHALT)
1622 			__qib_sdma_process_event(ppd,
1623 				qib_sdma_event_e50_hw_cleaned);
1624 		break;
1625 
1626 	case qib_sdma_state_s50_hw_halt_wait:
1627 		if (errs & QIB_E_P_SDMAHALT)
1628 			__qib_sdma_process_event(ppd,
1629 				qib_sdma_event_e60_hw_halted);
1630 		break;
1631 
1632 	case qib_sdma_state_s99_running:
1633 		__qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1634 		__qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1635 		break;
1636 	}
1637 
1638 	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1639 }
1640 
1641 /*
1642  * handle per-device errors (not per-port errors)
1643  */
1644 static noinline void handle_7322_errors(struct qib_devdata *dd)
1645 {
1646 	char *msg;
1647 	u64 iserr = 0;
1648 	u64 errs;
1649 	u64 mask;
1650 	int log_idx;
1651 
1652 	qib_stats.sps_errints++;
1653 	errs = qib_read_kreg64(dd, kr_errstatus);
1654 	if (!errs) {
1655 		qib_devinfo(dd->pcidev,
1656 			"device error interrupt, but no error bits set!\n");
1657 		goto done;
1658 	}
1659 
1660 	/* don't report errors that are masked */
1661 	errs &= dd->cspec->errormask;
1662 	msg = dd->cspec->emsgbuf;
1663 
1664 	/* do these first, they are most important */
1665 	if (errs & QIB_E_HARDWARE) {
1666 		*msg = '\0';
1667 		qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1668 	} else
1669 		for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1670 			if (errs & dd->eep_st_masks[log_idx].errs_to_log)
1671 				qib_inc_eeprom_err(dd, log_idx, 1);
1672 
1673 	if (errs & QIB_E_SPKTERRS) {
1674 		qib_disarm_7322_senderrbufs(dd->pport);
1675 		qib_stats.sps_txerrs++;
1676 	} else if (errs & QIB_E_INVALIDADDR)
1677 		qib_stats.sps_txerrs++;
1678 	else if (errs & QIB_E_ARMLAUNCH) {
1679 		qib_stats.sps_txerrs++;
1680 		qib_disarm_7322_senderrbufs(dd->pport);
1681 	}
1682 	qib_write_kreg(dd, kr_errclear, errs);
1683 
1684 	/*
1685 	 * The ones we mask off are handled specially below
1686 	 * or above.  Also mask SDMADISABLED by default as it
1687 	 * is too chatty.
1688 	 */
1689 	mask = QIB_E_HARDWARE;
1690 	*msg = '\0';
1691 
1692 	err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
1693 		   qib_7322error_msgs);
1694 
1695 	/*
1696 	 * Getting reset is a tragedy for all ports. Mark the device
1697 	 * _and_ the ports as "offline" in way meaningful to each.
1698 	 */
1699 	if (errs & QIB_E_RESET) {
1700 		int pidx;
1701 
1702 		qib_dev_err(dd,
1703 			"Got reset, requires re-init (unload and reload driver)\n");
1704 		dd->flags &= ~QIB_INITTED;  /* needs re-init */
1705 		/* mark as having had error */
1706 		*dd->devstatusp |= QIB_STATUS_HWERROR;
1707 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
1708 			if (dd->pport[pidx].link_speed_supported)
1709 				*dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1710 	}
1711 
1712 	if (*msg && iserr)
1713 		qib_dev_err(dd, "%s error\n", msg);
1714 
1715 	/*
1716 	 * If there were hdrq or egrfull errors, wake up any processes
1717 	 * waiting in poll.  We used to try to check which contexts had
1718 	 * the overflow, but given the cost of that and the chip reads
1719 	 * to support it, it's better to just wake everybody up if we
1720 	 * get an overflow; waiters can poll again if it's not them.
1721 	 */
1722 	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1723 		qib_handle_urcv(dd, ~0U);
1724 		if (errs & ERR_MASK(RcvEgrFullErr))
1725 			qib_stats.sps_buffull++;
1726 		else
1727 			qib_stats.sps_hdrfull++;
1728 	}
1729 
1730 done:
1731 	return;
1732 }
1733 
1734 static void qib_error_tasklet(unsigned long data)
1735 {
1736 	struct qib_devdata *dd = (struct qib_devdata *)data;
1737 
1738 	handle_7322_errors(dd);
1739 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1740 }
1741 
1742 static void reenable_chase(unsigned long opaque)
1743 {
1744 	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
1745 
1746 	ppd->cpspec->chase_timer.expires = 0;
1747 	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1748 		QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1749 }
1750 
1751 static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
1752 		u8 ibclt)
1753 {
1754 	ppd->cpspec->chase_end = 0;
1755 
1756 	if (!qib_chase)
1757 		return;
1758 
1759 	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1760 		QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1761 	ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1762 	add_timer(&ppd->cpspec->chase_timer);
1763 }
1764 
1765 static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1766 {
1767 	u8 ibclt;
1768 	unsigned long tnow;
1769 
1770 	ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1771 
1772 	/*
1773 	 * Detect and handle the state chase issue, where we can
1774 	 * get stuck if we are unlucky on timing on both sides of
1775 	 * the link.   If we are, we disable, set a timer, and
1776 	 * then re-enable.
1777 	 */
1778 	switch (ibclt) {
1779 	case IB_7322_LT_STATE_CFGRCVFCFG:
1780 	case IB_7322_LT_STATE_CFGWAITRMT:
1781 	case IB_7322_LT_STATE_TXREVLANES:
1782 	case IB_7322_LT_STATE_CFGENH:
1783 		tnow = jiffies;
1784 		if (ppd->cpspec->chase_end &&
1785 		     time_after(tnow, ppd->cpspec->chase_end))
1786 			disable_chase(ppd, tnow, ibclt);
1787 		else if (!ppd->cpspec->chase_end)
1788 			ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1789 		break;
1790 	default:
1791 		ppd->cpspec->chase_end = 0;
1792 		break;
1793 	}
1794 
1795 	if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1796 	      ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1797 	     ibclt == IB_7322_LT_STATE_LINKUP) &&
1798 	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1799 		force_h1(ppd);
1800 		ppd->cpspec->qdr_reforce = 1;
1801 		if (!ppd->dd->cspec->r1)
1802 			serdes_7322_los_enable(ppd, 0);
1803 	} else if (ppd->cpspec->qdr_reforce &&
1804 		(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1805 		 (ibclt == IB_7322_LT_STATE_CFGENH ||
1806 		ibclt == IB_7322_LT_STATE_CFGIDLE ||
1807 		ibclt == IB_7322_LT_STATE_LINKUP))
1808 		force_h1(ppd);
1809 
1810 	if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1811 	    ppd->link_speed_enabled == QIB_IB_QDR &&
1812 	    (ibclt == IB_7322_LT_STATE_CFGTEST ||
1813 	     ibclt == IB_7322_LT_STATE_CFGENH ||
1814 	     (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1815 	      ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1816 		adj_tx_serdes(ppd);
1817 
1818 	if (ibclt != IB_7322_LT_STATE_LINKUP) {
1819 		u8 ltstate = qib_7322_phys_portstate(ibcst);
1820 		u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1821 					  LinkTrainingState);
1822 		if (!ppd->dd->cspec->r1 &&
1823 		    pibclt == IB_7322_LT_STATE_LINKUP &&
1824 		    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1825 		    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1826 		    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1827 		    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1828 			/* If the link went down (but no into recovery,
1829 			 * turn LOS back on */
1830 			serdes_7322_los_enable(ppd, 1);
1831 		if (!ppd->cpspec->qdr_dfe_on &&
1832 		    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1833 			ppd->cpspec->qdr_dfe_on = 1;
1834 			ppd->cpspec->qdr_dfe_time = 0;
1835 			/* On link down, reenable QDR adaptation */
1836 			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1837 					    ppd->dd->cspec->r1 ?
1838 					    QDR_STATIC_ADAPT_DOWN_R1 :
1839 					    QDR_STATIC_ADAPT_DOWN);
1840 			pr_info(
1841 				"IB%u:%u re-enabled QDR adaptation ibclt %x\n",
1842 				ppd->dd->unit, ppd->port, ibclt);
1843 		}
1844 	}
1845 }
1846 
1847 static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1848 
1849 /*
1850  * This is per-pport error handling.
1851  * will likely get it's own MSIx interrupt (one for each port,
1852  * although just a single handler).
1853  */
1854 static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1855 {
1856 	char *msg;
1857 	u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1858 	struct qib_devdata *dd = ppd->dd;
1859 
1860 	/* do this as soon as possible */
1861 	fmask = qib_read_kreg64(dd, kr_act_fmask);
1862 	if (!fmask)
1863 		check_7322_rxe_status(ppd);
1864 
1865 	errs = qib_read_kreg_port(ppd, krp_errstatus);
1866 	if (!errs)
1867 		qib_devinfo(dd->pcidev,
1868 			 "Port%d error interrupt, but no error bits set!\n",
1869 			 ppd->port);
1870 	if (!fmask)
1871 		errs &= ~QIB_E_P_IBSTATUSCHANGED;
1872 	if (!errs)
1873 		goto done;
1874 
1875 	msg = ppd->cpspec->epmsgbuf;
1876 	*msg = '\0';
1877 
1878 	if (errs & ~QIB_E_P_BITSEXTANT) {
1879 		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1880 			   errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1881 		if (!*msg)
1882 			snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
1883 				 "no others");
1884 		qib_dev_porterr(dd, ppd->port,
1885 			"error interrupt with unknown errors 0x%016Lx set (and %s)\n",
1886 			(errs & ~QIB_E_P_BITSEXTANT), msg);
1887 		*msg = '\0';
1888 	}
1889 
1890 	if (errs & QIB_E_P_SHDR) {
1891 		u64 symptom;
1892 
1893 		/* determine cause, then write to clear */
1894 		symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1895 		qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1896 		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
1897 			   hdrchk_msgs);
1898 		*msg = '\0';
1899 		/* senderrbuf cleared in SPKTERRS below */
1900 	}
1901 
1902 	if (errs & QIB_E_P_SPKTERRS) {
1903 		if ((errs & QIB_E_P_LINK_PKTERRS) &&
1904 		    !(ppd->lflags & QIBL_LINKACTIVE)) {
1905 			/*
1906 			 * This can happen when trying to bring the link
1907 			 * up, but the IB link changes state at the "wrong"
1908 			 * time. The IB logic then complains that the packet
1909 			 * isn't valid.  We don't want to confuse people, so
1910 			 * we just don't print them, except at debug
1911 			 */
1912 			err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1913 				   (errs & QIB_E_P_LINK_PKTERRS),
1914 				   qib_7322p_error_msgs);
1915 			*msg = '\0';
1916 			ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1917 		}
1918 		qib_disarm_7322_senderrbufs(ppd);
1919 	} else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1920 		   !(ppd->lflags & QIBL_LINKACTIVE)) {
1921 		/*
1922 		 * This can happen when SMA is trying to bring the link
1923 		 * up, but the IB link changes state at the "wrong" time.
1924 		 * The IB logic then complains that the packet isn't
1925 		 * valid.  We don't want to confuse people, so we just
1926 		 * don't print them, except at debug
1927 		 */
1928 		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
1929 			   qib_7322p_error_msgs);
1930 		ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1931 		*msg = '\0';
1932 	}
1933 
1934 	qib_write_kreg_port(ppd, krp_errclear, errs);
1935 
1936 	errs &= ~ignore_this_time;
1937 	if (!errs)
1938 		goto done;
1939 
1940 	if (errs & QIB_E_P_RPKTERRS)
1941 		qib_stats.sps_rcverrs++;
1942 	if (errs & QIB_E_P_SPKTERRS)
1943 		qib_stats.sps_txerrs++;
1944 
1945 	iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1946 
1947 	if (errs & QIB_E_P_SDMAERRS)
1948 		sdma_7322_p_errors(ppd, errs);
1949 
1950 	if (errs & QIB_E_P_IBSTATUSCHANGED) {
1951 		u64 ibcs;
1952 		u8 ltstate;
1953 
1954 		ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1955 		ltstate = qib_7322_phys_portstate(ibcs);
1956 
1957 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1958 			handle_serdes_issues(ppd, ibcs);
1959 		if (!(ppd->cpspec->ibcctrl_a &
1960 		      SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1961 			/*
1962 			 * We got our interrupt, so init code should be
1963 			 * happy and not try alternatives. Now squelch
1964 			 * other "chatter" from link-negotiation (pre Init)
1965 			 */
1966 			ppd->cpspec->ibcctrl_a |=
1967 				SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1968 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
1969 					    ppd->cpspec->ibcctrl_a);
1970 		}
1971 
1972 		/* Update our picture of width and speed from chip */
1973 		ppd->link_width_active =
1974 			(ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1975 			    IB_WIDTH_4X : IB_WIDTH_1X;
1976 		ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1977 			LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1978 			  SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1979 				   QIB_IB_DDR : QIB_IB_SDR;
1980 
1981 		if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1982 		    IB_PHYSPORTSTATE_DISABLED)
1983 			qib_set_ib_7322_lstate(ppd, 0,
1984 			       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1985 		else
1986 			/*
1987 			 * Since going into a recovery state causes the link
1988 			 * state to go down and since recovery is transitory,
1989 			 * it is better if we "miss" ever seeing the link
1990 			 * training state go into recovery (i.e., ignore this
1991 			 * transition for link state special handling purposes)
1992 			 * without updating lastibcstat.
1993 			 */
1994 			if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1995 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1996 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1997 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1998 				qib_handle_e_ibstatuschanged(ppd, ibcs);
1999 	}
2000 	if (*msg && iserr)
2001 		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
2002 
2003 	if (ppd->state_wanted & ppd->lflags)
2004 		wake_up_interruptible(&ppd->state_wait);
2005 done:
2006 	return;
2007 }
2008 
2009 /* enable/disable chip from delivering interrupts */
2010 static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
2011 {
2012 	if (enable) {
2013 		if (dd->flags & QIB_BADINTR)
2014 			return;
2015 		qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
2016 		/* cause any pending enabled interrupts to be re-delivered */
2017 		qib_write_kreg(dd, kr_intclear, 0ULL);
2018 		if (dd->cspec->num_msix_entries) {
2019 			/* and same for MSIx */
2020 			u64 val = qib_read_kreg64(dd, kr_intgranted);
2021 
2022 			if (val)
2023 				qib_write_kreg(dd, kr_intgranted, val);
2024 		}
2025 	} else
2026 		qib_write_kreg(dd, kr_intmask, 0ULL);
2027 }
2028 
2029 /*
2030  * Try to cleanup as much as possible for anything that might have gone
2031  * wrong while in freeze mode, such as pio buffers being written by user
2032  * processes (causing armlaunch), send errors due to going into freeze mode,
2033  * etc., and try to avoid causing extra interrupts while doing so.
2034  * Forcibly update the in-memory pioavail register copies after cleanup
2035  * because the chip won't do it while in freeze mode (the register values
2036  * themselves are kept correct).
2037  * Make sure that we don't lose any important interrupts by using the chip
2038  * feature that says that writing 0 to a bit in *clear that is set in
2039  * *status will cause an interrupt to be generated again (if allowed by
2040  * the *mask value).
2041  * This is in chip-specific code because of all of the register accesses,
2042  * even though the details are similar on most chips.
2043  */
2044 static void qib_7322_clear_freeze(struct qib_devdata *dd)
2045 {
2046 	int pidx;
2047 
2048 	/* disable error interrupts, to avoid confusion */
2049 	qib_write_kreg(dd, kr_errmask, 0ULL);
2050 
2051 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
2052 		if (dd->pport[pidx].link_speed_supported)
2053 			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2054 					    0ULL);
2055 
2056 	/* also disable interrupts; errormask is sometimes overwriten */
2057 	qib_7322_set_intr_state(dd, 0);
2058 
2059 	/* clear the freeze, and be sure chip saw it */
2060 	qib_write_kreg(dd, kr_control, dd->control);
2061 	qib_read_kreg32(dd, kr_scratch);
2062 
2063 	/*
2064 	 * Force new interrupt if any hwerr, error or interrupt bits are
2065 	 * still set, and clear "safe" send packet errors related to freeze
2066 	 * and cancelling sends.  Re-enable error interrupts before possible
2067 	 * force of re-interrupt on pending interrupts.
2068 	 */
2069 	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2070 	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
2071 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2072 	/* We need to purge per-port errs and reset mask, too */
2073 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2074 		if (!dd->pport[pidx].link_speed_supported)
2075 			continue;
2076 		qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
2077 		qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
2078 	}
2079 	qib_7322_set_intr_state(dd, 1);
2080 }
2081 
2082 /* no error handling to speak of */
2083 /**
2084  * qib_7322_handle_hwerrors - display hardware errors.
2085  * @dd: the qlogic_ib device
2086  * @msg: the output buffer
2087  * @msgl: the size of the output buffer
2088  *
2089  * Use same msg buffer as regular errors to avoid excessive stack
2090  * use.  Most hardware errors are catastrophic, but for right now,
2091  * we'll print them and continue.  We reuse the same message buffer as
2092  * qib_handle_errors() to avoid excessive stack usage.
2093  */
2094 static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2095 				     size_t msgl)
2096 {
2097 	u64 hwerrs;
2098 	u32 ctrl;
2099 	int isfatal = 0;
2100 
2101 	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2102 	if (!hwerrs)
2103 		goto bail;
2104 	if (hwerrs == ~0ULL) {
2105 		qib_dev_err(dd,
2106 			"Read of hardware error status failed (all bits set); ignoring\n");
2107 		goto bail;
2108 	}
2109 	qib_stats.sps_hwerrs++;
2110 
2111 	/* Always clear the error status register, except BIST fail */
2112 	qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2113 		       ~HWE_MASK(PowerOnBISTFailed));
2114 
2115 	hwerrs &= dd->cspec->hwerrmask;
2116 
2117 	/* no EEPROM logging, yet */
2118 
2119 	if (hwerrs)
2120 		qib_devinfo(dd->pcidev,
2121 			"Hardware error: hwerr=0x%llx (cleared)\n",
2122 			(unsigned long long) hwerrs);
2123 
2124 	ctrl = qib_read_kreg32(dd, kr_control);
2125 	if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2126 		/*
2127 		 * No recovery yet...
2128 		 */
2129 		if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2130 		    dd->cspec->stay_in_freeze) {
2131 			/*
2132 			 * If any set that we aren't ignoring only make the
2133 			 * complaint once, in case it's stuck or recurring,
2134 			 * and we get here multiple times
2135 			 * Force link down, so switch knows, and
2136 			 * LEDs are turned off.
2137 			 */
2138 			if (dd->flags & QIB_INITTED)
2139 				isfatal = 1;
2140 		} else
2141 			qib_7322_clear_freeze(dd);
2142 	}
2143 
2144 	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2145 		isfatal = 1;
2146 		strlcpy(msg,
2147 			"[Memory BIST test failed, InfiniPath hardware unusable]",
2148 			msgl);
2149 		/* ignore from now on, so disable until driver reloaded */
2150 		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2151 		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2152 	}
2153 
2154 	err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2155 
2156 	/* Ignore esoteric PLL failures et al. */
2157 
2158 	qib_dev_err(dd, "%s hardware error\n", msg);
2159 
2160 	if (hwerrs &
2161 		   (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
2162 		    SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
2163 		int pidx = 0;
2164 		int err;
2165 		unsigned long flags;
2166 		struct qib_pportdata *ppd = dd->pport;
2167 
2168 		for (; pidx < dd->num_pports; ++pidx, ppd++) {
2169 			err = 0;
2170 			if (pidx == 0 && (hwerrs &
2171 				SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
2172 				err++;
2173 			if (pidx == 1 && (hwerrs &
2174 				SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
2175 				err++;
2176 			if (err) {
2177 				spin_lock_irqsave(&ppd->sdma_lock, flags);
2178 				dump_sdma_7322_state(ppd);
2179 				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
2180 			}
2181 		}
2182 	}
2183 
2184 	if (isfatal && !dd->diag_client) {
2185 		qib_dev_err(dd,
2186 			"Fatal Hardware Error, no longer usable, SN %.16s\n",
2187 			dd->serial);
2188 		/*
2189 		 * for /sys status file and user programs to print; if no
2190 		 * trailing brace is copied, we'll know it was truncated.
2191 		 */
2192 		if (dd->freezemsg)
2193 			snprintf(dd->freezemsg, dd->freezelen,
2194 				 "{%s}", msg);
2195 		qib_disable_after_error(dd);
2196 	}
2197 bail:;
2198 }
2199 
2200 /**
2201  * qib_7322_init_hwerrors - enable hardware errors
2202  * @dd: the qlogic_ib device
2203  *
2204  * now that we have finished initializing everything that might reasonably
2205  * cause a hardware error, and cleared those errors bits as they occur,
2206  * we can enable hardware errors in the mask (potentially enabling
2207  * freeze mode), and enable hardware errors as errors (along with
2208  * everything else) in errormask
2209  */
2210 static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2211 {
2212 	int pidx;
2213 	u64 extsval;
2214 
2215 	extsval = qib_read_kreg64(dd, kr_extstatus);
2216 	if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2217 			 QIB_EXTS_MEMBIST_ENDTEST)))
2218 		qib_dev_err(dd, "MemBIST did not complete!\n");
2219 
2220 	/* never clear BIST failure, so reported on each driver load */
2221 	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2222 	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2223 
2224 	/* clear all */
2225 	qib_write_kreg(dd, kr_errclear, ~0ULL);
2226 	/* enable errors that are masked, at least this first time. */
2227 	qib_write_kreg(dd, kr_errmask, ~0ULL);
2228 	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2229 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
2230 		if (dd->pport[pidx].link_speed_supported)
2231 			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2232 					    ~0ULL);
2233 }
2234 
2235 /*
2236  * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
2237  * on chips that are count-based, rather than trigger-based.  There is no
2238  * reference counting, but that's also fine, given the intended use.
2239  * Only chip-specific because it's all register accesses
2240  */
2241 static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2242 {
2243 	if (enable) {
2244 		qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2245 		dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2246 	} else
2247 		dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2248 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2249 }
2250 
2251 /*
2252  * Formerly took parameter <which> in pre-shifted,
2253  * pre-merged form with LinkCmd and LinkInitCmd
2254  * together, and assuming the zero was NOP.
2255  */
2256 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2257 				   u16 linitcmd)
2258 {
2259 	u64 mod_wd;
2260 	struct qib_devdata *dd = ppd->dd;
2261 	unsigned long flags;
2262 
2263 	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2264 		/*
2265 		 * If we are told to disable, note that so link-recovery
2266 		 * code does not attempt to bring us back up.
2267 		 * Also reset everything that we can, so we start
2268 		 * completely clean when re-enabled (before we
2269 		 * actually issue the disable to the IBC)
2270 		 */
2271 		qib_7322_mini_pcs_reset(ppd);
2272 		spin_lock_irqsave(&ppd->lflags_lock, flags);
2273 		ppd->lflags |= QIBL_IB_LINK_DISABLED;
2274 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2275 	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2276 		/*
2277 		 * Any other linkinitcmd will lead to LINKDOWN and then
2278 		 * to INIT (if all is well), so clear flag to let
2279 		 * link-recovery code attempt to bring us back up.
2280 		 */
2281 		spin_lock_irqsave(&ppd->lflags_lock, flags);
2282 		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2283 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2284 		/*
2285 		 * Clear status change interrupt reduction so the
2286 		 * new state is seen.
2287 		 */
2288 		ppd->cpspec->ibcctrl_a &=
2289 			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2290 	}
2291 
2292 	mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2293 		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2294 
2295 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2296 			    mod_wd);
2297 	/* write to chip to prevent back-to-back writes of ibc reg */
2298 	qib_write_kreg(dd, kr_scratch, 0);
2299 
2300 }
2301 
2302 /*
2303  * The total RCV buffer memory is 64KB, used for both ports, and is
2304  * in units of 64 bytes (same as IB flow control credit unit).
2305  * The consumedVL unit in the same registers are in 32 byte units!
2306  * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2307  * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2308  * in krp_rxcreditvl15, rather than 10.
2309  */
2310 #define RCV_BUF_UNITSZ 64
2311 #define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
2312 
2313 static void set_vls(struct qib_pportdata *ppd)
2314 {
2315 	int i, numvls, totcred, cred_vl, vl0extra;
2316 	struct qib_devdata *dd = ppd->dd;
2317 	u64 val;
2318 
2319 	numvls = qib_num_vls(ppd->vls_operational);
2320 
2321 	/*
2322 	 * Set up per-VL credits. Below is kluge based on these assumptions:
2323 	 * 1) port is disabled at the time early_init is called.
2324 	 * 2) give VL15 17 credits, for two max-plausible packets.
2325 	 * 3) Give VL0-N the rest, with any rounding excess used for VL0
2326 	 */
2327 	/* 2 VL15 packets @ 288 bytes each (including IB headers) */
2328 	totcred = NUM_RCV_BUF_UNITS(dd);
2329 	cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2330 	totcred -= cred_vl;
2331 	qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2332 	cred_vl = totcred / numvls;
2333 	vl0extra = totcred - cred_vl * numvls;
2334 	qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2335 	for (i = 1; i < numvls; i++)
2336 		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2337 	for (; i < 8; i++) /* no buffer space for other VLs */
2338 		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2339 
2340 	/* Notify IBC that credits need to be recalculated */
2341 	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2342 	val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2343 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2344 	qib_write_kreg(dd, kr_scratch, 0ULL);
2345 	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2346 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2347 
2348 	for (i = 0; i < numvls; i++)
2349 		val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2350 	val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2351 
2352 	/* Change the number of operational VLs */
2353 	ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2354 				~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2355 		((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2356 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2357 	qib_write_kreg(dd, kr_scratch, 0ULL);
2358 }
2359 
2360 /*
2361  * The code that deals with actual SerDes is in serdes_7322_init().
2362  * Compared to the code for iba7220, it is minimal.
2363  */
2364 static int serdes_7322_init(struct qib_pportdata *ppd);
2365 
2366 /**
2367  * qib_7322_bringup_serdes - bring up the serdes
2368  * @ppd: physical port on the qlogic_ib device
2369  */
2370 static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2371 {
2372 	struct qib_devdata *dd = ppd->dd;
2373 	u64 val, guid, ibc;
2374 	unsigned long flags;
2375 	int ret = 0;
2376 
2377 	/*
2378 	 * SerDes model not in Pd, but still need to
2379 	 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2380 	 * eventually.
2381 	 */
2382 	/* Put IBC in reset, sends disabled (should be in reset already) */
2383 	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2384 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2385 	qib_write_kreg(dd, kr_scratch, 0ULL);
2386 
2387 	/* ensure previous Tx parameters are not still forced */
2388 	qib_write_kreg_port(ppd, krp_tx_deemph_override,
2389 		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
2390 		reset_tx_deemphasis_override));
2391 
2392 	if (qib_compat_ddr_negotiate) {
2393 		ppd->cpspec->ibdeltainprog = 1;
2394 		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2395 						crp_ibsymbolerr);
2396 		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2397 						crp_iblinkerrrecov);
2398 	}
2399 
2400 	/* flowcontrolwatermark is in units of KBytes */
2401 	ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2402 	/*
2403 	 * Flow control is sent this often, even if no changes in
2404 	 * buffer space occur.  Units are 128ns for this chip.
2405 	 * Set to 3usec.
2406 	 */
2407 	ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
2408 	/* max error tolerance */
2409 	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2410 	/* IB credit flow control. */
2411 	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2412 	/*
2413 	 * set initial max size pkt IBC will send, including ICRC; it's the
2414 	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2415 	 */
2416 	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2417 		SYM_LSB(IBCCtrlA_0, MaxPktLen);
2418 	ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2419 
2420 	/*
2421 	 * Reset the PCS interface to the serdes (and also ibc, which is still
2422 	 * in reset from above).  Writes new value of ibcctrl_a as last step.
2423 	 */
2424 	qib_7322_mini_pcs_reset(ppd);
2425 
2426 	if (!ppd->cpspec->ibcctrl_b) {
2427 		unsigned lse = ppd->link_speed_enabled;
2428 
2429 		/*
2430 		 * Not on re-init after reset, establish shadow
2431 		 * and force initial config.
2432 		 */
2433 		ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2434 							     krp_ibcctrl_b);
2435 		ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2436 				IBA7322_IBC_SPEED_DDR |
2437 				IBA7322_IBC_SPEED_SDR |
2438 				IBA7322_IBC_WIDTH_AUTONEG |
2439 				SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2440 		if (lse & (lse - 1)) /* Muliple speeds enabled */
2441 			ppd->cpspec->ibcctrl_b |=
2442 				(lse << IBA7322_IBC_SPEED_LSB) |
2443 				IBA7322_IBC_IBTA_1_2_MASK |
2444 				IBA7322_IBC_MAX_SPEED_MASK;
2445 		else
2446 			ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2447 				IBA7322_IBC_SPEED_QDR |
2448 				 IBA7322_IBC_IBTA_1_2_MASK :
2449 				(lse == QIB_IB_DDR) ?
2450 					IBA7322_IBC_SPEED_DDR :
2451 					IBA7322_IBC_SPEED_SDR;
2452 		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2453 		    (IB_WIDTH_1X | IB_WIDTH_4X))
2454 			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2455 		else
2456 			ppd->cpspec->ibcctrl_b |=
2457 				ppd->link_width_enabled == IB_WIDTH_4X ?
2458 				IBA7322_IBC_WIDTH_4X_ONLY :
2459 				IBA7322_IBC_WIDTH_1X_ONLY;
2460 
2461 		/* always enable these on driver reload, not sticky */
2462 		ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2463 			IBA7322_IBC_HRTBT_MASK);
2464 	}
2465 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2466 
2467 	/* setup so we have more time at CFGTEST to change H1 */
2468 	val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2469 	val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2470 	val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2471 	qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2472 
2473 	serdes_7322_init(ppd);
2474 
2475 	guid = be64_to_cpu(ppd->guid);
2476 	if (!guid) {
2477 		if (dd->base_guid)
2478 			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2479 		ppd->guid = cpu_to_be64(guid);
2480 	}
2481 
2482 	qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2483 	/* write to chip to prevent back-to-back writes of ibc reg */
2484 	qib_write_kreg(dd, kr_scratch, 0);
2485 
2486 	/* Enable port */
2487 	ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2488 	set_vls(ppd);
2489 
2490 	/* initially come up DISABLED, without sending anything. */
2491 	val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2492 					QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2493 	qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2494 	qib_write_kreg(dd, kr_scratch, 0ULL);
2495 	/* clear the linkinit cmds */
2496 	ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2497 
2498 	/* be paranoid against later code motion, etc. */
2499 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2500 	ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2501 	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2502 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2503 
2504 	/* Also enable IBSTATUSCHG interrupt.  */
2505 	val = qib_read_kreg_port(ppd, krp_errmask);
2506 	qib_write_kreg_port(ppd, krp_errmask,
2507 		val | ERR_MASK_N(IBStatusChanged));
2508 
2509 	/* Always zero until we start messing with SerDes for real */
2510 	return ret;
2511 }
2512 
2513 /**
2514  * qib_7322_quiet_serdes - set serdes to txidle
2515  * @dd: the qlogic_ib device
2516  * Called when driver is being unloaded
2517  */
2518 static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2519 {
2520 	u64 val;
2521 	unsigned long flags;
2522 
2523 	qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2524 
2525 	spin_lock_irqsave(&ppd->lflags_lock, flags);
2526 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2527 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2528 	wake_up(&ppd->cpspec->autoneg_wait);
2529 	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2530 	if (ppd->dd->cspec->r1)
2531 		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2532 
2533 	ppd->cpspec->chase_end = 0;
2534 	if (ppd->cpspec->chase_timer.data) /* if initted */
2535 		del_timer_sync(&ppd->cpspec->chase_timer);
2536 
2537 	/*
2538 	 * Despite the name, actually disables IBC as well. Do it when
2539 	 * we are as sure as possible that no more packets can be
2540 	 * received, following the down and the PCS reset.
2541 	 * The actual disabling happens in qib_7322_mini_pci_reset(),
2542 	 * along with the PCS being reset.
2543 	 */
2544 	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2545 	qib_7322_mini_pcs_reset(ppd);
2546 
2547 	/*
2548 	 * Update the adjusted counters so the adjustment persists
2549 	 * across driver reload.
2550 	 */
2551 	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2552 	    ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2553 		struct qib_devdata *dd = ppd->dd;
2554 		u64 diagc;
2555 
2556 		/* enable counter writes */
2557 		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2558 		qib_write_kreg(dd, kr_hwdiagctrl,
2559 			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2560 
2561 		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2562 			val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
2563 			if (ppd->cpspec->ibdeltainprog)
2564 				val -= val - ppd->cpspec->ibsymsnap;
2565 			val -= ppd->cpspec->ibsymdelta;
2566 			write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2567 		}
2568 		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2569 			val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2570 			if (ppd->cpspec->ibdeltainprog)
2571 				val -= val - ppd->cpspec->iblnkerrsnap;
2572 			val -= ppd->cpspec->iblnkerrdelta;
2573 			write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2574 		}
2575 		if (ppd->cpspec->iblnkdowndelta) {
2576 			val = read_7322_creg32_port(ppd, crp_iblinkdown);
2577 			val += ppd->cpspec->iblnkdowndelta;
2578 			write_7322_creg_port(ppd, crp_iblinkdown, val);
2579 		}
2580 		/*
2581 		 * No need to save ibmalfdelta since IB perfcounters
2582 		 * are cleared on driver reload.
2583 		 */
2584 
2585 		/* and disable counter writes */
2586 		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2587 	}
2588 }
2589 
2590 /**
2591  * qib_setup_7322_setextled - set the state of the two external LEDs
2592  * @ppd: physical port on the qlogic_ib device
2593  * @on: whether the link is up or not
2594  *
2595  * The exact combo of LEDs if on is true is determined by looking
2596  * at the ibcstatus.
2597  *
2598  * These LEDs indicate the physical and logical state of IB link.
2599  * For this chip (at least with recommended board pinouts), LED1
2600  * is Yellow (logical state) and LED2 is Green (physical state),
2601  *
2602  * Note:  We try to match the Mellanox HCA LED behavior as best
2603  * we can.  Green indicates physical link state is OK (something is
2604  * plugged in, and we can train).
2605  * Amber indicates the link is logically up (ACTIVE).
2606  * Mellanox further blinks the amber LED to indicate data packet
2607  * activity, but we have no hardware support for that, so it would
2608  * require waking up every 10-20 msecs and checking the counters
2609  * on the chip, and then turning the LED off if appropriate.  That's
2610  * visible overhead, so not something we will do.
2611  */
2612 static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2613 {
2614 	struct qib_devdata *dd = ppd->dd;
2615 	u64 extctl, ledblink = 0, val;
2616 	unsigned long flags;
2617 	int yel, grn;
2618 
2619 	/*
2620 	 * The diags use the LED to indicate diag info, so we leave
2621 	 * the external LED alone when the diags are running.
2622 	 */
2623 	if (dd->diag_client)
2624 		return;
2625 
2626 	/* Allow override of LED display for, e.g. Locating system in rack */
2627 	if (ppd->led_override) {
2628 		grn = (ppd->led_override & QIB_LED_PHYS);
2629 		yel = (ppd->led_override & QIB_LED_LOG);
2630 	} else if (on) {
2631 		val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2632 		grn = qib_7322_phys_portstate(val) ==
2633 			IB_PHYSPORTSTATE_LINKUP;
2634 		yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2635 	} else {
2636 		grn = 0;
2637 		yel = 0;
2638 	}
2639 
2640 	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2641 	extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2642 		~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2643 	if (grn) {
2644 		extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2645 		/*
2646 		 * Counts are in chip clock (4ns) periods.
2647 		 * This is 1/16 sec (66.6ms) on,
2648 		 * 3/16 sec (187.5 ms) off, with packets rcvd.
2649 		 */
2650 		ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2651 			((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
2652 	}
2653 	if (yel)
2654 		extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2655 	dd->cspec->extctrl = extctl;
2656 	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2657 	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2658 
2659 	if (ledblink) /* blink the LED on packet receive */
2660 		qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2661 }
2662 
2663 #ifdef CONFIG_INFINIBAND_QIB_DCA
2664 
2665 static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
2666 {
2667 	switch (event) {
2668 	case DCA_PROVIDER_ADD:
2669 		if (dd->flags & QIB_DCA_ENABLED)
2670 			break;
2671 		if (!dca_add_requester(&dd->pcidev->dev)) {
2672 			qib_devinfo(dd->pcidev, "DCA enabled\n");
2673 			dd->flags |= QIB_DCA_ENABLED;
2674 			qib_setup_dca(dd);
2675 		}
2676 		break;
2677 	case DCA_PROVIDER_REMOVE:
2678 		if (dd->flags & QIB_DCA_ENABLED) {
2679 			dca_remove_requester(&dd->pcidev->dev);
2680 			dd->flags &= ~QIB_DCA_ENABLED;
2681 			dd->cspec->dca_ctrl = 0;
2682 			qib_write_kreg(dd, KREG_IDX(DCACtrlA),
2683 				dd->cspec->dca_ctrl);
2684 		}
2685 		break;
2686 	}
2687 	return 0;
2688 }
2689 
2690 static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
2691 {
2692 	struct qib_devdata *dd = rcd->dd;
2693 	struct qib_chip_specific *cspec = dd->cspec;
2694 
2695 	if (!(dd->flags & QIB_DCA_ENABLED))
2696 		return;
2697 	if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
2698 		const struct dca_reg_map *rmp;
2699 
2700 		cspec->rhdr_cpu[rcd->ctxt] = cpu;
2701 		rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
2702 		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
2703 		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
2704 			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
2705 		qib_devinfo(dd->pcidev,
2706 			"Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
2707 			(long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2708 		qib_write_kreg(dd, rmp->regno,
2709 			       cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2710 		cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
2711 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2712 	}
2713 }
2714 
2715 static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
2716 {
2717 	struct qib_devdata *dd = ppd->dd;
2718 	struct qib_chip_specific *cspec = dd->cspec;
2719 	unsigned pidx = ppd->port - 1;
2720 
2721 	if (!(dd->flags & QIB_DCA_ENABLED))
2722 		return;
2723 	if (cspec->sdma_cpu[pidx] != cpu) {
2724 		cspec->sdma_cpu[pidx] = cpu;
2725 		cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2726 			SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
2727 			SYM_MASK(DCACtrlF, SendDma0DCAOPH));
2728 		cspec->dca_rcvhdr_ctrl[4] |=
2729 			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
2730 				(ppd->hw_pidx ?
2731 					SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
2732 					SYM_LSB(DCACtrlF, SendDma0DCAOPH));
2733 		qib_devinfo(dd->pcidev,
2734 			"sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
2735 			(long long) cspec->dca_rcvhdr_ctrl[4]);
2736 		qib_write_kreg(dd, KREG_IDX(DCACtrlF),
2737 			       cspec->dca_rcvhdr_ctrl[4]);
2738 		cspec->dca_ctrl |= ppd->hw_pidx ?
2739 			SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
2740 			SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
2741 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2742 	}
2743 }
2744 
2745 static void qib_setup_dca(struct qib_devdata *dd)
2746 {
2747 	struct qib_chip_specific *cspec = dd->cspec;
2748 	int i;
2749 
2750 	for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
2751 		cspec->rhdr_cpu[i] = -1;
2752 	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2753 		cspec->sdma_cpu[i] = -1;
2754 	cspec->dca_rcvhdr_ctrl[0] =
2755 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
2756 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
2757 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
2758 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
2759 	cspec->dca_rcvhdr_ctrl[1] =
2760 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
2761 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
2762 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
2763 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
2764 	cspec->dca_rcvhdr_ctrl[2] =
2765 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
2766 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
2767 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
2768 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
2769 	cspec->dca_rcvhdr_ctrl[3] =
2770 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
2771 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
2772 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
2773 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
2774 	cspec->dca_rcvhdr_ctrl[4] =
2775 		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
2776 		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
2777 	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2778 		qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
2779 			       cspec->dca_rcvhdr_ctrl[i]);
2780 	for (i = 0; i < cspec->num_msix_entries; i++)
2781 		setup_dca_notifier(dd, &cspec->msix_entries[i]);
2782 }
2783 
2784 static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
2785 			     const cpumask_t *mask)
2786 {
2787 	struct qib_irq_notify *n =
2788 		container_of(notify, struct qib_irq_notify, notify);
2789 	int cpu = cpumask_first(mask);
2790 
2791 	if (n->rcv) {
2792 		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2793 
2794 		qib_update_rhdrq_dca(rcd, cpu);
2795 	} else {
2796 		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2797 
2798 		qib_update_sdma_dca(ppd, cpu);
2799 	}
2800 }
2801 
2802 static void qib_irq_notifier_release(struct kref *ref)
2803 {
2804 	struct qib_irq_notify *n =
2805 		container_of(ref, struct qib_irq_notify, notify.kref);
2806 	struct qib_devdata *dd;
2807 
2808 	if (n->rcv) {
2809 		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2810 
2811 		dd = rcd->dd;
2812 	} else {
2813 		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2814 
2815 		dd = ppd->dd;
2816 	}
2817 	qib_devinfo(dd->pcidev,
2818 		"release on HCA notify 0x%p n 0x%p\n", ref, n);
2819 	kfree(n);
2820 }
2821 #endif
2822 
2823 /*
2824  * Disable MSIx interrupt if enabled, call generic MSIx code
2825  * to cleanup, and clear pending MSIx interrupts.
2826  * Used for fallback to INTx, after reset, and when MSIx setup fails.
2827  */
2828 static void qib_7322_nomsix(struct qib_devdata *dd)
2829 {
2830 	u64 intgranted;
2831 	int n;
2832 
2833 	dd->cspec->main_int_mask = ~0ULL;
2834 	n = dd->cspec->num_msix_entries;
2835 	if (n) {
2836 		int i;
2837 
2838 		dd->cspec->num_msix_entries = 0;
2839 		for (i = 0; i < n; i++) {
2840 #ifdef CONFIG_INFINIBAND_QIB_DCA
2841 			reset_dca_notifier(dd, &dd->cspec->msix_entries[i]);
2842 #endif
2843 			irq_set_affinity_hint(
2844 			  dd->cspec->msix_entries[i].msix.vector, NULL);
2845 			free_cpumask_var(dd->cspec->msix_entries[i].mask);
2846 			free_irq(dd->cspec->msix_entries[i].msix.vector,
2847 			   dd->cspec->msix_entries[i].arg);
2848 		}
2849 		qib_nomsix(dd);
2850 	}
2851 	/* make sure no MSIx interrupts are left pending */
2852 	intgranted = qib_read_kreg64(dd, kr_intgranted);
2853 	if (intgranted)
2854 		qib_write_kreg(dd, kr_intgranted, intgranted);
2855 }
2856 
2857 static void qib_7322_free_irq(struct qib_devdata *dd)
2858 {
2859 	if (dd->cspec->irq) {
2860 		free_irq(dd->cspec->irq, dd);
2861 		dd->cspec->irq = 0;
2862 	}
2863 	qib_7322_nomsix(dd);
2864 }
2865 
2866 static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2867 {
2868 	int i;
2869 
2870 #ifdef CONFIG_INFINIBAND_QIB_DCA
2871 	if (dd->flags & QIB_DCA_ENABLED) {
2872 		dca_remove_requester(&dd->pcidev->dev);
2873 		dd->flags &= ~QIB_DCA_ENABLED;
2874 		dd->cspec->dca_ctrl = 0;
2875 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
2876 	}
2877 #endif
2878 
2879 	qib_7322_free_irq(dd);
2880 	kfree(dd->cspec->cntrs);
2881 	kfree(dd->cspec->sendchkenable);
2882 	kfree(dd->cspec->sendgrhchk);
2883 	kfree(dd->cspec->sendibchk);
2884 	kfree(dd->cspec->msix_entries);
2885 	for (i = 0; i < dd->num_pports; i++) {
2886 		unsigned long flags;
2887 		u32 mask = QSFP_GPIO_MOD_PRS_N |
2888 			(QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2889 
2890 		kfree(dd->pport[i].cpspec->portcntrs);
2891 		if (dd->flags & QIB_HAS_QSFP) {
2892 			spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2893 			dd->cspec->gpio_mask &= ~mask;
2894 			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2895 			spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2896 			qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
2897 		}
2898 	}
2899 }
2900 
2901 /* handle SDMA interrupts */
2902 static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2903 {
2904 	struct qib_pportdata *ppd0 = &dd->pport[0];
2905 	struct qib_pportdata *ppd1 = &dd->pport[1];
2906 	u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2907 		INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2908 	u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2909 		INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2910 
2911 	if (intr0)
2912 		qib_sdma_intr(ppd0);
2913 	if (intr1)
2914 		qib_sdma_intr(ppd1);
2915 
2916 	if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2917 		qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2918 	if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2919 		qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2920 }
2921 
2922 /*
2923  * Set or clear the Send buffer available interrupt enable bit.
2924  */
2925 static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2926 {
2927 	unsigned long flags;
2928 
2929 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
2930 	if (needint)
2931 		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2932 	else
2933 		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2934 	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2935 	qib_write_kreg(dd, kr_scratch, 0ULL);
2936 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2937 }
2938 
2939 /*
2940  * Somehow got an interrupt with reserved bits set in interrupt status.
2941  * Print a message so we know it happened, then clear them.
2942  * keep mainline interrupt handler cache-friendly
2943  */
2944 static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2945 {
2946 	u64 kills;
2947 	char msg[128];
2948 
2949 	kills = istat & ~QIB_I_BITSEXTANT;
2950 	qib_dev_err(dd,
2951 		"Clearing reserved interrupt(s) 0x%016llx: %s\n",
2952 		(unsigned long long) kills, msg);
2953 	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2954 }
2955 
2956 /* keep mainline interrupt handler cache-friendly */
2957 static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2958 {
2959 	u32 gpiostatus;
2960 	int handled = 0;
2961 	int pidx;
2962 
2963 	/*
2964 	 * Boards for this chip currently don't use GPIO interrupts,
2965 	 * so clear by writing GPIOstatus to GPIOclear, and complain
2966 	 * to developer.  To avoid endless repeats, clear
2967 	 * the bits in the mask, since there is some kind of
2968 	 * programming error or chip problem.
2969 	 */
2970 	gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2971 	/*
2972 	 * In theory, writing GPIOstatus to GPIOclear could
2973 	 * have a bad side-effect on some diagnostic that wanted
2974 	 * to poll for a status-change, but the various shadows
2975 	 * make that problematic at best. Diags will just suppress
2976 	 * all GPIO interrupts during such tests.
2977 	 */
2978 	qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2979 	/*
2980 	 * Check for QSFP MOD_PRS changes
2981 	 * only works for single port if IB1 != pidx1
2982 	 */
2983 	for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2984 	     ++pidx) {
2985 		struct qib_pportdata *ppd;
2986 		struct qib_qsfp_data *qd;
2987 		u32 mask;
2988 
2989 		if (!dd->pport[pidx].link_speed_supported)
2990 			continue;
2991 		mask = QSFP_GPIO_MOD_PRS_N;
2992 		ppd = dd->pport + pidx;
2993 		mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2994 		if (gpiostatus & dd->cspec->gpio_mask & mask) {
2995 			u64 pins;
2996 
2997 			qd = &ppd->cpspec->qsfp_data;
2998 			gpiostatus &= ~mask;
2999 			pins = qib_read_kreg64(dd, kr_extstatus);
3000 			pins >>= SYM_LSB(EXTStatus, GPIOIn);
3001 			if (!(pins & mask)) {
3002 				++handled;
3003 				qd->t_insert = jiffies;
3004 				queue_work(ib_wq, &qd->work);
3005 			}
3006 		}
3007 	}
3008 
3009 	if (gpiostatus && !handled) {
3010 		const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
3011 		u32 gpio_irq = mask & gpiostatus;
3012 
3013 		/*
3014 		 * Clear any troublemakers, and update chip from shadow
3015 		 */
3016 		dd->cspec->gpio_mask &= ~gpio_irq;
3017 		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
3018 	}
3019 }
3020 
3021 /*
3022  * Handle errors and unusual events first, separate function
3023  * to improve cache hits for fast path interrupt handling.
3024  */
3025 static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
3026 {
3027 	if (istat & ~QIB_I_BITSEXTANT)
3028 		unknown_7322_ibits(dd, istat);
3029 	if (istat & QIB_I_GPIO)
3030 		unknown_7322_gpio_intr(dd);
3031 	if (istat & QIB_I_C_ERROR) {
3032 		qib_write_kreg(dd, kr_errmask, 0ULL);
3033 		tasklet_schedule(&dd->error_tasklet);
3034 	}
3035 	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
3036 		handle_7322_p_errors(dd->rcd[0]->ppd);
3037 	if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
3038 		handle_7322_p_errors(dd->rcd[1]->ppd);
3039 }
3040 
3041 /*
3042  * Dynamically adjust the rcv int timeout for a context based on incoming
3043  * packet rate.
3044  */
3045 static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
3046 {
3047 	struct qib_devdata *dd = rcd->dd;
3048 	u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
3049 
3050 	/*
3051 	 * Dynamically adjust idle timeout on chip
3052 	 * based on number of packets processed.
3053 	 */
3054 	if (npkts < rcv_int_count && timeout > 2)
3055 		timeout >>= 1;
3056 	else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
3057 		timeout = min(timeout << 1, rcv_int_timeout);
3058 	else
3059 		return;
3060 
3061 	dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
3062 	qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
3063 }
3064 
3065 /*
3066  * This is the main interrupt handler.
3067  * It will normally only be used for low frequency interrupts but may
3068  * have to handle all interrupts if INTx is enabled or fewer than normal
3069  * MSIx interrupts were allocated.
3070  * This routine should ignore the interrupt bits for any of the
3071  * dedicated MSIx handlers.
3072  */
3073 static irqreturn_t qib_7322intr(int irq, void *data)
3074 {
3075 	struct qib_devdata *dd = data;
3076 	irqreturn_t ret;
3077 	u64 istat;
3078 	u64 ctxtrbits;
3079 	u64 rmask;
3080 	unsigned i;
3081 	u32 npkts;
3082 
3083 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
3084 		/*
3085 		 * This return value is not great, but we do not want the
3086 		 * interrupt core code to remove our interrupt handler
3087 		 * because we don't appear to be handling an interrupt
3088 		 * during a chip reset.
3089 		 */
3090 		ret = IRQ_HANDLED;
3091 		goto bail;
3092 	}
3093 
3094 	istat = qib_read_kreg64(dd, kr_intstatus);
3095 
3096 	if (unlikely(istat == ~0ULL)) {
3097 		qib_bad_intrstatus(dd);
3098 		qib_dev_err(dd, "Interrupt status all f's, skipping\n");
3099 		/* don't know if it was our interrupt or not */
3100 		ret = IRQ_NONE;
3101 		goto bail;
3102 	}
3103 
3104 	istat &= dd->cspec->main_int_mask;
3105 	if (unlikely(!istat)) {
3106 		/* already handled, or shared and not us */
3107 		ret = IRQ_NONE;
3108 		goto bail;
3109 	}
3110 
3111 	this_cpu_inc(*dd->int_counter);
3112 
3113 	/* handle "errors" of various kinds first, device ahead of port */
3114 	if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
3115 			      QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
3116 			      INT_MASK_P(Err, 1))))
3117 		unlikely_7322_intr(dd, istat);
3118 
3119 	/*
3120 	 * Clear the interrupt bits we found set, relatively early, so we
3121 	 * "know" know the chip will have seen this by the time we process
3122 	 * the queue, and will re-interrupt if necessary.  The processor
3123 	 * itself won't take the interrupt again until we return.
3124 	 */
3125 	qib_write_kreg(dd, kr_intclear, istat);
3126 
3127 	/*
3128 	 * Handle kernel receive queues before checking for pio buffers
3129 	 * available since receives can overflow; piobuf waiters can afford
3130 	 * a few extra cycles, since they were waiting anyway.
3131 	 */
3132 	ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
3133 	if (ctxtrbits) {
3134 		rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
3135 			(1ULL << QIB_I_RCVURG_LSB);
3136 		for (i = 0; i < dd->first_user_ctxt; i++) {
3137 			if (ctxtrbits & rmask) {
3138 				ctxtrbits &= ~rmask;
3139 				if (dd->rcd[i])
3140 					qib_kreceive(dd->rcd[i], NULL, &npkts);
3141 			}
3142 			rmask <<= 1;
3143 		}
3144 		if (ctxtrbits) {
3145 			ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
3146 				(ctxtrbits >> QIB_I_RCVURG_LSB);
3147 			qib_handle_urcv(dd, ctxtrbits);
3148 		}
3149 	}
3150 
3151 	if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
3152 		sdma_7322_intr(dd, istat);
3153 
3154 	if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
3155 		qib_ib_piobufavail(dd);
3156 
3157 	ret = IRQ_HANDLED;
3158 bail:
3159 	return ret;
3160 }
3161 
3162 /*
3163  * Dedicated receive packet available interrupt handler.
3164  */
3165 static irqreturn_t qib_7322pintr(int irq, void *data)
3166 {
3167 	struct qib_ctxtdata *rcd = data;
3168 	struct qib_devdata *dd = rcd->dd;
3169 	u32 npkts;
3170 
3171 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3172 		/*
3173 		 * This return value is not great, but we do not want the
3174 		 * interrupt core code to remove our interrupt handler
3175 		 * because we don't appear to be handling an interrupt
3176 		 * during a chip reset.
3177 		 */
3178 		return IRQ_HANDLED;
3179 
3180 	this_cpu_inc(*dd->int_counter);
3181 
3182 	/* Clear the interrupt bit we expect to be set. */
3183 	qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
3184 		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
3185 
3186 	qib_kreceive(rcd, NULL, &npkts);
3187 
3188 	return IRQ_HANDLED;
3189 }
3190 
3191 /*
3192  * Dedicated Send buffer available interrupt handler.
3193  */
3194 static irqreturn_t qib_7322bufavail(int irq, void *data)
3195 {
3196 	struct qib_devdata *dd = data;
3197 
3198 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3199 		/*
3200 		 * This return value is not great, but we do not want the
3201 		 * interrupt core code to remove our interrupt handler
3202 		 * because we don't appear to be handling an interrupt
3203 		 * during a chip reset.
3204 		 */
3205 		return IRQ_HANDLED;
3206 
3207 	this_cpu_inc(*dd->int_counter);
3208 
3209 	/* Clear the interrupt bit we expect to be set. */
3210 	qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
3211 
3212 	/* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
3213 	if (dd->flags & QIB_INITTED)
3214 		qib_ib_piobufavail(dd);
3215 	else
3216 		qib_wantpiobuf_7322_intr(dd, 0);
3217 
3218 	return IRQ_HANDLED;
3219 }
3220 
3221 /*
3222  * Dedicated Send DMA interrupt handler.
3223  */
3224 static irqreturn_t sdma_intr(int irq, void *data)
3225 {
3226 	struct qib_pportdata *ppd = data;
3227 	struct qib_devdata *dd = ppd->dd;
3228 
3229 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3230 		/*
3231 		 * This return value is not great, but we do not want the
3232 		 * interrupt core code to remove our interrupt handler
3233 		 * because we don't appear to be handling an interrupt
3234 		 * during a chip reset.
3235 		 */
3236 		return IRQ_HANDLED;
3237 
3238 	this_cpu_inc(*dd->int_counter);
3239 
3240 	/* Clear the interrupt bit we expect to be set. */
3241 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3242 		       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
3243 	qib_sdma_intr(ppd);
3244 
3245 	return IRQ_HANDLED;
3246 }
3247 
3248 /*
3249  * Dedicated Send DMA idle interrupt handler.
3250  */
3251 static irqreturn_t sdma_idle_intr(int irq, void *data)
3252 {
3253 	struct qib_pportdata *ppd = data;
3254 	struct qib_devdata *dd = ppd->dd;
3255 
3256 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3257 		/*
3258 		 * This return value is not great, but we do not want the
3259 		 * interrupt core code to remove our interrupt handler
3260 		 * because we don't appear to be handling an interrupt
3261 		 * during a chip reset.
3262 		 */
3263 		return IRQ_HANDLED;
3264 
3265 	this_cpu_inc(*dd->int_counter);
3266 
3267 	/* Clear the interrupt bit we expect to be set. */
3268 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3269 		       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
3270 	qib_sdma_intr(ppd);
3271 
3272 	return IRQ_HANDLED;
3273 }
3274 
3275 /*
3276  * Dedicated Send DMA progress interrupt handler.
3277  */
3278 static irqreturn_t sdma_progress_intr(int irq, void *data)
3279 {
3280 	struct qib_pportdata *ppd = data;
3281 	struct qib_devdata *dd = ppd->dd;
3282 
3283 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3284 		/*
3285 		 * This return value is not great, but we do not want the
3286 		 * interrupt core code to remove our interrupt handler
3287 		 * because we don't appear to be handling an interrupt
3288 		 * during a chip reset.
3289 		 */
3290 		return IRQ_HANDLED;
3291 
3292 	this_cpu_inc(*dd->int_counter);
3293 
3294 	/* Clear the interrupt bit we expect to be set. */
3295 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3296 		       INT_MASK_P(SDmaProgress, 1) :
3297 		       INT_MASK_P(SDmaProgress, 0));
3298 	qib_sdma_intr(ppd);
3299 
3300 	return IRQ_HANDLED;
3301 }
3302 
3303 /*
3304  * Dedicated Send DMA cleanup interrupt handler.
3305  */
3306 static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3307 {
3308 	struct qib_pportdata *ppd = data;
3309 	struct qib_devdata *dd = ppd->dd;
3310 
3311 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3312 		/*
3313 		 * This return value is not great, but we do not want the
3314 		 * interrupt core code to remove our interrupt handler
3315 		 * because we don't appear to be handling an interrupt
3316 		 * during a chip reset.
3317 		 */
3318 		return IRQ_HANDLED;
3319 
3320 	this_cpu_inc(*dd->int_counter);
3321 
3322 	/* Clear the interrupt bit we expect to be set. */
3323 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3324 		       INT_MASK_PM(SDmaCleanupDone, 1) :
3325 		       INT_MASK_PM(SDmaCleanupDone, 0));
3326 	qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3327 
3328 	return IRQ_HANDLED;
3329 }
3330 
3331 #ifdef CONFIG_INFINIBAND_QIB_DCA
3332 
3333 static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
3334 {
3335 	if (!m->dca)
3336 		return;
3337 	qib_devinfo(dd->pcidev,
3338 		"Disabling notifier on HCA %d irq %d\n",
3339 		dd->unit,
3340 		m->msix.vector);
3341 	irq_set_affinity_notifier(
3342 		m->msix.vector,
3343 		NULL);
3344 	m->notifier = NULL;
3345 }
3346 
3347 static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
3348 {
3349 	struct qib_irq_notify *n;
3350 
3351 	if (!m->dca)
3352 		return;
3353 	n = kzalloc(sizeof(*n), GFP_KERNEL);
3354 	if (n) {
3355 		int ret;
3356 
3357 		m->notifier = n;
3358 		n->notify.irq = m->msix.vector;
3359 		n->notify.notify = qib_irq_notifier_notify;
3360 		n->notify.release = qib_irq_notifier_release;
3361 		n->arg = m->arg;
3362 		n->rcv = m->rcv;
3363 		qib_devinfo(dd->pcidev,
3364 			"set notifier irq %d rcv %d notify %p\n",
3365 			n->notify.irq, n->rcv, &n->notify);
3366 		ret = irq_set_affinity_notifier(
3367 				n->notify.irq,
3368 				&n->notify);
3369 		if (ret) {
3370 			m->notifier = NULL;
3371 			kfree(n);
3372 		}
3373 	}
3374 }
3375 
3376 #endif
3377 
3378 /*
3379  * Set up our chip-specific interrupt handler.
3380  * The interrupt type has already been setup, so
3381  * we just need to do the registration and error checking.
3382  * If we are using MSIx interrupts, we may fall back to
3383  * INTx later, if the interrupt handler doesn't get called
3384  * within 1/2 second (see verify_interrupt()).
3385  */
3386 static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3387 {
3388 	int ret, i, msixnum;
3389 	u64 redirect[6];
3390 	u64 mask;
3391 	const struct cpumask *local_mask;
3392 	int firstcpu, secondcpu = 0, currrcvcpu = 0;
3393 
3394 	if (!dd->num_pports)
3395 		return;
3396 
3397 	if (clearpend) {
3398 		/*
3399 		 * if not switching interrupt types, be sure interrupts are
3400 		 * disabled, and then clear anything pending at this point,
3401 		 * because we are starting clean.
3402 		 */
3403 		qib_7322_set_intr_state(dd, 0);
3404 
3405 		/* clear the reset error, init error/hwerror mask */
3406 		qib_7322_init_hwerrors(dd);
3407 
3408 		/* clear any interrupt bits that might be set */
3409 		qib_write_kreg(dd, kr_intclear, ~0ULL);
3410 
3411 		/* make sure no pending MSIx intr, and clear diag reg */
3412 		qib_write_kreg(dd, kr_intgranted, ~0ULL);
3413 		qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3414 	}
3415 
3416 	if (!dd->cspec->num_msix_entries) {
3417 		/* Try to get INTx interrupt */
3418 try_intx:
3419 		if (!dd->pcidev->irq) {
3420 			qib_dev_err(dd,
3421 				"irq is 0, BIOS error?  Interrupts won't work\n");
3422 			goto bail;
3423 		}
3424 		ret = request_irq(dd->pcidev->irq, qib_7322intr,
3425 				  IRQF_SHARED, QIB_DRV_NAME, dd);
3426 		if (ret) {
3427 			qib_dev_err(dd,
3428 				"Couldn't setup INTx interrupt (irq=%d): %d\n",
3429 				dd->pcidev->irq, ret);
3430 			goto bail;
3431 		}
3432 		dd->cspec->irq = dd->pcidev->irq;
3433 		dd->cspec->main_int_mask = ~0ULL;
3434 		goto bail;
3435 	}
3436 
3437 	/* Try to get MSIx interrupts */
3438 	memset(redirect, 0, sizeof(redirect));
3439 	mask = ~0ULL;
3440 	msixnum = 0;
3441 	local_mask = cpumask_of_pcibus(dd->pcidev->bus);
3442 	firstcpu = cpumask_first(local_mask);
3443 	if (firstcpu >= nr_cpu_ids ||
3444 			cpumask_weight(local_mask) == num_online_cpus()) {
3445 		local_mask = topology_core_cpumask(0);
3446 		firstcpu = cpumask_first(local_mask);
3447 	}
3448 	if (firstcpu < nr_cpu_ids) {
3449 		secondcpu = cpumask_next(firstcpu, local_mask);
3450 		if (secondcpu >= nr_cpu_ids)
3451 			secondcpu = firstcpu;
3452 		currrcvcpu = secondcpu;
3453 	}
3454 	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3455 		irq_handler_t handler;
3456 		void *arg;
3457 		u64 val;
3458 		int lsb, reg, sh;
3459 #ifdef CONFIG_INFINIBAND_QIB_DCA
3460 		int dca = 0;
3461 #endif
3462 
3463 		dd->cspec->msix_entries[msixnum].
3464 			name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
3465 			= '\0';
3466 		if (i < ARRAY_SIZE(irq_table)) {
3467 			if (irq_table[i].port) {
3468 				/* skip if for a non-configured port */
3469 				if (irq_table[i].port > dd->num_pports)
3470 					continue;
3471 				arg = dd->pport + irq_table[i].port - 1;
3472 			} else
3473 				arg = dd;
3474 #ifdef CONFIG_INFINIBAND_QIB_DCA
3475 			dca = irq_table[i].dca;
3476 #endif
3477 			lsb = irq_table[i].lsb;
3478 			handler = irq_table[i].handler;
3479 			snprintf(dd->cspec->msix_entries[msixnum].name,
3480 				sizeof(dd->cspec->msix_entries[msixnum].name)
3481 				 - 1,
3482 				QIB_DRV_NAME "%d%s", dd->unit,
3483 				irq_table[i].name);
3484 		} else {
3485 			unsigned ctxt;
3486 
3487 			ctxt = i - ARRAY_SIZE(irq_table);
3488 			/* per krcvq context receive interrupt */
3489 			arg = dd->rcd[ctxt];
3490 			if (!arg)
3491 				continue;
3492 			if (qib_krcvq01_no_msi && ctxt < 2)
3493 				continue;
3494 #ifdef CONFIG_INFINIBAND_QIB_DCA
3495 			dca = 1;
3496 #endif
3497 			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3498 			handler = qib_7322pintr;
3499 			snprintf(dd->cspec->msix_entries[msixnum].name,
3500 				sizeof(dd->cspec->msix_entries[msixnum].name)
3501 				 - 1,
3502 				QIB_DRV_NAME "%d (kctx)", dd->unit);
3503 		}
3504 		ret = request_irq(
3505 			dd->cspec->msix_entries[msixnum].msix.vector,
3506 			handler, 0, dd->cspec->msix_entries[msixnum].name,
3507 			arg);
3508 		if (ret) {
3509 			/*
3510 			 * Shouldn't happen since the enable said we could
3511 			 * have as many as we are trying to setup here.
3512 			 */
3513 			qib_dev_err(dd,
3514 				"Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
3515 				msixnum,
3516 				dd->cspec->msix_entries[msixnum].msix.vector,
3517 				ret);
3518 			qib_7322_nomsix(dd);
3519 			goto try_intx;
3520 		}
3521 		dd->cspec->msix_entries[msixnum].arg = arg;
3522 #ifdef CONFIG_INFINIBAND_QIB_DCA
3523 		dd->cspec->msix_entries[msixnum].dca = dca;
3524 		dd->cspec->msix_entries[msixnum].rcv =
3525 			handler == qib_7322pintr;
3526 #endif
3527 		if (lsb >= 0) {
3528 			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3529 			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3530 				SYM_LSB(IntRedirect0, vec1);
3531 			mask &= ~(1ULL << lsb);
3532 			redirect[reg] |= ((u64) msixnum) << sh;
3533 		}
3534 		val = qib_read_kreg64(dd, 2 * msixnum + 1 +
3535 			(QIB_7322_MsixTable_OFFS / sizeof(u64)));
3536 		if (firstcpu < nr_cpu_ids &&
3537 			zalloc_cpumask_var(
3538 				&dd->cspec->msix_entries[msixnum].mask,
3539 				GFP_KERNEL)) {
3540 			if (handler == qib_7322pintr) {
3541 				cpumask_set_cpu(currrcvcpu,
3542 					dd->cspec->msix_entries[msixnum].mask);
3543 				currrcvcpu = cpumask_next(currrcvcpu,
3544 					local_mask);
3545 				if (currrcvcpu >= nr_cpu_ids)
3546 					currrcvcpu = secondcpu;
3547 			} else {
3548 				cpumask_set_cpu(firstcpu,
3549 					dd->cspec->msix_entries[msixnum].mask);
3550 			}
3551 			irq_set_affinity_hint(
3552 				dd->cspec->msix_entries[msixnum].msix.vector,
3553 				dd->cspec->msix_entries[msixnum].mask);
3554 		}
3555 		msixnum++;
3556 	}
3557 	/* Initialize the vector mapping */
3558 	for (i = 0; i < ARRAY_SIZE(redirect); i++)
3559 		qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3560 	dd->cspec->main_int_mask = mask;
3561 	tasklet_init(&dd->error_tasklet, qib_error_tasklet,
3562 		(unsigned long)dd);
3563 bail:;
3564 }
3565 
3566 /**
3567  * qib_7322_boardname - fill in the board name and note features
3568  * @dd: the qlogic_ib device
3569  *
3570  * info will be based on the board revision register
3571  */
3572 static unsigned qib_7322_boardname(struct qib_devdata *dd)
3573 {
3574 	/* Will need enumeration of board-types here */
3575 	char *n;
3576 	u32 boardid, namelen;
3577 	unsigned features = DUAL_PORT_CAP;
3578 
3579 	boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3580 
3581 	switch (boardid) {
3582 	case 0:
3583 		n = "InfiniPath_QLE7342_Emulation";
3584 		break;
3585 	case 1:
3586 		n = "InfiniPath_QLE7340";
3587 		dd->flags |= QIB_HAS_QSFP;
3588 		features = PORT_SPD_CAP;
3589 		break;
3590 	case 2:
3591 		n = "InfiniPath_QLE7342";
3592 		dd->flags |= QIB_HAS_QSFP;
3593 		break;
3594 	case 3:
3595 		n = "InfiniPath_QMI7342";
3596 		break;
3597 	case 4:
3598 		n = "InfiniPath_Unsupported7342";
3599 		qib_dev_err(dd, "Unsupported version of QMH7342\n");
3600 		features = 0;
3601 		break;
3602 	case BOARD_QMH7342:
3603 		n = "InfiniPath_QMH7342";
3604 		features = 0x24;
3605 		break;
3606 	case BOARD_QME7342:
3607 		n = "InfiniPath_QME7342";
3608 		break;
3609 	case 8:
3610 		n = "InfiniPath_QME7362";
3611 		dd->flags |= QIB_HAS_QSFP;
3612 		break;
3613 	case BOARD_QMH7360:
3614 		n = "Intel IB QDR 1P FLR-QSFP Adptr";
3615 		dd->flags |= QIB_HAS_QSFP;
3616 		break;
3617 	case 15:
3618 		n = "InfiniPath_QLE7342_TEST";
3619 		dd->flags |= QIB_HAS_QSFP;
3620 		break;
3621 	default:
3622 		n = "InfiniPath_QLE73xy_UNKNOWN";
3623 		qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3624 		break;
3625 	}
3626 	dd->board_atten = 1; /* index into txdds_Xdr */
3627 
3628 	namelen = strlen(n) + 1;
3629 	dd->boardname = kmalloc(namelen, GFP_KERNEL);
3630 	if (dd->boardname)
3631 		snprintf(dd->boardname, namelen, "%s", n);
3632 
3633 	snprintf(dd->boardversion, sizeof(dd->boardversion),
3634 		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3635 		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3636 		 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
3637 		 dd->majrev, dd->minrev,
3638 		 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
3639 
3640 	if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3641 		qib_devinfo(dd->pcidev,
3642 			"IB%u: Forced to single port mode by module parameter\n",
3643 			dd->unit);
3644 		features &= PORT_SPD_CAP;
3645 	}
3646 
3647 	return features;
3648 }
3649 
3650 /*
3651  * This routine sleeps, so it can only be called from user context, not
3652  * from interrupt context.
3653  */
3654 static int qib_do_7322_reset(struct qib_devdata *dd)
3655 {
3656 	u64 val;
3657 	u64 *msix_vecsave = NULL;
3658 	int i, msix_entries, ret = 1;
3659 	u16 cmdval;
3660 	u8 int_line, clinesz;
3661 	unsigned long flags;
3662 
3663 	/* Use dev_err so it shows up in logs, etc. */
3664 	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3665 
3666 	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3667 
3668 	msix_entries = dd->cspec->num_msix_entries;
3669 
3670 	/* no interrupts till re-initted */
3671 	qib_7322_set_intr_state(dd, 0);
3672 
3673 	if (msix_entries) {
3674 		qib_7322_nomsix(dd);
3675 		/* can be up to 512 bytes, too big for stack */
3676 		msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
3677 			sizeof(u64), GFP_KERNEL);
3678 	}
3679 
3680 	/*
3681 	 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3682 	 * info that is set up by the BIOS, so we have to save and restore
3683 	 * it ourselves.   There is some risk something could change it,
3684 	 * after we save it, but since we have disabled the MSIx, it
3685 	 * shouldn't be touched...
3686 	 */
3687 	for (i = 0; i < msix_entries; i++) {
3688 		u64 vecaddr, vecdata;
3689 
3690 		vecaddr = qib_read_kreg64(dd, 2 * i +
3691 				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3692 		vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3693 				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3694 		if (msix_vecsave) {
3695 			msix_vecsave[2 * i] = vecaddr;
3696 			/* save it without the masked bit set */
3697 			msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3698 		}
3699 	}
3700 
3701 	dd->pport->cpspec->ibdeltainprog = 0;
3702 	dd->pport->cpspec->ibsymdelta = 0;
3703 	dd->pport->cpspec->iblnkerrdelta = 0;
3704 	dd->pport->cpspec->ibmalfdelta = 0;
3705 	/* so we check interrupts work again */
3706 	dd->z_int_counter = qib_int_counter(dd);
3707 
3708 	/*
3709 	 * Keep chip from being accessed until we are ready.  Use
3710 	 * writeq() directly, to allow the write even though QIB_PRESENT
3711 	 * isn't set.
3712 	 */
3713 	dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3714 	dd->flags |= QIB_DOING_RESET;
3715 	val = dd->control | QLOGIC_IB_C_RESET;
3716 	writeq(val, &dd->kregbase[kr_control]);
3717 
3718 	for (i = 1; i <= 5; i++) {
3719 		/*
3720 		 * Allow MBIST, etc. to complete; longer on each retry.
3721 		 * We sometimes get machine checks from bus timeout if no
3722 		 * response, so for now, make it *really* long.
3723 		 */
3724 		msleep(1000 + (1 + i) * 3000);
3725 
3726 		qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3727 
3728 		/*
3729 		 * Use readq directly, so we don't need to mark it as PRESENT
3730 		 * until we get a successful indication that all is well.
3731 		 */
3732 		val = readq(&dd->kregbase[kr_revision]);
3733 		if (val == dd->revision)
3734 			break;
3735 		if (i == 5) {
3736 			qib_dev_err(dd,
3737 				"Failed to initialize after reset, unusable\n");
3738 			ret = 0;
3739 			goto  bail;
3740 		}
3741 	}
3742 
3743 	dd->flags |= QIB_PRESENT; /* it's back */
3744 
3745 	if (msix_entries) {
3746 		/* restore the MSIx vector address and data if saved above */
3747 		for (i = 0; i < msix_entries; i++) {
3748 			dd->cspec->msix_entries[i].msix.entry = i;
3749 			if (!msix_vecsave || !msix_vecsave[2 * i])
3750 				continue;
3751 			qib_write_kreg(dd, 2 * i +
3752 				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
3753 				msix_vecsave[2 * i]);
3754 			qib_write_kreg(dd, 1 + 2 * i +
3755 				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
3756 				msix_vecsave[1 + 2 * i]);
3757 		}
3758 	}
3759 
3760 	/* initialize the remaining registers.  */
3761 	for (i = 0; i < dd->num_pports; ++i)
3762 		write_7322_init_portregs(&dd->pport[i]);
3763 	write_7322_initregs(dd);
3764 
3765 	if (qib_pcie_params(dd, dd->lbus_width,
3766 			    &dd->cspec->num_msix_entries,
3767 			    dd->cspec->msix_entries))
3768 		qib_dev_err(dd,
3769 			"Reset failed to setup PCIe or interrupts; continuing anyway\n");
3770 
3771 	qib_setup_7322_interrupt(dd, 1);
3772 
3773 	for (i = 0; i < dd->num_pports; ++i) {
3774 		struct qib_pportdata *ppd = &dd->pport[i];
3775 
3776 		spin_lock_irqsave(&ppd->lflags_lock, flags);
3777 		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3778 		ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3779 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3780 	}
3781 
3782 bail:
3783 	dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3784 	kfree(msix_vecsave);
3785 	return ret;
3786 }
3787 
3788 /**
3789  * qib_7322_put_tid - write a TID to the chip
3790  * @dd: the qlogic_ib device
3791  * @tidptr: pointer to the expected TID (in chip) to update
3792  * @tidtype: 0 for eager, 1 for expected
3793  * @pa: physical address of in memory buffer; tidinvalid if freeing
3794  */
3795 static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3796 			     u32 type, unsigned long pa)
3797 {
3798 	if (!(dd->flags & QIB_PRESENT))
3799 		return;
3800 	if (pa != dd->tidinvalid) {
3801 		u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3802 
3803 		/* paranoia checks */
3804 		if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3805 			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3806 				    pa);
3807 			return;
3808 		}
3809 		if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3810 			qib_dev_err(dd,
3811 				"Physical page address 0x%lx larger than supported\n",
3812 				pa);
3813 			return;
3814 		}
3815 
3816 		if (type == RCVHQ_RCV_TYPE_EAGER)
3817 			chippa |= dd->tidtemplate;
3818 		else /* for now, always full 4KB page */
3819 			chippa |= IBA7322_TID_SZ_4K;
3820 		pa = chippa;
3821 	}
3822 	writeq(pa, tidptr);
3823 	mmiowb();
3824 }
3825 
3826 /**
3827  * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3828  * @dd: the qlogic_ib device
3829  * @ctxt: the ctxt
3830  *
3831  * clear all TID entries for a ctxt, expected and eager.
3832  * Used from qib_close().
3833  */
3834 static void qib_7322_clear_tids(struct qib_devdata *dd,
3835 				struct qib_ctxtdata *rcd)
3836 {
3837 	u64 __iomem *tidbase;
3838 	unsigned long tidinv;
3839 	u32 ctxt;
3840 	int i;
3841 
3842 	if (!dd->kregbase || !rcd)
3843 		return;
3844 
3845 	ctxt = rcd->ctxt;
3846 
3847 	tidinv = dd->tidinvalid;
3848 	tidbase = (u64 __iomem *)
3849 		((char __iomem *) dd->kregbase +
3850 		 dd->rcvtidbase +
3851 		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3852 
3853 	for (i = 0; i < dd->rcvtidcnt; i++)
3854 		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3855 				 tidinv);
3856 
3857 	tidbase = (u64 __iomem *)
3858 		((char __iomem *) dd->kregbase +
3859 		 dd->rcvegrbase +
3860 		 rcd->rcvegr_tid_base * sizeof(*tidbase));
3861 
3862 	for (i = 0; i < rcd->rcvegrcnt; i++)
3863 		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3864 				 tidinv);
3865 }
3866 
3867 /**
3868  * qib_7322_tidtemplate - setup constants for TID updates
3869  * @dd: the qlogic_ib device
3870  *
3871  * We setup stuff that we use a lot, to avoid calculating each time
3872  */
3873 static void qib_7322_tidtemplate(struct qib_devdata *dd)
3874 {
3875 	/*
3876 	 * For now, we always allocate 4KB buffers (at init) so we can
3877 	 * receive max size packets.  We may want a module parameter to
3878 	 * specify 2KB or 4KB and/or make it per port instead of per device
3879 	 * for those who want to reduce memory footprint.  Note that the
3880 	 * rcvhdrentsize size must be large enough to hold the largest
3881 	 * IB header (currently 96 bytes) that we expect to handle (plus of
3882 	 * course the 2 dwords of RHF).
3883 	 */
3884 	if (dd->rcvegrbufsize == 2048)
3885 		dd->tidtemplate = IBA7322_TID_SZ_2K;
3886 	else if (dd->rcvegrbufsize == 4096)
3887 		dd->tidtemplate = IBA7322_TID_SZ_4K;
3888 	dd->tidinvalid = 0;
3889 }
3890 
3891 /**
3892  * qib_init_7322_get_base_info - set chip-specific flags for user code
3893  * @rcd: the qlogic_ib ctxt
3894  * @kbase: qib_base_info pointer
3895  *
3896  * We set the PCIE flag because the lower bandwidth on PCIe vs
3897  * HyperTransport can affect some user packet algorithims.
3898  */
3899 
3900 static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3901 				  struct qib_base_info *kinfo)
3902 {
3903 	kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3904 		QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3905 		QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3906 	if (rcd->dd->cspec->r1)
3907 		kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3908 	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3909 		kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3910 
3911 	return 0;
3912 }
3913 
3914 static struct qib_message_header *
3915 qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3916 {
3917 	u32 offset = qib_hdrget_offset(rhf_addr);
3918 
3919 	return (struct qib_message_header *)
3920 		(rhf_addr - dd->rhf_offset + offset);
3921 }
3922 
3923 /*
3924  * Configure number of contexts.
3925  */
3926 static void qib_7322_config_ctxts(struct qib_devdata *dd)
3927 {
3928 	unsigned long flags;
3929 	u32 nchipctxts;
3930 
3931 	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3932 	dd->cspec->numctxts = nchipctxts;
3933 	if (qib_n_krcv_queues > 1 && dd->num_pports) {
3934 		dd->first_user_ctxt = NUM_IB_PORTS +
3935 			(qib_n_krcv_queues - 1) * dd->num_pports;
3936 		if (dd->first_user_ctxt > nchipctxts)
3937 			dd->first_user_ctxt = nchipctxts;
3938 		dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3939 	} else {
3940 		dd->first_user_ctxt = NUM_IB_PORTS;
3941 		dd->n_krcv_queues = 1;
3942 	}
3943 
3944 	if (!qib_cfgctxts) {
3945 		int nctxts = dd->first_user_ctxt + num_online_cpus();
3946 
3947 		if (nctxts <= 6)
3948 			dd->ctxtcnt = 6;
3949 		else if (nctxts <= 10)
3950 			dd->ctxtcnt = 10;
3951 		else if (nctxts <= nchipctxts)
3952 			dd->ctxtcnt = nchipctxts;
3953 	} else if (qib_cfgctxts < dd->num_pports)
3954 		dd->ctxtcnt = dd->num_pports;
3955 	else if (qib_cfgctxts <= nchipctxts)
3956 		dd->ctxtcnt = qib_cfgctxts;
3957 	if (!dd->ctxtcnt) /* none of the above, set to max */
3958 		dd->ctxtcnt = nchipctxts;
3959 
3960 	/*
3961 	 * Chip can be configured for 6, 10, or 18 ctxts, and choice
3962 	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
3963 	 * Lock to be paranoid about later motion, etc.
3964 	 */
3965 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3966 	if (dd->ctxtcnt > 10)
3967 		dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3968 	else if (dd->ctxtcnt > 6)
3969 		dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3970 	/* else configure for default 6 receive ctxts */
3971 
3972 	/* The XRC opcode is 5. */
3973 	dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3974 
3975 	/*
3976 	 * RcvCtrl *must* be written here so that the
3977 	 * chip understands how to change rcvegrcnt below.
3978 	 */
3979 	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3980 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3981 
3982 	/* kr_rcvegrcnt changes based on the number of contexts enabled */
3983 	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3984 	if (qib_rcvhdrcnt)
3985 		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
3986 	else
3987 		dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
3988 				    dd->num_pports > 1 ? 1024U : 2048U);
3989 }
3990 
3991 static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3992 {
3993 
3994 	int lsb, ret = 0;
3995 	u64 maskr; /* right-justified mask */
3996 
3997 	switch (which) {
3998 
3999 	case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
4000 		ret = ppd->link_width_enabled;
4001 		goto done;
4002 
4003 	case QIB_IB_CFG_LWID: /* Get currently active Link-width */
4004 		ret = ppd->link_width_active;
4005 		goto done;
4006 
4007 	case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
4008 		ret = ppd->link_speed_enabled;
4009 		goto done;
4010 
4011 	case QIB_IB_CFG_SPD: /* Get current Link spd */
4012 		ret = ppd->link_speed_active;
4013 		goto done;
4014 
4015 	case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
4016 		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4017 		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4018 		break;
4019 
4020 	case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
4021 		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4022 		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4023 		break;
4024 
4025 	case QIB_IB_CFG_LINKLATENCY:
4026 		ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
4027 			SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
4028 		goto done;
4029 
4030 	case QIB_IB_CFG_OP_VLS:
4031 		ret = ppd->vls_operational;
4032 		goto done;
4033 
4034 	case QIB_IB_CFG_VL_HIGH_CAP:
4035 		ret = 16;
4036 		goto done;
4037 
4038 	case QIB_IB_CFG_VL_LOW_CAP:
4039 		ret = 16;
4040 		goto done;
4041 
4042 	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4043 		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4044 				OverrunThreshold);
4045 		goto done;
4046 
4047 	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4048 		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4049 				PhyerrThreshold);
4050 		goto done;
4051 
4052 	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4053 		/* will only take effect when the link state changes */
4054 		ret = (ppd->cpspec->ibcctrl_a &
4055 		       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
4056 			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
4057 		goto done;
4058 
4059 	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
4060 		lsb = IBA7322_IBC_HRTBT_LSB;
4061 		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4062 		break;
4063 
4064 	case QIB_IB_CFG_PMA_TICKS:
4065 		/*
4066 		 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
4067 		 * Since the clock is always 250MHz, the value is 3, 1 or 0.
4068 		 */
4069 		if (ppd->link_speed_active == QIB_IB_QDR)
4070 			ret = 3;
4071 		else if (ppd->link_speed_active == QIB_IB_DDR)
4072 			ret = 1;
4073 		else
4074 			ret = 0;
4075 		goto done;
4076 
4077 	default:
4078 		ret = -EINVAL;
4079 		goto done;
4080 	}
4081 	ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
4082 done:
4083 	return ret;
4084 }
4085 
4086 /*
4087  * Below again cribbed liberally from older version. Do not lean
4088  * heavily on it.
4089  */
4090 #define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
4091 #define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
4092 	| (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
4093 
4094 static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
4095 {
4096 	struct qib_devdata *dd = ppd->dd;
4097 	u64 maskr; /* right-justified mask */
4098 	int lsb, ret = 0;
4099 	u16 lcmd, licmd;
4100 	unsigned long flags;
4101 
4102 	switch (which) {
4103 	case QIB_IB_CFG_LIDLMC:
4104 		/*
4105 		 * Set LID and LMC. Combined to avoid possible hazard
4106 		 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
4107 		 */
4108 		lsb = IBA7322_IBC_DLIDLMC_SHIFT;
4109 		maskr = IBA7322_IBC_DLIDLMC_MASK;
4110 		/*
4111 		 * For header-checking, the SLID in the packet will
4112 		 * be masked with SendIBSLMCMask, and compared
4113 		 * with SendIBSLIDAssignMask. Make sure we do not
4114 		 * set any bits not covered by the mask, or we get
4115 		 * false-positives.
4116 		 */
4117 		qib_write_kreg_port(ppd, krp_sendslid,
4118 				    val & (val >> 16) & SendIBSLIDAssignMask);
4119 		qib_write_kreg_port(ppd, krp_sendslidmask,
4120 				    (val >> 16) & SendIBSLMCMask);
4121 		break;
4122 
4123 	case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
4124 		ppd->link_width_enabled = val;
4125 		/* convert IB value to chip register value */
4126 		if (val == IB_WIDTH_1X)
4127 			val = 0;
4128 		else if (val == IB_WIDTH_4X)
4129 			val = 1;
4130 		else
4131 			val = 3;
4132 		maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
4133 		lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
4134 		break;
4135 
4136 	case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
4137 		/*
4138 		 * As with width, only write the actual register if the
4139 		 * link is currently down, otherwise takes effect on next
4140 		 * link change.  Since setting is being explicitly requested
4141 		 * (via MAD or sysfs), clear autoneg failure status if speed
4142 		 * autoneg is enabled.
4143 		 */
4144 		ppd->link_speed_enabled = val;
4145 		val <<= IBA7322_IBC_SPEED_LSB;
4146 		maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
4147 			IBA7322_IBC_MAX_SPEED_MASK;
4148 		if (val & (val - 1)) {
4149 			/* Muliple speeds enabled */
4150 			val |= IBA7322_IBC_IBTA_1_2_MASK |
4151 				IBA7322_IBC_MAX_SPEED_MASK;
4152 			spin_lock_irqsave(&ppd->lflags_lock, flags);
4153 			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
4154 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4155 		} else if (val & IBA7322_IBC_SPEED_QDR)
4156 			val |= IBA7322_IBC_IBTA_1_2_MASK;
4157 		/* IBTA 1.2 mode + min/max + speed bits are contiguous */
4158 		lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
4159 		break;
4160 
4161 	case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
4162 		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4163 		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4164 		break;
4165 
4166 	case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
4167 		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4168 		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4169 		break;
4170 
4171 	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4172 		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4173 				  OverrunThreshold);
4174 		if (maskr != val) {
4175 			ppd->cpspec->ibcctrl_a &=
4176 				~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
4177 			ppd->cpspec->ibcctrl_a |= (u64) val <<
4178 				SYM_LSB(IBCCtrlA_0, OverrunThreshold);
4179 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
4180 					    ppd->cpspec->ibcctrl_a);
4181 			qib_write_kreg(dd, kr_scratch, 0ULL);
4182 		}
4183 		goto bail;
4184 
4185 	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4186 		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4187 				  PhyerrThreshold);
4188 		if (maskr != val) {
4189 			ppd->cpspec->ibcctrl_a &=
4190 				~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
4191 			ppd->cpspec->ibcctrl_a |= (u64) val <<
4192 				SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
4193 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
4194 					    ppd->cpspec->ibcctrl_a);
4195 			qib_write_kreg(dd, kr_scratch, 0ULL);
4196 		}
4197 		goto bail;
4198 
4199 	case QIB_IB_CFG_PKEYS: /* update pkeys */
4200 		maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
4201 			((u64) ppd->pkeys[2] << 32) |
4202 			((u64) ppd->pkeys[3] << 48);
4203 		qib_write_kreg_port(ppd, krp_partitionkey, maskr);
4204 		goto bail;
4205 
4206 	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4207 		/* will only take effect when the link state changes */
4208 		if (val == IB_LINKINITCMD_POLL)
4209 			ppd->cpspec->ibcctrl_a &=
4210 				~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4211 		else /* SLEEP */
4212 			ppd->cpspec->ibcctrl_a |=
4213 				SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4214 		qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4215 		qib_write_kreg(dd, kr_scratch, 0ULL);
4216 		goto bail;
4217 
4218 	case QIB_IB_CFG_MTU: /* update the MTU in IBC */
4219 		/*
4220 		 * Update our housekeeping variables, and set IBC max
4221 		 * size, same as init code; max IBC is max we allow in
4222 		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
4223 		 * Set even if it's unchanged, print debug message only
4224 		 * on changes.
4225 		 */
4226 		val = (ppd->ibmaxlen >> 2) + 1;
4227 		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
4228 		ppd->cpspec->ibcctrl_a |= (u64)val <<
4229 			SYM_LSB(IBCCtrlA_0, MaxPktLen);
4230 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
4231 				    ppd->cpspec->ibcctrl_a);
4232 		qib_write_kreg(dd, kr_scratch, 0ULL);
4233 		goto bail;
4234 
4235 	case QIB_IB_CFG_LSTATE: /* set the IB link state */
4236 		switch (val & 0xffff0000) {
4237 		case IB_LINKCMD_DOWN:
4238 			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
4239 			ppd->cpspec->ibmalfusesnap = 1;
4240 			ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
4241 				crp_errlink);
4242 			if (!ppd->cpspec->ibdeltainprog &&
4243 			    qib_compat_ddr_negotiate) {
4244 				ppd->cpspec->ibdeltainprog = 1;
4245 				ppd->cpspec->ibsymsnap =
4246 					read_7322_creg32_port(ppd,
4247 							      crp_ibsymbolerr);
4248 				ppd->cpspec->iblnkerrsnap =
4249 					read_7322_creg32_port(ppd,
4250 						      crp_iblinkerrrecov);
4251 			}
4252 			break;
4253 
4254 		case IB_LINKCMD_ARMED:
4255 			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
4256 			if (ppd->cpspec->ibmalfusesnap) {
4257 				ppd->cpspec->ibmalfusesnap = 0;
4258 				ppd->cpspec->ibmalfdelta +=
4259 					read_7322_creg32_port(ppd,
4260 							      crp_errlink) -
4261 					ppd->cpspec->ibmalfsnap;
4262 			}
4263 			break;
4264 
4265 		case IB_LINKCMD_ACTIVE:
4266 			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
4267 			break;
4268 
4269 		default:
4270 			ret = -EINVAL;
4271 			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
4272 			goto bail;
4273 		}
4274 		switch (val & 0xffff) {
4275 		case IB_LINKINITCMD_NOP:
4276 			licmd = 0;
4277 			break;
4278 
4279 		case IB_LINKINITCMD_POLL:
4280 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
4281 			break;
4282 
4283 		case IB_LINKINITCMD_SLEEP:
4284 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
4285 			break;
4286 
4287 		case IB_LINKINITCMD_DISABLE:
4288 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
4289 			ppd->cpspec->chase_end = 0;
4290 			/*
4291 			 * stop state chase counter and timer, if running.
4292 			 * wait forpending timer, but don't clear .data (ppd)!
4293 			 */
4294 			if (ppd->cpspec->chase_timer.expires) {
4295 				del_timer_sync(&ppd->cpspec->chase_timer);
4296 				ppd->cpspec->chase_timer.expires = 0;
4297 			}
4298 			break;
4299 
4300 		default:
4301 			ret = -EINVAL;
4302 			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
4303 				    val & 0xffff);
4304 			goto bail;
4305 		}
4306 		qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4307 		goto bail;
4308 
4309 	case QIB_IB_CFG_OP_VLS:
4310 		if (ppd->vls_operational != val) {
4311 			ppd->vls_operational = val;
4312 			set_vls(ppd);
4313 		}
4314 		goto bail;
4315 
4316 	case QIB_IB_CFG_VL_HIGH_LIMIT:
4317 		qib_write_kreg_port(ppd, krp_highprio_limit, val);
4318 		goto bail;
4319 
4320 	case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
4321 		if (val > 3) {
4322 			ret = -EINVAL;
4323 			goto bail;
4324 		}
4325 		lsb = IBA7322_IBC_HRTBT_LSB;
4326 		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4327 		break;
4328 
4329 	case QIB_IB_CFG_PORT:
4330 		/* val is the port number of the switch we are connected to. */
4331 		if (ppd->dd->cspec->r1) {
4332 			cancel_delayed_work(&ppd->cpspec->ipg_work);
4333 			ppd->cpspec->ipg_tries = 0;
4334 		}
4335 		goto bail;
4336 
4337 	default:
4338 		ret = -EINVAL;
4339 		goto bail;
4340 	}
4341 	ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4342 	ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4343 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4344 	qib_write_kreg(dd, kr_scratch, 0);
4345 bail:
4346 	return ret;
4347 }
4348 
4349 static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4350 {
4351 	int ret = 0;
4352 	u64 val, ctrlb;
4353 
4354 	/* only IBC loopback, may add serdes and xgxs loopbacks later */
4355 	if (!strncmp(what, "ibc", 3)) {
4356 		ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4357 						       Loopback);
4358 		val = 0; /* disable heart beat, so link will come up */
4359 		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4360 			 ppd->dd->unit, ppd->port);
4361 	} else if (!strncmp(what, "off", 3)) {
4362 		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4363 							Loopback);
4364 		/* enable heart beat again */
4365 		val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
4366 		qib_devinfo(ppd->dd->pcidev,
4367 			"Disabling IB%u:%u IBC loopback (normal)\n",
4368 			ppd->dd->unit, ppd->port);
4369 	} else
4370 		ret = -EINVAL;
4371 	if (!ret) {
4372 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
4373 				    ppd->cpspec->ibcctrl_a);
4374 		ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4375 					     << IBA7322_IBC_HRTBT_LSB);
4376 		ppd->cpspec->ibcctrl_b = ctrlb | val;
4377 		qib_write_kreg_port(ppd, krp_ibcctrl_b,
4378 				    ppd->cpspec->ibcctrl_b);
4379 		qib_write_kreg(ppd->dd, kr_scratch, 0);
4380 	}
4381 	return ret;
4382 }
4383 
4384 static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4385 			   struct ib_vl_weight_elem *vl)
4386 {
4387 	unsigned i;
4388 
4389 	for (i = 0; i < 16; i++, regno++, vl++) {
4390 		u32 val = qib_read_kreg_port(ppd, regno);
4391 
4392 		vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
4393 			SYM_RMASK(LowPriority0_0, VirtualLane);
4394 		vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
4395 			SYM_RMASK(LowPriority0_0, Weight);
4396 	}
4397 }
4398 
4399 static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4400 			   struct ib_vl_weight_elem *vl)
4401 {
4402 	unsigned i;
4403 
4404 	for (i = 0; i < 16; i++, regno++, vl++) {
4405 		u64 val;
4406 
4407 		val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4408 			SYM_LSB(LowPriority0_0, VirtualLane)) |
4409 		      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4410 			SYM_LSB(LowPriority0_0, Weight));
4411 		qib_write_kreg_port(ppd, regno, val);
4412 	}
4413 	if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4414 		struct qib_devdata *dd = ppd->dd;
4415 		unsigned long flags;
4416 
4417 		spin_lock_irqsave(&dd->sendctrl_lock, flags);
4418 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4419 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4420 		qib_write_kreg(dd, kr_scratch, 0);
4421 		spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4422 	}
4423 }
4424 
4425 static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4426 {
4427 	switch (which) {
4428 	case QIB_IB_TBL_VL_HIGH_ARB:
4429 		get_vl_weights(ppd, krp_highprio_0, t);
4430 		break;
4431 
4432 	case QIB_IB_TBL_VL_LOW_ARB:
4433 		get_vl_weights(ppd, krp_lowprio_0, t);
4434 		break;
4435 
4436 	default:
4437 		return -EINVAL;
4438 	}
4439 	return 0;
4440 }
4441 
4442 static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4443 {
4444 	switch (which) {
4445 	case QIB_IB_TBL_VL_HIGH_ARB:
4446 		set_vl_weights(ppd, krp_highprio_0, t);
4447 		break;
4448 
4449 	case QIB_IB_TBL_VL_LOW_ARB:
4450 		set_vl_weights(ppd, krp_lowprio_0, t);
4451 		break;
4452 
4453 	default:
4454 		return -EINVAL;
4455 	}
4456 	return 0;
4457 }
4458 
4459 static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4460 				    u32 updegr, u32 egrhd, u32 npkts)
4461 {
4462 	/*
4463 	 * Need to write timeout register before updating rcvhdrhead to ensure
4464 	 * that the timer is enabled on reception of a packet.
4465 	 */
4466 	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4467 		adjust_rcv_timeout(rcd, npkts);
4468 	if (updegr)
4469 		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4470 	mmiowb();
4471 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4472 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4473 	mmiowb();
4474 }
4475 
4476 static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4477 {
4478 	u32 head, tail;
4479 
4480 	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4481 	if (rcd->rcvhdrtail_kvaddr)
4482 		tail = qib_get_rcvhdrtail(rcd);
4483 	else
4484 		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4485 	return head == tail;
4486 }
4487 
4488 #define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4489 	QIB_RCVCTRL_CTXT_DIS | \
4490 	QIB_RCVCTRL_TIDFLOW_ENB | \
4491 	QIB_RCVCTRL_TIDFLOW_DIS | \
4492 	QIB_RCVCTRL_TAILUPD_ENB | \
4493 	QIB_RCVCTRL_TAILUPD_DIS | \
4494 	QIB_RCVCTRL_INTRAVAIL_ENB | \
4495 	QIB_RCVCTRL_INTRAVAIL_DIS | \
4496 	QIB_RCVCTRL_BP_ENB | \
4497 	QIB_RCVCTRL_BP_DIS)
4498 
4499 #define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4500 	QIB_RCVCTRL_CTXT_DIS | \
4501 	QIB_RCVCTRL_PKEY_DIS | \
4502 	QIB_RCVCTRL_PKEY_ENB)
4503 
4504 /*
4505  * Modify the RCVCTRL register in chip-specific way. This
4506  * is a function because bit positions and (future) register
4507  * location is chip-specifc, but the needed operations are
4508  * generic. <op> is a bit-mask because we often want to
4509  * do multiple modifications.
4510  */
4511 static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4512 			     int ctxt)
4513 {
4514 	struct qib_devdata *dd = ppd->dd;
4515 	struct qib_ctxtdata *rcd;
4516 	u64 mask, val;
4517 	unsigned long flags;
4518 
4519 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4520 
4521 	if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4522 		dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4523 	if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4524 		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4525 	if (op & QIB_RCVCTRL_TAILUPD_ENB)
4526 		dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4527 	if (op & QIB_RCVCTRL_TAILUPD_DIS)
4528 		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4529 	if (op & QIB_RCVCTRL_PKEY_ENB)
4530 		ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4531 	if (op & QIB_RCVCTRL_PKEY_DIS)
4532 		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4533 	if (ctxt < 0) {
4534 		mask = (1ULL << dd->ctxtcnt) - 1;
4535 		rcd = NULL;
4536 	} else {
4537 		mask = (1ULL << ctxt);
4538 		rcd = dd->rcd[ctxt];
4539 	}
4540 	if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4541 		ppd->p_rcvctrl |=
4542 			(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4543 		if (!(dd->flags & QIB_NODMA_RTAIL)) {
4544 			op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4545 			dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4546 		}
4547 		/* Write these registers before the context is enabled. */
4548 		qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4549 				    rcd->rcvhdrqtailaddr_phys);
4550 		qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4551 				    rcd->rcvhdrq_phys);
4552 		rcd->seq_cnt = 1;
4553 	}
4554 	if (op & QIB_RCVCTRL_CTXT_DIS)
4555 		ppd->p_rcvctrl &=
4556 			~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4557 	if (op & QIB_RCVCTRL_BP_ENB)
4558 		dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4559 	if (op & QIB_RCVCTRL_BP_DIS)
4560 		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4561 	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4562 		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4563 	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4564 		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4565 	/*
4566 	 * Decide which registers to write depending on the ops enabled.
4567 	 * Special case is "flush" (no bits set at all)
4568 	 * which needs to write both.
4569 	 */
4570 	if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4571 		qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4572 	if (op == 0 || (op & RCVCTRL_PORT_MODS))
4573 		qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4574 	if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4575 		/*
4576 		 * Init the context registers also; if we were
4577 		 * disabled, tail and head should both be zero
4578 		 * already from the enable, but since we don't
4579 		 * know, we have to do it explicitly.
4580 		 */
4581 		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4582 		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4583 
4584 		/* be sure enabling write seen; hd/tl should be 0 */
4585 		(void) qib_read_kreg32(dd, kr_scratch);
4586 		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4587 		dd->rcd[ctxt]->head = val;
4588 		/* If kctxt, interrupt on next receive. */
4589 		if (ctxt < dd->first_user_ctxt)
4590 			val |= dd->rhdrhead_intr_off;
4591 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4592 	} else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4593 		dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4594 		/* arm rcv interrupt */
4595 		val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4596 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4597 	}
4598 	if (op & QIB_RCVCTRL_CTXT_DIS) {
4599 		unsigned f;
4600 
4601 		/* Now that the context is disabled, clear these registers. */
4602 		if (ctxt >= 0) {
4603 			qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4604 			qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4605 			for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4606 				qib_write_ureg(dd, ur_rcvflowtable + f,
4607 					       TIDFLOW_ERRBITS, ctxt);
4608 		} else {
4609 			unsigned i;
4610 
4611 			for (i = 0; i < dd->cfgctxts; i++) {
4612 				qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4613 						    i, 0);
4614 				qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4615 				for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4616 					qib_write_ureg(dd, ur_rcvflowtable + f,
4617 						       TIDFLOW_ERRBITS, i);
4618 			}
4619 		}
4620 	}
4621 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4622 }
4623 
4624 /*
4625  * Modify the SENDCTRL register in chip-specific way. This
4626  * is a function where there are multiple such registers with
4627  * slightly different layouts.
4628  * The chip doesn't allow back-to-back sendctrl writes, so write
4629  * the scratch register after writing sendctrl.
4630  *
4631  * Which register is written depends on the operation.
4632  * Most operate on the common register, while
4633  * SEND_ENB and SEND_DIS operate on the per-port ones.
4634  * SEND_ENB is included in common because it can change SPCL_TRIG
4635  */
4636 #define SENDCTRL_COMMON_MODS (\
4637 	QIB_SENDCTRL_CLEAR | \
4638 	QIB_SENDCTRL_AVAIL_DIS | \
4639 	QIB_SENDCTRL_AVAIL_ENB | \
4640 	QIB_SENDCTRL_AVAIL_BLIP | \
4641 	QIB_SENDCTRL_DISARM | \
4642 	QIB_SENDCTRL_DISARM_ALL | \
4643 	QIB_SENDCTRL_SEND_ENB)
4644 
4645 #define SENDCTRL_PORT_MODS (\
4646 	QIB_SENDCTRL_CLEAR | \
4647 	QIB_SENDCTRL_SEND_ENB | \
4648 	QIB_SENDCTRL_SEND_DIS | \
4649 	QIB_SENDCTRL_FLUSH)
4650 
4651 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4652 {
4653 	struct qib_devdata *dd = ppd->dd;
4654 	u64 tmp_dd_sendctrl;
4655 	unsigned long flags;
4656 
4657 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
4658 
4659 	/* First the dd ones that are "sticky", saved in shadow */
4660 	if (op & QIB_SENDCTRL_CLEAR)
4661 		dd->sendctrl = 0;
4662 	if (op & QIB_SENDCTRL_AVAIL_DIS)
4663 		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4664 	else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4665 		dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4666 		if (dd->flags & QIB_USE_SPCL_TRIG)
4667 			dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4668 	}
4669 
4670 	/* Then the ppd ones that are "sticky", saved in shadow */
4671 	if (op & QIB_SENDCTRL_SEND_DIS)
4672 		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4673 	else if (op & QIB_SENDCTRL_SEND_ENB)
4674 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4675 
4676 	if (op & QIB_SENDCTRL_DISARM_ALL) {
4677 		u32 i, last;
4678 
4679 		tmp_dd_sendctrl = dd->sendctrl;
4680 		last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4681 		/*
4682 		 * Disarm any buffers that are not yet launched,
4683 		 * disabling updates until done.
4684 		 */
4685 		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4686 		for (i = 0; i < last; i++) {
4687 			qib_write_kreg(dd, kr_sendctrl,
4688 				       tmp_dd_sendctrl |
4689 				       SYM_MASK(SendCtrl, Disarm) | i);
4690 			qib_write_kreg(dd, kr_scratch, 0);
4691 		}
4692 	}
4693 
4694 	if (op & QIB_SENDCTRL_FLUSH) {
4695 		u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4696 
4697 		/*
4698 		 * Now drain all the fifos.  The Abort bit should never be
4699 		 * needed, so for now, at least, we don't use it.
4700 		 */
4701 		tmp_ppd_sendctrl |=
4702 			SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4703 			SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4704 			SYM_MASK(SendCtrl_0, TxeBypassIbc);
4705 		qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4706 		qib_write_kreg(dd, kr_scratch, 0);
4707 	}
4708 
4709 	tmp_dd_sendctrl = dd->sendctrl;
4710 
4711 	if (op & QIB_SENDCTRL_DISARM)
4712 		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4713 			((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4714 			 SYM_LSB(SendCtrl, DisarmSendBuf));
4715 	if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4716 	    (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4717 		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4718 
4719 	if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4720 		qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4721 		qib_write_kreg(dd, kr_scratch, 0);
4722 	}
4723 
4724 	if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4725 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4726 		qib_write_kreg(dd, kr_scratch, 0);
4727 	}
4728 
4729 	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4730 		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4731 		qib_write_kreg(dd, kr_scratch, 0);
4732 	}
4733 
4734 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4735 
4736 	if (op & QIB_SENDCTRL_FLUSH) {
4737 		u32 v;
4738 		/*
4739 		 * ensure writes have hit chip, then do a few
4740 		 * more reads, to allow DMA of pioavail registers
4741 		 * to occur, so in-memory copy is in sync with
4742 		 * the chip.  Not always safe to sleep.
4743 		 */
4744 		v = qib_read_kreg32(dd, kr_scratch);
4745 		qib_write_kreg(dd, kr_scratch, v);
4746 		v = qib_read_kreg32(dd, kr_scratch);
4747 		qib_write_kreg(dd, kr_scratch, v);
4748 		qib_read_kreg32(dd, kr_scratch);
4749 	}
4750 }
4751 
4752 #define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4753 #define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4754 #define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
4755 
4756 /**
4757  * qib_portcntr_7322 - read a per-port chip counter
4758  * @ppd: the qlogic_ib pport
4759  * @creg: the counter to read (not a chip offset)
4760  */
4761 static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4762 {
4763 	struct qib_devdata *dd = ppd->dd;
4764 	u64 ret = 0ULL;
4765 	u16 creg;
4766 	/* 0xffff for unimplemented or synthesized counters */
4767 	static const u32 xlator[] = {
4768 		[QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4769 		[QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4770 		[QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4771 		[QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4772 		[QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4773 		[QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4774 		[QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4775 		[QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4776 		[QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4777 		[QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4778 		[QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4779 		[QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4780 		[QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed  for 7322 */
4781 		[QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4782 		[QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4783 		[QIBPORTCNTR_ERRICRC] = crp_erricrc,
4784 		[QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4785 		[QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4786 		[QIBPORTCNTR_BADFORMAT] = crp_badformat,
4787 		[QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4788 		[QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4789 		[QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4790 		[QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4791 		[QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4792 		[QIBPORTCNTR_ERRLINK] = crp_errlink,
4793 		[QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4794 		[QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4795 		[QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4796 		[QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4797 		[QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4798 		/*
4799 		 * the next 3 aren't really counters, but were implemented
4800 		 * as counters in older chips, so still get accessed as
4801 		 * though they were counters from this code.
4802 		 */
4803 		[QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4804 		[QIBPORTCNTR_PSSTART] = krp_psstart,
4805 		[QIBPORTCNTR_PSSTAT] = krp_psstat,
4806 		/* pseudo-counter, summed for all ports */
4807 		[QIBPORTCNTR_KHDROVFL] = 0xffff,
4808 	};
4809 
4810 	if (reg >= ARRAY_SIZE(xlator)) {
4811 		qib_devinfo(ppd->dd->pcidev,
4812 			 "Unimplemented portcounter %u\n", reg);
4813 		goto done;
4814 	}
4815 	creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4816 
4817 	/* handle non-counters and special cases first */
4818 	if (reg == QIBPORTCNTR_KHDROVFL) {
4819 		int i;
4820 
4821 		/* sum over all kernel contexts (skip if mini_init) */
4822 		for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4823 			struct qib_ctxtdata *rcd = dd->rcd[i];
4824 
4825 			if (!rcd || rcd->ppd != ppd)
4826 				continue;
4827 			ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4828 		}
4829 		goto done;
4830 	} else if (reg == QIBPORTCNTR_RXDROPPKT) {
4831 		/*
4832 		 * Used as part of the synthesis of port_rcv_errors
4833 		 * in the verbs code for IBTA counters.  Not needed for 7322,
4834 		 * because all the errors are already counted by other cntrs.
4835 		 */
4836 		goto done;
4837 	} else if (reg == QIBPORTCNTR_PSINTERVAL ||
4838 		   reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4839 		/* were counters in older chips, now per-port kernel regs */
4840 		ret = qib_read_kreg_port(ppd, creg);
4841 		goto done;
4842 	}
4843 
4844 	/*
4845 	 * Only fast increment counters are 64 bits; use 32 bit reads to
4846 	 * avoid two independent reads when on Opteron.
4847 	 */
4848 	if (xlator[reg] & _PORT_64BIT_FLAG)
4849 		ret = read_7322_creg_port(ppd, creg);
4850 	else
4851 		ret = read_7322_creg32_port(ppd, creg);
4852 	if (creg == crp_ibsymbolerr) {
4853 		if (ppd->cpspec->ibdeltainprog)
4854 			ret -= ret - ppd->cpspec->ibsymsnap;
4855 		ret -= ppd->cpspec->ibsymdelta;
4856 	} else if (creg == crp_iblinkerrrecov) {
4857 		if (ppd->cpspec->ibdeltainprog)
4858 			ret -= ret - ppd->cpspec->iblnkerrsnap;
4859 		ret -= ppd->cpspec->iblnkerrdelta;
4860 	} else if (creg == crp_errlink)
4861 		ret -= ppd->cpspec->ibmalfdelta;
4862 	else if (creg == crp_iblinkdown)
4863 		ret += ppd->cpspec->iblnkdowndelta;
4864 done:
4865 	return ret;
4866 }
4867 
4868 /*
4869  * Device counter names (not port-specific), one line per stat,
4870  * single string.  Used by utilities like ipathstats to print the stats
4871  * in a way which works for different versions of drivers, without changing
4872  * the utility.  Names need to be 12 chars or less (w/o newline), for proper
4873  * display by utility.
4874  * Non-error counters are first.
4875  * Start of "error" conters is indicated by a leading "E " on the first
4876  * "error" counter, and doesn't count in label length.
4877  * The EgrOvfl list needs to be last so we truncate them at the configured
4878  * context count for the device.
4879  * cntr7322indices contains the corresponding register indices.
4880  */
4881 static const char cntr7322names[] =
4882 	"Interrupts\n"
4883 	"HostBusStall\n"
4884 	"E RxTIDFull\n"
4885 	"RxTIDInvalid\n"
4886 	"RxTIDFloDrop\n" /* 7322 only */
4887 	"Ctxt0EgrOvfl\n"
4888 	"Ctxt1EgrOvfl\n"
4889 	"Ctxt2EgrOvfl\n"
4890 	"Ctxt3EgrOvfl\n"
4891 	"Ctxt4EgrOvfl\n"
4892 	"Ctxt5EgrOvfl\n"
4893 	"Ctxt6EgrOvfl\n"
4894 	"Ctxt7EgrOvfl\n"
4895 	"Ctxt8EgrOvfl\n"
4896 	"Ctxt9EgrOvfl\n"
4897 	"Ctx10EgrOvfl\n"
4898 	"Ctx11EgrOvfl\n"
4899 	"Ctx12EgrOvfl\n"
4900 	"Ctx13EgrOvfl\n"
4901 	"Ctx14EgrOvfl\n"
4902 	"Ctx15EgrOvfl\n"
4903 	"Ctx16EgrOvfl\n"
4904 	"Ctx17EgrOvfl\n"
4905 	;
4906 
4907 static const u32 cntr7322indices[] = {
4908 	cr_lbint | _PORT_64BIT_FLAG,
4909 	cr_lbstall | _PORT_64BIT_FLAG,
4910 	cr_tidfull,
4911 	cr_tidinvalid,
4912 	cr_rxtidflowdrop,
4913 	cr_base_egrovfl + 0,
4914 	cr_base_egrovfl + 1,
4915 	cr_base_egrovfl + 2,
4916 	cr_base_egrovfl + 3,
4917 	cr_base_egrovfl + 4,
4918 	cr_base_egrovfl + 5,
4919 	cr_base_egrovfl + 6,
4920 	cr_base_egrovfl + 7,
4921 	cr_base_egrovfl + 8,
4922 	cr_base_egrovfl + 9,
4923 	cr_base_egrovfl + 10,
4924 	cr_base_egrovfl + 11,
4925 	cr_base_egrovfl + 12,
4926 	cr_base_egrovfl + 13,
4927 	cr_base_egrovfl + 14,
4928 	cr_base_egrovfl + 15,
4929 	cr_base_egrovfl + 16,
4930 	cr_base_egrovfl + 17,
4931 };
4932 
4933 /*
4934  * same as cntr7322names and cntr7322indices, but for port-specific counters.
4935  * portcntr7322indices is somewhat complicated by some registers needing
4936  * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4937  */
4938 static const char portcntr7322names[] =
4939 	"TxPkt\n"
4940 	"TxFlowPkt\n"
4941 	"TxWords\n"
4942 	"RxPkt\n"
4943 	"RxFlowPkt\n"
4944 	"RxWords\n"
4945 	"TxFlowStall\n"
4946 	"TxDmaDesc\n"  /* 7220 and 7322-only */
4947 	"E RxDlidFltr\n"  /* 7220 and 7322-only */
4948 	"IBStatusChng\n"
4949 	"IBLinkDown\n"
4950 	"IBLnkRecov\n"
4951 	"IBRxLinkErr\n"
4952 	"IBSymbolErr\n"
4953 	"RxLLIErr\n"
4954 	"RxBadFormat\n"
4955 	"RxBadLen\n"
4956 	"RxBufOvrfl\n"
4957 	"RxEBP\n"
4958 	"RxFlowCtlErr\n"
4959 	"RxICRCerr\n"
4960 	"RxLPCRCerr\n"
4961 	"RxVCRCerr\n"
4962 	"RxInvalLen\n"
4963 	"RxInvalPKey\n"
4964 	"RxPktDropped\n"
4965 	"TxBadLength\n"
4966 	"TxDropped\n"
4967 	"TxInvalLen\n"
4968 	"TxUnderrun\n"
4969 	"TxUnsupVL\n"
4970 	"RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4971 	"RxVL15Drop\n"
4972 	"RxVlErr\n"
4973 	"XcessBufOvfl\n"
4974 	"RxQPBadCtxt\n" /* 7322-only from here down */
4975 	"TXBadHeader\n"
4976 	;
4977 
4978 static const u32 portcntr7322indices[] = {
4979 	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4980 	crp_pktsendflow,
4981 	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4982 	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4983 	crp_pktrcvflowctrl,
4984 	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4985 	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4986 	crp_txsdmadesc | _PORT_64BIT_FLAG,
4987 	crp_rxdlidfltr,
4988 	crp_ibstatuschange,
4989 	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
4990 	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
4991 	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
4992 	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
4993 	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
4994 	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
4995 	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
4996 	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
4997 	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
4998 	crp_rcvflowctrlviol,
4999 	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
5000 	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
5001 	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
5002 	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
5003 	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
5004 	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
5005 	crp_txminmaxlenerr,
5006 	crp_txdroppedpkt,
5007 	crp_txlenerr,
5008 	crp_txunderrun,
5009 	crp_txunsupvl,
5010 	QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
5011 	QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
5012 	QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
5013 	QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
5014 	crp_rxqpinvalidctxt,
5015 	crp_txhdrerr,
5016 };
5017 
5018 /* do all the setup to make the counter reads efficient later */
5019 static void init_7322_cntrnames(struct qib_devdata *dd)
5020 {
5021 	int i, j = 0;
5022 	char *s;
5023 
5024 	for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
5025 	     i++) {
5026 		/* we always have at least one counter before the egrovfl */
5027 		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
5028 			j = 1;
5029 		s = strchr(s + 1, '\n');
5030 		if (s && j)
5031 			j++;
5032 	}
5033 	dd->cspec->ncntrs = i;
5034 	if (!s)
5035 		/* full list; size is without terminating null */
5036 		dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
5037 	else
5038 		dd->cspec->cntrnamelen = 1 + s - cntr7322names;
5039 	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
5040 		* sizeof(u64), GFP_KERNEL);
5041 
5042 	for (i = 0, s = (char *)portcntr7322names; s; i++)
5043 		s = strchr(s + 1, '\n');
5044 	dd->cspec->nportcntrs = i - 1;
5045 	dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
5046 	for (i = 0; i < dd->num_pports; ++i) {
5047 		dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
5048 			* sizeof(u64), GFP_KERNEL);
5049 	}
5050 }
5051 
5052 static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
5053 			      u64 **cntrp)
5054 {
5055 	u32 ret;
5056 
5057 	if (namep) {
5058 		ret = dd->cspec->cntrnamelen;
5059 		if (pos >= ret)
5060 			ret = 0; /* final read after getting everything */
5061 		else
5062 			*namep = (char *) cntr7322names;
5063 	} else {
5064 		u64 *cntr = dd->cspec->cntrs;
5065 		int i;
5066 
5067 		ret = dd->cspec->ncntrs * sizeof(u64);
5068 		if (!cntr || pos >= ret) {
5069 			/* everything read, or couldn't get memory */
5070 			ret = 0;
5071 			goto done;
5072 		}
5073 		*cntrp = cntr;
5074 		for (i = 0; i < dd->cspec->ncntrs; i++)
5075 			if (cntr7322indices[i] & _PORT_64BIT_FLAG)
5076 				*cntr++ = read_7322_creg(dd,
5077 							 cntr7322indices[i] &
5078 							 _PORT_CNTR_IDXMASK);
5079 			else
5080 				*cntr++ = read_7322_creg32(dd,
5081 							   cntr7322indices[i]);
5082 	}
5083 done:
5084 	return ret;
5085 }
5086 
5087 static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
5088 				  char **namep, u64 **cntrp)
5089 {
5090 	u32 ret;
5091 
5092 	if (namep) {
5093 		ret = dd->cspec->portcntrnamelen;
5094 		if (pos >= ret)
5095 			ret = 0; /* final read after getting everything */
5096 		else
5097 			*namep = (char *)portcntr7322names;
5098 	} else {
5099 		struct qib_pportdata *ppd = &dd->pport[port];
5100 		u64 *cntr = ppd->cpspec->portcntrs;
5101 		int i;
5102 
5103 		ret = dd->cspec->nportcntrs * sizeof(u64);
5104 		if (!cntr || pos >= ret) {
5105 			/* everything read, or couldn't get memory */
5106 			ret = 0;
5107 			goto done;
5108 		}
5109 		*cntrp = cntr;
5110 		for (i = 0; i < dd->cspec->nportcntrs; i++) {
5111 			if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
5112 				*cntr++ = qib_portcntr_7322(ppd,
5113 					portcntr7322indices[i] &
5114 					_PORT_CNTR_IDXMASK);
5115 			else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
5116 				*cntr++ = read_7322_creg_port(ppd,
5117 					   portcntr7322indices[i] &
5118 					    _PORT_CNTR_IDXMASK);
5119 			else
5120 				*cntr++ = read_7322_creg32_port(ppd,
5121 					   portcntr7322indices[i]);
5122 		}
5123 	}
5124 done:
5125 	return ret;
5126 }
5127 
5128 /**
5129  * qib_get_7322_faststats - get word counters from chip before they overflow
5130  * @opaque - contains a pointer to the qlogic_ib device qib_devdata
5131  *
5132  * VESTIGIAL IBA7322 has no "small fast counters", so the only
5133  * real purpose of this function is to maintain the notion of
5134  * "active time", which in turn is only logged into the eeprom,
5135  * which we don;t have, yet, for 7322-based boards.
5136  *
5137  * called from add_timer
5138  */
5139 static void qib_get_7322_faststats(unsigned long opaque)
5140 {
5141 	struct qib_devdata *dd = (struct qib_devdata *) opaque;
5142 	struct qib_pportdata *ppd;
5143 	unsigned long flags;
5144 	u64 traffic_wds;
5145 	int pidx;
5146 
5147 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5148 		ppd = dd->pport + pidx;
5149 
5150 		/*
5151 		 * If port isn't enabled or not operational ports, or
5152 		 * diags is running (can cause memory diags to fail)
5153 		 * skip this port this time.
5154 		 */
5155 		if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
5156 		    || dd->diag_client)
5157 			continue;
5158 
5159 		/*
5160 		 * Maintain an activity timer, based on traffic
5161 		 * exceeding a threshold, so we need to check the word-counts
5162 		 * even if they are 64-bit.
5163 		 */
5164 		traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
5165 			qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
5166 		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
5167 		traffic_wds -= ppd->dd->traffic_wds;
5168 		ppd->dd->traffic_wds += traffic_wds;
5169 		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
5170 		if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
5171 						QIB_IB_QDR) &&
5172 		    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
5173 				    QIBL_LINKACTIVE)) &&
5174 		    ppd->cpspec->qdr_dfe_time &&
5175 		    time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
5176 			ppd->cpspec->qdr_dfe_on = 0;
5177 
5178 			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
5179 					    ppd->dd->cspec->r1 ?
5180 					    QDR_STATIC_ADAPT_INIT_R1 :
5181 					    QDR_STATIC_ADAPT_INIT);
5182 			force_h1(ppd);
5183 		}
5184 	}
5185 	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
5186 }
5187 
5188 /*
5189  * If we were using MSIx, try to fallback to INTx.
5190  */
5191 static int qib_7322_intr_fallback(struct qib_devdata *dd)
5192 {
5193 	if (!dd->cspec->num_msix_entries)
5194 		return 0; /* already using INTx */
5195 
5196 	qib_devinfo(dd->pcidev,
5197 		"MSIx interrupt not detected, trying INTx interrupts\n");
5198 	qib_7322_nomsix(dd);
5199 	qib_enable_intx(dd->pcidev);
5200 	qib_setup_7322_interrupt(dd, 0);
5201 	return 1;
5202 }
5203 
5204 /*
5205  * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
5206  * than resetting the IBC or external link state, and useful in some
5207  * cases to cause some retraining.  To do this right, we reset IBC
5208  * as well, then return to previous state (which may be still in reset)
5209  * NOTE: some callers of this "know" this writes the current value
5210  * of cpspec->ibcctrl_a as part of it's operation, so if that changes,
5211  * check all callers.
5212  */
5213 static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
5214 {
5215 	u64 val;
5216 	struct qib_devdata *dd = ppd->dd;
5217 	const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
5218 		SYM_MASK(IBPCSConfig_0, xcv_treset) |
5219 		SYM_MASK(IBPCSConfig_0, tx_rx_reset);
5220 
5221 	val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
5222 	qib_write_kreg(dd, kr_hwerrmask,
5223 		       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
5224 	qib_write_kreg_port(ppd, krp_ibcctrl_a,
5225 			    ppd->cpspec->ibcctrl_a &
5226 			    ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
5227 
5228 	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
5229 	qib_read_kreg32(dd, kr_scratch);
5230 	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
5231 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
5232 	qib_write_kreg(dd, kr_scratch, 0ULL);
5233 	qib_write_kreg(dd, kr_hwerrclear,
5234 		       SYM_MASK(HwErrClear, statusValidNoEopClear));
5235 	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
5236 }
5237 
5238 /*
5239  * This code for non-IBTA-compliant IB speed negotiation is only known to
5240  * work for the SDR to DDR transition, and only between an HCA and a switch
5241  * with recent firmware.  It is based on observed heuristics, rather than
5242  * actual knowledge of the non-compliant speed negotiation.
5243  * It has a number of hard-coded fields, since the hope is to rewrite this
5244  * when a spec is available on how the negoation is intended to work.
5245  */
5246 static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
5247 				 u32 dcnt, u32 *data)
5248 {
5249 	int i;
5250 	u64 pbc;
5251 	u32 __iomem *piobuf;
5252 	u32 pnum, control, len;
5253 	struct qib_devdata *dd = ppd->dd;
5254 
5255 	i = 0;
5256 	len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
5257 	control = qib_7322_setpbc_control(ppd, len, 0, 15);
5258 	pbc = ((u64) control << 32) | len;
5259 	while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
5260 		if (i++ > 15)
5261 			return;
5262 		udelay(2);
5263 	}
5264 	/* disable header check on this packet, since it can't be valid */
5265 	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
5266 	writeq(pbc, piobuf);
5267 	qib_flush_wc();
5268 	qib_pio_copy(piobuf + 2, hdr, 7);
5269 	qib_pio_copy(piobuf + 9, data, dcnt);
5270 	if (dd->flags & QIB_USE_SPCL_TRIG) {
5271 		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
5272 
5273 		qib_flush_wc();
5274 		__raw_writel(0xaebecede, piobuf + spcl_off);
5275 	}
5276 	qib_flush_wc();
5277 	qib_sendbuf_done(dd, pnum);
5278 	/* and re-enable hdr check */
5279 	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
5280 }
5281 
5282 /*
5283  * _start packet gets sent twice at start, _done gets sent twice at end
5284  */
5285 static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5286 {
5287 	struct qib_devdata *dd = ppd->dd;
5288 	static u32 swapped;
5289 	u32 dw, i, hcnt, dcnt, *data;
5290 	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
5291 	static u32 madpayload_start[0x40] = {
5292 		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5293 		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5294 		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
5295 		};
5296 	static u32 madpayload_done[0x40] = {
5297 		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5298 		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5299 		0x40000001, 0x1388, 0x15e, /* rest 0's */
5300 		};
5301 
5302 	dcnt = ARRAY_SIZE(madpayload_start);
5303 	hcnt = ARRAY_SIZE(hdr);
5304 	if (!swapped) {
5305 		/* for maintainability, do it at runtime */
5306 		for (i = 0; i < hcnt; i++) {
5307 			dw = (__force u32) cpu_to_be32(hdr[i]);
5308 			hdr[i] = dw;
5309 		}
5310 		for (i = 0; i < dcnt; i++) {
5311 			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
5312 			madpayload_start[i] = dw;
5313 			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
5314 			madpayload_done[i] = dw;
5315 		}
5316 		swapped = 1;
5317 	}
5318 
5319 	data = which ? madpayload_done : madpayload_start;
5320 
5321 	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5322 	qib_read_kreg64(dd, kr_scratch);
5323 	udelay(2);
5324 	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5325 	qib_read_kreg64(dd, kr_scratch);
5326 	udelay(2);
5327 }
5328 
5329 /*
5330  * Do the absolute minimum to cause an IB speed change, and make it
5331  * ready, but don't actually trigger the change.   The caller will
5332  * do that when ready (if link is in Polling training state, it will
5333  * happen immediately, otherwise when link next goes down)
5334  *
5335  * This routine should only be used as part of the DDR autonegotation
5336  * code for devices that are not compliant with IB 1.2 (or code that
5337  * fixes things up for same).
5338  *
5339  * When link has gone down, and autoneg enabled, or autoneg has
5340  * failed and we give up until next time we set both speeds, and
5341  * then we want IBTA enabled as well as "use max enabled speed.
5342  */
5343 static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5344 {
5345 	u64 newctrlb;
5346 
5347 	newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5348 				    IBA7322_IBC_IBTA_1_2_MASK |
5349 				    IBA7322_IBC_MAX_SPEED_MASK);
5350 
5351 	if (speed & (speed - 1)) /* multiple speeds */
5352 		newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
5353 				    IBA7322_IBC_IBTA_1_2_MASK |
5354 				    IBA7322_IBC_MAX_SPEED_MASK;
5355 	else
5356 		newctrlb |= speed == QIB_IB_QDR ?
5357 			IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
5358 			((speed == QIB_IB_DDR ?
5359 			  IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
5360 
5361 	if (newctrlb == ppd->cpspec->ibcctrl_b)
5362 		return;
5363 
5364 	ppd->cpspec->ibcctrl_b = newctrlb;
5365 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5366 	qib_write_kreg(ppd->dd, kr_scratch, 0);
5367 }
5368 
5369 /*
5370  * This routine is only used when we are not talking to another
5371  * IB 1.2-compliant device that we think can do DDR.
5372  * (This includes all existing switch chips as of Oct 2007.)
5373  * 1.2-compliant devices go directly to DDR prior to reaching INIT
5374  */
5375 static void try_7322_autoneg(struct qib_pportdata *ppd)
5376 {
5377 	unsigned long flags;
5378 
5379 	spin_lock_irqsave(&ppd->lflags_lock, flags);
5380 	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5381 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5382 	qib_autoneg_7322_send(ppd, 0);
5383 	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5384 	qib_7322_mini_pcs_reset(ppd);
5385 	/* 2 msec is minimum length of a poll cycle */
5386 	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
5387 			   msecs_to_jiffies(2));
5388 }
5389 
5390 /*
5391  * Handle the empirically determined mechanism for auto-negotiation
5392  * of DDR speed with switches.
5393  */
5394 static void autoneg_7322_work(struct work_struct *work)
5395 {
5396 	struct qib_pportdata *ppd;
5397 	struct qib_devdata *dd;
5398 	u64 startms;
5399 	u32 i;
5400 	unsigned long flags;
5401 
5402 	ppd = container_of(work, struct qib_chippport_specific,
5403 			    autoneg_work.work)->ppd;
5404 	dd = ppd->dd;
5405 
5406 	startms = jiffies_to_msecs(jiffies);
5407 
5408 	/*
5409 	 * Busy wait for this first part, it should be at most a
5410 	 * few hundred usec, since we scheduled ourselves for 2msec.
5411 	 */
5412 	for (i = 0; i < 25; i++) {
5413 		if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5414 		     == IB_7322_LT_STATE_POLLQUIET) {
5415 			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5416 			break;
5417 		}
5418 		udelay(100);
5419 	}
5420 
5421 	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5422 		goto done; /* we got there early or told to stop */
5423 
5424 	/* we expect this to timeout */
5425 	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5426 			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5427 			       msecs_to_jiffies(90)))
5428 		goto done;
5429 	qib_7322_mini_pcs_reset(ppd);
5430 
5431 	/* we expect this to timeout */
5432 	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5433 			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5434 			       msecs_to_jiffies(1700)))
5435 		goto done;
5436 	qib_7322_mini_pcs_reset(ppd);
5437 
5438 	set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5439 
5440 	/*
5441 	 * Wait up to 250 msec for link to train and get to INIT at DDR;
5442 	 * this should terminate early.
5443 	 */
5444 	wait_event_timeout(ppd->cpspec->autoneg_wait,
5445 		!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5446 		msecs_to_jiffies(250));
5447 done:
5448 	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5449 		spin_lock_irqsave(&ppd->lflags_lock, flags);
5450 		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5451 		if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5452 			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5453 			ppd->cpspec->autoneg_tries = 0;
5454 		}
5455 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5456 		set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5457 	}
5458 }
5459 
5460 /*
5461  * This routine is used to request IPG set in the QLogic switch.
5462  * Only called if r1.
5463  */
5464 static void try_7322_ipg(struct qib_pportdata *ppd)
5465 {
5466 	struct qib_ibport *ibp = &ppd->ibport_data;
5467 	struct ib_mad_send_buf *send_buf;
5468 	struct ib_mad_agent *agent;
5469 	struct ib_smp *smp;
5470 	unsigned delay;
5471 	int ret;
5472 
5473 	agent = ibp->rvp.send_agent;
5474 	if (!agent)
5475 		goto retry;
5476 
5477 	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5478 				      IB_MGMT_MAD_DATA, GFP_ATOMIC,
5479 				      IB_MGMT_BASE_VERSION);
5480 	if (IS_ERR(send_buf))
5481 		goto retry;
5482 
5483 	if (!ibp->smi_ah) {
5484 		struct ib_ah *ah;
5485 
5486 		ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
5487 		if (IS_ERR(ah))
5488 			ret = PTR_ERR(ah);
5489 		else {
5490 			send_buf->ah = ah;
5491 			ibp->smi_ah = ibah_to_rvtah(ah);
5492 			ret = 0;
5493 		}
5494 	} else {
5495 		send_buf->ah = &ibp->smi_ah->ibah;
5496 		ret = 0;
5497 	}
5498 
5499 	smp = send_buf->mad;
5500 	smp->base_version = IB_MGMT_BASE_VERSION;
5501 	smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5502 	smp->class_version = 1;
5503 	smp->method = IB_MGMT_METHOD_SEND;
5504 	smp->hop_cnt = 1;
5505 	smp->attr_id = QIB_VENDOR_IPG;
5506 	smp->attr_mod = 0;
5507 
5508 	if (!ret)
5509 		ret = ib_post_send_mad(send_buf, NULL);
5510 	if (ret)
5511 		ib_free_send_mad(send_buf);
5512 retry:
5513 	delay = 2 << ppd->cpspec->ipg_tries;
5514 	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5515 			   msecs_to_jiffies(delay));
5516 }
5517 
5518 /*
5519  * Timeout handler for setting IPG.
5520  * Only called if r1.
5521  */
5522 static void ipg_7322_work(struct work_struct *work)
5523 {
5524 	struct qib_pportdata *ppd;
5525 
5526 	ppd = container_of(work, struct qib_chippport_specific,
5527 			   ipg_work.work)->ppd;
5528 	if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5529 	    && ++ppd->cpspec->ipg_tries <= 10)
5530 		try_7322_ipg(ppd);
5531 }
5532 
5533 static u32 qib_7322_iblink_state(u64 ibcs)
5534 {
5535 	u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5536 
5537 	switch (state) {
5538 	case IB_7322_L_STATE_INIT:
5539 		state = IB_PORT_INIT;
5540 		break;
5541 	case IB_7322_L_STATE_ARM:
5542 		state = IB_PORT_ARMED;
5543 		break;
5544 	case IB_7322_L_STATE_ACTIVE:
5545 		/* fall through */
5546 	case IB_7322_L_STATE_ACT_DEFER:
5547 		state = IB_PORT_ACTIVE;
5548 		break;
5549 	default: /* fall through */
5550 	case IB_7322_L_STATE_DOWN:
5551 		state = IB_PORT_DOWN;
5552 		break;
5553 	}
5554 	return state;
5555 }
5556 
5557 /* returns the IBTA port state, rather than the IBC link training state */
5558 static u8 qib_7322_phys_portstate(u64 ibcs)
5559 {
5560 	u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5561 	return qib_7322_physportstate[state];
5562 }
5563 
5564 static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5565 {
5566 	int ret = 0, symadj = 0;
5567 	unsigned long flags;
5568 	int mult;
5569 
5570 	spin_lock_irqsave(&ppd->lflags_lock, flags);
5571 	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5572 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5573 
5574 	/* Update our picture of width and speed from chip */
5575 	if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5576 		ppd->link_speed_active = QIB_IB_QDR;
5577 		mult = 4;
5578 	} else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5579 		ppd->link_speed_active = QIB_IB_DDR;
5580 		mult = 2;
5581 	} else {
5582 		ppd->link_speed_active = QIB_IB_SDR;
5583 		mult = 1;
5584 	}
5585 	if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5586 		ppd->link_width_active = IB_WIDTH_4X;
5587 		mult *= 4;
5588 	} else
5589 		ppd->link_width_active = IB_WIDTH_1X;
5590 	ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5591 
5592 	if (!ibup) {
5593 		u64 clr;
5594 
5595 		/* Link went down. */
5596 		/* do IPG MAD again after linkdown, even if last time failed */
5597 		ppd->cpspec->ipg_tries = 0;
5598 		clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5599 			(SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5600 			 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5601 		if (clr)
5602 			qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5603 		if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5604 				     QIBL_IB_AUTONEG_INPROG)))
5605 			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5606 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5607 			struct qib_qsfp_data *qd =
5608 				&ppd->cpspec->qsfp_data;
5609 			/* unlock the Tx settings, speed may change */
5610 			qib_write_kreg_port(ppd, krp_tx_deemph_override,
5611 				SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5612 				reset_tx_deemphasis_override));
5613 			qib_cancel_sends(ppd);
5614 			/* on link down, ensure sane pcs state */
5615 			qib_7322_mini_pcs_reset(ppd);
5616 			/* schedule the qsfp refresh which should turn the link
5617 			   off */
5618 			if (ppd->dd->flags & QIB_HAS_QSFP) {
5619 				qd->t_insert = jiffies;
5620 				queue_work(ib_wq, &qd->work);
5621 			}
5622 			spin_lock_irqsave(&ppd->sdma_lock, flags);
5623 			if (__qib_sdma_running(ppd))
5624 				__qib_sdma_process_event(ppd,
5625 					qib_sdma_event_e70_go_idle);
5626 			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5627 		}
5628 		clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5629 		if (clr == ppd->cpspec->iblnkdownsnap)
5630 			ppd->cpspec->iblnkdowndelta++;
5631 	} else {
5632 		if (qib_compat_ddr_negotiate &&
5633 		    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5634 				     QIBL_IB_AUTONEG_INPROG)) &&
5635 		    ppd->link_speed_active == QIB_IB_SDR &&
5636 		    (ppd->link_speed_enabled & QIB_IB_DDR)
5637 		    && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5638 			/* we are SDR, and auto-negotiation enabled */
5639 			++ppd->cpspec->autoneg_tries;
5640 			if (!ppd->cpspec->ibdeltainprog) {
5641 				ppd->cpspec->ibdeltainprog = 1;
5642 				ppd->cpspec->ibsymdelta +=
5643 					read_7322_creg32_port(ppd,
5644 						crp_ibsymbolerr) -
5645 						ppd->cpspec->ibsymsnap;
5646 				ppd->cpspec->iblnkerrdelta +=
5647 					read_7322_creg32_port(ppd,
5648 						crp_iblinkerrrecov) -
5649 						ppd->cpspec->iblnkerrsnap;
5650 			}
5651 			try_7322_autoneg(ppd);
5652 			ret = 1; /* no other IB status change processing */
5653 		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5654 			   ppd->link_speed_active == QIB_IB_SDR) {
5655 			qib_autoneg_7322_send(ppd, 1);
5656 			set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5657 			qib_7322_mini_pcs_reset(ppd);
5658 			udelay(2);
5659 			ret = 1; /* no other IB status change processing */
5660 		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5661 			   (ppd->link_speed_active & QIB_IB_DDR)) {
5662 			spin_lock_irqsave(&ppd->lflags_lock, flags);
5663 			ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5664 					 QIBL_IB_AUTONEG_FAILED);
5665 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5666 			ppd->cpspec->autoneg_tries = 0;
5667 			/* re-enable SDR, for next link down */
5668 			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5669 			wake_up(&ppd->cpspec->autoneg_wait);
5670 			symadj = 1;
5671 		} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5672 			/*
5673 			 * Clear autoneg failure flag, and do setup
5674 			 * so we'll try next time link goes down and
5675 			 * back to INIT (possibly connected to a
5676 			 * different device).
5677 			 */
5678 			spin_lock_irqsave(&ppd->lflags_lock, flags);
5679 			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5680 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5681 			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5682 			symadj = 1;
5683 		}
5684 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5685 			symadj = 1;
5686 			if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5687 				try_7322_ipg(ppd);
5688 			if (!ppd->cpspec->recovery_init)
5689 				setup_7322_link_recovery(ppd, 0);
5690 			ppd->cpspec->qdr_dfe_time = jiffies +
5691 				msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5692 		}
5693 		ppd->cpspec->ibmalfusesnap = 0;
5694 		ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5695 			crp_errlink);
5696 	}
5697 	if (symadj) {
5698 		ppd->cpspec->iblnkdownsnap =
5699 			read_7322_creg32_port(ppd, crp_iblinkdown);
5700 		if (ppd->cpspec->ibdeltainprog) {
5701 			ppd->cpspec->ibdeltainprog = 0;
5702 			ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5703 				crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5704 			ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5705 				crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5706 		}
5707 	} else if (!ibup && qib_compat_ddr_negotiate &&
5708 		   !ppd->cpspec->ibdeltainprog &&
5709 			!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5710 		ppd->cpspec->ibdeltainprog = 1;
5711 		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5712 			crp_ibsymbolerr);
5713 		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5714 			crp_iblinkerrrecov);
5715 	}
5716 
5717 	if (!ret)
5718 		qib_setup_7322_setextled(ppd, ibup);
5719 	return ret;
5720 }
5721 
5722 /*
5723  * Does read/modify/write to appropriate registers to
5724  * set output and direction bits selected by mask.
5725  * these are in their canonical postions (e.g. lsb of
5726  * dir will end up in D48 of extctrl on existing chips).
5727  * returns contents of GP Inputs.
5728  */
5729 static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5730 {
5731 	u64 read_val, new_out;
5732 	unsigned long flags;
5733 
5734 	if (mask) {
5735 		/* some bits being written, lock access to GPIO */
5736 		dir &= mask;
5737 		out &= mask;
5738 		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5739 		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5740 		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5741 		new_out = (dd->cspec->gpio_out & ~mask) | out;
5742 
5743 		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5744 		qib_write_kreg(dd, kr_gpio_out, new_out);
5745 		dd->cspec->gpio_out = new_out;
5746 		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5747 	}
5748 	/*
5749 	 * It is unlikely that a read at this time would get valid
5750 	 * data on a pin whose direction line was set in the same
5751 	 * call to this function. We include the read here because
5752 	 * that allows us to potentially combine a change on one pin with
5753 	 * a read on another, and because the old code did something like
5754 	 * this.
5755 	 */
5756 	read_val = qib_read_kreg64(dd, kr_extstatus);
5757 	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5758 }
5759 
5760 /* Enable writes to config EEPROM, if possible. Returns previous state */
5761 static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5762 {
5763 	int prev_wen;
5764 	u32 mask;
5765 
5766 	mask = 1 << QIB_EEPROM_WEN_NUM;
5767 	prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5768 	gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5769 
5770 	return prev_wen & 1;
5771 }
5772 
5773 /*
5774  * Read fundamental info we need to use the chip.  These are
5775  * the registers that describe chip capabilities, and are
5776  * saved in shadow registers.
5777  */
5778 static void get_7322_chip_params(struct qib_devdata *dd)
5779 {
5780 	u64 val;
5781 	u32 piobufs;
5782 	int mtu;
5783 
5784 	dd->palign = qib_read_kreg32(dd, kr_pagealign);
5785 
5786 	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5787 
5788 	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5789 	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5790 	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5791 	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5792 	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5793 
5794 	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5795 	dd->piobcnt2k = val & ~0U;
5796 	dd->piobcnt4k = val >> 32;
5797 	val = qib_read_kreg64(dd, kr_sendpiosize);
5798 	dd->piosize2k = val & ~0U;
5799 	dd->piosize4k = val >> 32;
5800 
5801 	mtu = ib_mtu_enum_to_int(qib_ibmtu);
5802 	if (mtu == -1)
5803 		mtu = QIB_DEFAULT_MTU;
5804 	dd->pport[0].ibmtu = (u32)mtu;
5805 	dd->pport[1].ibmtu = (u32)mtu;
5806 
5807 	/* these may be adjusted in init_chip_wc_pat() */
5808 	dd->pio2kbase = (u32 __iomem *)
5809 		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5810 	dd->pio4kbase = (u32 __iomem *)
5811 		((char __iomem *) dd->kregbase +
5812 		 (dd->piobufbase >> 32));
5813 	/*
5814 	 * 4K buffers take 2 pages; we use roundup just to be
5815 	 * paranoid; we calculate it once here, rather than on
5816 	 * ever buf allocate
5817 	 */
5818 	dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5819 
5820 	piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5821 
5822 	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5823 		(sizeof(u64) * BITS_PER_BYTE / 2);
5824 }
5825 
5826 /*
5827  * The chip base addresses in cspec and cpspec have to be set
5828  * after possible init_chip_wc_pat(), rather than in
5829  * get_7322_chip_params(), so split out as separate function
5830  */
5831 static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5832 {
5833 	u32 cregbase;
5834 
5835 	cregbase = qib_read_kreg32(dd, kr_counterregbase);
5836 
5837 	dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5838 		(char __iomem *)dd->kregbase);
5839 
5840 	dd->egrtidbase = (u64 __iomem *)
5841 		((char __iomem *) dd->kregbase + dd->rcvegrbase);
5842 
5843 	/* port registers are defined as relative to base of chip */
5844 	dd->pport[0].cpspec->kpregbase =
5845 		(u64 __iomem *)((char __iomem *)dd->kregbase);
5846 	dd->pport[1].cpspec->kpregbase =
5847 		(u64 __iomem *)(dd->palign +
5848 		(char __iomem *)dd->kregbase);
5849 	dd->pport[0].cpspec->cpregbase =
5850 		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5851 		kr_counterregbase) + (char __iomem *)dd->kregbase);
5852 	dd->pport[1].cpspec->cpregbase =
5853 		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5854 		kr_counterregbase) + (char __iomem *)dd->kregbase);
5855 }
5856 
5857 /*
5858  * This is a fairly special-purpose observer, so we only support
5859  * the port-specific parts of SendCtrl
5860  */
5861 
5862 #define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |		\
5863 			   SYM_MASK(SendCtrl_0, SDmaEnable) |		\
5864 			   SYM_MASK(SendCtrl_0, SDmaIntEnable) |	\
5865 			   SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5866 			   SYM_MASK(SendCtrl_0, SDmaHalt) |		\
5867 			   SYM_MASK(SendCtrl_0, IBVLArbiterEn) |	\
5868 			   SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5869 
5870 static int sendctrl_hook(struct qib_devdata *dd,
5871 			 const struct diag_observer *op, u32 offs,
5872 			 u64 *data, u64 mask, int only_32)
5873 {
5874 	unsigned long flags;
5875 	unsigned idx;
5876 	unsigned pidx;
5877 	struct qib_pportdata *ppd = NULL;
5878 	u64 local_data, all_bits;
5879 
5880 	/*
5881 	 * The fixed correspondence between Physical ports and pports is
5882 	 * severed. We need to hunt for the ppd that corresponds
5883 	 * to the offset we got. And we have to do that without admitting
5884 	 * we know the stride, apparently.
5885 	 */
5886 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5887 		u64 __iomem *psptr;
5888 		u32 psoffs;
5889 
5890 		ppd = dd->pport + pidx;
5891 		if (!ppd->cpspec->kpregbase)
5892 			continue;
5893 
5894 		psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5895 		psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5896 		if (psoffs == offs)
5897 			break;
5898 	}
5899 
5900 	/* If pport is not being managed by driver, just avoid shadows. */
5901 	if (pidx >= dd->num_pports)
5902 		ppd = NULL;
5903 
5904 	/* In any case, "idx" is flat index in kreg space */
5905 	idx = offs / sizeof(u64);
5906 
5907 	all_bits = ~0ULL;
5908 	if (only_32)
5909 		all_bits >>= 32;
5910 
5911 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
5912 	if (!ppd || (mask & all_bits) != all_bits) {
5913 		/*
5914 		 * At least some mask bits are zero, so we need
5915 		 * to read. The judgement call is whether from
5916 		 * reg or shadow. First-cut: read reg, and complain
5917 		 * if any bits which should be shadowed are different
5918 		 * from their shadowed value.
5919 		 */
5920 		if (only_32)
5921 			local_data = (u64)qib_read_kreg32(dd, idx);
5922 		else
5923 			local_data = qib_read_kreg64(dd, idx);
5924 		*data = (local_data & ~mask) | (*data & mask);
5925 	}
5926 	if (mask) {
5927 		/*
5928 		 * At least some mask bits are one, so we need
5929 		 * to write, but only shadow some bits.
5930 		 */
5931 		u64 sval, tval; /* Shadowed, transient */
5932 
5933 		/*
5934 		 * New shadow val is bits we don't want to touch,
5935 		 * ORed with bits we do, that are intended for shadow.
5936 		 */
5937 		if (ppd) {
5938 			sval = ppd->p_sendctrl & ~mask;
5939 			sval |= *data & SENDCTRL_SHADOWED & mask;
5940 			ppd->p_sendctrl = sval;
5941 		} else
5942 			sval = *data & SENDCTRL_SHADOWED & mask;
5943 		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5944 		qib_write_kreg(dd, idx, tval);
5945 		qib_write_kreg(dd, kr_scratch, 0Ull);
5946 	}
5947 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5948 	return only_32 ? 4 : 8;
5949 }
5950 
5951 static const struct diag_observer sendctrl_0_observer = {
5952 	sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5953 	KREG_IDX(SendCtrl_0) * sizeof(u64)
5954 };
5955 
5956 static const struct diag_observer sendctrl_1_observer = {
5957 	sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5958 	KREG_IDX(SendCtrl_1) * sizeof(u64)
5959 };
5960 
5961 static ushort sdma_fetch_prio = 8;
5962 module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5963 MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5964 
5965 /* Besides logging QSFP events, we set appropriate TxDDS values */
5966 static void init_txdds_table(struct qib_pportdata *ppd, int override);
5967 
5968 static void qsfp_7322_event(struct work_struct *work)
5969 {
5970 	struct qib_qsfp_data *qd;
5971 	struct qib_pportdata *ppd;
5972 	unsigned long pwrup;
5973 	unsigned long flags;
5974 	int ret;
5975 	u32 le2;
5976 
5977 	qd = container_of(work, struct qib_qsfp_data, work);
5978 	ppd = qd->ppd;
5979 	pwrup = qd->t_insert +
5980 		msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
5981 
5982 	/* Delay for 20 msecs to allow ModPrs resistor to setup */
5983 	mdelay(QSFP_MODPRS_LAG_MSEC);
5984 
5985 	if (!qib_qsfp_mod_present(ppd)) {
5986 		ppd->cpspec->qsfp_data.modpresent = 0;
5987 		/* Set the physical link to disabled */
5988 		qib_set_ib_7322_lstate(ppd, 0,
5989 				       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
5990 		spin_lock_irqsave(&ppd->lflags_lock, flags);
5991 		ppd->lflags &= ~QIBL_LINKV;
5992 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5993 	} else {
5994 		/*
5995 		 * Some QSFP's not only do not respond until the full power-up
5996 		 * time, but may behave badly if we try. So hold off responding
5997 		 * to insertion.
5998 		 */
5999 		while (1) {
6000 			if (time_is_before_jiffies(pwrup))
6001 				break;
6002 			msleep(20);
6003 		}
6004 
6005 		ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
6006 
6007 		/*
6008 		 * Need to change LE2 back to defaults if we couldn't
6009 		 * read the cable type (to handle cable swaps), so do this
6010 		 * even on failure to read cable information.  We don't
6011 		 * get here for QME, so IS_QME check not needed here.
6012 		 */
6013 		if (!ret && !ppd->dd->cspec->r1) {
6014 			if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
6015 				le2 = LE2_QME;
6016 			else if (qd->cache.atten[1] >= qib_long_atten &&
6017 				 QSFP_IS_CU(qd->cache.tech))
6018 				le2 = LE2_5m;
6019 			else
6020 				le2 = LE2_DEFAULT;
6021 		} else
6022 			le2 = LE2_DEFAULT;
6023 		ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
6024 		/*
6025 		 * We always change parameteters, since we can choose
6026 		 * values for cables without eeproms, and the cable may have
6027 		 * changed from a cable with full or partial eeprom content
6028 		 * to one with partial or no content.
6029 		 */
6030 		init_txdds_table(ppd, 0);
6031 		/* The physical link is being re-enabled only when the
6032 		 * previous state was DISABLED and the VALID bit is not
6033 		 * set. This should only happen when  the cable has been
6034 		 * physically pulled. */
6035 		if (!ppd->cpspec->qsfp_data.modpresent &&
6036 		    (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
6037 			ppd->cpspec->qsfp_data.modpresent = 1;
6038 			qib_set_ib_7322_lstate(ppd, 0,
6039 				QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6040 			spin_lock_irqsave(&ppd->lflags_lock, flags);
6041 			ppd->lflags |= QIBL_LINKV;
6042 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
6043 		}
6044 	}
6045 }
6046 
6047 /*
6048  * There is little we can do but complain to the user if QSFP
6049  * initialization fails.
6050  */
6051 static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
6052 {
6053 	unsigned long flags;
6054 	struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
6055 	struct qib_devdata *dd = ppd->dd;
6056 	u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
6057 
6058 	mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
6059 	qd->ppd = ppd;
6060 	qib_qsfp_init(qd, qsfp_7322_event);
6061 	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
6062 	dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
6063 	dd->cspec->gpio_mask |= mod_prs_bit;
6064 	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
6065 	qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
6066 	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
6067 }
6068 
6069 /*
6070  * called at device initialization time, and also if the txselect
6071  * module parameter is changed.  This is used for cables that don't
6072  * have valid QSFP EEPROMs (not present, or attenuation is zero).
6073  * We initialize to the default, then if there is a specific
6074  * unit,port match, we use that (and set it immediately, for the
6075  * current speed, if the link is at INIT or better).
6076  * String format is "default# unit#,port#=# ... u,p=#", separators must
6077  * be a SPACE character.  A newline terminates.  The u,p=# tuples may
6078  * optionally have "u,p=#,#", where the final # is the H1 value
6079  * The last specific match is used (actually, all are used, but last
6080  * one is the one that winds up set); if none at all, fall back on default.
6081  */
6082 static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
6083 {
6084 	char *nxt, *str;
6085 	u32 pidx, unit, port, deflt, h1;
6086 	unsigned long val;
6087 	int any = 0, seth1;
6088 	int txdds_size;
6089 
6090 	str = txselect_list;
6091 
6092 	/* default number is validated in setup_txselect() */
6093 	deflt = simple_strtoul(str, &nxt, 0);
6094 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
6095 		dd->pport[pidx].cpspec->no_eep = deflt;
6096 
6097 	txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
6098 	if (IS_QME(dd) || IS_QMH(dd))
6099 		txdds_size += TXDDS_MFG_SZ;
6100 
6101 	while (*nxt && nxt[1]) {
6102 		str = ++nxt;
6103 		unit = simple_strtoul(str, &nxt, 0);
6104 		if (nxt == str || !*nxt || *nxt != ',') {
6105 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6106 				;
6107 			continue;
6108 		}
6109 		str = ++nxt;
6110 		port = simple_strtoul(str, &nxt, 0);
6111 		if (nxt == str || *nxt != '=') {
6112 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6113 				;
6114 			continue;
6115 		}
6116 		str = ++nxt;
6117 		val = simple_strtoul(str, &nxt, 0);
6118 		if (nxt == str) {
6119 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6120 				;
6121 			continue;
6122 		}
6123 		if (val >= txdds_size)
6124 			continue;
6125 		seth1 = 0;
6126 		h1 = 0; /* gcc thinks it might be used uninitted */
6127 		if (*nxt == ',' && nxt[1]) {
6128 			str = ++nxt;
6129 			h1 = (u32)simple_strtoul(str, &nxt, 0);
6130 			if (nxt == str)
6131 				while (*nxt && *nxt++ != ' ') /* skip */
6132 					;
6133 			else
6134 				seth1 = 1;
6135 		}
6136 		for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
6137 		     ++pidx) {
6138 			struct qib_pportdata *ppd = &dd->pport[pidx];
6139 
6140 			if (ppd->port != port || !ppd->link_speed_supported)
6141 				continue;
6142 			ppd->cpspec->no_eep = val;
6143 			if (seth1)
6144 				ppd->cpspec->h1_val = h1;
6145 			/* now change the IBC and serdes, overriding generic */
6146 			init_txdds_table(ppd, 1);
6147 			/* Re-enable the physical state machine on mezz boards
6148 			 * now that the correct settings have been set.
6149 			 * QSFP boards are handles by the QSFP event handler */
6150 			if (IS_QMH(dd) || IS_QME(dd))
6151 				qib_set_ib_7322_lstate(ppd, 0,
6152 					    QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6153 			any++;
6154 		}
6155 		if (*nxt == '\n')
6156 			break; /* done */
6157 	}
6158 	if (change && !any) {
6159 		/* no specific setting, use the default.
6160 		 * Change the IBC and serdes, but since it's
6161 		 * general, don't override specific settings.
6162 		 */
6163 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
6164 			if (dd->pport[pidx].link_speed_supported)
6165 				init_txdds_table(&dd->pport[pidx], 0);
6166 	}
6167 }
6168 
6169 /* handle the txselect parameter changing */
6170 static int setup_txselect(const char *str, struct kernel_param *kp)
6171 {
6172 	struct qib_devdata *dd;
6173 	unsigned long val;
6174 	char *n;
6175 
6176 	if (strlen(str) >= MAX_ATTEN_LEN) {
6177 		pr_info("txselect_values string too long\n");
6178 		return -ENOSPC;
6179 	}
6180 	val = simple_strtoul(str, &n, 0);
6181 	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
6182 				TXDDS_MFG_SZ)) {
6183 		pr_info("txselect_values must start with a number < %d\n",
6184 			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
6185 		return -EINVAL;
6186 	}
6187 	strcpy(txselect_list, str);
6188 
6189 	list_for_each_entry(dd, &qib_dev_list, list)
6190 		if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
6191 			set_no_qsfp_atten(dd, 1);
6192 	return 0;
6193 }
6194 
6195 /*
6196  * Write the final few registers that depend on some of the
6197  * init setup.  Done late in init, just before bringing up
6198  * the serdes.
6199  */
6200 static int qib_late_7322_initreg(struct qib_devdata *dd)
6201 {
6202 	int ret = 0, n;
6203 	u64 val;
6204 
6205 	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
6206 	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
6207 	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
6208 	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
6209 	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
6210 	if (val != dd->pioavailregs_phys) {
6211 		qib_dev_err(dd,
6212 			"Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
6213 			(unsigned long) dd->pioavailregs_phys,
6214 			(unsigned long long) val);
6215 		ret = -EINVAL;
6216 	}
6217 
6218 	n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
6219 	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
6220 	/* driver sends get pkey, lid, etc. checking also, to catch bugs */
6221 	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
6222 
6223 	qib_register_observer(dd, &sendctrl_0_observer);
6224 	qib_register_observer(dd, &sendctrl_1_observer);
6225 
6226 	dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
6227 	qib_write_kreg(dd, kr_control, dd->control);
6228 	/*
6229 	 * Set SendDmaFetchPriority and init Tx params, including
6230 	 * QSFP handler on boards that have QSFP.
6231 	 * First set our default attenuation entry for cables that
6232 	 * don't have valid attenuation.
6233 	 */
6234 	set_no_qsfp_atten(dd, 0);
6235 	for (n = 0; n < dd->num_pports; ++n) {
6236 		struct qib_pportdata *ppd = dd->pport + n;
6237 
6238 		qib_write_kreg_port(ppd, krp_senddmaprioritythld,
6239 				    sdma_fetch_prio & 0xf);
6240 		/* Initialize qsfp if present on board. */
6241 		if (dd->flags & QIB_HAS_QSFP)
6242 			qib_init_7322_qsfp(ppd);
6243 	}
6244 	dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
6245 	qib_write_kreg(dd, kr_control, dd->control);
6246 
6247 	return ret;
6248 }
6249 
6250 /* per IB port errors.  */
6251 #define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
6252 	MASK_ACROSS(8, 15))
6253 #define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
6254 #define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
6255 	MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
6256 	MASK_ACROSS(0, 11))
6257 
6258 /*
6259  * Write the initialization per-port registers that need to be done at
6260  * driver load and after reset completes (i.e., that aren't done as part
6261  * of other init procedures called from qib_init.c).
6262  * Some of these should be redundant on reset, but play safe.
6263  */
6264 static void write_7322_init_portregs(struct qib_pportdata *ppd)
6265 {
6266 	u64 val;
6267 	int i;
6268 
6269 	if (!ppd->link_speed_supported) {
6270 		/* no buffer credits for this port */
6271 		for (i = 1; i < 8; i++)
6272 			qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
6273 		qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
6274 		qib_write_kreg(ppd->dd, kr_scratch, 0);
6275 		return;
6276 	}
6277 
6278 	/*
6279 	 * Set the number of supported virtual lanes in IBC,
6280 	 * for flow control packet handling on unsupported VLs
6281 	 */
6282 	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
6283 	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
6284 	val |= (u64)(ppd->vls_supported - 1) <<
6285 		SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
6286 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
6287 
6288 	qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
6289 
6290 	/* enable tx header checking */
6291 	qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
6292 			    IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
6293 			    IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
6294 
6295 	qib_write_kreg_port(ppd, krp_ncmodectrl,
6296 		SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
6297 
6298 	/*
6299 	 * Unconditionally clear the bufmask bits.  If SDMA is
6300 	 * enabled, we'll set them appropriately later.
6301 	 */
6302 	qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
6303 	qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
6304 	qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
6305 	if (ppd->dd->cspec->r1)
6306 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
6307 }
6308 
6309 /*
6310  * Write the initialization per-device registers that need to be done at
6311  * driver load and after reset completes (i.e., that aren't done as part
6312  * of other init procedures called from qib_init.c).  Also write per-port
6313  * registers that are affected by overall device config, such as QP mapping
6314  * Some of these should be redundant on reset, but play safe.
6315  */
6316 static void write_7322_initregs(struct qib_devdata *dd)
6317 {
6318 	struct qib_pportdata *ppd;
6319 	int i, pidx;
6320 	u64 val;
6321 
6322 	/* Set Multicast QPs received by port 2 to map to context one. */
6323 	qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
6324 
6325 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
6326 		unsigned n, regno;
6327 		unsigned long flags;
6328 
6329 		if (dd->n_krcv_queues < 2 ||
6330 			!dd->pport[pidx].link_speed_supported)
6331 			continue;
6332 
6333 		ppd = &dd->pport[pidx];
6334 
6335 		/* be paranoid against later code motion, etc. */
6336 		spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
6337 		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
6338 		spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
6339 
6340 		/* Initialize QP to context mapping */
6341 		regno = krp_rcvqpmaptable;
6342 		val = 0;
6343 		if (dd->num_pports > 1)
6344 			n = dd->first_user_ctxt / dd->num_pports;
6345 		else
6346 			n = dd->first_user_ctxt - 1;
6347 		for (i = 0; i < 32; ) {
6348 			unsigned ctxt;
6349 
6350 			if (dd->num_pports > 1)
6351 				ctxt = (i % n) * dd->num_pports + pidx;
6352 			else if (i % n)
6353 				ctxt = (i % n) + 1;
6354 			else
6355 				ctxt = ppd->hw_pidx;
6356 			val |= ctxt << (5 * (i % 6));
6357 			i++;
6358 			if (i % 6 == 0) {
6359 				qib_write_kreg_port(ppd, regno, val);
6360 				val = 0;
6361 				regno++;
6362 			}
6363 		}
6364 		qib_write_kreg_port(ppd, regno, val);
6365 	}
6366 
6367 	/*
6368 	 * Setup up interrupt mitigation for kernel contexts, but
6369 	 * not user contexts (user contexts use interrupts when
6370 	 * stalled waiting for any packet, so want those interrupts
6371 	 * right away).
6372 	 */
6373 	for (i = 0; i < dd->first_user_ctxt; i++) {
6374 		dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
6375 		qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
6376 	}
6377 
6378 	/*
6379 	 * Initialize  as (disabled) rcvflow tables.  Application code
6380 	 * will setup each flow as it uses the flow.
6381 	 * Doesn't clear any of the error bits that might be set.
6382 	 */
6383 	val = TIDFLOW_ERRBITS; /* these are W1C */
6384 	for (i = 0; i < dd->cfgctxts; i++) {
6385 		int flow;
6386 
6387 		for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6388 			qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6389 	}
6390 
6391 	/*
6392 	 * dual cards init to dual port recovery, single port cards to
6393 	 * the one port.  Dual port cards may later adjust to 1 port,
6394 	 * and then back to dual port if both ports are connected
6395 	 * */
6396 	if (dd->num_pports)
6397 		setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
6398 }
6399 
6400 static int qib_init_7322_variables(struct qib_devdata *dd)
6401 {
6402 	struct qib_pportdata *ppd;
6403 	unsigned features, pidx, sbufcnt;
6404 	int ret, mtu;
6405 	u32 sbufs, updthresh;
6406 	resource_size_t vl15off;
6407 
6408 	/* pport structs are contiguous, allocated after devdata */
6409 	ppd = (struct qib_pportdata *)(dd + 1);
6410 	dd->pport = ppd;
6411 	ppd[0].dd = dd;
6412 	ppd[1].dd = dd;
6413 
6414 	dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6415 
6416 	ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6417 	ppd[1].cpspec = &ppd[0].cpspec[1];
6418 	ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
6419 	ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
6420 
6421 	spin_lock_init(&dd->cspec->rcvmod_lock);
6422 	spin_lock_init(&dd->cspec->gpio_lock);
6423 
6424 	/* we haven't yet set QIB_PRESENT, so use read directly */
6425 	dd->revision = readq(&dd->kregbase[kr_revision]);
6426 
6427 	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
6428 		qib_dev_err(dd,
6429 			"Revision register read failure, giving up initialization\n");
6430 		ret = -ENODEV;
6431 		goto bail;
6432 	}
6433 	dd->flags |= QIB_PRESENT;  /* now register routines work */
6434 
6435 	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
6436 	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
6437 	dd->cspec->r1 = dd->minrev == 1;
6438 
6439 	get_7322_chip_params(dd);
6440 	features = qib_7322_boardname(dd);
6441 
6442 	/* now that piobcnt2k and 4k set, we can allocate these */
6443 	sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
6444 		NUM_VL15_BUFS + BITS_PER_LONG - 1;
6445 	sbufcnt /= BITS_PER_LONG;
6446 	dd->cspec->sendchkenable = kmalloc(sbufcnt *
6447 		sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
6448 	dd->cspec->sendgrhchk = kmalloc(sbufcnt *
6449 		sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
6450 	dd->cspec->sendibchk = kmalloc(sbufcnt *
6451 		sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
6452 	if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
6453 		!dd->cspec->sendibchk) {
6454 		ret = -ENOMEM;
6455 		goto bail;
6456 	}
6457 
6458 	ppd = dd->pport;
6459 
6460 	/*
6461 	 * GPIO bits for TWSI data and clock,
6462 	 * used for serial EEPROM.
6463 	 */
6464 	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
6465 	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
6466 	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
6467 
6468 	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
6469 		QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
6470 		QIB_HAS_THRESH_UPDATE |
6471 		(sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
6472 	dd->flags |= qib_special_trigger ?
6473 		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
6474 
6475 	/*
6476 	 * Setup initial values.  These may change when PAT is enabled, but
6477 	 * we need these to do initial chip register accesses.
6478 	 */
6479 	qib_7322_set_baseaddrs(dd);
6480 
6481 	mtu = ib_mtu_enum_to_int(qib_ibmtu);
6482 	if (mtu == -1)
6483 		mtu = QIB_DEFAULT_MTU;
6484 
6485 	dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
6486 	/* all hwerrors become interrupts, unless special purposed */
6487 	dd->cspec->hwerrmask = ~0ULL;
6488 	/*  link_recovery setup causes these errors, so ignore them,
6489 	 *  other than clearing them when they occur */
6490 	dd->cspec->hwerrmask &=
6491 		~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6492 		  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6493 		  HWE_MASK(LATriggered));
6494 
6495 	for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6496 		struct qib_chippport_specific *cp = ppd->cpspec;
6497 
6498 		ppd->link_speed_supported = features & PORT_SPD_CAP;
6499 		features >>=  PORT_SPD_CAP_SHIFT;
6500 		if (!ppd->link_speed_supported) {
6501 			/* single port mode (7340, or configured) */
6502 			dd->skip_kctxt_mask |= 1 << pidx;
6503 			if (pidx == 0) {
6504 				/* Make sure port is disabled. */
6505 				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6506 				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6507 				ppd[0] = ppd[1];
6508 				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6509 						  IBSerdesPClkNotDetectMask_0)
6510 						  | SYM_MASK(HwErrMask,
6511 						  SDmaMemReadErrMask_0));
6512 				dd->cspec->int_enable_mask &= ~(
6513 				     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6514 				     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6515 				     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6516 				     SYM_MASK(IntMask, SDmaIntMask_0) |
6517 				     SYM_MASK(IntMask, ErrIntMask_0) |
6518 				     SYM_MASK(IntMask, SendDoneIntMask_0));
6519 			} else {
6520 				/* Make sure port is disabled. */
6521 				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6522 				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6523 				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6524 						  IBSerdesPClkNotDetectMask_1)
6525 						  | SYM_MASK(HwErrMask,
6526 						  SDmaMemReadErrMask_1));
6527 				dd->cspec->int_enable_mask &= ~(
6528 				     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6529 				     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6530 				     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6531 				     SYM_MASK(IntMask, SDmaIntMask_1) |
6532 				     SYM_MASK(IntMask, ErrIntMask_1) |
6533 				     SYM_MASK(IntMask, SendDoneIntMask_1));
6534 			}
6535 			continue;
6536 		}
6537 
6538 		dd->num_pports++;
6539 		ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6540 		if (ret) {
6541 			dd->num_pports--;
6542 			goto bail;
6543 		}
6544 
6545 		ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6546 		ppd->link_width_enabled = IB_WIDTH_4X;
6547 		ppd->link_speed_enabled = ppd->link_speed_supported;
6548 		/*
6549 		 * Set the initial values to reasonable default, will be set
6550 		 * for real when link is up.
6551 		 */
6552 		ppd->link_width_active = IB_WIDTH_4X;
6553 		ppd->link_speed_active = QIB_IB_SDR;
6554 		ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6555 		switch (qib_num_cfg_vls) {
6556 		case 1:
6557 			ppd->vls_supported = IB_VL_VL0;
6558 			break;
6559 		case 2:
6560 			ppd->vls_supported = IB_VL_VL0_1;
6561 			break;
6562 		default:
6563 			qib_devinfo(dd->pcidev,
6564 				    "Invalid num_vls %u, using 4 VLs\n",
6565 				    qib_num_cfg_vls);
6566 			qib_num_cfg_vls = 4;
6567 			/* fall through */
6568 		case 4:
6569 			ppd->vls_supported = IB_VL_VL0_3;
6570 			break;
6571 		case 8:
6572 			if (mtu <= 2048)
6573 				ppd->vls_supported = IB_VL_VL0_7;
6574 			else {
6575 				qib_devinfo(dd->pcidev,
6576 					    "Invalid num_vls %u for MTU %d , using 4 VLs\n",
6577 					    qib_num_cfg_vls, mtu);
6578 				ppd->vls_supported = IB_VL_VL0_3;
6579 				qib_num_cfg_vls = 4;
6580 			}
6581 			break;
6582 		}
6583 		ppd->vls_operational = ppd->vls_supported;
6584 
6585 		init_waitqueue_head(&cp->autoneg_wait);
6586 		INIT_DELAYED_WORK(&cp->autoneg_work,
6587 				  autoneg_7322_work);
6588 		if (ppd->dd->cspec->r1)
6589 			INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6590 
6591 		/*
6592 		 * For Mez and similar cards, no qsfp info, so do
6593 		 * the "cable info" setup here.  Can be overridden
6594 		 * in adapter-specific routines.
6595 		 */
6596 		if (!(dd->flags & QIB_HAS_QSFP)) {
6597 			if (!IS_QMH(dd) && !IS_QME(dd))
6598 				qib_devinfo(dd->pcidev,
6599 					"IB%u:%u: Unknown mezzanine card type\n",
6600 					dd->unit, ppd->port);
6601 			cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
6602 			/*
6603 			 * Choose center value as default tx serdes setting
6604 			 * until changed through module parameter.
6605 			 */
6606 			ppd->cpspec->no_eep = IS_QMH(dd) ?
6607 				TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6608 		} else
6609 			cp->h1_val = H1_FORCE_VAL;
6610 
6611 		/* Avoid writes to chip for mini_init */
6612 		if (!qib_mini_init)
6613 			write_7322_init_portregs(ppd);
6614 
6615 		init_timer(&cp->chase_timer);
6616 		cp->chase_timer.function = reenable_chase;
6617 		cp->chase_timer.data = (unsigned long)ppd;
6618 
6619 		ppd++;
6620 	}
6621 
6622 	dd->rcvhdrentsize = qib_rcvhdrentsize ?
6623 		qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6624 	dd->rcvhdrsize = qib_rcvhdrsize ?
6625 		qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
6626 	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6627 
6628 	/* we always allocate at least 2048 bytes for eager buffers */
6629 	dd->rcvegrbufsize = max(mtu, 2048);
6630 	BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
6631 	dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
6632 
6633 	qib_7322_tidtemplate(dd);
6634 
6635 	/*
6636 	 * We can request a receive interrupt for 1 or
6637 	 * more packets from current offset.
6638 	 */
6639 	dd->rhdrhead_intr_off =
6640 		(u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6641 
6642 	/* setup the stats timer; the add_timer is done at end of init */
6643 	init_timer(&dd->stats_timer);
6644 	dd->stats_timer.function = qib_get_7322_faststats;
6645 	dd->stats_timer.data = (unsigned long) dd;
6646 
6647 	dd->ureg_align = 0x10000;  /* 64KB alignment */
6648 
6649 	dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6650 
6651 	qib_7322_config_ctxts(dd);
6652 	qib_set_ctxtcnt(dd);
6653 
6654 	/*
6655 	 * We do not set WC on the VL15 buffers to avoid
6656 	 * a rare problem with unaligned writes from
6657 	 * interrupt-flushed store buffers, so we need
6658 	 * to map those separately here.  We can't solve
6659 	 * this for the rarely used mtrr case.
6660 	 */
6661 	ret = init_chip_wc_pat(dd, 0);
6662 	if (ret)
6663 		goto bail;
6664 
6665 	/* vl15 buffers start just after the 4k buffers */
6666 	vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6667 		  dd->piobcnt4k * dd->align4k;
6668 	dd->piovl15base	= ioremap_nocache(vl15off,
6669 					  NUM_VL15_BUFS * dd->align4k);
6670 	if (!dd->piovl15base) {
6671 		ret = -ENOMEM;
6672 		goto bail;
6673 	}
6674 
6675 	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6676 
6677 	ret = 0;
6678 	if (qib_mini_init)
6679 		goto bail;
6680 	if (!dd->num_pports) {
6681 		qib_dev_err(dd, "No ports enabled, giving up initialization\n");
6682 		goto bail; /* no error, so can still figure out why err */
6683 	}
6684 
6685 	write_7322_initregs(dd);
6686 	ret = qib_create_ctxts(dd);
6687 	init_7322_cntrnames(dd);
6688 
6689 	updthresh = 8U; /* update threshold */
6690 
6691 	/* use all of 4KB buffers for the kernel SDMA, zero if !SDMA.
6692 	 * reserve the update threshold amount for other kernel use, such
6693 	 * as sending SMI, MAD, and ACKs, or 3, whichever is greater,
6694 	 * unless we aren't enabling SDMA, in which case we want to use
6695 	 * all the 4k bufs for the kernel.
6696 	 * if this was less than the update threshold, we could wait
6697 	 * a long time for an update.  Coded this way because we
6698 	 * sometimes change the update threshold for various reasons,
6699 	 * and we want this to remain robust.
6700 	 */
6701 	if (dd->flags & QIB_HAS_SEND_DMA) {
6702 		dd->cspec->sdmabufcnt = dd->piobcnt4k;
6703 		sbufs = updthresh > 3 ? updthresh : 3;
6704 	} else {
6705 		dd->cspec->sdmabufcnt = 0;
6706 		sbufs = dd->piobcnt4k;
6707 	}
6708 	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6709 		dd->cspec->sdmabufcnt;
6710 	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6711 	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
6712 	dd->last_pio = dd->cspec->lastbuf_for_pio;
6713 	dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6714 		dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
6715 
6716 	/*
6717 	 * If we have 16 user contexts, we will have 7 sbufs
6718 	 * per context, so reduce the update threshold to match.  We
6719 	 * want to update before we actually run out, at low pbufs/ctxt
6720 	 * so give ourselves some margin.
6721 	 */
6722 	if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6723 		updthresh = dd->pbufsctxt - 2;
6724 	dd->cspec->updthresh_dflt = updthresh;
6725 	dd->cspec->updthresh = updthresh;
6726 
6727 	/* before full enable, no interrupts, no locking needed */
6728 	dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6729 			     << SYM_LSB(SendCtrl, AvailUpdThld)) |
6730 			SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6731 
6732 	dd->psxmitwait_supported = 1;
6733 	dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6734 bail:
6735 	if (!dd->ctxtcnt)
6736 		dd->ctxtcnt = 1; /* for other initialization code */
6737 
6738 	return ret;
6739 }
6740 
6741 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6742 					u32 *pbufnum)
6743 {
6744 	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6745 	struct qib_devdata *dd = ppd->dd;
6746 
6747 	/* last is same for 2k and 4k, because we use 4k if all 2k busy */
6748 	if (pbc & PBC_7322_VL15_SEND) {
6749 		first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6750 		last = first;
6751 	} else {
6752 		if ((plen + 1) > dd->piosize2kmax_dwords)
6753 			first = dd->piobcnt2k;
6754 		else
6755 			first = 0;
6756 		last = dd->cspec->lastbuf_for_pio;
6757 	}
6758 	return qib_getsendbuf_range(dd, pbufnum, first, last);
6759 }
6760 
6761 static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6762 				     u32 start)
6763 {
6764 	qib_write_kreg_port(ppd, krp_psinterval, intv);
6765 	qib_write_kreg_port(ppd, krp_psstart, start);
6766 }
6767 
6768 /*
6769  * Must be called with sdma_lock held, or before init finished.
6770  */
6771 static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6772 {
6773 	qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6774 }
6775 
6776 /*
6777  * sdma_lock should be acquired before calling this routine
6778  */
6779 static void dump_sdma_7322_state(struct qib_pportdata *ppd)
6780 {
6781 	u64 reg, reg1, reg2;
6782 
6783 	reg = qib_read_kreg_port(ppd, krp_senddmastatus);
6784 	qib_dev_porterr(ppd->dd, ppd->port,
6785 		"SDMA senddmastatus: 0x%016llx\n", reg);
6786 
6787 	reg = qib_read_kreg_port(ppd, krp_sendctrl);
6788 	qib_dev_porterr(ppd->dd, ppd->port,
6789 		"SDMA sendctrl: 0x%016llx\n", reg);
6790 
6791 	reg = qib_read_kreg_port(ppd, krp_senddmabase);
6792 	qib_dev_porterr(ppd->dd, ppd->port,
6793 		"SDMA senddmabase: 0x%016llx\n", reg);
6794 
6795 	reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
6796 	reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
6797 	reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
6798 	qib_dev_porterr(ppd->dd, ppd->port,
6799 		"SDMA senddmabufmask 0:%llx  1:%llx  2:%llx\n",
6800 		 reg, reg1, reg2);
6801 
6802 	/* get bufuse bits, clear them, and print them again if non-zero */
6803 	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6804 	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
6805 	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6806 	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg1);
6807 	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6808 	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg2);
6809 	/* 0 and 1 should always be zero, so print as short form */
6810 	qib_dev_porterr(ppd->dd, ppd->port,
6811 		 "SDMA current senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6812 		 reg, reg1, reg2);
6813 	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6814 	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6815 	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6816 	/* 0 and 1 should always be zero, so print as short form */
6817 	qib_dev_porterr(ppd->dd, ppd->port,
6818 		 "SDMA cleared senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6819 		 reg, reg1, reg2);
6820 
6821 	reg = qib_read_kreg_port(ppd, krp_senddmatail);
6822 	qib_dev_porterr(ppd->dd, ppd->port,
6823 		"SDMA senddmatail: 0x%016llx\n", reg);
6824 
6825 	reg = qib_read_kreg_port(ppd, krp_senddmahead);
6826 	qib_dev_porterr(ppd->dd, ppd->port,
6827 		"SDMA senddmahead: 0x%016llx\n", reg);
6828 
6829 	reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
6830 	qib_dev_porterr(ppd->dd, ppd->port,
6831 		"SDMA senddmaheadaddr: 0x%016llx\n", reg);
6832 
6833 	reg = qib_read_kreg_port(ppd, krp_senddmalengen);
6834 	qib_dev_porterr(ppd->dd, ppd->port,
6835 		"SDMA senddmalengen: 0x%016llx\n", reg);
6836 
6837 	reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
6838 	qib_dev_porterr(ppd->dd, ppd->port,
6839 		"SDMA senddmadesccnt: 0x%016llx\n", reg);
6840 
6841 	reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
6842 	qib_dev_porterr(ppd->dd, ppd->port,
6843 		"SDMA senddmaidlecnt: 0x%016llx\n", reg);
6844 
6845 	reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
6846 	qib_dev_porterr(ppd->dd, ppd->port,
6847 		"SDMA senddmapriorityhld: 0x%016llx\n", reg);
6848 
6849 	reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
6850 	qib_dev_porterr(ppd->dd, ppd->port,
6851 		"SDMA senddmareloadcnt: 0x%016llx\n", reg);
6852 
6853 	dump_sdma_state(ppd);
6854 }
6855 
6856 static struct sdma_set_state_action sdma_7322_action_table[] = {
6857 	[qib_sdma_state_s00_hw_down] = {
6858 		.go_s99_running_tofalse = 1,
6859 		.op_enable = 0,
6860 		.op_intenable = 0,
6861 		.op_halt = 0,
6862 		.op_drain = 0,
6863 	},
6864 	[qib_sdma_state_s10_hw_start_up_wait] = {
6865 		.op_enable = 0,
6866 		.op_intenable = 1,
6867 		.op_halt = 1,
6868 		.op_drain = 0,
6869 	},
6870 	[qib_sdma_state_s20_idle] = {
6871 		.op_enable = 1,
6872 		.op_intenable = 1,
6873 		.op_halt = 1,
6874 		.op_drain = 0,
6875 	},
6876 	[qib_sdma_state_s30_sw_clean_up_wait] = {
6877 		.op_enable = 0,
6878 		.op_intenable = 1,
6879 		.op_halt = 1,
6880 		.op_drain = 0,
6881 	},
6882 	[qib_sdma_state_s40_hw_clean_up_wait] = {
6883 		.op_enable = 1,
6884 		.op_intenable = 1,
6885 		.op_halt = 1,
6886 		.op_drain = 0,
6887 	},
6888 	[qib_sdma_state_s50_hw_halt_wait] = {
6889 		.op_enable = 1,
6890 		.op_intenable = 1,
6891 		.op_halt = 1,
6892 		.op_drain = 1,
6893 	},
6894 	[qib_sdma_state_s99_running] = {
6895 		.op_enable = 1,
6896 		.op_intenable = 1,
6897 		.op_halt = 0,
6898 		.op_drain = 0,
6899 		.go_s99_running_totrue = 1,
6900 	},
6901 };
6902 
6903 static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6904 {
6905 	ppd->sdma_state.set_state_action = sdma_7322_action_table;
6906 }
6907 
6908 static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6909 {
6910 	struct qib_devdata *dd = ppd->dd;
6911 	unsigned lastbuf, erstbuf;
6912 	u64 senddmabufmask[3] = { 0 };
6913 	int n, ret = 0;
6914 
6915 	qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6916 	qib_sdma_7322_setlengen(ppd);
6917 	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6918 	qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6919 	qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6920 	qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6921 
6922 	if (dd->num_pports)
6923 		n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6924 	else
6925 		n = dd->cspec->sdmabufcnt; /* failsafe for init */
6926 	erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6927 		((dd->num_pports == 1 || ppd->port == 2) ? n :
6928 		dd->cspec->sdmabufcnt);
6929 	lastbuf = erstbuf + n;
6930 
6931 	ppd->sdma_state.first_sendbuf = erstbuf;
6932 	ppd->sdma_state.last_sendbuf = lastbuf;
6933 	for (; erstbuf < lastbuf; ++erstbuf) {
6934 		unsigned word = erstbuf / BITS_PER_LONG;
6935 		unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6936 
6937 		BUG_ON(word >= 3);
6938 		senddmabufmask[word] |= 1ULL << bit;
6939 	}
6940 	qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6941 	qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6942 	qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6943 	return ret;
6944 }
6945 
6946 /* sdma_lock must be held */
6947 static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6948 {
6949 	struct qib_devdata *dd = ppd->dd;
6950 	int sane;
6951 	int use_dmahead;
6952 	u16 swhead;
6953 	u16 swtail;
6954 	u16 cnt;
6955 	u16 hwhead;
6956 
6957 	use_dmahead = __qib_sdma_running(ppd) &&
6958 		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
6959 retry:
6960 	hwhead = use_dmahead ?
6961 		(u16) le64_to_cpu(*ppd->sdma_head_dma) :
6962 		(u16) qib_read_kreg_port(ppd, krp_senddmahead);
6963 
6964 	swhead = ppd->sdma_descq_head;
6965 	swtail = ppd->sdma_descq_tail;
6966 	cnt = ppd->sdma_descq_cnt;
6967 
6968 	if (swhead < swtail)
6969 		/* not wrapped */
6970 		sane = (hwhead >= swhead) & (hwhead <= swtail);
6971 	else if (swhead > swtail)
6972 		/* wrapped around */
6973 		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6974 			(hwhead <= swtail);
6975 	else
6976 		/* empty */
6977 		sane = (hwhead == swhead);
6978 
6979 	if (unlikely(!sane)) {
6980 		if (use_dmahead) {
6981 			/* try one more time, directly from the register */
6982 			use_dmahead = 0;
6983 			goto retry;
6984 		}
6985 		/* proceed as if no progress */
6986 		hwhead = swhead;
6987 	}
6988 
6989 	return hwhead;
6990 }
6991 
6992 static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6993 {
6994 	u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6995 
6996 	return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6997 	       (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6998 	       !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
6999 	       !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
7000 }
7001 
7002 /*
7003  * Compute the amount of delay before sending the next packet if the
7004  * port's send rate differs from the static rate set for the QP.
7005  * The delay affects the next packet and the amount of the delay is
7006  * based on the length of the this packet.
7007  */
7008 static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
7009 				   u8 srate, u8 vl)
7010 {
7011 	u8 snd_mult = ppd->delay_mult;
7012 	u8 rcv_mult = ib_rate_to_delay[srate];
7013 	u32 ret;
7014 
7015 	ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
7016 
7017 	/* Indicate VL15, else set the VL in the control word */
7018 	if (vl == 15)
7019 		ret |= PBC_7322_VL15_SEND_CTRL;
7020 	else
7021 		ret |= vl << PBC_VL_NUM_LSB;
7022 	ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
7023 
7024 	return ret;
7025 }
7026 
7027 /*
7028  * Enable the per-port VL15 send buffers for use.
7029  * They follow the rest of the buffers, without a config parameter.
7030  * This was in initregs, but that is done before the shadow
7031  * is set up, and this has to be done after the shadow is
7032  * set up.
7033  */
7034 static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
7035 {
7036 	unsigned vl15bufs;
7037 
7038 	vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
7039 	qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
7040 			       TXCHK_CHG_TYPE_KERN, NULL);
7041 }
7042 
7043 static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
7044 {
7045 	if (rcd->ctxt < NUM_IB_PORTS) {
7046 		if (rcd->dd->num_pports > 1) {
7047 			rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
7048 			rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
7049 		} else {
7050 			rcd->rcvegrcnt = KCTXT0_EGRCNT;
7051 			rcd->rcvegr_tid_base = 0;
7052 		}
7053 	} else {
7054 		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
7055 		rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
7056 			(rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
7057 	}
7058 }
7059 
7060 #define QTXSLEEPS 5000
7061 static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
7062 				  u32 len, u32 which, struct qib_ctxtdata *rcd)
7063 {
7064 	int i;
7065 	const int last = start + len - 1;
7066 	const int lastr = last / BITS_PER_LONG;
7067 	u32 sleeps = 0;
7068 	int wait = rcd != NULL;
7069 	unsigned long flags;
7070 
7071 	while (wait) {
7072 		unsigned long shadow;
7073 		int cstart, previ = -1;
7074 
7075 		/*
7076 		 * when flipping from kernel to user, we can't change
7077 		 * the checking type if the buffer is allocated to the
7078 		 * driver.   It's OK the other direction, because it's
7079 		 * from close, and we have just disarm'ed all the
7080 		 * buffers.  All the kernel to kernel changes are also
7081 		 * OK.
7082 		 */
7083 		for (cstart = start; cstart <= last; cstart++) {
7084 			i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7085 				/ BITS_PER_LONG;
7086 			if (i != previ) {
7087 				shadow = (unsigned long)
7088 					le64_to_cpu(dd->pioavailregs_dma[i]);
7089 				previ = i;
7090 			}
7091 			if (test_bit(((2 * cstart) +
7092 				      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7093 				     % BITS_PER_LONG, &shadow))
7094 				break;
7095 		}
7096 
7097 		if (cstart > last)
7098 			break;
7099 
7100 		if (sleeps == QTXSLEEPS)
7101 			break;
7102 		/* make sure we see an updated copy next time around */
7103 		sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7104 		sleeps++;
7105 		msleep(20);
7106 	}
7107 
7108 	switch (which) {
7109 	case TXCHK_CHG_TYPE_DIS1:
7110 		/*
7111 		 * disable checking on a range; used by diags; just
7112 		 * one buffer, but still written generically
7113 		 */
7114 		for (i = start; i <= last; i++)
7115 			clear_bit(i, dd->cspec->sendchkenable);
7116 		break;
7117 
7118 	case TXCHK_CHG_TYPE_ENAB1:
7119 		/*
7120 		 * (re)enable checking on a range; used by diags; just
7121 		 * one buffer, but still written generically; read
7122 		 * scratch to be sure buffer actually triggered, not
7123 		 * just flushed from processor.
7124 		 */
7125 		qib_read_kreg32(dd, kr_scratch);
7126 		for (i = start; i <= last; i++)
7127 			set_bit(i, dd->cspec->sendchkenable);
7128 		break;
7129 
7130 	case TXCHK_CHG_TYPE_KERN:
7131 		/* usable by kernel */
7132 		for (i = start; i <= last; i++) {
7133 			set_bit(i, dd->cspec->sendibchk);
7134 			clear_bit(i, dd->cspec->sendgrhchk);
7135 		}
7136 		spin_lock_irqsave(&dd->uctxt_lock, flags);
7137 		/* see if we need to raise avail update threshold */
7138 		for (i = dd->first_user_ctxt;
7139 		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
7140 		     && i < dd->cfgctxts; i++)
7141 			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
7142 			   ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
7143 			   < dd->cspec->updthresh_dflt)
7144 				break;
7145 		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
7146 		if (i == dd->cfgctxts) {
7147 			spin_lock_irqsave(&dd->sendctrl_lock, flags);
7148 			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
7149 			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7150 			dd->sendctrl |= (dd->cspec->updthresh &
7151 					 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
7152 					   SYM_LSB(SendCtrl, AvailUpdThld);
7153 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7154 			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7155 		}
7156 		break;
7157 
7158 	case TXCHK_CHG_TYPE_USER:
7159 		/* for user process */
7160 		for (i = start; i <= last; i++) {
7161 			clear_bit(i, dd->cspec->sendibchk);
7162 			set_bit(i, dd->cspec->sendgrhchk);
7163 		}
7164 		spin_lock_irqsave(&dd->sendctrl_lock, flags);
7165 		if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
7166 			/ rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
7167 			dd->cspec->updthresh = (rcd->piocnt /
7168 						rcd->subctxt_cnt) - 1;
7169 			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7170 			dd->sendctrl |= (dd->cspec->updthresh &
7171 					SYM_RMASK(SendCtrl, AvailUpdThld))
7172 					<< SYM_LSB(SendCtrl, AvailUpdThld);
7173 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7174 			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7175 		} else
7176 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7177 		break;
7178 
7179 	default:
7180 		break;
7181 	}
7182 
7183 	for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
7184 		qib_write_kreg(dd, kr_sendcheckmask + i,
7185 			       dd->cspec->sendchkenable[i]);
7186 
7187 	for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
7188 		qib_write_kreg(dd, kr_sendgrhcheckmask + i,
7189 			       dd->cspec->sendgrhchk[i]);
7190 		qib_write_kreg(dd, kr_sendibpktmask + i,
7191 			       dd->cspec->sendibchk[i]);
7192 	}
7193 
7194 	/*
7195 	 * Be sure whatever we did was seen by the chip and acted upon,
7196 	 * before we return.  Mostly important for which >= 2.
7197 	 */
7198 	qib_read_kreg32(dd, kr_scratch);
7199 }
7200 
7201 
7202 /* useful for trigger analyzers, etc. */
7203 static void writescratch(struct qib_devdata *dd, u32 val)
7204 {
7205 	qib_write_kreg(dd, kr_scratch, val);
7206 }
7207 
7208 /* Dummy for now, use chip regs soon */
7209 static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
7210 {
7211 	return -ENXIO;
7212 }
7213 
7214 /**
7215  * qib_init_iba7322_funcs - set up the chip-specific function pointers
7216  * @dev: the pci_dev for qlogic_ib device
7217  * @ent: pci_device_id struct for this dev
7218  *
7219  * Also allocates, inits, and returns the devdata struct for this
7220  * device instance
7221  *
7222  * This is global, and is called directly at init to set up the
7223  * chip-specific function pointers for later use.
7224  */
7225 struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
7226 					   const struct pci_device_id *ent)
7227 {
7228 	struct qib_devdata *dd;
7229 	int ret, i;
7230 	u32 tabsize, actual_cnt = 0;
7231 
7232 	dd = qib_alloc_devdata(pdev,
7233 		NUM_IB_PORTS * sizeof(struct qib_pportdata) +
7234 		sizeof(struct qib_chip_specific) +
7235 		NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
7236 	if (IS_ERR(dd))
7237 		goto bail;
7238 
7239 	dd->f_bringup_serdes    = qib_7322_bringup_serdes;
7240 	dd->f_cleanup           = qib_setup_7322_cleanup;
7241 	dd->f_clear_tids        = qib_7322_clear_tids;
7242 	dd->f_free_irq          = qib_7322_free_irq;
7243 	dd->f_get_base_info     = qib_7322_get_base_info;
7244 	dd->f_get_msgheader     = qib_7322_get_msgheader;
7245 	dd->f_getsendbuf        = qib_7322_getsendbuf;
7246 	dd->f_gpio_mod          = gpio_7322_mod;
7247 	dd->f_eeprom_wen        = qib_7322_eeprom_wen;
7248 	dd->f_hdrqempty         = qib_7322_hdrqempty;
7249 	dd->f_ib_updown         = qib_7322_ib_updown;
7250 	dd->f_init_ctxt         = qib_7322_init_ctxt;
7251 	dd->f_initvl15_bufs     = qib_7322_initvl15_bufs;
7252 	dd->f_intr_fallback     = qib_7322_intr_fallback;
7253 	dd->f_late_initreg      = qib_late_7322_initreg;
7254 	dd->f_setpbc_control    = qib_7322_setpbc_control;
7255 	dd->f_portcntr          = qib_portcntr_7322;
7256 	dd->f_put_tid           = qib_7322_put_tid;
7257 	dd->f_quiet_serdes      = qib_7322_mini_quiet_serdes;
7258 	dd->f_rcvctrl           = rcvctrl_7322_mod;
7259 	dd->f_read_cntrs        = qib_read_7322cntrs;
7260 	dd->f_read_portcntrs    = qib_read_7322portcntrs;
7261 	dd->f_reset             = qib_do_7322_reset;
7262 	dd->f_init_sdma_regs    = init_sdma_7322_regs;
7263 	dd->f_sdma_busy         = qib_sdma_7322_busy;
7264 	dd->f_sdma_gethead      = qib_sdma_7322_gethead;
7265 	dd->f_sdma_sendctrl     = qib_7322_sdma_sendctrl;
7266 	dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
7267 	dd->f_sdma_update_tail  = qib_sdma_update_7322_tail;
7268 	dd->f_sendctrl          = sendctrl_7322_mod;
7269 	dd->f_set_armlaunch     = qib_set_7322_armlaunch;
7270 	dd->f_set_cntr_sample   = qib_set_cntr_7322_sample;
7271 	dd->f_iblink_state      = qib_7322_iblink_state;
7272 	dd->f_ibphys_portstate  = qib_7322_phys_portstate;
7273 	dd->f_get_ib_cfg        = qib_7322_get_ib_cfg;
7274 	dd->f_set_ib_cfg        = qib_7322_set_ib_cfg;
7275 	dd->f_set_ib_loopback   = qib_7322_set_loopback;
7276 	dd->f_get_ib_table      = qib_7322_get_ib_table;
7277 	dd->f_set_ib_table      = qib_7322_set_ib_table;
7278 	dd->f_set_intr_state    = qib_7322_set_intr_state;
7279 	dd->f_setextled         = qib_setup_7322_setextled;
7280 	dd->f_txchk_change      = qib_7322_txchk_change;
7281 	dd->f_update_usrhead    = qib_update_7322_usrhead;
7282 	dd->f_wantpiobuf_intr   = qib_wantpiobuf_7322_intr;
7283 	dd->f_xgxs_reset        = qib_7322_mini_pcs_reset;
7284 	dd->f_sdma_hw_clean_up  = qib_7322_sdma_hw_clean_up;
7285 	dd->f_sdma_hw_start_up  = qib_7322_sdma_hw_start_up;
7286 	dd->f_sdma_init_early   = qib_7322_sdma_init_early;
7287 	dd->f_writescratch      = writescratch;
7288 	dd->f_tempsense_rd	= qib_7322_tempsense_rd;
7289 #ifdef CONFIG_INFINIBAND_QIB_DCA
7290 	dd->f_notify_dca	= qib_7322_notify_dca;
7291 #endif
7292 	/*
7293 	 * Do remaining PCIe setup and save PCIe values in dd.
7294 	 * Any error printing is already done by the init code.
7295 	 * On return, we have the chip mapped, but chip registers
7296 	 * are not set up until start of qib_init_7322_variables.
7297 	 */
7298 	ret = qib_pcie_ddinit(dd, pdev, ent);
7299 	if (ret < 0)
7300 		goto bail_free;
7301 
7302 	/* initialize chip-specific variables */
7303 	ret = qib_init_7322_variables(dd);
7304 	if (ret)
7305 		goto bail_cleanup;
7306 
7307 	if (qib_mini_init || !dd->num_pports)
7308 		goto bail;
7309 
7310 	/*
7311 	 * Determine number of vectors we want; depends on port count
7312 	 * and number of configured kernel receive queues actually used.
7313 	 * Should also depend on whether sdma is enabled or not, but
7314 	 * that's such a rare testing case it's not worth worrying about.
7315 	 */
7316 	tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
7317 	for (i = 0; i < tabsize; i++)
7318 		if ((i < ARRAY_SIZE(irq_table) &&
7319 		     irq_table[i].port <= dd->num_pports) ||
7320 		    (i >= ARRAY_SIZE(irq_table) &&
7321 		     dd->rcd[i - ARRAY_SIZE(irq_table)]))
7322 			actual_cnt++;
7323 	/* reduce by ctxt's < 2 */
7324 	if (qib_krcvq01_no_msi)
7325 		actual_cnt -= dd->num_pports;
7326 
7327 	tabsize = actual_cnt;
7328 	dd->cspec->msix_entries = kzalloc(tabsize *
7329 			sizeof(struct qib_msix_entry), GFP_KERNEL);
7330 	if (!dd->cspec->msix_entries)
7331 		tabsize = 0;
7332 
7333 	for (i = 0; i < tabsize; i++)
7334 		dd->cspec->msix_entries[i].msix.entry = i;
7335 
7336 	if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
7337 		qib_dev_err(dd,
7338 			"Failed to setup PCIe or interrupts; continuing anyway\n");
7339 	/* may be less than we wanted, if not enough available */
7340 	dd->cspec->num_msix_entries = tabsize;
7341 
7342 	/* setup interrupt handler */
7343 	qib_setup_7322_interrupt(dd, 1);
7344 
7345 	/* clear diagctrl register, in case diags were running and crashed */
7346 	qib_write_kreg(dd, kr_hwdiagctrl, 0);
7347 #ifdef CONFIG_INFINIBAND_QIB_DCA
7348 	if (!dca_add_requester(&pdev->dev)) {
7349 		qib_devinfo(dd->pcidev, "DCA enabled\n");
7350 		dd->flags |= QIB_DCA_ENABLED;
7351 		qib_setup_dca(dd);
7352 	}
7353 #endif
7354 	goto bail;
7355 
7356 bail_cleanup:
7357 	qib_pcie_ddcleanup(dd);
7358 bail_free:
7359 	qib_free_devdata(dd);
7360 	dd = ERR_PTR(ret);
7361 bail:
7362 	return dd;
7363 }
7364 
7365 /*
7366  * Set the table entry at the specified index from the table specifed.
7367  * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
7368  * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
7369  * 'idx' below addresses the correct entry, while its 4 LSBs select the
7370  * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
7371  */
7372 #define DDS_ENT_AMP_LSB 14
7373 #define DDS_ENT_MAIN_LSB 9
7374 #define DDS_ENT_POST_LSB 5
7375 #define DDS_ENT_PRE_XTRA_LSB 3
7376 #define DDS_ENT_PRE_LSB 0
7377 
7378 /*
7379  * Set one entry in the TxDDS table for spec'd port
7380  * ridx picks one of the entries, while tp points
7381  * to the appropriate table entry.
7382  */
7383 static void set_txdds(struct qib_pportdata *ppd, int ridx,
7384 		      const struct txdds_ent *tp)
7385 {
7386 	struct qib_devdata *dd = ppd->dd;
7387 	u32 pack_ent;
7388 	int regidx;
7389 
7390 	/* Get correct offset in chip-space, and in source table */
7391 	regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
7392 	/*
7393 	 * We do not use qib_write_kreg_port() because it was intended
7394 	 * only for registers in the lower "port specific" pages.
7395 	 * So do index calculation  by hand.
7396 	 */
7397 	if (ppd->hw_pidx)
7398 		regidx += (dd->palign / sizeof(u64));
7399 
7400 	pack_ent = tp->amp << DDS_ENT_AMP_LSB;
7401 	pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
7402 	pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
7403 	pack_ent |= tp->post << DDS_ENT_POST_LSB;
7404 	qib_write_kreg(dd, regidx, pack_ent);
7405 	/* Prevent back-to-back writes by hitting scratch */
7406 	qib_write_kreg(ppd->dd, kr_scratch, 0);
7407 }
7408 
7409 static const struct vendor_txdds_ent vendor_txdds[] = {
7410 	{ /* Amphenol 1m 30awg NoEq */
7411 		{ 0x41, 0x50, 0x48 }, "584470002       ",
7412 		{ 10,  0,  0,  5 }, { 10,  0,  0,  9 }, {  7,  1,  0, 13 },
7413 	},
7414 	{ /* Amphenol 3m 28awg NoEq */
7415 		{ 0x41, 0x50, 0x48 }, "584470004       ",
7416 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  7, 15 },
7417 	},
7418 	{ /* Finisar 3m OM2 Optical */
7419 		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
7420 		{  0,  0,  0,  3 }, {  0,  0,  0,  4 }, {  0,  0,  0, 13 },
7421 	},
7422 	{ /* Finisar 30m OM2 Optical */
7423 		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
7424 		{  0,  0,  0,  1 }, {  0,  0,  0,  5 }, {  0,  0,  0, 11 },
7425 	},
7426 	{ /* Finisar Default OM2 Optical */
7427 		{ 0x00, 0x90, 0x65 }, NULL,
7428 		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  0,  0, 12 },
7429 	},
7430 	{ /* Gore 1m 30awg NoEq */
7431 		{ 0x00, 0x21, 0x77 }, "QSN3300-1       ",
7432 		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  0, 15 },
7433 	},
7434 	{ /* Gore 2m 30awg NoEq */
7435 		{ 0x00, 0x21, 0x77 }, "QSN3300-2       ",
7436 		{  0,  0,  0,  8 }, {  0,  0,  0, 10 }, {  0,  1,  7, 15 },
7437 	},
7438 	{ /* Gore 1m 28awg NoEq */
7439 		{ 0x00, 0x21, 0x77 }, "QSN3800-1       ",
7440 		{  0,  0,  0,  6 }, {  0,  0,  0,  8 }, {  0,  1,  0, 15 },
7441 	},
7442 	{ /* Gore 3m 28awg NoEq */
7443 		{ 0x00, 0x21, 0x77 }, "QSN3800-3       ",
7444 		{  0,  0,  0,  9 }, {  0,  0,  0, 13 }, {  0,  1,  7, 15 },
7445 	},
7446 	{ /* Gore 5m 24awg Eq */
7447 		{ 0x00, 0x21, 0x77 }, "QSN7000-5       ",
7448 		{  0,  0,  0,  7 }, {  0,  0,  0,  9 }, {  0,  1,  3, 15 },
7449 	},
7450 	{ /* Gore 7m 24awg Eq */
7451 		{ 0x00, 0x21, 0x77 }, "QSN7000-7       ",
7452 		{  0,  0,  0,  9 }, {  0,  0,  0, 11 }, {  0,  2,  6, 15 },
7453 	},
7454 	{ /* Gore 5m 26awg Eq */
7455 		{ 0x00, 0x21, 0x77 }, "QSN7600-5       ",
7456 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  9, 13 },
7457 	},
7458 	{ /* Gore 7m 26awg Eq */
7459 		{ 0x00, 0x21, 0x77 }, "QSN7600-7       ",
7460 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  10,  1,  8, 15 },
7461 	},
7462 	{ /* Intersil 12m 24awg Active */
7463 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
7464 		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  3,  0,  9 },
7465 	},
7466 	{ /* Intersil 10m 28awg Active */
7467 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
7468 		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  2,  0,  2 },
7469 	},
7470 	{ /* Intersil 7m 30awg Active */
7471 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
7472 		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  1,  0,  3 },
7473 	},
7474 	{ /* Intersil 5m 32awg Active */
7475 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
7476 		{  0,  0,  0,  6 }, {  0,  0,  0,  6 }, {  0,  2,  0,  8 },
7477 	},
7478 	{ /* Intersil Default Active */
7479 		{ 0x00, 0x30, 0xB4 }, NULL,
7480 		{  0,  0,  0,  6 }, {  0,  0,  0,  5 }, {  0,  2,  0,  5 },
7481 	},
7482 	{ /* Luxtera 20m Active Optical */
7483 		{ 0x00, 0x25, 0x63 }, NULL,
7484 		{  0,  0,  0,  5 }, {  0,  0,  0,  8 }, {  0,  2,  0,  12 },
7485 	},
7486 	{ /* Molex 1M Cu loopback */
7487 		{ 0x00, 0x09, 0x3A }, "74763-0025      ",
7488 		{  2,  2,  6, 15 }, {  2,  2,  6, 15 }, {  2,  2,  6, 15 },
7489 	},
7490 	{ /* Molex 2m 28awg NoEq */
7491 		{ 0x00, 0x09, 0x3A }, "74757-2201      ",
7492 		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  1, 15 },
7493 	},
7494 };
7495 
7496 static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
7497 	/* amp, pre, main, post */
7498 	{  2, 2, 15,  6 },	/* Loopback */
7499 	{  0, 0,  0,  1 },	/*  2 dB */
7500 	{  0, 0,  0,  2 },	/*  3 dB */
7501 	{  0, 0,  0,  3 },	/*  4 dB */
7502 	{  0, 0,  0,  4 },	/*  5 dB */
7503 	{  0, 0,  0,  5 },	/*  6 dB */
7504 	{  0, 0,  0,  6 },	/*  7 dB */
7505 	{  0, 0,  0,  7 },	/*  8 dB */
7506 	{  0, 0,  0,  8 },	/*  9 dB */
7507 	{  0, 0,  0,  9 },	/* 10 dB */
7508 	{  0, 0,  0, 10 },	/* 11 dB */
7509 	{  0, 0,  0, 11 },	/* 12 dB */
7510 	{  0, 0,  0, 12 },	/* 13 dB */
7511 	{  0, 0,  0, 13 },	/* 14 dB */
7512 	{  0, 0,  0, 14 },	/* 15 dB */
7513 	{  0, 0,  0, 15 },	/* 16 dB */
7514 };
7515 
7516 static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
7517 	/* amp, pre, main, post */
7518 	{  2, 2, 15,  6 },	/* Loopback */
7519 	{  0, 0,  0,  8 },	/*  2 dB */
7520 	{  0, 0,  0,  8 },	/*  3 dB */
7521 	{  0, 0,  0,  9 },	/*  4 dB */
7522 	{  0, 0,  0,  9 },	/*  5 dB */
7523 	{  0, 0,  0, 10 },	/*  6 dB */
7524 	{  0, 0,  0, 10 },	/*  7 dB */
7525 	{  0, 0,  0, 11 },	/*  8 dB */
7526 	{  0, 0,  0, 11 },	/*  9 dB */
7527 	{  0, 0,  0, 12 },	/* 10 dB */
7528 	{  0, 0,  0, 12 },	/* 11 dB */
7529 	{  0, 0,  0, 13 },	/* 12 dB */
7530 	{  0, 0,  0, 13 },	/* 13 dB */
7531 	{  0, 0,  0, 14 },	/* 14 dB */
7532 	{  0, 0,  0, 14 },	/* 15 dB */
7533 	{  0, 0,  0, 15 },	/* 16 dB */
7534 };
7535 
7536 static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7537 	/* amp, pre, main, post */
7538 	{  2, 2, 15,  6 },	/* Loopback */
7539 	{  0, 1,  0,  7 },	/*  2 dB (also QMH7342) */
7540 	{  0, 1,  0,  9 },	/*  3 dB (also QMH7342) */
7541 	{  0, 1,  0, 11 },	/*  4 dB */
7542 	{  0, 1,  0, 13 },	/*  5 dB */
7543 	{  0, 1,  0, 15 },	/*  6 dB */
7544 	{  0, 1,  3, 15 },	/*  7 dB */
7545 	{  0, 1,  7, 15 },	/*  8 dB */
7546 	{  0, 1,  7, 15 },	/*  9 dB */
7547 	{  0, 1,  8, 15 },	/* 10 dB */
7548 	{  0, 1,  9, 15 },	/* 11 dB */
7549 	{  0, 1, 10, 15 },	/* 12 dB */
7550 	{  0, 2,  6, 15 },	/* 13 dB */
7551 	{  0, 2,  7, 15 },	/* 14 dB */
7552 	{  0, 2,  8, 15 },	/* 15 dB */
7553 	{  0, 2,  9, 15 },	/* 16 dB */
7554 };
7555 
7556 /*
7557  * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
7558  * These are mostly used for mez cards going through connectors
7559  * and backplane traces, but can be used to add other "unusual"
7560  * table values as well.
7561  */
7562 static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
7563 	/* amp, pre, main, post */
7564 	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */
7565 	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */
7566 	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
7567 	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
7568 	{  0, 0, 0,  3 },	/* QMH7342 backplane settings */
7569 	{  0, 0, 0,  4 },	/* QMH7342 backplane settings */
7570 	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
7571 	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
7572 	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
7573 	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
7574 	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */
7575 	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
7576 	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
7577 	{  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7578 	{  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7579 	{  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7580 	{  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7581 	{  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7582 };
7583 
7584 static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
7585 	/* amp, pre, main, post */
7586 	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */
7587 	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */
7588 	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
7589 	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
7590 	{  0, 0, 0,  9 },	/* QMH7342 backplane settings */
7591 	{  0, 0, 0, 10 },	/* QMH7342 backplane settings */
7592 	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
7593 	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
7594 	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
7595 	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
7596 	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */
7597 	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
7598 	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
7599 	{  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7600 	{  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7601 	{  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7602 	{  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7603 	{  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7604 };
7605 
7606 static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
7607 	/* amp, pre, main, post */
7608 	{  0, 1,  0,  4 },	/* QMH7342 backplane settings */
7609 	{  0, 1,  0,  5 },	/* QMH7342 backplane settings */
7610 	{  0, 1,  0,  6 },	/* QMH7342 backplane settings */
7611 	{  0, 1,  0,  8 },	/* QMH7342 backplane settings */
7612 	{  0, 1,  0, 10 },	/* QMH7342 backplane settings */
7613 	{  0, 1,  0, 12 },	/* QMH7342 backplane settings */
7614 	{  0, 1,  4, 15 },	/* QME7342 backplane settings 1.0 */
7615 	{  0, 1,  3, 15 },	/* QME7342 backplane settings 1.0 */
7616 	{  0, 1,  0, 12 },	/* QME7342 backplane settings 1.0 */
7617 	{  0, 1,  0, 11 },	/* QME7342 backplane settings 1.0 */
7618 	{  0, 1,  0,  9 },	/* QME7342 backplane settings 1.0 */
7619 	{  0, 1,  0, 14 },	/* QME7342 backplane settings 1.0 */
7620 	{  0, 1,  2, 15 },	/* QME7342 backplane settings 1.0 */
7621 	{  0, 1,  0, 11 },      /* QME7342 backplane settings 1.1 */
7622 	{  0, 1,  0,  7 },      /* QME7342 backplane settings 1.1 */
7623 	{  0, 1,  0,  9 },      /* QME7342 backplane settings 1.1 */
7624 	{  0, 1,  0,  6 },      /* QME7342 backplane settings 1.1 */
7625 	{  0, 1,  0,  8 },      /* QME7342 backplane settings 1.1 */
7626 };
7627 
7628 static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7629 	/* amp, pre, main, post */
7630 	{ 0, 0, 0, 0 },         /* QME7342 mfg settings */
7631 	{ 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
7632 };
7633 
7634 static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7635 					       unsigned atten)
7636 {
7637 	/*
7638 	 * The attenuation table starts at 2dB for entry 1,
7639 	 * with entry 0 being the loopback entry.
7640 	 */
7641 	if (atten <= 2)
7642 		atten = 1;
7643 	else if (atten > TXDDS_TABLE_SZ)
7644 		atten = TXDDS_TABLE_SZ - 1;
7645 	else
7646 		atten--;
7647 	return txdds + atten;
7648 }
7649 
7650 /*
7651  * if override is set, the module parameter txselect has a value
7652  * for this specific port, so use it, rather than our normal mechanism.
7653  */
7654 static void find_best_ent(struct qib_pportdata *ppd,
7655 			  const struct txdds_ent **sdr_dds,
7656 			  const struct txdds_ent **ddr_dds,
7657 			  const struct txdds_ent **qdr_dds, int override)
7658 {
7659 	struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7660 	int idx;
7661 
7662 	/* Search table of known cables */
7663 	for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7664 		const struct vendor_txdds_ent *v = vendor_txdds + idx;
7665 
7666 		if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7667 		    (!v->partnum ||
7668 		     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7669 			*sdr_dds = &v->sdr;
7670 			*ddr_dds = &v->ddr;
7671 			*qdr_dds = &v->qdr;
7672 			return;
7673 		}
7674 	}
7675 
7676 	/* Active cables don't have attenuation so we only set SERDES
7677 	 * settings to account for the attenuation of the board traces. */
7678 	if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7679 		*sdr_dds = txdds_sdr + ppd->dd->board_atten;
7680 		*ddr_dds = txdds_ddr + ppd->dd->board_atten;
7681 		*qdr_dds = txdds_qdr + ppd->dd->board_atten;
7682 		return;
7683 	}
7684 
7685 	if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7686 						      qd->atten[1])) {
7687 		*sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7688 		*ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7689 		*qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7690 		return;
7691 	} else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7692 		/*
7693 		 * If we have no (or incomplete) data from the cable
7694 		 * EEPROM, or no QSFP, or override is set, use the
7695 		 * module parameter value to index into the attentuation
7696 		 * table.
7697 		 */
7698 		idx = ppd->cpspec->no_eep;
7699 		*sdr_dds = &txdds_sdr[idx];
7700 		*ddr_dds = &txdds_ddr[idx];
7701 		*qdr_dds = &txdds_qdr[idx];
7702 	} else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7703 		/* similar to above, but index into the "extra" table. */
7704 		idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7705 		*sdr_dds = &txdds_extra_sdr[idx];
7706 		*ddr_dds = &txdds_extra_ddr[idx];
7707 		*qdr_dds = &txdds_extra_qdr[idx];
7708 	} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7709 		   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7710 					  TXDDS_MFG_SZ)) {
7711 		idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7712 		pr_info("IB%u:%u use idx %u into txdds_mfg\n",
7713 			ppd->dd->unit, ppd->port, idx);
7714 		*sdr_dds = &txdds_extra_mfg[idx];
7715 		*ddr_dds = &txdds_extra_mfg[idx];
7716 		*qdr_dds = &txdds_extra_mfg[idx];
7717 	} else {
7718 		/* this shouldn't happen, it's range checked */
7719 		*sdr_dds = txdds_sdr + qib_long_atten;
7720 		*ddr_dds = txdds_ddr + qib_long_atten;
7721 		*qdr_dds = txdds_qdr + qib_long_atten;
7722 	}
7723 }
7724 
7725 static void init_txdds_table(struct qib_pportdata *ppd, int override)
7726 {
7727 	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7728 	struct txdds_ent *dds;
7729 	int idx;
7730 	int single_ent = 0;
7731 
7732 	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7733 
7734 	/* for mez cards or override, use the selected value for all entries */
7735 	if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7736 		single_ent = 1;
7737 
7738 	/* Fill in the first entry with the best entry found. */
7739 	set_txdds(ppd, 0, sdr_dds);
7740 	set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7741 	set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7742 	if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7743 		QIBL_LINKACTIVE)) {
7744 		dds = (struct txdds_ent *)(ppd->link_speed_active ==
7745 					   QIB_IB_QDR ?  qdr_dds :
7746 					   (ppd->link_speed_active ==
7747 					    QIB_IB_DDR ? ddr_dds : sdr_dds));
7748 		write_tx_serdes_param(ppd, dds);
7749 	}
7750 
7751 	/* Fill in the remaining entries with the default table values. */
7752 	for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7753 		set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7754 		set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7755 			  single_ent ? ddr_dds : txdds_ddr + idx);
7756 		set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7757 			  single_ent ? qdr_dds : txdds_qdr + idx);
7758 	}
7759 }
7760 
7761 #define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7762 #define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7763 #define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7764 #define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7765 #define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7766 #define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7767 #define AHB_TRANS_TRIES 10
7768 
7769 /*
7770  * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan4,
7771  * 5=subsystem which is why most calls have "chan + chan >> 1"
7772  * for the channel argument.
7773  */
7774 static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7775 		    u32 data, u32 mask)
7776 {
7777 	u32 rd_data, wr_data, sz_mask;
7778 	u64 trans, acc, prev_acc;
7779 	u32 ret = 0xBAD0BAD;
7780 	int tries;
7781 
7782 	prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7783 	/* From this point on, make sure we return access */
7784 	acc = (quad << 1) | 1;
7785 	qib_write_kreg(dd, KR_AHB_ACC, acc);
7786 
7787 	for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7788 		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7789 		if (trans & AHB_TRANS_RDY)
7790 			break;
7791 	}
7792 	if (tries >= AHB_TRANS_TRIES) {
7793 		qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7794 		goto bail;
7795 	}
7796 
7797 	/* If mask is not all 1s, we need to read, but different SerDes
7798 	 * entities have different sizes
7799 	 */
7800 	sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7801 	wr_data = data & mask & sz_mask;
7802 	if ((~mask & sz_mask) != 0) {
7803 		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7804 		qib_write_kreg(dd, KR_AHB_TRANS, trans);
7805 
7806 		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7807 			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7808 			if (trans & AHB_TRANS_RDY)
7809 				break;
7810 		}
7811 		if (tries >= AHB_TRANS_TRIES) {
7812 			qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7813 				    AHB_TRANS_TRIES);
7814 			goto bail;
7815 		}
7816 		/* Re-read in case host split reads and read data first */
7817 		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7818 		rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
7819 		wr_data |= (rd_data & ~mask & sz_mask);
7820 	}
7821 
7822 	/* If mask is not zero, we need to write. */
7823 	if (mask & sz_mask) {
7824 		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7825 		trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
7826 		trans |= AHB_WR;
7827 		qib_write_kreg(dd, KR_AHB_TRANS, trans);
7828 
7829 		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7830 			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7831 			if (trans & AHB_TRANS_RDY)
7832 				break;
7833 		}
7834 		if (tries >= AHB_TRANS_TRIES) {
7835 			qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7836 				    AHB_TRANS_TRIES);
7837 			goto bail;
7838 		}
7839 	}
7840 	ret = wr_data;
7841 bail:
7842 	qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7843 	return ret;
7844 }
7845 
7846 static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7847 			     unsigned mask)
7848 {
7849 	struct qib_devdata *dd = ppd->dd;
7850 	int chan;
7851 	u32 rbc;
7852 
7853 	for (chan = 0; chan < SERDES_CHANS; ++chan) {
7854 		ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7855 			data, mask);
7856 		rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7857 			      addr, 0, 0);
7858 	}
7859 }
7860 
7861 static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7862 {
7863 	u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7864 	u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7865 
7866 	if (enable && !state) {
7867 		pr_info("IB%u:%u Turning LOS on\n",
7868 			ppd->dd->unit, ppd->port);
7869 		data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7870 	} else if (!enable && state) {
7871 		pr_info("IB%u:%u Turning LOS off\n",
7872 			ppd->dd->unit, ppd->port);
7873 		data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7874 	}
7875 	qib_write_kreg_port(ppd, krp_serdesctrl, data);
7876 }
7877 
7878 static int serdes_7322_init(struct qib_pportdata *ppd)
7879 {
7880 	int ret = 0;
7881 
7882 	if (ppd->dd->cspec->r1)
7883 		ret = serdes_7322_init_old(ppd);
7884 	else
7885 		ret = serdes_7322_init_new(ppd);
7886 	return ret;
7887 }
7888 
7889 static int serdes_7322_init_old(struct qib_pportdata *ppd)
7890 {
7891 	u32 le_val;
7892 
7893 	/*
7894 	 * Initialize the Tx DDS tables.  Also done every QSFP event,
7895 	 * for adapters with QSFP
7896 	 */
7897 	init_txdds_table(ppd, 0);
7898 
7899 	/* ensure no tx overrides from earlier driver loads */
7900 	qib_write_kreg_port(ppd, krp_tx_deemph_override,
7901 		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7902 		reset_tx_deemphasis_override));
7903 
7904 	/* Patch some SerDes defaults to "Better for IB" */
7905 	/* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7906 	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7907 
7908 	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7909 	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7910 	/* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7911 	ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7912 
7913 	/* May be overridden in qsfp_7322_event */
7914 	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7915 	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7916 
7917 	/* enable LE1 adaptation for all but QME, which is disabled */
7918 	le_val = IS_QME(ppd->dd) ? 0 : 1;
7919 	ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7920 
7921 	/* Clear cmode-override, may be set from older driver */
7922 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7923 
7924 	/* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7925 	ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7926 
7927 	/* setup LoS params; these are subsystem, so chan == 5 */
7928 	/* LoS filter threshold_count on, ch 0-3, set to 8 */
7929 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7930 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7931 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7932 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7933 
7934 	/* LoS filter threshold_count off, ch 0-3, set to 4 */
7935 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7936 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7937 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7938 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7939 
7940 	/* LoS filter select enabled */
7941 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7942 
7943 	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
7944 	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7945 	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7946 	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7947 
7948 	serdes_7322_los_enable(ppd, 1);
7949 
7950 	/* rxbistena; set 0 to avoid effects of it switch later */
7951 	ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7952 
7953 	/* Configure 4 DFE taps, and only they adapt */
7954 	ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7955 
7956 	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7957 	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7958 	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7959 
7960 	/*
7961 	 * Set receive adaptation mode.  SDR and DDR adaptation are
7962 	 * always on, and QDR is initially enabled; later disabled.
7963 	 */
7964 	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7965 	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7966 	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7967 			    ppd->dd->cspec->r1 ?
7968 			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7969 	ppd->cpspec->qdr_dfe_on = 1;
7970 
7971 	/* FLoop LOS gate: PPM filter  enabled */
7972 	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7973 
7974 	/* rx offset center enabled */
7975 	ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7976 
7977 	if (!ppd->dd->cspec->r1) {
7978 		ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7979 		ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7980 	}
7981 
7982 	/* Set the frequency loop bandwidth to 15 */
7983 	ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7984 
7985 	return 0;
7986 }
7987 
7988 static int serdes_7322_init_new(struct qib_pportdata *ppd)
7989 {
7990 	unsigned long tend;
7991 	u32 le_val, rxcaldone;
7992 	int chan, chan_done = (1 << SERDES_CHANS) - 1;
7993 
7994 	/* Clear cmode-override, may be set from older driver */
7995 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7996 
7997 	/* ensure no tx overrides from earlier driver loads */
7998 	qib_write_kreg_port(ppd, krp_tx_deemph_override,
7999 		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8000 		reset_tx_deemphasis_override));
8001 
8002 	/* START OF LSI SUGGESTED SERDES BRINGUP */
8003 	/* Reset - Calibration Setup */
8004 	/*       Stop DFE adaptaion */
8005 	ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
8006 	/*       Disable LE1 */
8007 	ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
8008 	/*       Disable autoadapt for LE1 */
8009 	ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
8010 	/*       Disable LE2 */
8011 	ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
8012 	/*       Disable VGA */
8013 	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8014 	/*       Disable AFE Offset Cancel */
8015 	ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
8016 	/*       Disable Timing Loop */
8017 	ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
8018 	/*       Disable Frequency Loop */
8019 	ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
8020 	/*       Disable Baseline Wander Correction */
8021 	ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
8022 	/*       Disable RX Calibration */
8023 	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8024 	/*       Disable RX Offset Calibration */
8025 	ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
8026 	/*       Select BB CDR */
8027 	ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
8028 	/*       CDR Step Size */
8029 	ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
8030 	/*       Enable phase Calibration */
8031 	ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
8032 	/*       DFE Bandwidth [2:14-12] */
8033 	ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
8034 	/*       DFE Config (4 taps only) */
8035 	ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
8036 	/*       Gain Loop Bandwidth */
8037 	if (!ppd->dd->cspec->r1) {
8038 		ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
8039 		ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
8040 	} else {
8041 		ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
8042 	}
8043 	/*       Baseline Wander Correction Gain [13:4-0] (leave as default) */
8044 	/*       Baseline Wander Correction Gain [3:7-5] (leave as default) */
8045 	/*       Data Rate Select [5:7-6] (leave as default) */
8046 	/*       RX Parallel Word Width [3:10-8] (leave as default) */
8047 
8048 	/* RX REST */
8049 	/*       Single- or Multi-channel reset */
8050 	/*       RX Analog reset */
8051 	/*       RX Digital reset */
8052 	ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
8053 	msleep(20);
8054 	/*       RX Analog reset */
8055 	ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
8056 	msleep(20);
8057 	/*       RX Digital reset */
8058 	ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
8059 	msleep(20);
8060 
8061 	/* setup LoS params; these are subsystem, so chan == 5 */
8062 	/* LoS filter threshold_count on, ch 0-3, set to 8 */
8063 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
8064 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
8065 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
8066 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
8067 
8068 	/* LoS filter threshold_count off, ch 0-3, set to 4 */
8069 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
8070 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
8071 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
8072 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
8073 
8074 	/* LoS filter select enabled */
8075 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
8076 
8077 	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
8078 	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
8079 	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
8080 	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
8081 
8082 	/* Turn on LOS on initial SERDES init */
8083 	serdes_7322_los_enable(ppd, 1);
8084 	/* FLoop LOS gate: PPM filter  enabled */
8085 	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
8086 
8087 	/* RX LATCH CALIBRATION */
8088 	/*       Enable Eyefinder Phase Calibration latch */
8089 	ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
8090 	/*       Enable RX Offset Calibration latch */
8091 	ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
8092 	msleep(20);
8093 	/*       Start Calibration */
8094 	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
8095 	tend = jiffies + msecs_to_jiffies(500);
8096 	while (chan_done && !time_is_before_jiffies(tend)) {
8097 		msleep(20);
8098 		for (chan = 0; chan < SERDES_CHANS; ++chan) {
8099 			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8100 					    (chan + (chan >> 1)),
8101 					    25, 0, 0);
8102 			if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
8103 			    (~chan_done & (1 << chan)) == 0)
8104 				chan_done &= ~(1 << chan);
8105 		}
8106 	}
8107 	if (chan_done) {
8108 		pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
8109 			 IBSD(ppd->hw_pidx), chan_done);
8110 	} else {
8111 		for (chan = 0; chan < SERDES_CHANS; ++chan) {
8112 			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8113 					    (chan + (chan >> 1)),
8114 					    25, 0, 0);
8115 			if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
8116 				pr_info("Serdes %d chan %d calibration failed\n",
8117 					IBSD(ppd->hw_pidx), chan);
8118 		}
8119 	}
8120 
8121 	/*       Turn off Calibration */
8122 	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8123 	msleep(20);
8124 
8125 	/* BRING RX UP */
8126 	/*       Set LE2 value (May be overridden in qsfp_7322_event) */
8127 	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
8128 	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
8129 	/*       Set LE2 Loop bandwidth */
8130 	ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
8131 	/*       Enable LE2 */
8132 	ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
8133 	msleep(20);
8134 	/*       Enable H0 only */
8135 	ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
8136 	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
8137 	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
8138 	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
8139 	/*       Enable VGA */
8140 	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8141 	msleep(20);
8142 	/*       Set Frequency Loop Bandwidth */
8143 	ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
8144 	/*       Enable Frequency Loop */
8145 	ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
8146 	/*       Set Timing Loop Bandwidth */
8147 	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
8148 	/*       Enable Timing Loop */
8149 	ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
8150 	msleep(50);
8151 	/*       Enable DFE
8152 	 *       Set receive adaptation mode.  SDR and DDR adaptation are
8153 	 *       always on, and QDR is initially enabled; later disabled.
8154 	 */
8155 	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
8156 	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
8157 	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
8158 			    ppd->dd->cspec->r1 ?
8159 			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
8160 	ppd->cpspec->qdr_dfe_on = 1;
8161 	/*       Disable LE1  */
8162 	ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
8163 	/*       Disable auto adapt for LE1 */
8164 	ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
8165 	msleep(20);
8166 	/*       Enable AFE Offset Cancel */
8167 	ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
8168 	/*       Enable Baseline Wander Correction */
8169 	ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
8170 	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
8171 	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
8172 	/* VGA output common mode */
8173 	ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
8174 
8175 	/*
8176 	 * Initialize the Tx DDS tables.  Also done every QSFP event,
8177 	 * for adapters with QSFP
8178 	 */
8179 	init_txdds_table(ppd, 0);
8180 
8181 	return 0;
8182 }
8183 
8184 /* start adjust QMH serdes parameters */
8185 
8186 static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
8187 {
8188 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8189 		9, code << 9, 0x3f << 9);
8190 }
8191 
8192 static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
8193 	int enable, u32 tapenable)
8194 {
8195 	if (enable)
8196 		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8197 			1, 3 << 10, 0x1f << 10);
8198 	else
8199 		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8200 			1, 0, 0x1f << 10);
8201 }
8202 
8203 /* Set clock to 1, 0, 1, 0 */
8204 static void clock_man(struct qib_pportdata *ppd, int chan)
8205 {
8206 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8207 		4, 0x4000, 0x4000);
8208 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8209 		4, 0, 0x4000);
8210 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8211 		4, 0x4000, 0x4000);
8212 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8213 		4, 0, 0x4000);
8214 }
8215 
8216 /*
8217  * write the current Tx serdes pre,post,main,amp settings into the serdes.
8218  * The caller must pass the settings appropriate for the current speed,
8219  * or not care if they are correct for the current speed.
8220  */
8221 static void write_tx_serdes_param(struct qib_pportdata *ppd,
8222 				  struct txdds_ent *txdds)
8223 {
8224 	u64 deemph;
8225 
8226 	deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
8227 	/* field names for amp, main, post, pre, respectively */
8228 	deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
8229 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
8230 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
8231 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
8232 
8233 	deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8234 			   tx_override_deemphasis_select);
8235 	deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8236 		    txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8237 				       txampcntl_d2a);
8238 	deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8239 		     txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8240 				   txc0_ena);
8241 	deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8242 		     txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8243 				    txcp1_ena);
8244 	deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8245 		     txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8246 				    txcn1_ena);
8247 	qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
8248 }
8249 
8250 /*
8251  * Set the parameters for mez cards on link bounce, so they are
8252  * always exactly what was requested.  Similar logic to init_txdds
8253  * but does just the serdes.
8254  */
8255 static void adj_tx_serdes(struct qib_pportdata *ppd)
8256 {
8257 	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
8258 	struct txdds_ent *dds;
8259 
8260 	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
8261 	dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
8262 		qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
8263 				ddr_dds : sdr_dds));
8264 	write_tx_serdes_param(ppd, dds);
8265 }
8266 
8267 /* set QDR forced value for H1, if needed */
8268 static void force_h1(struct qib_pportdata *ppd)
8269 {
8270 	int chan;
8271 
8272 	ppd->cpspec->qdr_reforce = 0;
8273 	if (!ppd->dd->cspec->r1)
8274 		return;
8275 
8276 	for (chan = 0; chan < SERDES_CHANS; chan++) {
8277 		set_man_mode_h1(ppd, chan, 1, 0);
8278 		set_man_code(ppd, chan, ppd->cpspec->h1_val);
8279 		clock_man(ppd, chan);
8280 		set_man_mode_h1(ppd, chan, 0, 0);
8281 	}
8282 }
8283 
8284 #define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
8285 #define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
8286 
8287 #define R_OPCODE_LSB 3
8288 #define R_OP_NOP 0
8289 #define R_OP_SHIFT 2
8290 #define R_OP_UPDATE 3
8291 #define R_TDI_LSB 2
8292 #define R_TDO_LSB 1
8293 #define R_RDY 1
8294 
8295 static int qib_r_grab(struct qib_devdata *dd)
8296 {
8297 	u64 val = SJA_EN;
8298 
8299 	qib_write_kreg(dd, kr_r_access, val);
8300 	qib_read_kreg32(dd, kr_scratch);
8301 	return 0;
8302 }
8303 
8304 /* qib_r_wait_for_rdy() not only waits for the ready bit, it
8305  * returns the current state of R_TDO
8306  */
8307 static int qib_r_wait_for_rdy(struct qib_devdata *dd)
8308 {
8309 	u64 val;
8310 	int timeout;
8311 
8312 	for (timeout = 0; timeout < 100 ; ++timeout) {
8313 		val = qib_read_kreg32(dd, kr_r_access);
8314 		if (val & R_RDY)
8315 			return (val >> R_TDO_LSB) & 1;
8316 	}
8317 	return -1;
8318 }
8319 
8320 static int qib_r_shift(struct qib_devdata *dd, int bisten,
8321 		       int len, u8 *inp, u8 *outp)
8322 {
8323 	u64 valbase, val;
8324 	int ret, pos;
8325 
8326 	valbase = SJA_EN | (bisten << BISTEN_LSB) |
8327 		(R_OP_SHIFT << R_OPCODE_LSB);
8328 	ret = qib_r_wait_for_rdy(dd);
8329 	if (ret < 0)
8330 		goto bail;
8331 	for (pos = 0; pos < len; ++pos) {
8332 		val = valbase;
8333 		if (outp) {
8334 			outp[pos >> 3] &= ~(1 << (pos & 7));
8335 			outp[pos >> 3] |= (ret << (pos & 7));
8336 		}
8337 		if (inp) {
8338 			int tdi = inp[pos >> 3] >> (pos & 7);
8339 
8340 			val |= ((tdi & 1) << R_TDI_LSB);
8341 		}
8342 		qib_write_kreg(dd, kr_r_access, val);
8343 		qib_read_kreg32(dd, kr_scratch);
8344 		ret = qib_r_wait_for_rdy(dd);
8345 		if (ret < 0)
8346 			break;
8347 	}
8348 	/* Restore to NOP between operations. */
8349 	val =  SJA_EN | (bisten << BISTEN_LSB);
8350 	qib_write_kreg(dd, kr_r_access, val);
8351 	qib_read_kreg32(dd, kr_scratch);
8352 	ret = qib_r_wait_for_rdy(dd);
8353 
8354 	if (ret >= 0)
8355 		ret = pos;
8356 bail:
8357 	return ret;
8358 }
8359 
8360 static int qib_r_update(struct qib_devdata *dd, int bisten)
8361 {
8362 	u64 val;
8363 	int ret;
8364 
8365 	val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
8366 	ret = qib_r_wait_for_rdy(dd);
8367 	if (ret >= 0) {
8368 		qib_write_kreg(dd, kr_r_access, val);
8369 		qib_read_kreg32(dd, kr_scratch);
8370 	}
8371 	return ret;
8372 }
8373 
8374 #define BISTEN_PORT_SEL 15
8375 #define LEN_PORT_SEL 625
8376 #define BISTEN_AT 17
8377 #define LEN_AT 156
8378 #define BISTEN_ETM 16
8379 #define LEN_ETM 632
8380 
8381 #define BIT2BYTE(x) (((x) +  BITS_PER_BYTE - 1) / BITS_PER_BYTE)
8382 
8383 /* these are common for all IB port use cases. */
8384 static u8 reset_at[BIT2BYTE(LEN_AT)] = {
8385 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8386 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8387 };
8388 static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
8389 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8390 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8391 	0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
8392 	0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
8393 	0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
8394 	0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
8395 	0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8396 	0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
8397 };
8398 static u8 at[BIT2BYTE(LEN_AT)] = {
8399 	0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
8400 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8401 };
8402 
8403 /* used for IB1 or IB2, only one in use */
8404 static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
8405 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8406 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8407 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8408 	0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
8409 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8410 	0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
8411 	0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
8412 	0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
8413 };
8414 
8415 /* used when both IB1 and IB2 are in use */
8416 static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
8417 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8418 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
8419 	0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8420 	0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
8421 	0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
8422 	0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
8423 	0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
8424 	0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
8425 };
8426 
8427 /* used when only IB1 is in use */
8428 static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
8429 	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8430 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8431 	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8432 	0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8433 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8434 	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8435 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8436 	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8437 };
8438 
8439 /* used when only IB2 is in use */
8440 static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
8441 	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
8442 	0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
8443 	0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8444 	0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8445 	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
8446 	0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8447 	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8448 	0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
8449 };
8450 
8451 /* used when both IB1 and IB2 are in use */
8452 static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
8453 	0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8454 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8455 	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8456 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8457 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8458 	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
8459 	0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8460 	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8461 };
8462 
8463 /*
8464  * Do setup to properly handle IB link recovery; if port is zero, we
8465  * are initializing to cover both ports; otherwise we are initializing
8466  * to cover a single port card, or the port has reached INIT and we may
8467  * need to switch coverage types.
8468  */
8469 static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
8470 {
8471 	u8 *portsel, *etm;
8472 	struct qib_devdata *dd = ppd->dd;
8473 
8474 	if (!ppd->dd->cspec->r1)
8475 		return;
8476 	if (!both) {
8477 		dd->cspec->recovery_ports_initted++;
8478 		ppd->cpspec->recovery_init = 1;
8479 	}
8480 	if (!both && dd->cspec->recovery_ports_initted == 1) {
8481 		portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
8482 		etm = atetm_1port;
8483 	} else {
8484 		portsel = portsel_2port;
8485 		etm = atetm_2port;
8486 	}
8487 
8488 	if (qib_r_grab(dd) < 0 ||
8489 		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
8490 		qib_r_update(dd, BISTEN_ETM) < 0 ||
8491 		qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
8492 		qib_r_update(dd, BISTEN_AT) < 0 ||
8493 		qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
8494 			    portsel, NULL) < 0 ||
8495 		qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
8496 		qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
8497 		qib_r_update(dd, BISTEN_AT) < 0 ||
8498 		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
8499 		qib_r_update(dd, BISTEN_ETM) < 0)
8500 		qib_dev_err(dd, "Failed IB link recovery setup\n");
8501 }
8502 
8503 static void check_7322_rxe_status(struct qib_pportdata *ppd)
8504 {
8505 	struct qib_devdata *dd = ppd->dd;
8506 	u64 fmask;
8507 
8508 	if (dd->cspec->recovery_ports_initted != 1)
8509 		return; /* rest doesn't apply to dualport */
8510 	qib_write_kreg(dd, kr_control, dd->control |
8511 		       SYM_MASK(Control, FreezeMode));
8512 	(void)qib_read_kreg64(dd, kr_scratch);
8513 	udelay(3); /* ibcreset asserted 400ns, be sure that's over */
8514 	fmask = qib_read_kreg64(dd, kr_act_fmask);
8515 	if (!fmask) {
8516 		/*
8517 		 * require a powercycle before we'll work again, and make
8518 		 * sure we get no more interrupts, and don't turn off
8519 		 * freeze.
8520 		 */
8521 		ppd->dd->cspec->stay_in_freeze = 1;
8522 		qib_7322_set_intr_state(ppd->dd, 0);
8523 		qib_write_kreg(dd, kr_fmask, 0ULL);
8524 		qib_dev_err(dd, "HCA unusable until powercycled\n");
8525 		return; /* eventually reset */
8526 	}
8527 
8528 	qib_write_kreg(ppd->dd, kr_hwerrclear,
8529 	    SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
8530 
8531 	/* don't do the full clear_freeze(), not needed for this */
8532 	qib_write_kreg(dd, kr_control, dd->control);
8533 	qib_read_kreg32(dd, kr_scratch);
8534 	/* take IBC out of reset */
8535 	if (ppd->link_speed_supported) {
8536 		ppd->cpspec->ibcctrl_a &=
8537 			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
8538 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
8539 				    ppd->cpspec->ibcctrl_a);
8540 		qib_read_kreg32(dd, kr_scratch);
8541 		if (ppd->lflags & QIBL_IB_LINK_DISABLED)
8542 			qib_set_ib_7322_lstate(ppd, 0,
8543 				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
8544 	}
8545 }
8546