// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 */
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_inline.h"

#include <linux/io-64-nonatomic-lo-hi.h>

#define TIMEOUT_100_MS	100
#define MASK(n)		DMA_BIT_MASK(n)
#define MN_WIN(addr)	(((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
#define OCM_WIN(addr)	(((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
#define MS_WIN(addr)	(addr & 0x0ffc0000)
#define QLA82XX_PCI_MN_2M	(0)
#define QLA82XX_PCI_MS_2M	(0x80000)
#define QLA82XX_PCI_OCM0_2M	(0xc0000)
#define VALID_OCM_ADDR(addr)	(((addr) & 0x3f800) != 0x3f800)
#define GET_MEM_OFFS_2M(addr)	(addr & MASK(18))

/* CRB window related */
#define CRB_BLK(off)	((off >> 20) & 0x3f)
#define CRB_SUBBLK(off)	((off >> 16) & 0xf)
#define CRB_WINDOW_2M	(0x130060)
#define CRB_HI(off)	((qla4_82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
			 ((off) & 0xf0000))
#define QLA82XX_PCI_CAMQM_2M_END	(0x04800800UL)
#define QLA82XX_PCI_CAMQM_2M_BASE	(0x000ff800UL)
#define CRB_INDIRECT_2M	(0x1e0000UL)
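
/*
 * For example, a CRB offset in the 128M map decodes as block = bits 25:20
 * and sub-block = bits 19:16: off = 0x0110000 gives CRB_BLK(off) = 1 and
 * CRB_SUBBLK(off) = 1, and CRB_HI(off) combines the hub/agent id from
 * qla4_82xx_crb_hub_agt[1] (shifted into bits 31:20) with bits 19:16 of
 * the offset.
 */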

static inline void __iomem *
qla4_8xxx_pci_base_offsetfset(struct scsi_qla_host *ha, unsigned long off)
{
	if ((off < ha->first_page_group_end) &&
	    (off >= ha->first_page_group_start))
		return (void __iomem *)(ha->nx_pcibase + off);

	return NULL;
}

static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8,
				0x410000AC, 0x410000B8, 0x410000BC };
#define MAX_CRB_XFORM 60
static unsigned long crb_addr_xform[MAX_CRB_XFORM];
static int qla4_8xxx_crb_table_initialized;

#define qla4_8xxx_crb_addr_transform(name) \
	(crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
	 QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
static void
qla4_82xx_crb_addr_transform_setup(void)
{
	qla4_8xxx_crb_addr_transform(XDMA);
	qla4_8xxx_crb_addr_transform(TIMR);
	qla4_8xxx_crb_addr_transform(SRE);
	qla4_8xxx_crb_addr_transform(SQN3);
	qla4_8xxx_crb_addr_transform(SQN2);
	qla4_8xxx_crb_addr_transform(SQN1);
	qla4_8xxx_crb_addr_transform(SQN0);
	qla4_8xxx_crb_addr_transform(SQS3);
	qla4_8xxx_crb_addr_transform(SQS2);
	qla4_8xxx_crb_addr_transform(SQS1);
	qla4_8xxx_crb_addr_transform(SQS0);
	qla4_8xxx_crb_addr_transform(RPMX7);
	qla4_8xxx_crb_addr_transform(RPMX6);
	qla4_8xxx_crb_addr_transform(RPMX5);
	qla4_8xxx_crb_addr_transform(RPMX4);
	qla4_8xxx_crb_addr_transform(RPMX3);
	qla4_8xxx_crb_addr_transform(RPMX2);
	qla4_8xxx_crb_addr_transform(RPMX1);
	qla4_8xxx_crb_addr_transform(RPMX0);
	qla4_8xxx_crb_addr_transform(ROMUSB);
	qla4_8xxx_crb_addr_transform(SN);
	qla4_8xxx_crb_addr_transform(QMN);
	qla4_8xxx_crb_addr_transform(QMS);
	qla4_8xxx_crb_addr_transform(PGNI);
	qla4_8xxx_crb_addr_transform(PGND);
	qla4_8xxx_crb_addr_transform(PGN3);
	qla4_8xxx_crb_addr_transform(PGN2);
	qla4_8xxx_crb_addr_transform(PGN1);
	qla4_8xxx_crb_addr_transform(PGN0);
	qla4_8xxx_crb_addr_transform(PGSI);
	qla4_8xxx_crb_addr_transform(PGSD);
	qla4_8xxx_crb_addr_transform(PGS3);
	qla4_8xxx_crb_addr_transform(PGS2);
	qla4_8xxx_crb_addr_transform(PGS1);
	qla4_8xxx_crb_addr_transform(PGS0);
	qla4_8xxx_crb_addr_transform(PS);
	qla4_8xxx_crb_addr_transform(PH);
	qla4_8xxx_crb_addr_transform(NIU);
	qla4_8xxx_crb_addr_transform(I2Q);
	qla4_8xxx_crb_addr_transform(EG);
	qla4_8xxx_crb_addr_transform(MN);
	qla4_8xxx_crb_addr_transform(MS);
	qla4_8xxx_crb_addr_transform(CAS2);
	qla4_8xxx_crb_addr_transform(CAS1);
	qla4_8xxx_crb_addr_transform(CAS0);
	qla4_8xxx_crb_addr_transform(CAM);
	qla4_8xxx_crb_addr_transform(C2C1);
	qla4_8xxx_crb_addr_transform(C2C0);
	qla4_8xxx_crb_addr_transform(SMB);
	qla4_8xxx_crb_addr_transform(OCM0);
	qla4_8xxx_crb_addr_transform(I2C0);

	qla4_8xxx_crb_table_initialized = 1;
}
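
/*
 * crb_addr_xform[] maps each QLA82XX_HW_PX_MAP_CRB_* block index to its
 * hub/agent base address (agent id shifted into bits 31:20).
 * qla4_82xx_decode_crb_addr() walks this table in reverse: given an
 * internal CRB address it finds the matching base and rebuilds the
 * PCI-relative offset as (table index << 20) + the low 20 bits.
 */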

static struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
	{{{0, 0, 0, 0} } },			/* 0: PCI */
	{{{1, 0x0100000, 0x0102000, 0x120000},	/* 1: PCIE */
	   {1, 0x0110000, 0x0120000, 0x130000},
	   {1, 0x0120000, 0x0122000, 0x124000},
	   {1, 0x0130000, 0x0132000, 0x126000},
	   {1, 0x0140000, 0x0142000, 0x128000},
	   {1, 0x0150000, 0x0152000, 0x12a000},
	   {1, 0x0160000, 0x0170000, 0x110000},
	   {1, 0x0170000, 0x0172000, 0x12e000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {1, 0x01e0000, 0x01e0800, 0x122000},
	   {0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x0200000, 0x0210000, 0x180000} } },	/* 2: MN */
	{{{0, 0, 0, 0} } },			/* 3: */
	{{{1, 0x0400000, 0x0401000, 0x169000} } },	/* 4: P2NR1 */
	{{{1, 0x0500000, 0x0510000, 0x140000} } },	/* 5: SRE */
	{{{1, 0x0600000, 0x0610000, 0x1c0000} } },	/* 6: NIU */
	{{{1, 0x0700000, 0x0704000, 0x1b8000} } },	/* 7: QM */
	{{{1, 0x0800000, 0x0802000, 0x170000},	/* 8: SQM0 */
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {1, 0x08f0000, 0x08f2000, 0x172000} } },
	{{{1, 0x0900000, 0x0902000, 0x174000},	/* 9: SQM1 */
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {1, 0x09f0000, 0x09f2000, 0x176000} } },
	{{{0, 0x0a00000, 0x0a02000, 0x178000},	/* 10: SQM2 */
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {1, 0x0af0000, 0x0af2000, 0x17a000} } },
	{{{0, 0x0b00000, 0x0b02000, 0x17c000},	/* 11: SQM3 */
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
	{{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },	/* 12: I2Q */
	{{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },	/* 13: TMR */
	{{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },	/* 14: ROMUSB */
	{{{1, 0x0f00000, 0x0f01000, 0x164000} } },	/* 15: PEG4 */
	{{{0, 0x1000000, 0x1004000, 0x1a8000} } },	/* 16: XDMA */
	{{{1, 0x1100000, 0x1101000, 0x160000} } },	/* 17: PEG0 */
	{{{1, 0x1200000, 0x1201000, 0x161000} } },	/* 18: PEG1 */
	{{{1, 0x1300000, 0x1301000, 0x162000} } },	/* 19: PEG2 */
	{{{1, 0x1400000, 0x1401000, 0x163000} } },	/* 20: PEG3 */
	{{{1, 0x1500000, 0x1501000, 0x165000} } },	/* 21: P2ND */
	{{{1, 0x1600000, 0x1601000, 0x166000} } },	/* 22: P2NI */
	{{{0, 0, 0, 0} } },			/* 23: */
	{{{0, 0, 0, 0} } },			/* 24: */
	{{{0, 0, 0, 0} } },			/* 25: */
	{{{0, 0, 0, 0} } },			/* 26: */
	{{{0, 0, 0, 0} } },			/* 27: */
	{{{0, 0, 0, 0} } },			/* 28: */
	{{{1, 0x1d00000, 0x1d10000, 0x190000} } },	/* 29: MS */
	{{{1, 0x1e00000, 0x1e01000, 0x16a000} } },	/* 30: P2NR2 */
	{{{1, 0x1f00000, 0x1f10000, 0x150000} } },	/* 31: EPG */
	{{{0} } },				/* 32: PCI */
	{{{1, 0x2100000, 0x2102000, 0x120000},	/* 33: PCIE */
	   {1, 0x2110000, 0x2120000, 0x130000},
	   {1, 0x2120000, 0x2122000, 0x124000},
	   {1, 0x2130000, 0x2132000, 0x126000},
	   {1, 0x2140000, 0x2142000, 0x128000},
	   {1, 0x2150000, 0x2152000, 0x12a000},
	   {1, 0x2160000, 0x2170000, 0x110000},
	   {1, 0x2170000, 0x2172000, 0x12e000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000},
	   {0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x2200000, 0x2204000, 0x1b0000} } },	/* 34: CAM */
	{{{0} } },				/* 35: */
	{{{0} } },				/* 36: */
	{{{0} } },				/* 37: */
	{{{0} } },				/* 38: */
	{{{0} } },				/* 39: */
	{{{1, 0x2800000, 0x2804000, 0x1a4000} } },	/* 40: TMR */
	{{{1, 0x2900000, 0x2901000, 0x16b000} } },	/* 41: P2NR3 */
	{{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },	/* 42: RPMX1 */
	{{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },	/* 43: RPMX2 */
	{{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },	/* 44: RPMX3 */
	{{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },	/* 45: RPMX4 */
	{{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },	/* 46: RPMX5 */
	{{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },	/* 47: RPMX6 */
	{{{1, 0x3000000, 0x3000400, 0x1adc00} } },	/* 48: RPMX7 */
	{{{0, 0x3100000, 0x3104000, 0x1a8000} } },	/* 49: XDMA */
	{{{1, 0x3200000, 0x3204000, 0x1d4000} } },	/* 50: I2Q */
	{{{1, 0x3300000, 0x3304000, 0x1a0000} } },	/* 51: ROMUSB */
	{{{0} } },				/* 52: */
	{{{1, 0x3500000, 0x3500400, 0x1ac000} } },	/* 53: RPMX0 */
	{{{1, 0x3600000, 0x3600400, 0x1ae000} } },	/* 54: RPMX8 */
	{{{1, 0x3700000, 0x3700400, 0x1ae400} } },	/* 55: RPMX9 */
	{{{1, 0x3800000, 0x3804000, 0x1d0000} } },	/* 56: OCM0 */
	{{{1, 0x3900000, 0x3904000, 0x1b4000} } },	/* 57: CRYPTO */
	{{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },	/* 58: SMB */
	{{{0} } },				/* 59: I2C0 */
	{{{0} } },				/* 60: I2C1 */
	{{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },	/* 61: LPC */
	{{{1, 0x3e00000, 0x3e01000, 0x167000} } },	/* 62: P2NC */
	{{{1, 0x3f00000, 0x3f01000, 0x168000} } }	/* 63: P2NR0 */
};

/*
 * top 12 bits of crb internal address (hub, agent)
 */
static unsigned qla4_82xx_crb_hub_agt[64] = {
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
	QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
	QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
	QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
	QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
	QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
	QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
	QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
	QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
	0,
	0,
	0,
	0,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
	QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
	QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
	QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
	0,
};

/* Device states */
static char *qdev_state[] = {
	"Unknown",
	"Cold",
	"Initializing",
	"Ready",
	"Need Reset",
	"Need Quiescent",
	"Failed",
	"Quiescent",
};

/*
 * In: 'off' is offset from CRB space in 128M pci map
 * Out: 'off' is 2M pci map addr
 * side effect: lock crb window
 */
static void
qla4_82xx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off)
{
	u32 win_read;

	ha->crb_win = CRB_HI(*off);
	writel(ha->crb_win,
	       (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));

	/* Read back value to make sure write has gone through before trying
	 * to use it. */
	win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
	if (win_read != ha->crb_win) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "%s: Written crbwin (0x%x) != Read crbwin (0x%x), off=0x%lx\n",
		    __func__, ha->crb_win, win_read, *off));
	}
	*off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
}
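
/*
 * Indirect CRB access through the 2M BAR is a two-step sequence: program
 * the window register at CRB_WINDOW_2M with CRB_HI(off), then touch the
 * data through the fixed 64K aperture at CRB_INDIRECT_2M using the low
 * 16 bits of the offset.  Callers must hold the hardware CRB-window
 * semaphore (see qla4_82xx_crb_win_lock() below) so that concurrent
 * functions do not retarget the window mid-access.
 */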

#define CRB_WIN_LOCK_TIMEOUT 100000000

/*
 * Context: atomic
 */
static int qla4_82xx_crb_win_lock(struct scsi_qla_host *ha)
{
	int done = 0, timeout = 0;

	while (!done) {
		/* acquire semaphore7 from PCI HW block */
		done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
		if (done == 1)
			break;
		if (timeout >= CRB_WIN_LOCK_TIMEOUT)
			return -1;

		timeout++;
		udelay(10);
	}
	qla4_82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->func_num);
	return 0;
}

void qla4_82xx_crb_win_unlock(struct scsi_qla_host *ha)
{
	qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
}

void
qla4_82xx_wr_32(struct scsi_qla_host *ha, ulong off, u32 data)
{
	unsigned long flags = 0;
	int rv;

	rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off);

	BUG_ON(rv == -1);

	if (rv == 1) {
		write_lock_irqsave(&ha->hw_lock, flags);
		qla4_82xx_crb_win_lock(ha);
		qla4_82xx_pci_set_crbwindow_2M(ha, &off);
	}

	writel(data, (void __iomem *)off);

	if (rv == 1) {
		qla4_82xx_crb_win_unlock(ha);
		write_unlock_irqrestore(&ha->hw_lock, flags);
	}
}

uint32_t qla4_82xx_rd_32(struct scsi_qla_host *ha, ulong off)
{
	unsigned long flags = 0;
	int rv;
	u32 data;

	rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off);

	BUG_ON(rv == -1);

	if (rv == 1) {
		write_lock_irqsave(&ha->hw_lock, flags);
		qla4_82xx_crb_win_lock(ha);
		qla4_82xx_pci_set_crbwindow_2M(ha, &off);
	}
	data = readl((void __iomem *)off);

	if (rv == 1) {
		qla4_82xx_crb_win_unlock(ha);
		write_unlock_irqrestore(&ha->hw_lock, flags);
	}
	return data;
}
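
/*
 * A typical caller never deals with the window directly; e.g. reading a
 * ROMUSB status register is simply:
 *
 *	val = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
 *
 * The helpers above decide per-offset whether the access is direct
 * (rv == 0) or needs the lock/window/unlock dance (rv == 1).
 */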

/* Minidump related functions */
int qla4_82xx_md_rd_32(struct scsi_qla_host *ha, uint32_t off, uint32_t *data)
{
	uint32_t win_read, off_value;
	int rval = QLA_SUCCESS;

	off_value = off & 0xFFFF0000;
	writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));

	/*
	 * Read back value to make sure write has gone through before trying
	 * to use it.
	 */
	win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
	if (win_read != off_value) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
		    __func__, off_value, win_read, off));
		rval = QLA_ERROR;
	} else {
		off_value = off & 0x0000FFFF;
		*data = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
					       ha->nx_pcibase));
	}
	return rval;
}

int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data)
{
	uint32_t win_read, off_value;
	int rval = QLA_SUCCESS;

	off_value = off & 0xFFFF0000;
	writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));

	/*
	 * Read back value to make sure write has gone through before trying
	 * to use it.
	 */
	win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
	if (win_read != off_value) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
		    __func__, off_value, win_read, off));
		rval = QLA_ERROR;
	} else {
		off_value = off & 0x0000FFFF;
		writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M +
					      ha->nx_pcibase));
	}
	return rval;
}
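
/*
 * Note that, unlike qla4_82xx_rd_32()/qla4_82xx_wr_32(), the minidump
 * helpers above program the CRB window without taking hw_lock or the
 * CRB-window semaphore; they are intended for the minidump capture path,
 * where the caller is assumed to have serialized hardware access.
 */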

#define IDC_LOCK_TIMEOUT 100000000

/**
 * qla4_82xx_idc_lock - hw_lock
 * @ha: pointer to adapter structure
 *
 * General purpose lock used to synchronize access to
 * CRB_DEV_STATE, CRB_DEV_REF_COUNT, etc.
 *
 * Context: task, can sleep
 **/
int qla4_82xx_idc_lock(struct scsi_qla_host *ha)
{
	int done = 0, timeout = 0;

	might_sleep();

	while (!done) {
		/* acquire semaphore5 from PCI HW block */
		done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
		if (done == 1)
			break;
		if (timeout >= IDC_LOCK_TIMEOUT)
			return -1;

		timeout++;
		msleep(100);
	}
	return 0;
}

void qla4_82xx_idc_unlock(struct scsi_qla_host *ha)
{
	qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
}

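/**
 * qla4_82xx_pci_get_crb_addr_2M - translate a 128M-map CRB offset
 * @ha: pointer to adapter structure
 * @off: in: offset in the 128M CRB space; out: mapped 2M-BAR address
 *
 * Returns 0 if @off was translated to a directly mappable address,
 * 1 if the access must go through the sliding CRB window, and -1 if
 * @off lies outside the CRB space.
 **/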
int
qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *ha, ulong *off)
{
	struct crb_128M_2M_sub_block_map *m;

	if (*off >= QLA82XX_CRB_MAX)
		return -1;

	if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
		*off = (*off - QLA82XX_PCI_CAMQM) +
		    QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
		return 0;
	}

	if (*off < QLA82XX_PCI_CRBSPACE)
		return -1;

	*off -= QLA82XX_PCI_CRBSPACE;

	/*
	 * Try direct map
	 */
	m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];

	if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
		*off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
		return 0;
	}

	/*
	 * Not in direct map, use crb window
	 */
	return 1;
}

/*
 * check memory access boundary.
 * used by test agent. support ddr access only for now
 */
static unsigned long
qla4_82xx_pci_mem_bound_check(struct scsi_qla_host *ha,
		unsigned long long addr, int size)
{
	if (!QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
	    QLA8XXX_ADDR_DDR_NET_MAX) ||
	    !QLA8XXX_ADDR_IN_RANGE(addr + size - 1,
	    QLA8XXX_ADDR_DDR_NET, QLA8XXX_ADDR_DDR_NET_MAX) ||
	    ((size != 1) && (size != 2) && (size != 4) && (size != 8))) {
		return 0;
	}
	return 1;
}

static int qla4_82xx_pci_set_window_warning_count;

static unsigned long
qla4_82xx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
{
	int window;
	u32 win_read;

	if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
	    QLA8XXX_ADDR_DDR_NET_MAX)) {
		/* DDR network side */
		window = MN_WIN(addr);
		ha->ddr_mn_window = window;
		qla4_82xx_wr_32(ha, ha->mn_win_crb |
		    QLA82XX_PCI_CRBSPACE, window);
		win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb |
		    QLA82XX_PCI_CRBSPACE);
		if ((win_read << 17) != window) {
			ql4_printk(KERN_WARNING, ha,
			    "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n",
			    __func__, window, win_read);
		}
		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
	} else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0,
	    QLA8XXX_ADDR_OCM0_MAX)) {
		unsigned int temp1;

		/* if bits 19:18 and 17:11 are all on */
		if ((addr & 0x00ff800) == 0xff800) {
			printk("%s: QM access not handled.\n", __func__);
			addr = -1UL;
		}

		window = OCM_WIN(addr);
		ha->ddr_mn_window = window;
		qla4_82xx_wr_32(ha, ha->mn_win_crb |
		    QLA82XX_PCI_CRBSPACE, window);
		win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb |
		    QLA82XX_PCI_CRBSPACE);
		temp1 = ((window & 0x1FF) << 7) |
		    ((window & 0x0FFFE0000) >> 17);
		if (win_read != temp1) {
			printk("%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n",
			    __func__, temp1, win_read);
		}
		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;

	} else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
	    QLA82XX_P3_ADDR_QDR_NET_MAX)) {
		/* QDR network side */
		window = MS_WIN(addr);
		ha->qdr_sn_window = window;
		qla4_82xx_wr_32(ha, ha->ms_win_crb |
		    QLA82XX_PCI_CRBSPACE, window);
		win_read = qla4_82xx_rd_32(ha,
		    ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
		if (win_read != window) {
			printk("%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n",
			    __func__, window, win_read);
		}
		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;

	} else {
		/*
		 * peg gdb frequently accesses memory that doesn't exist;
		 * this limits the chit-chat so debugging isn't slowed down.
		 */
		if ((qla4_82xx_pci_set_window_warning_count++ < 8) ||
		    (qla4_82xx_pci_set_window_warning_count % 64 == 0)) {
			printk("%s: Warning: %s Unknown address range!\n",
			    __func__, DRIVER_NAME);
		}
		addr = -1UL;
	}
	return addr;
}

/* check if address is in the same window as the previous access */
static int qla4_82xx_pci_is_same_window(struct scsi_qla_host *ha,
		unsigned long long addr)
{
	int window;
	unsigned long long qdr_max;

	qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;

	if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
	    QLA8XXX_ADDR_DDR_NET_MAX)) {
		/* DDR network side */
		BUG();	/* MN access can not come here */
	} else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0,
	    QLA8XXX_ADDR_OCM0_MAX)) {
		return 1;
	} else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM1,
	    QLA8XXX_ADDR_OCM1_MAX)) {
		return 1;
	} else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
	    qdr_max)) {
		/* QDR network side */
		window = ((addr - QLA8XXX_ADDR_QDR_NET) >> 22) & 0x3f;
		if (ha->qdr_sn_window == window)
			return 1;
	}

	return 0;
}

static int qla4_82xx_pci_mem_read_direct(struct scsi_qla_host *ha,
		u64 off, void *data, int size)
{
	unsigned long flags;
	void __iomem *addr;
	int ret = 0;
	u64 start;
	void __iomem *mem_ptr = NULL;
	unsigned long mem_base;
	unsigned long mem_page;

	write_lock_irqsave(&ha->hw_lock, flags);

	/*
	 * If attempting to access unknown address or straddle hw windows,
	 * do not access.
	 */
	start = qla4_82xx_pci_set_window(ha, off);
	if ((start == -1UL) ||
	    (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
		write_unlock_irqrestore(&ha->hw_lock, flags);
		printk(KERN_ERR "%s out of bound pci memory access. offset is 0x%llx\n",
		       DRIVER_NAME, off);
		return -1;
	}

	addr = qla4_8xxx_pci_base_offsetfset(ha, start);
	if (!addr) {
		write_unlock_irqrestore(&ha->hw_lock, flags);
		mem_base = pci_resource_start(ha->pdev, 0);
		mem_page = start & PAGE_MASK;
		/*
		 * Map two pages whenever the user tries to access addresses
		 * in two consecutive pages.
		 */
		if (mem_page != ((start + size - 1) & PAGE_MASK))
			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
		else
			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);

		if (mem_ptr == NULL) {
			*(u8 *)data = 0;
			return -1;
		}
		addr = mem_ptr;
		addr += start & (PAGE_SIZE - 1);
		write_lock_irqsave(&ha->hw_lock, flags);
	}

	switch (size) {
	case 1:
		*(u8 *)data = readb(addr);
		break;
	case 2:
		*(u16 *)data = readw(addr);
		break;
	case 4:
		*(u32 *)data = readl(addr);
		break;
	case 8:
		*(u64 *)data = readq(addr);
		break;
	default:
		ret = -1;
		break;
	}
	write_unlock_irqrestore(&ha->hw_lock, flags);

	if (mem_ptr)
		iounmap(mem_ptr);
	return ret;
}

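/*
 * qla4_82xx_pci_mem_write_direct() below mirrors the read path: when the
 * translated address falls outside the pre-mapped first page group, the
 * 128M BAR page containing it is ioremap()ed on demand.  Two pages are
 * mapped when the access straddles a page boundary; e.g. a 4-byte access
 * at start = PAGE_SIZE - 2 touches both pages of the mapping.
 */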
static int
qla4_82xx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off,
		void *data, int size)
{
	unsigned long flags;
	void __iomem *addr;
	int ret = 0;
	u64 start;
	void __iomem *mem_ptr = NULL;
	unsigned long mem_base;
	unsigned long mem_page;

	write_lock_irqsave(&ha->hw_lock, flags);

	/*
	 * If attempting to access unknown address or straddle hw windows,
	 * do not access.
	 */
	start = qla4_82xx_pci_set_window(ha, off);
	if ((start == -1UL) ||
	    (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
		write_unlock_irqrestore(&ha->hw_lock, flags);
		printk(KERN_ERR "%s out of bound pci memory access. offset is 0x%llx\n",
		       DRIVER_NAME, off);
		return -1;
	}

	addr = qla4_8xxx_pci_base_offsetfset(ha, start);
	if (!addr) {
		write_unlock_irqrestore(&ha->hw_lock, flags);
		mem_base = pci_resource_start(ha->pdev, 0);
		mem_page = start & PAGE_MASK;
		/*
		 * Map two pages whenever the user tries to access addresses
		 * in two consecutive pages.
		 */
		if (mem_page != ((start + size - 1) & PAGE_MASK))
			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
		else
			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
		if (mem_ptr == NULL)
			return -1;

		addr = mem_ptr;
		addr += start & (PAGE_SIZE - 1);
		write_lock_irqsave(&ha->hw_lock, flags);
	}

	switch (size) {
	case 1:
		writeb(*(u8 *)data, addr);
		break;
	case 2:
		writew(*(u16 *)data, addr);
		break;
	case 4:
		writel(*(u32 *)data, addr);
		break;
	case 8:
		writeq(*(u64 *)data, addr);
		break;
	default:
		ret = -1;
		break;
	}
	write_unlock_irqrestore(&ha->hw_lock, flags);
	if (mem_ptr)
		iounmap(mem_ptr);
	return ret;
}

#define MTU_FUDGE_FACTOR 100

static unsigned long
qla4_82xx_decode_crb_addr(unsigned long addr)
{
	int i;
	unsigned long base_addr, offset, pci_base;

	if (!qla4_8xxx_crb_table_initialized)
		qla4_82xx_crb_addr_transform_setup();

	pci_base = ADDR_ERROR;
	base_addr = addr & 0xfff00000;
	offset = addr & 0x000fffff;

	for (i = 0; i < MAX_CRB_XFORM; i++) {
		if (crb_addr_xform[i] == base_addr) {
			pci_base = i << 20;
			break;
		}
	}
	if (pci_base == ADDR_ERROR)
		return pci_base;
	else
		return pci_base + offset;
}
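
/*
 * Example: an internal CRB address of the form (AGT_ADR_X << 20) | 0xYYYYY
 * matches crb_addr_xform[i] on its top 12 bits, and the function returns
 * (i << 20) + 0xYYYYY, i.e. the offset of block i in the 128M PCI CRB map.
 * ADDR_ERROR is returned when no block uses that hub/agent address.
 */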

static long rom_max_timeout = 100;
static long qla4_82xx_rom_lock_timeout = 100;

/*
 * Context: task, can sleep
 */
static int
qla4_82xx_rom_lock(struct scsi_qla_host *ha)
{
	int done = 0, timeout = 0;

	might_sleep();

	while (!done) {
		/* acquire semaphore2 from PCI HW block */
		done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
		if (done == 1)
			break;
		if (timeout >= qla4_82xx_rom_lock_timeout)
			return -1;

		timeout++;
		msleep(20);
	}
	qla4_82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
	return 0;
}

static void
qla4_82xx_rom_unlock(struct scsi_qla_host *ha)
{
	qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
}

static int
qla4_82xx_wait_rom_done(struct scsi_qla_host *ha)
{
	long timeout = 0;
	long done = 0;

	while (done == 0) {
		done = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
		done &= 2;
		timeout++;
		if (timeout >= rom_max_timeout) {
			printk("%s: Timeout reached waiting for rom done\n",
			       DRIVER_NAME);
			return -1;
		}
	}
	return 0;
}

static int
qla4_82xx_do_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
{
	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
	if (qla4_82xx_wait_rom_done(ha)) {
		printk("%s: Error waiting for rom done\n", DRIVER_NAME);
		return -1;
	}
	/* reset abyte_cnt and dummy_byte_cnt */
	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	udelay(10);
	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);

	*valp = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
	return 0;
}

static int
qla4_82xx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
{
	int ret, loops = 0;

	while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) {
		udelay(100);
		loops++;
	}
	if (loops >= 50000) {
		ql4_printk(KERN_WARNING, ha, "%s: qla4_82xx_rom_lock failed\n",
			   DRIVER_NAME);
		return -1;
	}
	ret = qla4_82xx_do_rom_fast_read(ha, addr, valp);
	qla4_82xx_rom_unlock(ha);
	return ret;
}

/*
 * This routine does the CRB initialize sequence
 * to put the ISP into operational state.
 */
static int
qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
{
	int addr, val;
	int i;
	struct crb_addr_pair *buf;
	unsigned long off;
	unsigned offset, n;

	/* Halt all the individual PEGs and other blocks of the ISP */
	qla4_82xx_rom_lock(ha);

	/* disable all I2Q */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);

	/* disable all niu interrupts */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
	/* disable xge rx/tx */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
	/* disable xg1 rx/tx */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
	/* disable sideband mac */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
	/* disable ap0 mac */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
	/* disable ap1 mac */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);

	/* halt sre */
	val = qla4_82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));

	/* halt epg */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);

	/* halt timers */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);

	/* halt pegs */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
	msleep(5);

	/* big hammer */
	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
		/* don't reset CAM block on reset */
		qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
	else
		qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);

	qla4_82xx_rom_unlock(ha);

	/* Read the signature value from the flash.
	 * Offset 0: Contains the signature (0xcafecafe)
	 * Offset 4: Offset and number of addr/value pairs
	 * present in the CRB initialize sequence
	 */
	if (qla4_82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
	    qla4_82xx_rom_fast_read(ha, 4, &n) != 0) {
		ql4_printk(KERN_WARNING, ha,
		    "[ERROR] Reading crb_init area: n: %08x\n", n);
		return -1;
	}

	/* Offset in flash = lower 16 bits
	 * Number of entries = upper 16 bits
	 */
	offset = n & 0xffffU;
	n = (n >> 16) & 0xffffU;
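
	/*
	 * For example, n = 0x01280040 read from flash offset 4 means the
	 * addr/value pairs start at flash word offset 0x40 and there are
	 * 0x128 of them; each pair occupies 8 bytes (value at 8*i + 4*offset,
	 * address at 8*i + 4*offset + 4), matching the reads in the loop
	 * below.
	 */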

	/* number of addr/value pairs should not exceed 1024 entries */
	if (n >= 1024) {
		ql4_printk(KERN_WARNING, ha,
		    "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n",
		    DRIVER_NAME, __func__, n);
		return -1;
	}

	ql4_printk(KERN_INFO, ha,
	    "%s: %d CRB init values found in ROM.\n", DRIVER_NAME, n);

	buf = kmalloc_array(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
	if (buf == NULL) {
		ql4_printk(KERN_WARNING, ha,
		    "%s: [ERROR] Unable to malloc memory.\n", DRIVER_NAME);
		return -1;
	}

	for (i = 0; i < n; i++) {
		if (qla4_82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
		    qla4_82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) {
			kfree(buf);
			return -1;
		}

		buf[i].addr = addr;
		buf[i].data = val;
	}

	for (i = 0; i < n; i++) {
		/* Translate internal CRB initialization
		 * address to PCI bus address
		 */
		off = qla4_82xx_decode_crb_addr((unsigned long)buf[i].addr) +
		    QLA82XX_PCI_CRBSPACE;
		/* Not all CRB addr/value pairs need to be written;
		 * some of them are skipped
		 */

		/* skip if LS bit is set */
		if (off & 0x1) {
			DEBUG2(ql4_printk(KERN_WARNING, ha,
			    "Skip CRB init replay for offset = 0x%lx\n", off));
			continue;
		}

		/* skipping cold reboot MAGIC */
		if (off == QLA82XX_CAM_RAM(0x1fc))
			continue;

		/* do not reset PCI */
		if (off == (ROMUSB_GLB + 0xbc))
			continue;

		/* skip core clock, so that firmware can increase the clock */
		if (off == (ROMUSB_GLB + 0xc8))
			continue;

		/* skip the function enable register */
		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
			continue;

		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
			continue;

		if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
			continue;

		if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
			continue;

		if (off == ADDR_ERROR) {
			ql4_printk(KERN_WARNING, ha,
			    "%s: [ERROR] Unknown addr: 0x%08lx\n",
			    DRIVER_NAME, buf[i].addr);
			continue;
		}

		qla4_82xx_wr_32(ha, off, buf[i].data);

		/* ISP requires a much bigger delay to settle down,
		 * else crb_window returns 0xffffffff
		 */
		if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
			msleep(1000);

		/* ISP requires a millisecond delay between
		 * successive CRB register updates
		 */
		msleep(1);
	}

	kfree(buf);

	/* Resetting the data and instruction cache */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);

	/* Clear all protocol processing engines */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);

	return 0;
}

/**
 * qla4_8xxx_ms_mem_write_128b - Writes data to MS/off-chip memory
 * @ha: Pointer to adapter structure
 * @addr: MS/off-chip memory address to write to
 * @data: Data to be written
 * @count: word_count to be written
 *
 * Return: On success return QLA_SUCCESS
 *	   On error return QLA_ERROR
 **/
int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
				uint32_t *data, uint32_t count)
{
	int i, j;
	uint32_t agt_ctrl;
	unsigned long flags;
	int ret_val = QLA_SUCCESS;

	/* Only 128-bit aligned access */
	if (addr & 0xF) {
		ret_val = QLA_ERROR;
		goto exit_ms_mem_write;
	}

	write_lock_irqsave(&ha->hw_lock, flags);

	/* Write address */
	ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0);
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n",
			   __func__);
		goto exit_ms_mem_write_unlock;
	}

	for (i = 0; i < count; i++, addr += 16) {
		if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
					     QLA8XXX_ADDR_QDR_NET_MAX)) ||
		      (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
					     QLA8XXX_ADDR_DDR_NET_MAX)))) {
			ret_val = QLA_ERROR;
			goto exit_ms_mem_write_unlock;
		}

		ret_val = ha->isp_ops->wr_reg_indirect(ha,
						       MD_MIU_TEST_AGT_ADDR_LO,
						       addr);
		/* Write data */
		ret_val |= ha->isp_ops->wr_reg_indirect(ha,
						MD_MIU_TEST_AGT_WRDATA_LO,
						*data++);
		ret_val |= ha->isp_ops->wr_reg_indirect(ha,
						MD_MIU_TEST_AGT_WRDATA_HI,
						*data++);
		ret_val |= ha->isp_ops->wr_reg_indirect(ha,
						MD_MIU_TEST_AGT_WRDATA_ULO,
						*data++);
		ret_val |= ha->isp_ops->wr_reg_indirect(ha,
						MD_MIU_TEST_AGT_WRDATA_UHI,
						*data++);
		if (ret_val == QLA_ERROR) {
			ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n",
				   __func__);
			goto exit_ms_mem_write_unlock;
		}

		/* Check write status */
		ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
						       MIU_TA_CTL_WRITE_ENABLE);
		ret_val |= ha->isp_ops->wr_reg_indirect(ha,
							MD_MIU_TEST_AGT_CTRL,
							MIU_TA_CTL_WRITE_START);
		if (ret_val == QLA_ERROR) {
			ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n",
				   __func__);
			goto exit_ms_mem_write_unlock;
		}

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			ret_val = ha->isp_ops->rd_reg_indirect(ha,
							MD_MIU_TEST_AGT_CTRL,
							&agt_ctrl);
			if (ret_val == QLA_ERROR) {
				ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n",
					   __func__);
				goto exit_ms_mem_write_unlock;
			}
			if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		/* Status check failed */
		if (j >= MAX_CTL_CHECK) {
			printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n",
					   __func__);
			ret_val = QLA_ERROR;
			goto exit_ms_mem_write_unlock;
		}
	}

exit_ms_mem_write_unlock:
	write_unlock_irqrestore(&ha->hw_lock, flags);

exit_ms_mem_write:
	return ret_val;
}

static int
qla4_82xx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
{
	int i, rval = 0;
	long size = 0;
	long flashaddr, memaddr;
	u64 data;
	u32 high, low;

	flashaddr = memaddr = ha->hw.flt_region_bootload;
	size = (image_start - flashaddr) / 8;

	DEBUG2(printk("scsi%ld: %s: bootldr=0x%lx, fw_image=0x%x\n",
	    ha->host_no, __func__, flashaddr, image_start));

	for (i = 0; i < size; i++) {
		if ((qla4_82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
		    (qla4_82xx_rom_fast_read(ha, flashaddr + 4,
		    (int *)&high))) {
			rval = -1;
			goto exit_load_from_flash;
		}
		data = ((u64)high << 32) | low;
		rval = qla4_82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
		if (rval)
			goto exit_load_from_flash;

		flashaddr += 8;
		memaddr += 8;

		if (i % 0x1000 == 0)
			msleep(1);
	}

	udelay(100);

	read_lock(&ha->hw_lock);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
	read_unlock(&ha->hw_lock);

exit_load_from_flash:
	return rval;
}

static int qla4_82xx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
{
	u32 rst;

	qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
	if (qla4_82xx_pinit_from_rom(ha, 0) != QLA_SUCCESS) {
		printk(KERN_WARNING "%s: Error during CRB Initialization\n",
		       __func__);
		return QLA_ERROR;
	}

	udelay(500);

	/* At this point, QM is in reset. This could be a problem if there are
	 * incoming d* transition queue messages. QM/PCIE could wedge.
	 * To get around this, QM is brought out of reset.
	 */
	rst = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
	/* unreset qm */
	rst &= ~(1 << 28);
	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);

	if (qla4_82xx_load_from_flash(ha, image_start)) {
		printk("%s: Error trying to load fw from flash!\n", __func__);
		return QLA_ERROR;
	}

	return QLA_SUCCESS;
}

int
qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *ha,
		u64 off, void *data, int size)
{
	int i, j = 0, k, start, end, loop, sz[2], off0[2];
	int shift_amount;
	uint32_t temp;
	uint64_t off8, val, mem_crb, word[2] = {0, 0};

	/*
	 * If not MN, go check for MS or invalid.
	 */
	if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
		mem_crb = QLA82XX_CRB_QDR_NET;
	else {
		mem_crb = QLA82XX_CRB_DDR_NET;
		if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0)
			return qla4_82xx_pci_mem_read_direct(ha,
					off, data, size);
	}

	off8 = off & 0xfffffff0;
	off0[0] = off & 0xf;
	sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
	shift_amount = 4;

	loop = ((off0[0] + size - 1) >> shift_amount) + 1;
	off0[1] = 0;
	sz[1] = size - sz[0];

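	/*
	 * Worked example: off = 0x...0e, size = 4 gives off0[0] = 14,
	 * sz[0] = 2, sz[1] = 2 and loop = 2: the test agent reads two
	 * 16-byte chunks, and the tail of the first is stitched to the
	 * head of the second when val is assembled below.
	 */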
	for (i = 0; i < loop; i++) {
		temp = off8 + (i << shift_amount);
		qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
		temp = 0;
		qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
		temp = MIU_TA_CTL_ENABLE;
		qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
		temp = MIU_TA_CTL_START_ENABLE;
		qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
			if ((temp & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			printk_ratelimited(KERN_ERR
					   "%s: failed to read through agent\n",
					   __func__);
			break;
		}

		start = off0[i] >> 2;
		end = (off0[i] + sz[i] - 1) >> 2;
		for (k = start; k <= end; k++) {
			temp = qla4_82xx_rd_32(ha,
			    mem_crb + MIU_TEST_AGT_RDDATA(k));
			word[i] |= ((uint64_t)temp << (32 * (k & 1)));
		}
	}

	if (j >= MAX_CTL_CHECK)
		return -1;

	if ((off0[0] & 7) == 0) {
		val = word[0];
	} else {
		val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
		    ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
	}

	switch (size) {
	case 1:
		*(uint8_t *)data = val;
		break;
	case 2:
		*(uint16_t *)data = val;
		break;
	case 4:
		*(uint32_t *)data = val;
		break;
	case 8:
		*(uint64_t *)data = val;
		break;
	}
	return 0;
}

int
qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha,
		u64 off, void *data, int size)
{
	int i, j, ret = 0, loop, sz[2], off0;
	int scale, shift_amount, startword;
	uint32_t temp;
	uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};

	/*
	 * If not MN, go check for MS or invalid.
	 */
	if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
		mem_crb = QLA82XX_CRB_QDR_NET;
	else {
		mem_crb = QLA82XX_CRB_DDR_NET;
		if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0)
			return qla4_82xx_pci_mem_write_direct(ha,
					off, data, size);
	}

	off0 = off & 0x7;
	sz[0] = (size < (8 - off0)) ? size : (8 - off0);
	sz[1] = size - sz[0];

	off8 = off & 0xfffffff0;
	loop = (((off & 0xf) + size - 1) >> 4) + 1;
	shift_amount = 4;
	scale = 2;
	startword = (off & 0xf)/8;
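
	/*
	 * Writes narrower than a full 16-byte chunk are read-modify-write:
	 * the first loop below reads the affected chunk(s) into word[],
	 * the new bytes are merged in at a bit offset of (off0 * 8), and
	 * the chunk is then written back through the test agent.
	 */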

	for (i = 0; i < loop; i++) {
		if (qla4_82xx_pci_mem_read_2M(ha, off8 +
		    (i << shift_amount), &word[i * scale], 8))
			return -1;
	}

	switch (size) {
	case 1:
		tmpw = *((uint8_t *)data);
		break;
	case 2:
		tmpw = *((uint16_t *)data);
		break;
	case 4:
		tmpw = *((uint32_t *)data);
		break;
	case 8:
	default:
		tmpw = *((uint64_t *)data);
		break;
	}

	if (sz[0] == 8)
		word[startword] = tmpw;
	else {
		word[startword] &=
		    ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
		word[startword] |= tmpw << (off0 * 8);
	}

	if (sz[1] != 0) {
		word[startword+1] &= ~(~0ULL << (sz[1] * 8));
		word[startword+1] |= tmpw >> (sz[0] * 8);
	}

	for (i = 0; i < loop; i++) {
		temp = off8 + (i << shift_amount);
		qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
		temp = 0;
		qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
		temp = word[i * scale] & 0xffffffff;
		qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
		temp = (word[i * scale] >> 32) & 0xffffffff;
		qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
		temp = word[i*scale + 1] & 0xffffffff;
		qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_LO,
		    temp);
		temp = (word[i*scale + 1] >> 32) & 0xffffffff;
		qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_HI,
		    temp);

		temp = MIU_TA_CTL_WRITE_ENABLE;
		qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
		temp = MIU_TA_CTL_WRITE_START;
		qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
			if ((temp & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			if (printk_ratelimit())
				ql4_printk(KERN_ERR, ha,
					   "%s: failed to write through agent\n",
					   __func__);
			ret = -1;
			break;
		}
	}

	return ret;
}

static int qla4_82xx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
{
	u32 val = 0;
	int retries = 60;

	if (!pegtune_val) {
		do {
			val = qla4_82xx_rd_32(ha, CRB_CMDPEG_STATE);
			if ((val == PHAN_INITIALIZE_COMPLETE) ||
			    (val == PHAN_INITIALIZE_ACK))
				return 0;
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(500);

		} while (--retries);

		if (!retries) {
			pegtune_val = qla4_82xx_rd_32(ha,
				QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
			printk(KERN_WARNING "%s: init failed, pegtune_val = %x\n",
			       __func__, pegtune_val);
			return -1;
		}
	}
	return 0;
}

static int qla4_82xx_rcvpeg_ready(struct scsi_qla_host *ha)
{
	uint32_t state = 0;
	int loops = 0;

	/* Window 1 call */
	read_lock(&ha->hw_lock);
	state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE);
	read_unlock(&ha->hw_lock);

	while ((state != PHAN_PEG_RCV_INITIALIZED) && (loops < 30000)) {
		udelay(100);
		/* Window 1 call */
		read_lock(&ha->hw_lock);
		state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE);
		read_unlock(&ha->hw_lock);

		loops++;
	}

	if (loops >= 30000) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "Receive Peg initialization not complete: 0x%x.\n", state));
		return QLA_ERROR;
	}

	return QLA_SUCCESS;
}

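/*
 * IDC bookkeeping: in CRB_DRV_ACTIVE and CRB_DRV_STATE each ISP8022
 * function owns a 4-bit nibble (so function 1 sets bit 4), while
 * ISP8324/ISP8042 use one bit per function.  The helpers below set and
 * clear this driver's bits in those registers.
 */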
void
qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
{
	uint32_t drv_active;

	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);

	/*
	 * For ISP8324 and ISP8042, drv_active register has 1 bit per function;
	 * shift 1 by func_num to set a bit for the function.
	 * For ISP8022, drv_active has 4 bits per function.
	 */
	if (is_qla8032(ha) || is_qla8042(ha))
		drv_active |= (1 << ha->func_num);
	else
		drv_active |= (1 << (ha->func_num * 4));

	ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
		   __func__, ha->host_no, drv_active);
	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active);
}

void
qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
{
	uint32_t drv_active;

	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);

	/*
	 * For ISP8324 and ISP8042, drv_active register has 1 bit per function;
	 * shift 1 by func_num to clear the bit for the function.
	 * For ISP8022, drv_active has 4 bits per function.
	 */
	if (is_qla8032(ha) || is_qla8042(ha))
		drv_active &= ~(1 << (ha->func_num));
	else
		drv_active &= ~(1 << (ha->func_num * 4));

	ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
		   __func__, ha->host_no, drv_active);
	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active);
}

inline int qla4_8xxx_need_reset(struct scsi_qla_host *ha)
{
	uint32_t drv_state, drv_active;
	int rval;

	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
	drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);

	/*
	 * For ISP8324 and ISP8042, drv_state register has 1 bit per function;
	 * check the bit for this function.
	 * For ISP8022, drv_state has 4 bits per function.
	 */
	if (is_qla8032(ha) || is_qla8042(ha))
		rval = drv_state & (1 << ha->func_num);
	else
		rval = drv_state & (1 << (ha->func_num * 4));

	if ((test_bit(AF_EEH_BUSY, &ha->flags)) && drv_active)
		rval = 1;

	return rval;
}

void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
{
	uint32_t drv_state;

	drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);

	/*
	 * For ISP8324 and ISP8042, drv_state register has 1 bit per function;
	 * shift 1 by func_num to set the bit for the function.
	 * For ISP8022, drv_state has 4 bits per function.
	 */
	if (is_qla8032(ha) || is_qla8042(ha))
		drv_state |= (1 << ha->func_num);
	else
		drv_state |= (1 << (ha->func_num * 4));

	ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
		   __func__, ha->host_no, drv_state);
	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state);
}

void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
{
	uint32_t drv_state;

	drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);

	/*
	 * For ISP8324 and ISP8042, drv_state register has 1 bit per function;
	 * shift 1 by func_num to clear the bit for the function.
	 * For ISP8022, drv_state has 4 bits per function.
	 */
	if (is_qla8032(ha) || is_qla8042(ha))
		drv_state &= ~(1 << ha->func_num);
	else
		drv_state &= ~(1 << (ha->func_num * 4));

	ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
		   __func__, ha->host_no, drv_state);
	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state);
}

static inline void
qla4_8xxx_set_qsnt_ready(struct scsi_qla_host *ha)
{
	uint32_t qsnt_state;

	qsnt_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);

	/*
	 * For ISP8324 and ISP8042, drv_state register has 1 bit per function;
	 * shift 1 by func_num to set the quiescent bit for the function.
	 * For ISP8022, drv_state has 4 bits per function; bit 1 of the
	 * function's nibble is the quiescent bit.
	 */
	if (is_qla8032(ha) || is_qla8042(ha))
		qsnt_state |= (1 << ha->func_num);
	else
		qsnt_state |= (2 << (ha->func_num * 4));

	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, qsnt_state);
}

static int
qla4_82xx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
{
	uint16_t lnk;

	/* scrub dma mask expansion register */
	qla4_82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555);

	/* Overwrite stale initialization register values */
	qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
	qla4_82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
	qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
	qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);

	if (qla4_82xx_load_fw(ha, image_start) != QLA_SUCCESS) {
		printk("%s: Error trying to start fw!\n", __func__);
		return QLA_ERROR;
	}

	/* Handshake with the card before we register the devices. */
	if (qla4_82xx_cmdpeg_ready(ha, 0) != QLA_SUCCESS) {
		printk("%s: Error during card handshake!\n", __func__);
		return QLA_ERROR;
	}

	/* Negotiated Link width */
	pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk);
	ha->link_width = (lnk >> 4) & 0x3f;

	/* Synchronize with Receive peg */
	return qla4_82xx_rcvpeg_ready(ha);
}

int qla4_82xx_try_start_fw(struct scsi_qla_host *ha)
{
	int rval;

	/*
	 * FW Load priority:
	 * 1) Operational firmware residing in flash.
	 * 2) Fail
	 */

	ql4_printk(KERN_INFO, ha,
	    "FW: Retrieving flash offsets from FLT/FDT ...\n");
	rval = qla4_8xxx_get_flash_info(ha);
	if (rval != QLA_SUCCESS)
		return rval;

	ql4_printk(KERN_INFO, ha,
	    "FW: Attempting to load firmware from flash...\n");
	rval = qla4_82xx_start_firmware(ha, ha->hw.flt_region_fw);

	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha,
		    "FW: Load firmware from flash FAILED...\n");
		return rval;
	}

	return rval;
}

void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha)
{
	if (qla4_82xx_rom_lock(ha)) {
		/* Someone else is holding the lock. */
		dev_info(&ha->pdev->dev, "Resetting rom_lock\n");
	}

	/*
	 * Either we got the lock, or someone
	 * else died while holding it.
	 * In either case, unlock.
	 */
	qla4_82xx_rom_unlock(ha);
}
1806
ql4_84xx_poll_wait_for_ready(struct scsi_qla_host * ha,uint32_t addr1,uint32_t mask)1807 static uint32_t ql4_84xx_poll_wait_for_ready(struct scsi_qla_host *ha,
1808 uint32_t addr1, uint32_t mask)
1809 {
1810 unsigned long timeout;
1811 uint32_t rval = QLA_SUCCESS;
1812 uint32_t temp;
1813
1814 timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
1815 do {
1816 ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
1817 if ((temp & mask) != 0)
1818 break;
1819
1820 if (time_after_eq(jiffies, timeout)) {
1821 ql4_printk(KERN_INFO, ha, "Error in processing rdmdio entry\n");
1822 return QLA_ERROR;
1823 }
1824 } while (1);
1825
1826 return rval;
1827 }
1828
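/**
 * ql4_84xx_ipmdio_rd_reg - Read a register over the internal MDIO bus.
 * @ha: pointer to adapter structure
 * @addr1: MDIO command/address register
 * @addr3: MDIO data register holding the result
 * @mask: ready bits polled before and after issuing the read
 * @addr: register address to read (OR'd with 0x40000000 when issued)
 * @data_ptr: output for the value read
 **/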
1829 static uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1,
1830 uint32_t addr3, uint32_t mask, uint32_t addr,
1831 uint32_t *data_ptr)
1832 {
1833 int rval = QLA_SUCCESS;
1834 uint32_t temp;
1835 uint32_t data;
1836
1837 rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
1838 if (rval)
1839 goto exit_ipmdio_rd_reg;
1840
1841 temp = (0x40000000 | addr);
1842 ha->isp_ops->wr_reg_indirect(ha, addr1, temp);
1843
1844 rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
1845 if (rval)
1846 goto exit_ipmdio_rd_reg;
1847
1848 ha->isp_ops->rd_reg_indirect(ha, addr3, &data);
1849 *data_ptr = data;
1850
1851 exit_ipmdio_rd_reg:
1852 return rval;
1853 }
1854
1855
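/**
 * ql4_84xx_poll_wait_ipmdio_bus_idle - Wait for the MDIO bus to go idle.
 * @ha: pointer to adapter structure
 * @addr1: MDIO command/address register
 * @addr2: MDIO status register polled for the busy bit
 * @addr3: MDIO data register
 * @mask: ready bits for the underlying MDIO read
 *
 * Polls via ql4_84xx_ipmdio_rd_reg() until bit 0 of the status clears,
 * or gives up after TIMEOUT_100_MS and returns QLA_ERROR.
 **/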
1856 static uint32_t ql4_84xx_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *ha,
1857 uint32_t addr1,
1858 uint32_t addr2,
1859 uint32_t addr3,
1860 uint32_t mask)
1861 {
1862 unsigned long timeout;
1863 uint32_t temp;
1864 uint32_t rval = QLA_SUCCESS;
1865
1866 timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
1867 do {
1868 ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, mask, addr2, &temp);
1869 if ((temp & 0x1) != 1)
1870 break;
1871 if (time_after_eq(jiffies, timeout)) {
1872 ql4_printk(KERN_INFO, ha, "Error in processing mdiobus idle\n");
1873 return QLA_ERROR;
1874 }
1875 } while (1);
1876
1877 return rval;
1878 }
1879
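/**
 * ql4_84xx_ipmdio_wr_reg - Write a register over the internal MDIO bus.
 * @ha: pointer to adapter structure
 * @addr1: MDIO command/address register
 * @addr3: MDIO data register the value is staged through
 * @mask: ready bits polled before and after the write
 * @addr: register address to write
 * @value: value to write
 **/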
1880 static int ql4_84xx_ipmdio_wr_reg(struct scsi_qla_host *ha,
1881 uint32_t addr1, uint32_t addr3,
1882 uint32_t mask, uint32_t addr,
1883 uint32_t value)
1884 {
1885 int rval = QLA_SUCCESS;
1886
1887 rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
1888 if (rval)
1889 goto exit_ipmdio_wr_reg;
1890
1891 ha->isp_ops->wr_reg_indirect(ha, addr3, value);
1892 ha->isp_ops->wr_reg_indirect(ha, addr1, addr);
1893
1894 rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
1895 if (rval)
1896 goto exit_ipmdio_wr_reg;
1897
1898 exit_ipmdio_wr_reg:
1899 return rval;
1900 }
1901
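/**
 * qla4_8xxx_minidump_process_rdcrb - Capture a strided range of CRB registers.
 * @ha: pointer to adapter structure
 * @entry_hdr: minidump entry giving start address, stride and op count
 * @d_ptr: in/out cursor into the dump buffer; each register is stored
 *	as an (address, value) pair
 **/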
1902 static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
1903 struct qla8xxx_minidump_entry_hdr *entry_hdr,
1904 uint32_t **d_ptr)
1905 {
1906 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
1907 struct qla8xxx_minidump_entry_crb *crb_hdr;
1908 uint32_t *data_ptr = *d_ptr;
1909
1910 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1911 crb_hdr = (struct qla8xxx_minidump_entry_crb *)entry_hdr;
1912 r_addr = crb_hdr->addr;
1913 r_stride = crb_hdr->crb_strd.addr_stride;
1914 loop_cnt = crb_hdr->op_count;
1915
1916 for (i = 0; i < loop_cnt; i++) {
1917 ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
1918 *data_ptr++ = cpu_to_le32(r_addr);
1919 *data_ptr++ = cpu_to_le32(r_value);
1920 r_addr += r_stride;
1921 }
1922 *d_ptr = data_ptr;
1923 }
1924
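/**
 * qla4_83xx_check_dma_engine_state - Check that the pex-dma engine is usable.
 * @ha: pointer to adapter structure
 *
 * Reads the command-status-and-control register of the dma engine named
 * in the minidump template and returns QLA_SUCCESS only if BIT_31
 * (engine available) is set.
 **/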
1925 static int qla4_83xx_check_dma_engine_state(struct scsi_qla_host *ha)
1926 {
1927 int rval = QLA_SUCCESS;
1928 uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
1929 uint64_t dma_base_addr = 0;
1930 struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL;
1931
1932 tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
1933 ha->fw_dump_tmplt_hdr;
1934 dma_eng_num =
1935 tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX];
1936 dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS +
1937 (dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET);
1938
1939 /* Read the pex-dma's command-status-and-control register. */
1940 rval = ha->isp_ops->rd_reg_indirect(ha,
1941 (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL),
1942 &cmd_sts_and_cntrl);
1943
1944 if (rval)
1945 return QLA_ERROR;
1946
1947 /* Check if requested pex-dma engine is available. */
1948 if (cmd_sts_and_cntrl & BIT_31)
1949 return QLA_SUCCESS;
1950 else
1951 return QLA_ERROR;
1952 }
1953
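/**
 * qla4_83xx_start_pex_dma - Start one pex-dma transfer and wait for it.
 * @ha: pointer to adapter structure
 * @m_hdr: rdmem-pex-dma entry holding the descriptor address and command
 *
 * Programs the descriptor address, issues the start command and polls
 * BIT_1 of the command-status-and-control register until the transfer
 * completes or QLA83XX_PEX_DMA_MAX_WAIT iterations elapse.
 **/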
1954 static int qla4_83xx_start_pex_dma(struct scsi_qla_host *ha,
1955 struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr)
1956 {
1957 int rval = QLA_SUCCESS, wait = 0;
1958 uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
1959 uint64_t dma_base_addr = 0;
1960 struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL;
1961
1962 tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
1963 ha->fw_dump_tmplt_hdr;
1964 dma_eng_num =
1965 tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX];
1966 dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS +
1967 (dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET);
1968
1969 rval = ha->isp_ops->wr_reg_indirect(ha,
1970 dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_LOW,
1971 m_hdr->desc_card_addr);
1972 if (rval)
1973 goto error_exit;
1974
1975 rval = ha->isp_ops->wr_reg_indirect(ha,
1976 dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_HIGH, 0);
1977 if (rval)
1978 goto error_exit;
1979
1980 rval = ha->isp_ops->wr_reg_indirect(ha,
1981 dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL,
1982 m_hdr->start_dma_cmd);
1983 if (rval)
1984 goto error_exit;
1985
1986 /* Wait for dma operation to complete. */
1987 for (wait = 0; wait < QLA83XX_PEX_DMA_MAX_WAIT; wait++) {
1988 rval = ha->isp_ops->rd_reg_indirect(ha,
1989 (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL),
1990 &cmd_sts_and_cntrl);
1991 if (rval)
1992 goto error_exit;
1993
1994 if ((cmd_sts_and_cntrl & BIT_1) == 0)
1995 break;
1996 else
1997 udelay(10);
1998 }
1999
2000 /* Wait a max of 100 ms, otherwise fallback to rdmem entry read */
2001 if (wait >= QLA83XX_PEX_DMA_MAX_WAIT) {
2002 rval = QLA_ERROR;
2003 goto error_exit;
2004 }
2005
2006 error_exit:
2007 return rval;
2008 }
2009
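/**
 * qla4_8xxx_minidump_pex_dma_read - Capture adapter memory via pex-dma.
 * @ha: pointer to adapter structure
 * @entry_hdr: rdmem entry describing the region to capture
 * @d_ptr: in/out cursor into the dump buffer
 *
 * DMAs the region into a coherent host buffer in chunks of up to
 * QLA83XX_PEX_DMA_READ_SIZE and copies each chunk into the dump buffer.
 * Returns QLA_ERROR if the engine is unavailable or a transfer fails,
 * so the caller can fall back to the slower rdmem register path.
 **/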
2010 static int qla4_8xxx_minidump_pex_dma_read(struct scsi_qla_host *ha,
2011 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2012 uint32_t **d_ptr)
2013 {
2014 int rval = QLA_SUCCESS;
2015 struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr = NULL;
2016 uint32_t size, read_size;
2017 uint8_t *data_ptr = (uint8_t *)*d_ptr;
2018 void *rdmem_buffer = NULL;
2019 dma_addr_t rdmem_dma;
2020 struct qla4_83xx_pex_dma_descriptor dma_desc;
2021
2022 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
2023
2024 rval = qla4_83xx_check_dma_engine_state(ha);
2025 if (rval != QLA_SUCCESS) {
2026 DEBUG2(ql4_printk(KERN_INFO, ha,
2027 "%s: DMA engine not available. Fallback to rdmem-read.\n",
2028 __func__));
2029 return QLA_ERROR;
2030 }
2031
2032 m_hdr = (struct qla4_83xx_minidump_entry_rdmem_pex_dma *)entry_hdr;
2033 rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev,
2034 QLA83XX_PEX_DMA_READ_SIZE,
2035 &rdmem_dma, GFP_KERNEL);
2036 if (!rdmem_buffer) {
2037 DEBUG2(ql4_printk(KERN_INFO, ha,
2038 "%s: Unable to allocate rdmem dma buffer\n",
2039 __func__));
2040 return QLA_ERROR;
2041 }
2042
2043 /* Prepare pex-dma descriptor to be written to MS memory. */
2044 /* dma-desc-cmd layout:
2045 * 0-3: dma-desc-cmd 0-3
2046 * 4-7: pcid function number
2047 * 8-15: dma-desc-cmd 8-15
2048 */
2049 dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f);
2050 dma_desc.cmd.dma_desc_cmd |= ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4);
2051 dma_desc.dma_bus_addr = rdmem_dma;
2052
2053 size = 0;
2054 read_size = 0;
2055 /*
2056 * Perform rdmem operation using pex-dma.
2057 * Prepare dma in chunks of QLA83XX_PEX_DMA_READ_SIZE.
2058 */
2059 while (read_size < m_hdr->read_data_size) {
2060 if (m_hdr->read_data_size - read_size >=
2061 QLA83XX_PEX_DMA_READ_SIZE)
2062 size = QLA83XX_PEX_DMA_READ_SIZE;
2063 else {
2064 size = (m_hdr->read_data_size - read_size);
2065
2066 if (rdmem_buffer)
2067 dma_free_coherent(&ha->pdev->dev,
2068 QLA83XX_PEX_DMA_READ_SIZE,
2069 rdmem_buffer, rdmem_dma);
2070
2071 rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, size,
2072 &rdmem_dma,
2073 GFP_KERNEL);
2074 if (!rdmem_buffer) {
2075 DEBUG2(ql4_printk(KERN_INFO, ha,
2076 "%s: Unable to allocate rdmem dma buffer\n",
2077 __func__));
2078 return QLA_ERROR;
2079 }
2080 dma_desc.dma_bus_addr = rdmem_dma;
2081 }
2082
2083 dma_desc.src_addr = m_hdr->read_addr + read_size;
2084 dma_desc.cmd.read_data_size = size;
2085
2086 /* Prepare: Write pex-dma descriptor to MS memory. */
2087 rval = qla4_8xxx_ms_mem_write_128b(ha,
2088 (uint64_t)m_hdr->desc_card_addr,
2089 (uint32_t *)&dma_desc,
2090 (sizeof(struct qla4_83xx_pex_dma_descriptor)/16));
2091 if (rval != QLA_SUCCESS) {
2092 ql4_printk(KERN_INFO, ha,
2093 "%s: Error writing rdmem-dma-init to MS !!!\n",
2094 __func__);
2095 goto error_exit;
2096 }
2097
2098 DEBUG2(ql4_printk(KERN_INFO, ha,
2099 "%s: Dma-desc: Instruct for rdmem dma (size 0x%x).\n",
2100 __func__, size));
2101 /* Execute: Start pex-dma operation. */
2102 rval = qla4_83xx_start_pex_dma(ha, m_hdr);
2103 if (rval != QLA_SUCCESS) {
2104 DEBUG2(ql4_printk(KERN_INFO, ha,
2105 "scsi(%ld): start-pex-dma failed rval=0x%x\n",
2106 ha->host_no, rval));
2107 goto error_exit;
2108 }
2109
2110 memcpy(data_ptr, rdmem_buffer, size);
2111 data_ptr += size;
2112 read_size += size;
2113 }
2114
2115 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
2116
2117 *d_ptr = (uint32_t *)data_ptr;
2118
2119 error_exit:
2120 if (rdmem_buffer)
2121 dma_free_coherent(&ha->pdev->dev, size, rdmem_buffer,
2122 rdmem_dma);
2123
2124 return rval;
2125 }
2126
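/**
 * qla4_8xxx_minidump_process_l2tag - Dump an L2 cache bank by tag.
 * @ha: pointer to adapter structure
 * @entry_hdr: cache entry with tag, control and read-address parameters
 * @d_ptr: in/out cursor into the dump buffer
 *
 * For each tag value: writes the tag and (optionally) control registers,
 * polls the control register until the poll mask clears, then reads
 * r_cnt words starting at the read address.
 **/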
2127 static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
2128 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2129 uint32_t **d_ptr)
2130 {
2131 uint32_t addr, r_addr, c_addr, t_r_addr;
2132 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
2133 unsigned long p_wait, w_time, p_mask;
2134 uint32_t c_value_w, c_value_r;
2135 struct qla8xxx_minidump_entry_cache *cache_hdr;
2136 int rval = QLA_ERROR;
2137 uint32_t *data_ptr = *d_ptr;
2138
2139 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
2140 cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr;
2141
2142 loop_count = cache_hdr->op_count;
2143 r_addr = cache_hdr->read_addr;
2144 c_addr = cache_hdr->control_addr;
2145 c_value_w = cache_hdr->cache_ctrl.write_value;
2146
2147 t_r_addr = cache_hdr->tag_reg_addr;
2148 t_value = cache_hdr->addr_ctrl.init_tag_value;
2149 r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
2150 p_wait = cache_hdr->cache_ctrl.poll_wait;
2151 p_mask = cache_hdr->cache_ctrl.poll_mask;
2152
2153 for (i = 0; i < loop_count; i++) {
2154 ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value);
2155
2156 if (c_value_w)
2157 ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w);
2158
2159 if (p_mask) {
2160 w_time = jiffies + p_wait;
2161 do {
2162 ha->isp_ops->rd_reg_indirect(ha, c_addr,
2163 &c_value_r);
2164 if ((c_value_r & p_mask) == 0) {
2165 break;
2166 } else if (time_after_eq(jiffies, w_time)) {
2167 /* capturing dump failed */
2168 return rval;
2169 }
2170 } while (1);
2171 }
2172
2173 addr = r_addr;
2174 for (k = 0; k < r_cnt; k++) {
2175 ha->isp_ops->rd_reg_indirect(ha, addr, &r_value);
2176 *data_ptr++ = cpu_to_le32(r_value);
2177 addr += cache_hdr->read_ctrl.read_addr_stride;
2178 }
2179
2180 t_value += cache_hdr->addr_ctrl.tag_value_stride;
2181 }
2182 *d_ptr = data_ptr;
2183 return QLA_SUCCESS;
2184 }
2185
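/**
 * qla4_8xxx_minidump_process_control - Execute a CRB control entry.
 * @ha: pointer to adapter structure
 * @entry_hdr: control entry holding an opcode bitmap and operands
 *
 * Interprets the entry's opcode bits (write, read/write, AND, OR, poll,
 * read state, write state, modify state) against a strided range of
 * CRB addresses, using the template's saved_state_array as scratch.
 **/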
2186 static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
2187 struct qla8xxx_minidump_entry_hdr *entry_hdr)
2188 {
2189 struct qla8xxx_minidump_entry_crb *crb_entry;
2190 uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS;
2191 uint32_t crb_addr;
2192 unsigned long wtime;
2193 struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
2194 int i;
2195
2196 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
2197 tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
2198 ha->fw_dump_tmplt_hdr;
2199 crb_entry = (struct qla8xxx_minidump_entry_crb *)entry_hdr;
2200
2201 crb_addr = crb_entry->addr;
2202 for (i = 0; i < crb_entry->op_count; i++) {
2203 opcode = crb_entry->crb_ctrl.opcode;
2204 if (opcode & QLA8XXX_DBG_OPCODE_WR) {
2205 ha->isp_ops->wr_reg_indirect(ha, crb_addr,
2206 crb_entry->value_1);
2207 opcode &= ~QLA8XXX_DBG_OPCODE_WR;
2208 }
2209 if (opcode & QLA8XXX_DBG_OPCODE_RW) {
2210 ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
2211 ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
2212 opcode &= ~QLA8XXX_DBG_OPCODE_RW;
2213 }
2214 if (opcode & QLA8XXX_DBG_OPCODE_AND) {
2215 ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
2216 read_value &= crb_entry->value_2;
2217 opcode &= ~QLA8XXX_DBG_OPCODE_AND;
2218 if (opcode & QLA8XXX_DBG_OPCODE_OR) {
2219 read_value |= crb_entry->value_3;
2220 opcode &= ~QLA8XXX_DBG_OPCODE_OR;
2221 }
2222 ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
2223 }
2224 if (opcode & QLA8XXX_DBG_OPCODE_OR) {
2225 ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
2226 read_value |= crb_entry->value_3;
2227 ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
2228 opcode &= ~QLA8XXX_DBG_OPCODE_OR;
2229 }
2230 if (opcode & QLA8XXX_DBG_OPCODE_POLL) {
2231 poll_time = crb_entry->crb_strd.poll_timeout;
2232 wtime = jiffies + poll_time;
2233 ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
2234
2235 do {
2236 if ((read_value & crb_entry->value_2) ==
2237 crb_entry->value_1) {
2238 break;
2239 } else if (time_after_eq(jiffies, wtime)) {
2240 /* capturing dump failed */
2241 rval = QLA_ERROR;
2242 break;
2243 } else {
2244 ha->isp_ops->rd_reg_indirect(ha,
2245 crb_addr, &read_value);
2246 }
2247 } while (1);
2248 opcode &= ~QLA8XXX_DBG_OPCODE_POLL;
2249 }
2250
2251 if (opcode & QLA8XXX_DBG_OPCODE_RDSTATE) {
2252 if (crb_entry->crb_strd.state_index_a) {
2253 index = crb_entry->crb_strd.state_index_a;
2254 addr = tmplt_hdr->saved_state_array[index];
2255 } else {
2256 addr = crb_addr;
2257 }
2258
2259 ha->isp_ops->rd_reg_indirect(ha, addr, &read_value);
2260 index = crb_entry->crb_ctrl.state_index_v;
2261 tmplt_hdr->saved_state_array[index] = read_value;
2262 opcode &= ~QLA8XXX_DBG_OPCODE_RDSTATE;
2263 }
2264
2265 if (opcode & QLA8XXX_DBG_OPCODE_WRSTATE) {
2266 if (crb_entry->crb_strd.state_index_a) {
2267 index = crb_entry->crb_strd.state_index_a;
2268 addr = tmplt_hdr->saved_state_array[index];
2269 } else {
2270 addr = crb_addr;
2271 }
2272
2273 if (crb_entry->crb_ctrl.state_index_v) {
2274 index = crb_entry->crb_ctrl.state_index_v;
2275 read_value =
2276 tmplt_hdr->saved_state_array[index];
2277 } else {
2278 read_value = crb_entry->value_1;
2279 }
2280
2281 ha->isp_ops->wr_reg_indirect(ha, addr, read_value);
2282 opcode &= ~QLA8XXX_DBG_OPCODE_WRSTATE;
2283 }
2284
2285 if (opcode & QLA8XXX_DBG_OPCODE_MDSTATE) {
2286 index = crb_entry->crb_ctrl.state_index_v;
2287 read_value = tmplt_hdr->saved_state_array[index];
2288 read_value <<= crb_entry->crb_ctrl.shl;
2289 read_value >>= crb_entry->crb_ctrl.shr;
2290 if (crb_entry->value_2)
2291 read_value &= crb_entry->value_2;
2292 read_value |= crb_entry->value_3;
2293 read_value += crb_entry->value_1;
2294 tmplt_hdr->saved_state_array[index] = read_value;
2295 opcode &= ~QLA8XXX_DBG_OPCODE_MDSTATE;
2296 }
2297 crb_addr += crb_entry->crb_strd.addr_stride;
2298 }
2299 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
2300 return rval;
2301 }
2302
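/**
 * qla4_8xxx_minidump_process_rdocm - Capture on-chip memory (OCM).
 * @ha: pointer to adapter structure
 * @entry_hdr: rdocm entry with start address, stride and word count
 * @d_ptr: in/out cursor into the dump buffer
 *
 * OCM is read directly through the mapped PCI BAR (nx_pcibase) rather
 * than through the indirect register interface.
 **/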
2303 static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
2304 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2305 uint32_t **d_ptr)
2306 {
2307 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
2308 struct qla8xxx_minidump_entry_rdocm *ocm_hdr;
2309 uint32_t *data_ptr = *d_ptr;
2310
2311 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
2312 ocm_hdr = (struct qla8xxx_minidump_entry_rdocm *)entry_hdr;
2313 r_addr = ocm_hdr->read_addr;
2314 r_stride = ocm_hdr->read_addr_stride;
2315 loop_cnt = ocm_hdr->op_count;
2316
2317 DEBUG2(ql4_printk(KERN_INFO, ha,
2318 "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
2319 __func__, r_addr, r_stride, loop_cnt));
2320
2321 for (i = 0; i < loop_cnt; i++) {
2322 r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
2323 *data_ptr++ = cpu_to_le32(r_value);
2324 r_addr += r_stride;
2325 }
2326 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n",
2327 __func__, (long unsigned int) (loop_cnt * sizeof(uint32_t))));
2328 *d_ptr = data_ptr;
2329 }
2330
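/**
 * qla4_8xxx_minidump_process_rdmux - Capture registers behind a mux.
 * @ha: pointer to adapter structure
 * @entry_hdr: mux entry with select address/value/stride and read address
 * @d_ptr: in/out cursor into the dump buffer; a (select, value) pair is
 *	stored for each mux selection
 **/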
2331 static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
2332 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2333 uint32_t **d_ptr)
2334 {
2335 uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
2336 struct qla8xxx_minidump_entry_mux *mux_hdr;
2337 uint32_t *data_ptr = *d_ptr;
2338
2339 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
2340 mux_hdr = (struct qla8xxx_minidump_entry_mux *)entry_hdr;
2341 r_addr = mux_hdr->read_addr;
2342 s_addr = mux_hdr->select_addr;
2343 s_stride = mux_hdr->select_value_stride;
2344 s_value = mux_hdr->select_value;
2345 loop_cnt = mux_hdr->op_count;
2346
2347 for (i = 0; i < loop_cnt; i++) {
2348 ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value);
2349 ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
2350 *data_ptr++ = cpu_to_le32(s_value);
2351 *data_ptr++ = cpu_to_le32(r_value);
2352 s_value += s_stride;
2353 }
2354 *d_ptr = data_ptr;
2355 }
2356
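/**
 * qla4_8xxx_minidump_process_l1cache - Dump an L1 cache bank by tag.
 * @ha: pointer to adapter structure
 * @entry_hdr: cache entry with tag, control and read-address parameters
 * @d_ptr: in/out cursor into the dump buffer
 *
 * Same walk as the L2 variant, minus the control-register polling.
 **/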
2357 static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
2358 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2359 uint32_t **d_ptr)
2360 {
2361 uint32_t addr, r_addr, c_addr, t_r_addr;
2362 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
2363 uint32_t c_value_w;
2364 struct qla8xxx_minidump_entry_cache *cache_hdr;
2365 uint32_t *data_ptr = *d_ptr;
2366
2367 cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr;
2368 loop_count = cache_hdr->op_count;
2369 r_addr = cache_hdr->read_addr;
2370 c_addr = cache_hdr->control_addr;
2371 c_value_w = cache_hdr->cache_ctrl.write_value;
2372
2373 t_r_addr = cache_hdr->tag_reg_addr;
2374 t_value = cache_hdr->addr_ctrl.init_tag_value;
2375 r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
2376
2377 for (i = 0; i < loop_count; i++) {
2378 ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value);
2379 ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w);
2380 addr = r_addr;
2381 for (k = 0; k < r_cnt; k++) {
2382 ha->isp_ops->rd_reg_indirect(ha, addr, &r_value);
2383 *data_ptr++ = cpu_to_le32(r_value);
2384 addr += cache_hdr->read_ctrl.read_addr_stride;
2385 }
2386 t_value += cache_hdr->addr_ctrl.tag_value_stride;
2387 }
2388 *d_ptr = data_ptr;
2389 }
2390
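/**
 * qla4_8xxx_minidump_process_queue - Capture per-queue register state.
 * @ha: pointer to adapter structure
 * @entry_hdr: queue entry with select address, queue-id stride and
 *	read-address parameters
 * @d_ptr: in/out cursor into the dump buffer
 **/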
2391 static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
2392 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2393 uint32_t **d_ptr)
2394 {
2395 uint32_t s_addr, r_addr;
2396 uint32_t r_stride, r_value, r_cnt, qid = 0;
2397 uint32_t i, k, loop_cnt;
2398 struct qla8xxx_minidump_entry_queue *q_hdr;
2399 uint32_t *data_ptr = *d_ptr;
2400
2401 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
2402 q_hdr = (struct qla8xxx_minidump_entry_queue *)entry_hdr;
2403 s_addr = q_hdr->select_addr;
2404 r_cnt = q_hdr->rd_strd.read_addr_cnt;
2405 r_stride = q_hdr->rd_strd.read_addr_stride;
2406 loop_cnt = q_hdr->op_count;
2407
2408 for (i = 0; i < loop_cnt; i++) {
2409 ha->isp_ops->wr_reg_indirect(ha, s_addr, qid);
2410 r_addr = q_hdr->read_addr;
2411 for (k = 0; k < r_cnt; k++) {
2412 ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
2413 *data_ptr++ = cpu_to_le32(r_value);
2414 r_addr += r_stride;
2415 }
2416 qid += q_hdr->q_strd.queue_id_stride;
2417 }
2418 *d_ptr = data_ptr;
2419 }
2420
2421 #define MD_DIRECT_ROM_WINDOW 0x42110030
2422 #define MD_DIRECT_ROM_READ_BASE 0x42150000
2423
2424 static void qla4_82xx_minidump_process_rdrom(struct scsi_qla_host *ha,
2425 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2426 uint32_t **d_ptr)
2427 {
2428 uint32_t r_addr, r_value;
2429 uint32_t i, loop_cnt;
2430 struct qla8xxx_minidump_entry_rdrom *rom_hdr;
2431 uint32_t *data_ptr = *d_ptr;
2432
2433 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
2434 rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr;
2435 r_addr = rom_hdr->read_addr;
2436 loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
2437
2438 DEBUG2(ql4_printk(KERN_INFO, ha,
2439 "[%s]: flash_addr: 0x%x, read_data_size: 0x%x\n",
2440 __func__, r_addr, loop_cnt));
2441
2442 for (i = 0; i < loop_cnt; i++) {
2443 ha->isp_ops->wr_reg_indirect(ha, MD_DIRECT_ROM_WINDOW,
2444 (r_addr & 0xFFFF0000));
2445 ha->isp_ops->rd_reg_indirect(ha,
2446 MD_DIRECT_ROM_READ_BASE + (r_addr & 0x0000FFFF),
2447 &r_value);
2448 *data_ptr++ = cpu_to_le32(r_value);
2449 r_addr += sizeof(uint32_t);
2450 }
2451 *d_ptr = data_ptr;
2452 }
2453
2454 #define MD_MIU_TEST_AGT_CTRL 0x41000090
2455 #define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
2456 #define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
2457
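/**
 * __qla4_8xxx_minidump_process_rdmem - Read adapter memory through the
 *	MIU test agent.
 * @ha: pointer to adapter structure
 * @entry_hdr: rdmem entry; the region must be 16-byte aligned and a
 *	multiple of 16 bytes long
 * @d_ptr: in/out cursor into the dump buffer
 *
 * Under ha->hw_lock, programs the test agent with each 16-byte address,
 * starts it, waits for the busy bit to clear and collects the four data
 * words from MD_MIU_TEST_AGT_RDDATA.
 **/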
2458 static int __qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
2459 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2460 uint32_t **d_ptr)
2461 {
2462 uint32_t r_addr, r_value, r_data;
2463 uint32_t i, j, loop_cnt;
2464 struct qla8xxx_minidump_entry_rdmem *m_hdr;
2465 unsigned long flags;
2466 uint32_t *data_ptr = *d_ptr;
2467
2468 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
2469 m_hdr = (struct qla8xxx_minidump_entry_rdmem *)entry_hdr;
2470 r_addr = m_hdr->read_addr;
2471 loop_cnt = m_hdr->read_data_size/16;
2472
2473 DEBUG2(ql4_printk(KERN_INFO, ha,
2474 "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
2475 __func__, r_addr, m_hdr->read_data_size));
2476
2477 if (r_addr & 0xf) {
2478 DEBUG2(ql4_printk(KERN_INFO, ha,
2479 "[%s]: Read addr 0x%x not 16 bytes aligned\n",
2480 __func__, r_addr));
2481 return QLA_ERROR;
2482 }
2483
2484 if (m_hdr->read_data_size % 16) {
2485 DEBUG2(ql4_printk(KERN_INFO, ha,
2486 "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
2487 __func__, m_hdr->read_data_size));
2488 return QLA_ERROR;
2489 }
2490
2491 DEBUG2(ql4_printk(KERN_INFO, ha,
2492 "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
2493 __func__, r_addr, m_hdr->read_data_size, loop_cnt));
2494
2495 write_lock_irqsave(&ha->hw_lock, flags);
2496 for (i = 0; i < loop_cnt; i++) {
2497 ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO,
2498 r_addr);
2499 r_value = 0;
2500 ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI,
2501 r_value);
2502 r_value = MIU_TA_CTL_ENABLE;
2503 ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value);
2504 r_value = MIU_TA_CTL_START_ENABLE;
2505 ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value);
2506
2507 for (j = 0; j < MAX_CTL_CHECK; j++) {
2508 ha->isp_ops->rd_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
2509 &r_value);
2510 if ((r_value & MIU_TA_CTL_BUSY) == 0)
2511 break;
2512 }
2513
2514 if (j >= MAX_CTL_CHECK) {
2515 printk_ratelimited(KERN_ERR
2516 "%s: failed to read through agent\n",
2517 __func__);
2518 write_unlock_irqrestore(&ha->hw_lock, flags);
2519 return QLA_ERROR;
2520 }
2521
2522 for (j = 0; j < 4; j++) {
2523 ha->isp_ops->rd_reg_indirect(ha,
2524 MD_MIU_TEST_AGT_RDDATA[j],
2525 &r_data);
2526 *data_ptr++ = cpu_to_le32(r_data);
2527 }
2528
2529 r_addr += 16;
2530 }
2531 write_unlock_irqrestore(&ha->hw_lock, flags);
2532
2533 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%x\n",
2534 __func__, (loop_cnt * 16)));
2535
2536 *d_ptr = data_ptr;
2537 return QLA_SUCCESS;
2538 }
2539
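/**
 * qla4_8xxx_minidump_process_rdmem - Capture an adapter memory region.
 * @ha: pointer to adapter structure
 * @entry_hdr: rdmem entry describing the region
 * @d_ptr: in/out cursor into the dump buffer
 *
 * Tries the fast pex-dma path first and falls back to the MIU test
 * agent if the dma engine is unavailable or the transfer fails.
 **/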
2540 static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
2541 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2542 uint32_t **d_ptr)
2543 {
2544 uint32_t *data_ptr = *d_ptr;
2545 int rval = QLA_SUCCESS;
2546
2547 rval = qla4_8xxx_minidump_pex_dma_read(ha, entry_hdr, &data_ptr);
2548 if (rval != QLA_SUCCESS)
2549 rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
2550 &data_ptr);
2551 *d_ptr = data_ptr;
2552 return rval;
2553 }
2554
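/**
 * qla4_8xxx_mark_entry_skipped - Flag a minidump entry as not captured.
 * @ha: pointer to adapter structure
 * @entry_hdr: entry being skipped
 * @index: position of the entry in the template, for the debug log
 **/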
2555 static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
2556 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2557 int index)
2558 {
2559 entry_hdr->d_ctrl.driver_flags |= QLA8XXX_DBG_SKIPPED_FLAG;
2560 DEBUG2(ql4_printk(KERN_INFO, ha,
2561 "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
2562 ha->host_no, index, entry_hdr->entry_type,
2563 entry_hdr->d_ctrl.entry_capture_mask));
2564 /* If the driver encounters a new entry type that it cannot process,
2565 * it should just skip the entry and adjust the total buffer size
2566 * by subtracting the skipped bytes from it.
2567 */
2568 ha->fw_dump_skip_size += entry_hdr->entry_capture_size;
2569 }
2570
2571 /* ISP83xx functions to process new minidump entries... */
2572 static uint32_t qla83xx_minidump_process_pollrd(struct scsi_qla_host *ha,
2573 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2574 uint32_t **d_ptr)
2575 {
2576 uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask;
2577 uint16_t s_stride, i;
2578 uint32_t *data_ptr = *d_ptr;
2579 uint32_t rval = QLA_SUCCESS;
2580 struct qla83xx_minidump_entry_pollrd *pollrd_hdr;
2581
2582 pollrd_hdr = (struct qla83xx_minidump_entry_pollrd *)entry_hdr;
2583 s_addr = le32_to_cpu(pollrd_hdr->select_addr);
2584 r_addr = le32_to_cpu(pollrd_hdr->read_addr);
2585 s_value = le32_to_cpu(pollrd_hdr->select_value);
2586 s_stride = le32_to_cpu(pollrd_hdr->select_value_stride);
2587
2588 poll_wait = le32_to_cpu(pollrd_hdr->poll_wait);
2589 poll_mask = le32_to_cpu(pollrd_hdr->poll_mask);
2590
2591 for (i = 0; i < le32_to_cpu(pollrd_hdr->op_count); i++) {
2592 ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value);
2593 poll_wait = le32_to_cpu(pollrd_hdr->poll_wait);
2594 while (1) {
2595 ha->isp_ops->rd_reg_indirect(ha, s_addr, &r_value);
2596
2597 if ((r_value & poll_mask) != 0) {
2598 break;
2599 } else {
2600 msleep(1);
2601 if (--poll_wait == 0) {
2602 ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
2603 __func__);
2604 rval = QLA_ERROR;
2605 goto exit_process_pollrd;
2606 }
2607 }
2608 }
2609 ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
2610 *data_ptr++ = cpu_to_le32(s_value);
2611 *data_ptr++ = cpu_to_le32(r_value);
2612 s_value += s_stride;
2613 }
2614
2615 *d_ptr = data_ptr;
2616
2617 exit_process_pollrd:
2618 return rval;
2619 }
2620
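/**
 * qla4_84xx_minidump_process_rddfe - Capture DFE state (ISP8042).
 * @ha: pointer to adapter structure
 * @entry_hdr: rddfe entry with addresses, strides, poll count and masks
 * @d_ptr: in/out cursor into the dump buffer; a (written value, data)
 *	pair is stored per iteration
 *
 * Each iteration issues a select write, polls for the ready mask,
 * read-modifies the paired register, then reads the result back.
 **/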
2621 static uint32_t qla4_84xx_minidump_process_rddfe(struct scsi_qla_host *ha,
2622 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2623 uint32_t **d_ptr)
2624 {
2625 int loop_cnt;
2626 uint32_t addr1, addr2, value, data, temp, wrval;
2627 uint8_t stride, stride2;
2628 uint16_t count;
2629 uint32_t poll, mask, modify_mask;
2630 uint32_t wait_count = 0;
2631 uint32_t *data_ptr = *d_ptr;
2632 struct qla8044_minidump_entry_rddfe *rddfe;
2633 uint32_t rval = QLA_SUCCESS;
2634
2635 rddfe = (struct qla8044_minidump_entry_rddfe *)entry_hdr;
2636 addr1 = le32_to_cpu(rddfe->addr_1);
2637 value = le32_to_cpu(rddfe->value);
2638 stride = le32_to_cpu(rddfe->stride);
2639 stride2 = le32_to_cpu(rddfe->stride2);
2640 count = le32_to_cpu(rddfe->count);
2641
2642 poll = le32_to_cpu(rddfe->poll);
2643 mask = le32_to_cpu(rddfe->mask);
2644 modify_mask = le32_to_cpu(rddfe->modify_mask);
2645
2646 addr2 = addr1 + stride;
2647
2648 for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) {
2649 ha->isp_ops->wr_reg_indirect(ha, addr1, (0x40000000 | value));
2650
2651 wait_count = 0;
2652 while (wait_count < poll) {
2653 ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
2654 if ((temp & mask) != 0)
2655 break;
2656 wait_count++;
2657 }
2658
2659 if (wait_count == poll) {
2660 ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__);
2661 rval = QLA_ERROR;
2662 goto exit_process_rddfe;
2663 } else {
2664 ha->isp_ops->rd_reg_indirect(ha, addr2, &temp);
2665 temp = temp & modify_mask;
2666 temp = (temp | ((loop_cnt << 16) | loop_cnt));
2667 wrval = ((temp << 16) | temp);
2668
2669 ha->isp_ops->wr_reg_indirect(ha, addr2, wrval);
2670 ha->isp_ops->wr_reg_indirect(ha, addr1, value);
2671
2672 wait_count = 0;
2673 while (wait_count < poll) {
2674 ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
2675 if ((temp & mask) != 0)
2676 break;
2677 wait_count++;
2678 }
2679 if (wait_count == poll) {
2680 ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
2681 __func__);
2682 rval = QLA_ERROR;
2683 goto exit_process_rddfe;
2684 }
2685
2686 ha->isp_ops->wr_reg_indirect(ha, addr1,
2687 ((0x40000000 | value) +
2688 stride2));
2689 wait_count = 0;
2690 while (wait_count < poll) {
2691 ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
2692 if ((temp & mask) != 0)
2693 break;
2694 wait_count++;
2695 }
2696
2697 if (wait_count == poll) {
2698 ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
2699 __func__);
2700 rval = QLA_ERROR;
2701 goto exit_process_rddfe;
2702 }
2703
2704 ha->isp_ops->rd_reg_indirect(ha, addr2, &data);
2705
2706 *data_ptr++ = cpu_to_le32(wrval);
2707 *data_ptr++ = cpu_to_le32(data);
2708 }
2709 }
2710
2711 *d_ptr = data_ptr;
2712 exit_process_rddfe:
2713 return rval;
2714 }
2715
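/**
 * qla4_84xx_minidump_process_rdmdio - Capture registers over the MDIO
 *	bus (ISP8042).
 * @ha: pointer to adapter structure
 * @entry_hdr: rdmdio entry with the bus register block and select values
 * @d_ptr: in/out cursor into the dump buffer; a (select, data) pair is
 *	stored per iteration
 *
 * For each iteration: waits for the bus to idle, programs the select
 * registers at fixed stride offsets below addr2, re-checks idle, then
 * reads the data register.
 **/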
2716 static uint32_t qla4_84xx_minidump_process_rdmdio(struct scsi_qla_host *ha,
2717 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2718 uint32_t **d_ptr)
2719 {
2720 int rval = QLA_SUCCESS;
2721 uint32_t addr1, addr2, value1, value2, data, selval;
2722 uint8_t stride1, stride2;
2723 uint32_t addr3, addr4, addr5, addr6, addr7;
2724 uint16_t count, loop_cnt;
2725 uint32_t mask;
2726 uint32_t *data_ptr = *d_ptr;
2727 struct qla8044_minidump_entry_rdmdio *rdmdio;
2728
2729 rdmdio = (struct qla8044_minidump_entry_rdmdio *)entry_hdr;
2730 addr1 = le32_to_cpu(rdmdio->addr_1);
2731 addr2 = le32_to_cpu(rdmdio->addr_2);
2732 value1 = le32_to_cpu(rdmdio->value_1);
2733 stride1 = le32_to_cpu(rdmdio->stride_1);
2734 stride2 = le32_to_cpu(rdmdio->stride_2);
2735 count = le32_to_cpu(rdmdio->count);
2736
2737 mask = le32_to_cpu(rdmdio->mask);
2738 value2 = le32_to_cpu(rdmdio->value_2);
2739
2740 addr3 = addr1 + stride1;
2741
2742 for (loop_cnt = 0; loop_cnt < count; loop_cnt++) {
2743 rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2,
2744 addr3, mask);
2745 if (rval)
2746 goto exit_process_rdmdio;
2747
2748 addr4 = addr2 - stride1;
2749 rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr4,
2750 value2);
2751 if (rval)
2752 goto exit_process_rdmdio;
2753
2754 addr5 = addr2 - (2 * stride1);
2755 rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr5,
2756 value1);
2757 if (rval)
2758 goto exit_process_rdmdio;
2759
2760 addr6 = addr2 - (3 * stride1);
2761 rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask,
2762 addr6, 0x2);
2763 if (rval)
2764 goto exit_process_rdmdio;
2765
2766 rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2,
2767 addr3, mask);
2768 if (rval)
2769 goto exit_process_rdmdio;
2770
2771 addr7 = addr2 - (4 * stride1);
2772 rval = ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3,
2773 mask, addr7, &data);
2774 if (rval)
2775 goto exit_process_rdmdio;
2776
2777 selval = (value2 << 18) | (value1 << 2) | 2;
2778
2779 stride2 = le32_to_cpu(rdmdio->stride_2);
2780 *data_ptr++ = cpu_to_le32(selval);
2781 *data_ptr++ = cpu_to_le32(data);
2782
2783 value1 = value1 + stride2;
2784 *d_ptr = data_ptr;
2785 }
2786
2787 exit_process_rdmdio:
2788 return rval;
2789 }
2790
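/**
 * qla4_84xx_minidump_process_pollwr - Poll, write, then poll again (ISP8042).
 * @ha: pointer to adapter structure
 * @entry_hdr: pollwr entry with two address/value pairs and a poll count
 * @d_ptr: unused; this entry type captures no data
 **/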
2791 static uint32_t qla4_84xx_minidump_process_pollwr(struct scsi_qla_host *ha,
2792 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2793 uint32_t **d_ptr)
2794 {
2795 uint32_t addr1, addr2, value1, value2, poll, r_value;
2796 struct qla8044_minidump_entry_pollwr *pollwr_hdr;
2797 uint32_t wait_count = 0;
2798 uint32_t rval = QLA_SUCCESS;
2799
2800 pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr;
2801 addr1 = le32_to_cpu(pollwr_hdr->addr_1);
2802 addr2 = le32_to_cpu(pollwr_hdr->addr_2);
2803 value1 = le32_to_cpu(pollwr_hdr->value_1);
2804 value2 = le32_to_cpu(pollwr_hdr->value_2);
2805
2806 poll = le32_to_cpu(pollwr_hdr->poll);
2807
2808 while (wait_count < poll) {
2809 ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value);
2810
2811 if ((r_value & poll) != 0)
2812 break;
2813
2814 wait_count++;
2815 }
2816
2817 if (wait_count == poll) {
2818 ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__);
2819 rval = QLA_ERROR;
2820 goto exit_process_pollwr;
2821 }
2822
2823 ha->isp_ops->wr_reg_indirect(ha, addr2, value2);
2824 ha->isp_ops->wr_reg_indirect(ha, addr1, value1);
2825
2826 wait_count = 0;
2827 while (wait_count < poll) {
2828 ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value);
2829
2830 if ((r_value & poll) != 0)
2831 break;
2832 wait_count++;
2833 }
2834
2835 exit_process_pollwr:
2836 return rval;
2837 }
2838
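/**
 * qla83xx_minidump_process_rdmux2 - Capture a two-way mux.
 * @ha: pointer to adapter structure
 * @entry_hdr: rdmux2 entry with two select values, a select mask and
 *	two select addresses
 * @d_ptr: in/out cursor into the dump buffer; four words are stored per
 *	iteration (each masked select and the value read behind it)
 **/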
2839 static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha,
2840 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2841 uint32_t **d_ptr)
2842 {
2843 uint32_t sel_val1, sel_val2, t_sel_val, data, i;
2844 uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr;
2845 struct qla83xx_minidump_entry_rdmux2 *rdmux2_hdr;
2846 uint32_t *data_ptr = *d_ptr;
2847
2848 rdmux2_hdr = (struct qla83xx_minidump_entry_rdmux2 *)entry_hdr;
2849 sel_val1 = le32_to_cpu(rdmux2_hdr->select_value_1);
2850 sel_val2 = le32_to_cpu(rdmux2_hdr->select_value_2);
2851 sel_addr1 = le32_to_cpu(rdmux2_hdr->select_addr_1);
2852 sel_addr2 = le32_to_cpu(rdmux2_hdr->select_addr_2);
2853 sel_val_mask = le32_to_cpu(rdmux2_hdr->select_value_mask);
2854 read_addr = le32_to_cpu(rdmux2_hdr->read_addr);
2855
2856 for (i = 0; i < rdmux2_hdr->op_count; i++) {
2857 ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val1);
2858 t_sel_val = sel_val1 & sel_val_mask;
2859 *data_ptr++ = cpu_to_le32(t_sel_val);
2860
2861 ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val);
2862 ha->isp_ops->rd_reg_indirect(ha, read_addr, &data);
2863
2864 *data_ptr++ = cpu_to_le32(data);
2865
2866 ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val2);
2867 t_sel_val = sel_val2 & sel_val_mask;
2868 *data_ptr++ = cpu_to_le32(t_sel_val);
2869
2870 ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val);
2871 ha->isp_ops->rd_reg_indirect(ha, read_addr, &data);
2872
2873 *data_ptr++ = cpu_to_le32(data);
2874
2875 sel_val1 += rdmux2_hdr->select_value_stride;
2876 sel_val2 += rdmux2_hdr->select_value_stride;
2877 }
2878
2879 *d_ptr = data_ptr;
2880 }
2881
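/**
 * qla83xx_minidump_process_pollrdmwr - Poll, read-modify-write, poll, capture.
 * @ha: pointer to adapter structure
 * @entry_hdr: pollrdmwr entry with two address/value pairs, a poll wait
 *	count and poll/modify masks
 * @d_ptr: in/out cursor into the dump buffer; stores the data address
 *	and the masked value read back
 **/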
2882 static uint32_t qla83xx_minidump_process_pollrdmwr(struct scsi_qla_host *ha,
2883 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2884 uint32_t **d_ptr)
2885 {
2886 uint32_t poll_wait, poll_mask, r_value, data;
2887 uint32_t addr_1, addr_2, value_1, value_2;
2888 uint32_t *data_ptr = *d_ptr;
2889 uint32_t rval = QLA_SUCCESS;
2890 struct qla83xx_minidump_entry_pollrdmwr *poll_hdr;
2891
2892 poll_hdr = (struct qla83xx_minidump_entry_pollrdmwr *)entry_hdr;
2893 addr_1 = le32_to_cpu(poll_hdr->addr_1);
2894 addr_2 = le32_to_cpu(poll_hdr->addr_2);
2895 value_1 = le32_to_cpu(poll_hdr->value_1);
2896 value_2 = le32_to_cpu(poll_hdr->value_2);
2897 poll_mask = le32_to_cpu(poll_hdr->poll_mask);
2898
2899 ha->isp_ops->wr_reg_indirect(ha, addr_1, value_1);
2900
2901 poll_wait = le32_to_cpu(poll_hdr->poll_wait);
2902 while (1) {
2903 ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value);
2904
2905 if ((r_value & poll_mask) != 0) {
2906 break;
2907 } else {
2908 msleep(1);
2909 if (--poll_wait == 0) {
2910 ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_1\n",
2911 __func__);
2912 rval = QLA_ERROR;
2913 goto exit_process_pollrdmwr;
2914 }
2915 }
2916 }
2917
2918 ha->isp_ops->rd_reg_indirect(ha, addr_2, &data);
2919 data &= le32_to_cpu(poll_hdr->modify_mask);
2920 ha->isp_ops->wr_reg_indirect(ha, addr_2, data);
2921 ha->isp_ops->wr_reg_indirect(ha, addr_1, value_2);
2922
2923 poll_wait = le32_to_cpu(poll_hdr->poll_wait);
2924 while (1) {
2925 ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value);
2926
2927 if ((r_value & poll_mask) != 0) {
2928 break;
2929 } else {
2930 msleep(1);
2931 if (--poll_wait == 0) {
2932 ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_2\n",
2933 __func__);
2934 rval = QLA_ERROR;
2935 goto exit_process_pollrdmwr;
2936 }
2937 }
2938 }
2939
2940 *data_ptr++ = cpu_to_le32(addr_2);
2941 *data_ptr++ = cpu_to_le32(data);
2942 *d_ptr = data_ptr;
2943
2944 exit_process_pollrdmwr:
2945 return rval;
2946 }
2947
2948 static uint32_t qla4_83xx_minidump_process_rdrom(struct scsi_qla_host *ha,
2949 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2950 uint32_t **d_ptr)
2951 {
2952 uint32_t fl_addr, u32_count, rval;
2953 struct qla8xxx_minidump_entry_rdrom *rom_hdr;
2954 uint32_t *data_ptr = *d_ptr;
2955
2956 rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr;
2957 fl_addr = le32_to_cpu(rom_hdr->read_addr);
2958 u32_count = le32_to_cpu(rom_hdr->read_data_size)/sizeof(uint32_t);
2959
2960 DEBUG2(ql4_printk(KERN_INFO, ha, "[%s]: fl_addr: 0x%x, count: 0x%x\n",
2961 __func__, fl_addr, u32_count));
2962
2963 rval = qla4_83xx_lockless_flash_read_u32(ha, fl_addr,
2964 (u8 *)(data_ptr), u32_count);
2965
2966 if (rval == QLA_ERROR) {
2967 ql4_printk(KERN_ERR, ha, "%s: Flash Read Error,Count=%d\n",
2968 __func__, u32_count);
2969 goto exit_process_rdrom;
2970 }
2971
2972 data_ptr += u32_count;
2973 *d_ptr = data_ptr;
2974
2975 exit_process_rdrom:
2976 return rval;
2977 }
2978
2979 /**
2980 * qla4_8xxx_collect_md_data - Retrieve firmware minidump data.
2981 * @ha: pointer to adapter structure
2982 **/
2983 static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2984 {
2985 int num_entry_hdr = 0;
2986 struct qla8xxx_minidump_entry_hdr *entry_hdr;
2987 struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
2988 uint32_t *data_ptr;
2989 uint32_t data_collected = 0;
2990 int i, rval = QLA_ERROR;
2991 uint64_t now;
2992 uint32_t timestamp;
2993
2994 ha->fw_dump_skip_size = 0;
2995 if (!ha->fw_dump) {
2996 ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n",
2997 __func__, ha->host_no);
2998 return rval;
2999 }
3000
3001 tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
3002 ha->fw_dump_tmplt_hdr;
3003 data_ptr = (uint32_t *)((uint8_t *)ha->fw_dump +
3004 ha->fw_dump_tmplt_size);
3005 data_collected += ha->fw_dump_tmplt_size;
3006
3007 num_entry_hdr = tmplt_hdr->num_of_entries;
3008 ql4_printk(KERN_INFO, ha, "[%s]: starting data ptr: %p\n",
3009 __func__, data_ptr);
3010 ql4_printk(KERN_INFO, ha,
3011 "[%s]: no of entry headers in Template: 0x%x\n",
3012 __func__, num_entry_hdr);
3013 ql4_printk(KERN_INFO, ha, "[%s]: Capture Mask obtained: 0x%x\n",
3014 __func__, ha->fw_dump_capture_mask);
3015 ql4_printk(KERN_INFO, ha, "[%s]: Total_data_size 0x%x, %d obtained\n",
3016 __func__, ha->fw_dump_size, ha->fw_dump_size);
3017
3018 /* Update current timestamp before taking dump */
3019 now = get_jiffies_64();
3020 timestamp = (u32)(jiffies_to_msecs(now) / 1000);
3021 tmplt_hdr->driver_timestamp = timestamp;
3022
3023 entry_hdr = (struct qla8xxx_minidump_entry_hdr *)
3024 (((uint8_t *)ha->fw_dump_tmplt_hdr) +
3025 tmplt_hdr->first_entry_offset);
3026
3027 if (is_qla8032(ha) || is_qla8042(ha))
3028 tmplt_hdr->saved_state_array[QLA83XX_SS_OCM_WNDREG_INDEX] =
3029 tmplt_hdr->ocm_window_reg[ha->func_num];
3030
3031 /* Walk through the entry headers - validate/perform required action */
3032 for (i = 0; i < num_entry_hdr; i++) {
3033 if (data_collected > ha->fw_dump_size) {
3034 ql4_printk(KERN_INFO, ha,
3035 "Data collected: [0x%x], Total Dump size: [0x%x]\n",
3036 data_collected, ha->fw_dump_size);
3037 return rval;
3038 }
3039
3040 if (!(entry_hdr->d_ctrl.entry_capture_mask &
3041 ha->fw_dump_capture_mask)) {
3042 entry_hdr->d_ctrl.driver_flags |=
3043 QLA8XXX_DBG_SKIPPED_FLAG;
3044 goto skip_nxt_entry;
3045 }
3046
3047 DEBUG2(ql4_printk(KERN_INFO, ha,
3048 "Data collected: [0x%x], Dump size left:[0x%x]\n",
3049 data_collected,
3050 (ha->fw_dump_size - data_collected)));
3051
3052 /* Decode the entry type and take required action to capture
3053 * debug data
3054 */
3055 switch (entry_hdr->entry_type) {
3056 case QLA8XXX_RDEND:
3057 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
3058 break;
3059 case QLA8XXX_CNTRL:
3060 rval = qla4_8xxx_minidump_process_control(ha,
3061 entry_hdr);
3062 if (rval != QLA_SUCCESS) {
3063 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
3064 goto md_failed;
3065 }
3066 break;
3067 case QLA8XXX_RDCRB:
3068 qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr,
3069 &data_ptr);
3070 break;
3071 case QLA8XXX_RDMEM:
3072 rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
3073 &data_ptr);
3074 if (rval != QLA_SUCCESS) {
3075 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
3076 goto md_failed;
3077 }
3078 break;
3079 case QLA8XXX_BOARD:
3080 case QLA8XXX_RDROM:
3081 if (is_qla8022(ha)) {
3082 qla4_82xx_minidump_process_rdrom(ha, entry_hdr,
3083 &data_ptr);
3084 } else if (is_qla8032(ha) || is_qla8042(ha)) {
3085 rval = qla4_83xx_minidump_process_rdrom(ha,
3086 entry_hdr,
3087 &data_ptr);
3088 if (rval != QLA_SUCCESS)
3089 qla4_8xxx_mark_entry_skipped(ha,
3090 entry_hdr,
3091 i);
3092 }
3093 break;
3094 case QLA8XXX_L2DTG:
3095 case QLA8XXX_L2ITG:
3096 case QLA8XXX_L2DAT:
3097 case QLA8XXX_L2INS:
3098 rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr,
3099 &data_ptr);
3100 if (rval != QLA_SUCCESS) {
3101 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
3102 goto md_failed;
3103 }
3104 break;
3105 case QLA8XXX_L1DTG:
3106 case QLA8XXX_L1ITG:
3107 case QLA8XXX_L1DAT:
3108 case QLA8XXX_L1INS:
3109 qla4_8xxx_minidump_process_l1cache(ha, entry_hdr,
3110 &data_ptr);
3111 break;
3112 case QLA8XXX_RDOCM:
3113 qla4_8xxx_minidump_process_rdocm(ha, entry_hdr,
3114 &data_ptr);
3115 break;
3116 case QLA8XXX_RDMUX:
3117 qla4_8xxx_minidump_process_rdmux(ha, entry_hdr,
3118 &data_ptr);
3119 break;
3120 case QLA8XXX_QUEUE:
3121 qla4_8xxx_minidump_process_queue(ha, entry_hdr,
3122 &data_ptr);
3123 break;
3124 case QLA83XX_POLLRD:
3125 if (is_qla8022(ha)) {
3126 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
3127 break;
3128 }
3129 rval = qla83xx_minidump_process_pollrd(ha, entry_hdr,
3130 &data_ptr);
3131 if (rval != QLA_SUCCESS)
3132 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
3133 break;
3134 case QLA83XX_RDMUX2:
3135 if (is_qla8022(ha)) {
3136 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
3137 break;
3138 }
3139 qla83xx_minidump_process_rdmux2(ha, entry_hdr,
3140 &data_ptr);
3141 break;
3142 case QLA83XX_POLLRDMWR:
3143 if (is_qla8022(ha)) {
3144 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
3145 break;
3146 }
3147 rval = qla83xx_minidump_process_pollrdmwr(ha, entry_hdr,
3148 &data_ptr);
3149 if (rval != QLA_SUCCESS)
3150 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
3151 break;
3152 case QLA8044_RDDFE:
3153 rval = qla4_84xx_minidump_process_rddfe(ha, entry_hdr,
3154 &data_ptr);
3155 if (rval != QLA_SUCCESS)
3156 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
3157 break;
3158 case QLA8044_RDMDIO:
3159 rval = qla4_84xx_minidump_process_rdmdio(ha, entry_hdr,
3160 &data_ptr);
3161 if (rval != QLA_SUCCESS)
3162 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
3163 break;
3164 case QLA8044_POLLWR:
3165 rval = qla4_84xx_minidump_process_pollwr(ha, entry_hdr,
3166 &data_ptr);
3167 if (rval != QLA_SUCCESS)
3168 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
3169 break;
3170 case QLA8XXX_RDNOP:
3171 default:
3172 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
3173 break;
3174 }
3175
3176 data_collected = (uint8_t *)data_ptr - (uint8_t *)ha->fw_dump;
3177 skip_nxt_entry:
3178 /* next entry in the template */
3179 entry_hdr = (struct qla8xxx_minidump_entry_hdr *)
3180 (((uint8_t *)entry_hdr) +
3181 entry_hdr->entry_size);
3182 }
3183
3184 if ((data_collected + ha->fw_dump_skip_size) != ha->fw_dump_size) {
3185 ql4_printk(KERN_INFO, ha,
3186 "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
3187 data_collected, ha->fw_dump_size);
3188 rval = QLA_ERROR;
3189 goto md_failed;
3190 }
3191
3192 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s Last entry: 0x%x\n",
3193 __func__, i));
3194 md_failed:
3195 return rval;
3196 }
3197
3198 /**
3199 * qla4_8xxx_uevent_emit - Send uevent when the firmware dump is ready.
3200 * @ha: pointer to adapter structure
3201 * @code: uevent code to act upon
3202 **/
3203 static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
3204 {
3205 char event_string[40];
3206 char *envp[] = { event_string, NULL };
3207
3208 switch (code) {
3209 case QL4_UEVENT_CODE_FW_DUMP:
3210 snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu",
3211 ha->host_no);
3212 break;
3213 default:
3214 /* do nothing */
3215 break;
3216 }
3217
3218 kobject_uevent_env(&(&ha->pdev->dev)->kobj, KOBJ_CHANGE, envp);
3219 }
3220
3221 void qla4_8xxx_get_minidump(struct scsi_qla_host *ha)
3222 {
3223 if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
3224 !test_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
3225 if (!qla4_8xxx_collect_md_data(ha)) {
3226 qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
3227 set_bit(AF_82XX_FW_DUMPED, &ha->flags);
3228 } else {
3229 ql4_printk(KERN_INFO, ha, "%s: Unable to collect minidump\n",
3230 __func__);
3231 }
3232 }
3233 }
3234
3235 /**
3236 * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
3237 * @ha: pointer to adapter structure
3238 *
3239 * Note: IDC lock must be held upon entry
3240 **/
3241 int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
3242 {
3243 int rval = QLA_ERROR;
3244 int i;
3245 uint32_t old_count, count;
3246 int need_reset = 0;
3247
3248 need_reset = ha->isp_ops->need_reset(ha);
3249
3250 if (need_reset) {
3251 /* We are trying to perform a recovery here. */
3252 if (test_bit(AF_FW_RECOVERY, &ha->flags))
3253 ha->isp_ops->rom_lock_recovery(ha);
3254 } else {
3255 old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
3256 for (i = 0; i < 10; i++) {
3257 msleep(200);
3258 count = qla4_8xxx_rd_direct(ha,
3259 QLA8XXX_PEG_ALIVE_COUNTER);
3260 if (count != old_count) {
3261 rval = QLA_SUCCESS;
3262 goto dev_ready;
3263 }
3264 }
3265 ha->isp_ops->rom_lock_recovery(ha);
3266 }
3267
3268 /* set to DEV_INITIALIZING */
3269 ql4_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
3270 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
3271 QLA8XXX_DEV_INITIALIZING);
3272
3273 ha->isp_ops->idc_unlock(ha);
3274
3275 if (is_qla8022(ha))
3276 qla4_8xxx_get_minidump(ha);
3277
3278 rval = ha->isp_ops->restart_firmware(ha);
3279 ha->isp_ops->idc_lock(ha);
3280
3281 if (rval != QLA_SUCCESS) {
3282 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
3283 qla4_8xxx_clear_drv_active(ha);
3284 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
3285 QLA8XXX_DEV_FAILED);
3286 return rval;
3287 }
3288
3289 dev_ready:
3290 ql4_printk(KERN_INFO, ha, "HW State: READY\n");
3291 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_READY);
3292
3293 return rval;
3294 }
3295
3296 /**
3297 * qla4_82xx_need_reset_handler - Code to start reset sequence
3298 * @ha: pointer to adapter structure
3299 *
3300 * Note: IDC lock must be held upon entry
3301 **/
3302 static void
3303 qla4_82xx_need_reset_handler(struct scsi_qla_host *ha)
3304 {
3305 uint32_t dev_state, drv_state, drv_active;
3306 uint32_t active_mask = 0xFFFFFFFF;
3307 unsigned long reset_timeout;
3308
3309 ql4_printk(KERN_INFO, ha,
3310 "Performing ISP error recovery\n");
3311
3312 if (test_and_clear_bit(AF_ONLINE, &ha->flags)) {
3313 qla4_82xx_idc_unlock(ha);
3314 ha->isp_ops->disable_intrs(ha);
3315 qla4_82xx_idc_lock(ha);
3316 }
3317
3318 if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
3319 DEBUG2(ql4_printk(KERN_INFO, ha,
3320 "%s(%ld): reset acknowledged\n",
3321 __func__, ha->host_no));
3322 qla4_8xxx_set_rst_ready(ha);
3323 } else {
3324 active_mask = (~(1 << (ha->func_num * 4)));
3325 }
3326
3327 /* wait for 10 seconds for reset ack from all functions */
3328 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
3329
3330 drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3331 drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3332
3333 ql4_printk(KERN_INFO, ha,
3334 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
3335 __func__, ha->host_no, drv_state, drv_active);
3336
3337 while (drv_state != (drv_active & active_mask)) {
3338 if (time_after_eq(jiffies, reset_timeout)) {
3339 ql4_printk(KERN_INFO, ha,
3340 "%s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
3341 DRIVER_NAME, drv_state, drv_active);
3342 break;
3343 }
3344
3345 /*
3346 * When reset_owner times out, check which functions
3347 * acked/did not ack
3348 */
3349 if (test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
3350 ql4_printk(KERN_INFO, ha,
3351 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
3352 __func__, ha->host_no, drv_state,
3353 drv_active);
3354 }
3355 qla4_82xx_idc_unlock(ha);
3356 msleep(1000);
3357 qla4_82xx_idc_lock(ha);
3358
3359 drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3360 drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3361 }
3362
3363 /* Clear RESET OWNER as we are not going to use it any further */
3364 clear_bit(AF_8XXX_RST_OWNER, &ha->flags);
3365
3366 dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3367 ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state,
3368 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3369
3370 /* Force to DEV_COLD unless someone else is starting a reset */
3371 if (dev_state != QLA8XXX_DEV_INITIALIZING) {
3372 ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
3373 qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD);
3374 qla4_8xxx_set_rst_ready(ha);
3375 }
3376 }
3377
3378 /**
3379 * qla4_8xxx_need_qsnt_handler - Code to start qsnt
3380 * @ha: pointer to adapter structure
3381 **/
3382 void
3383 qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha)
3384 {
3385 ha->isp_ops->idc_lock(ha);
3386 qla4_8xxx_set_qsnt_ready(ha);
3387 ha->isp_ops->idc_unlock(ha);
3388 }
3389
3390 static void qla4_82xx_set_idc_ver(struct scsi_qla_host *ha)
3391 {
3392 int idc_ver;
3393 uint32_t drv_active;
3394
3395 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
3396 if (drv_active == (1 << (ha->func_num * 4))) {
3397 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION,
3398 QLA82XX_IDC_VERSION);
3399 ql4_printk(KERN_INFO, ha,
3400 "%s: IDC version updated to %d\n", __func__,
3401 QLA82XX_IDC_VERSION);
3402 } else {
3403 idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
3404 if (QLA82XX_IDC_VERSION != idc_ver) {
3405 ql4_printk(KERN_INFO, ha,
3406 "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n",
3407 __func__, QLA82XX_IDC_VERSION, idc_ver);
3408 }
3409 }
3410 }
3411
3412 static int qla4_83xx_set_idc_ver(struct scsi_qla_host *ha)
3413 {
3414 int idc_ver;
3415 uint32_t drv_active;
3416 int rval = QLA_SUCCESS;
3417
3418 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
3419 if (drv_active == (1 << ha->func_num)) {
3420 idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
3421 idc_ver &= (~0xFF);
3422 idc_ver |= QLA83XX_IDC_VER_MAJ_VALUE;
3423 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION, idc_ver);
3424 ql4_printk(KERN_INFO, ha,
3425 "%s: IDC version updated to %d\n", __func__,
3426 idc_ver);
3427 } else {
3428 idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
3429 idc_ver &= 0xFF;
3430 if (QLA83XX_IDC_VER_MAJ_VALUE != idc_ver) {
3431 ql4_printk(KERN_INFO, ha,
3432 "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n",
3433 __func__, QLA83XX_IDC_VER_MAJ_VALUE,
3434 idc_ver);
3435 rval = QLA_ERROR;
3436 goto exit_set_idc_ver;
3437 }
3438 }
3439
3440 /* Update IDC_MINOR_VERSION */
3441 idc_ver = qla4_83xx_rd_reg(ha, QLA83XX_CRB_IDC_VER_MINOR);
3442 idc_ver &= ~(0x03 << (ha->func_num * 2));
3443 idc_ver |= (QLA83XX_IDC_VER_MIN_VALUE << (ha->func_num * 2));
3444 qla4_83xx_wr_reg(ha, QLA83XX_CRB_IDC_VER_MINOR, idc_ver);
3445
3446 exit_set_idc_ver:
3447 return rval;
3448 }
3449
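/**
 * qla4_8xxx_update_idc_reg - First-load IDC register setup.
 * @ha: pointer to adapter structure
 *
 * On the first initialization only: marks this function active in
 * DRV_ACTIVE, clears the dont-reset bit if we are the first driver to
 * load (ISP8324/ISP8042), and publishes or validates the IDC version.
 **/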
3450 int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha)
3451 {
3452 uint32_t drv_active;
3453 int rval = QLA_SUCCESS;
3454
3455 if (test_bit(AF_INIT_DONE, &ha->flags))
3456 goto exit_update_idc_reg;
3457
3458 ha->isp_ops->idc_lock(ha);
3459 qla4_8xxx_set_drv_active(ha);
3460
3461 /*
3462 * If we are the first driver to load and
3463 * ql4xdontresethba is not set, clear IDC_CTRL BIT0.
3464 */
3465 if (is_qla8032(ha) || is_qla8042(ha)) {
3466 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
3467 if ((drv_active == (1 << ha->func_num)) && !ql4xdontresethba)
3468 qla4_83xx_clear_idc_dontreset(ha);
3469 }
3470
3471 if (is_qla8022(ha)) {
3472 qla4_82xx_set_idc_ver(ha);
3473 } else if (is_qla8032(ha) || is_qla8042(ha)) {
3474 rval = qla4_83xx_set_idc_ver(ha);
3475 if (rval == QLA_ERROR)
3476 qla4_8xxx_clear_drv_active(ha);
3477 }
3478
3479 ha->isp_ops->idc_unlock(ha);
3480
3481 exit_update_idc_reg:
3482 return rval;
3483 }
3484
3485 /**
3486 * qla4_8xxx_device_state_handler - Adapter state machine
3487 * @ha: pointer to host adapter structure.
3488 *
3489 * Note: IDC lock must be UNLOCKED upon entry
3490 **/
3491 int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
3492 {
3493 uint32_t dev_state;
3494 int rval = QLA_SUCCESS;
3495 unsigned long dev_init_timeout;
3496
3497 rval = qla4_8xxx_update_idc_reg(ha);
3498 if (rval == QLA_ERROR)
3499 goto exit_state_handler;
3500
3501 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
3502 DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
3503 dev_state, dev_state < MAX_STATES ?
3504 qdev_state[dev_state] : "Unknown"));
3505
3506 /* wait for 30 seconds for device to go ready */
3507 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
3508
3509 ha->isp_ops->idc_lock(ha);
3510 while (1) {
3511
3512 if (time_after_eq(jiffies, dev_init_timeout)) {
3513 ql4_printk(KERN_WARNING, ha,
3514 "%s: Device Init Failed 0x%x = %s\n",
3515 DRIVER_NAME,
3516 dev_state, dev_state < MAX_STATES ?
3517 qdev_state[dev_state] : "Unknown");
3518 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
3519 QLA8XXX_DEV_FAILED);
3520 }
3521
3522 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
3523 ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
3524 dev_state, dev_state < MAX_STATES ?
3525 qdev_state[dev_state] : "Unknown");
3526
3527 /* NOTE: Make sure idc unlocked upon exit of switch statement */
3528 switch (dev_state) {
3529 case QLA8XXX_DEV_READY:
3530 goto exit;
3531 case QLA8XXX_DEV_COLD:
3532 rval = qla4_8xxx_device_bootstrap(ha);
3533 goto exit;
3534 case QLA8XXX_DEV_INITIALIZING:
3535 ha->isp_ops->idc_unlock(ha);
3536 msleep(1000);
3537 ha->isp_ops->idc_lock(ha);
3538 break;
3539 case QLA8XXX_DEV_NEED_RESET:
3540 /*
3541 * For ISP8324 and ISP8042, if NEED_RESET is set by any
3542 * driver, it should be honored, irrespective of
3543 * IDC_CTRL DONTRESET_BIT0
3544 */
3545 if (is_qla8032(ha) || is_qla8042(ha)) {
3546 qla4_83xx_need_reset_handler(ha);
3547 } else if (is_qla8022(ha)) {
3548 if (!ql4xdontresethba) {
3549 qla4_82xx_need_reset_handler(ha);
3550 /* Update timeout value after need
3551 * reset handler */
3552 dev_init_timeout = jiffies +
3553 (ha->nx_dev_init_timeout * HZ);
3554 } else {
3555 ha->isp_ops->idc_unlock(ha);
3556 msleep(1000);
3557 ha->isp_ops->idc_lock(ha);
3558 }
3559 }
3560 break;
3561 case QLA8XXX_DEV_NEED_QUIESCENT:
3562 /* idc locked/unlocked in handler */
3563 qla4_8xxx_need_qsnt_handler(ha);
3564 break;
3565 case QLA8XXX_DEV_QUIESCENT:
3566 ha->isp_ops->idc_unlock(ha);
3567 msleep(1000);
3568 ha->isp_ops->idc_lock(ha);
3569 break;
3570 case QLA8XXX_DEV_FAILED:
3571 ha->isp_ops->idc_unlock(ha);
3572 qla4xxx_dead_adapter_cleanup(ha);
3573 rval = QLA_ERROR;
3574 ha->isp_ops->idc_lock(ha);
3575 goto exit;
3576 default:
3577 ha->isp_ops->idc_unlock(ha);
3578 qla4xxx_dead_adapter_cleanup(ha);
3579 rval = QLA_ERROR;
3580 ha->isp_ops->idc_lock(ha);
3581 goto exit;
3582 }
3583 }
3584 exit:
3585 ha->isp_ops->idc_unlock(ha);
3586 exit_state_handler:
3587 return rval;
3588 }
3589
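/**
 * qla4_8xxx_load_risc - clear pending firmware interrupts and bring up device
 * @ha: pointer to host adapter structure.
 *
 * Runs the device state handler, then initializes the request/response
 * rings and attaches IRQs if they are not already attached.
 **/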
int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
{
	int retval;

	/* clear the interrupt */
	if (is_qla8032(ha) || is_qla8042(ha)) {
		writel(0, &ha->qla4_83xx_reg->risc_intr);
		readl(&ha->qla4_83xx_reg->risc_intr);
	} else if (is_qla8022(ha)) {
		writel(0, &ha->qla4_82xx_reg->host_int);
		readl(&ha->qla4_82xx_reg->host_int);
	}

	retval = qla4_8xxx_device_state_handler(ha);

	/* Initialize request and response queues. */
	if (retval == QLA_SUCCESS)
		qla4xxx_init_rings(ha);

	if (retval == QLA_SUCCESS && !test_bit(AF_IRQ_ATTACHED, &ha->flags))
		retval = qla4xxx_request_irqs(ha);

	return retval;
}

/*****************************************************************************/
/* Flash Manipulation Routines                                               */
/*****************************************************************************/

#define OPTROM_BURST_SIZE	0x1000
#define OPTROM_BURST_DWORDS	(OPTROM_BURST_SIZE / 4)

#define FARX_DATA_FLAG		BIT_31
#define FARX_ACCESS_FLASH_CONF	0x7FFD0000
#define FARX_ACCESS_FLASH_DATA	0x7FF00000

static inline uint32_t
flash_conf_addr(struct ql82xx_hw_data *hw, uint32_t faddr)
{
	return hw->flash_conf_off | faddr;
}

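/**
 * qla4_82xx_read_flash_data - read dwords from flash via the ROM interface
 * @ha: pointer to host adapter structure.
 * @dwptr: destination buffer for little-endian dwords.
 * @faddr: flash byte address to start reading from.
 * @length: number of bytes to read.
 *
 * Acquires the hardware ROM lock, retrying for roughly five seconds
 * before giving up.
 **/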
static uint32_t *
qla4_82xx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
			  uint32_t faddr, uint32_t length)
{
	uint32_t i;
	uint32_t val;
	int loops = 0;

	while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) {
		udelay(100);
		cond_resched();
		loops++;
	}
	if (loops >= 50000) {
		ql4_printk(KERN_WARNING, ha, "ROM lock failed\n");
		return dwptr;
	}

	/* Dword reads to flash. */
	for (i = 0; i < length/4; i++, faddr += 4) {
		if (qla4_82xx_do_rom_fast_read(ha, faddr, &val)) {
			ql4_printk(KERN_WARNING, ha,
				   "Do ROM fast read failed\n");
			goto done_read;
		}
		dwptr[i] = cpu_to_le32(val);
	}

done_read:
	qla4_82xx_rom_unlock(ha);
	return dwptr;
}

/*
 * Address and length are given in bytes.
 */
static uint8_t *
qla4_82xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
			   uint32_t offset, uint32_t length)
{
	qla4_82xx_read_flash_data(ha, (uint32_t *)buf, offset, length);
	return buf;
}

static int
qla4_8xxx_find_flt_start(struct scsi_qla_host *ha, uint32_t *start)
{
	const char *loc, *locations[] = { "DEF", "PCI" };

	/*
	 * FLT-location structure resides after the last PCI region.
	 */

	/* Begin with sane defaults. */
	loc = locations[0];
	*start = FA_FLASH_LAYOUT_ADDR_82;

	DEBUG2(ql4_printk(KERN_INFO, ha, "FLTL[%s] = 0x%x.\n", loc, *start));
	return QLA_SUCCESS;
}

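/**
 * qla4_8xxx_get_flt_info - parse the flash layout table (FLT)
 * @ha: pointer to host adapter structure.
 * @flt_addr: dword offset of the FLT in flash.
 *
 * Reads the FLT into the request ring buffer, validates its version and
 * checksum, and caches the region start offsets; falls back to hardcoded
 * defaults if no valid FLT is found.
 **/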
static void
qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
{
	const char *loc, *locations[] = { "DEF", "FLT" };
	uint16_t *wptr;
	uint16_t cnt, chksum;
	uint32_t start, status;
	struct qla_flt_header *flt;
	struct qla_flt_region *region;
	struct ql82xx_hw_data *hw = &ha->hw;

	hw->flt_region_flt = flt_addr;
	wptr = (uint16_t *)ha->request_ring;
	flt = (struct qla_flt_header *)ha->request_ring;
	region = (struct qla_flt_region *)&flt[1];

	if (is_qla8022(ha)) {
		qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
					   flt_addr << 2, OPTROM_BURST_SIZE);
	} else if (is_qla8032(ha) || is_qla8042(ha)) {
		status = qla4_83xx_flash_read_u32(ha, flt_addr << 2,
						  (uint8_t *)ha->request_ring,
						  0x400);
		if (status != QLA_SUCCESS)
			goto no_flash_data;
	}

	if (*wptr == cpu_to_le16(0xffff))
		goto no_flash_data;
	if (flt->version != cpu_to_le16(1)) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "Unsupported FLT detected: "
		    "version=0x%x length=0x%x checksum=0x%x.\n",
		    le16_to_cpu(flt->version), le16_to_cpu(flt->length),
		    le16_to_cpu(flt->checksum)));
		goto no_flash_data;
	}

	cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
	for (chksum = 0; cnt; cnt--)
		chksum += le16_to_cpu(*wptr++);
	if (chksum) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "Inconsistent FLT detected: "
		    "version=0x%x length=0x%x checksum=0x%x.\n",
		    le16_to_cpu(flt->version), le16_to_cpu(flt->length),
		    chksum));
		goto no_flash_data;
	}

	loc = locations[1];
	cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
	for ( ; cnt; cnt--, region++) {
		/* Store addresses as DWORD offsets. */
		start = le32_to_cpu(region->start) >> 2;

		DEBUG3(ql4_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x "
		    "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start,
		    le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size)));

		switch (le32_to_cpu(region->code) & 0xff) {
		case FLT_REG_FDT:
			hw->flt_region_fdt = start;
			break;
		case FLT_REG_BOOT_CODE_82:
			hw->flt_region_boot = start;
			break;
		case FLT_REG_FW_82:
		case FLT_REG_FW_82_1:
			hw->flt_region_fw = start;
			break;
		case FLT_REG_BOOTLOAD_82:
			hw->flt_region_bootload = start;
			break;
		case FLT_REG_ISCSI_PARAM:
			hw->flt_iscsi_param = start;
			break;
		case FLT_REG_ISCSI_CHAP:
			hw->flt_region_chap = start;
			hw->flt_chap_size = le32_to_cpu(region->size);
			break;
		case FLT_REG_ISCSI_DDB:
			hw->flt_region_ddb = start;
			hw->flt_ddb_size = le32_to_cpu(region->size);
			break;
		}
	}
	goto done;

no_flash_data:
	/* Use hardcoded defaults. */
	loc = locations[0];

	hw->flt_region_fdt = FA_FLASH_DESCR_ADDR_82;
	hw->flt_region_boot = FA_BOOT_CODE_ADDR_82;
	hw->flt_region_bootload = FA_BOOT_LOAD_ADDR_82;
	hw->flt_region_fw = FA_RISC_CODE_ADDR_82;
	hw->flt_region_chap = FA_FLASH_ISCSI_CHAP >> 2;
	hw->flt_chap_size = FA_FLASH_CHAP_SIZE;
	hw->flt_region_ddb = FA_FLASH_ISCSI_DDB >> 2;
	hw->flt_ddb_size = FA_FLASH_DDB_SIZE;

done:
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "FLT[%s]: flt=0x%x fdt=0x%x boot=0x%x bootload=0x%x fw=0x%x chap=0x%x chap_size=0x%x ddb=0x%x ddb_size=0x%x\n",
			  loc, hw->flt_region_flt, hw->flt_region_fdt,
			  hw->flt_region_boot, hw->flt_region_bootload,
			  hw->flt_region_fw, hw->flt_region_chap,
			  hw->flt_chap_size, hw->flt_region_ddb,
			  hw->flt_ddb_size));
}

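/**
 * qla4_82xx_get_fdt_info - parse the flash descriptor table (FDT)
 * @ha: pointer to host adapter structure.
 *
 * Validates the "QLID" signature and checksum, then caches the flash
 * erase/protect commands and block size; defaults to 64K blocks when no
 * valid FDT is present.
 **/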
static void
qla4_82xx_get_fdt_info(struct scsi_qla_host *ha)
{
#define FLASH_BLK_SIZE_4K	0x1000
#define FLASH_BLK_SIZE_32K	0x8000
#define FLASH_BLK_SIZE_64K	0x10000
	const char *loc, *locations[] = { "MID", "FDT" };
	uint16_t cnt, chksum;
	uint16_t *wptr;
	struct qla_fdt_layout *fdt;
	uint16_t mid = 0;
	uint16_t fid = 0;
	struct ql82xx_hw_data *hw = &ha->hw;

	hw->flash_conf_off = FARX_ACCESS_FLASH_CONF;
	hw->flash_data_off = FARX_ACCESS_FLASH_DATA;

	wptr = (uint16_t *)ha->request_ring;
	fdt = (struct qla_fdt_layout *)ha->request_ring;
	qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
				   hw->flt_region_fdt << 2, OPTROM_BURST_SIZE);

	if (*wptr == cpu_to_le16(0xffff))
		goto no_flash_data;

	if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
	    fdt->sig[3] != 'D')
		goto no_flash_data;

	for (cnt = 0, chksum = 0; cnt < sizeof(struct qla_fdt_layout) >> 1;
	    cnt++)
		chksum += le16_to_cpu(*wptr++);

	if (chksum) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "Inconsistent FDT detected: "
		    "checksum=0x%x id=%c version=0x%x.\n", chksum, fdt->sig[0],
		    le16_to_cpu(fdt->version)));
		goto no_flash_data;
	}

	loc = locations[1];
	mid = le16_to_cpu(fdt->man_id);
	fid = le16_to_cpu(fdt->id);
	hw->fdt_wrt_disable = fdt->wrt_disable_bits;
	hw->fdt_erase_cmd = flash_conf_addr(hw, 0x0300 | fdt->erase_cmd);
	hw->fdt_block_size = le32_to_cpu(fdt->block_size);

	if (fdt->unprotect_sec_cmd) {
		hw->fdt_unprotect_sec_cmd = flash_conf_addr(hw, 0x0300 |
						fdt->unprotect_sec_cmd);
		hw->fdt_protect_sec_cmd = fdt->protect_sec_cmd ?
			flash_conf_addr(hw, 0x0300 | fdt->protect_sec_cmd) :
			flash_conf_addr(hw, 0x0336);
	}
	goto done;

no_flash_data:
	loc = locations[0];
	hw->fdt_block_size = FLASH_BLK_SIZE_64K;
done:
	DEBUG2(ql4_printk(KERN_INFO, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x "
	    "pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
	    hw->fdt_erase_cmd, hw->fdt_protect_sec_cmd,
	    hw->fdt_unprotect_sec_cmd, hw->fdt_wrt_disable,
	    hw->fdt_block_size));
}

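/**
 * qla4_82xx_get_idc_param - read IDC timeouts from flash (ISP8022 only)
 * @ha: pointer to host adapter structure.
 *
 * Reads the device-init and reset-ack timeouts from flash; if the values
 * are unprogrammed (all ones), the ROM defaults are used instead.
 **/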
static void
qla4_82xx_get_idc_param(struct scsi_qla_host *ha)
{
#define QLA82XX_IDC_PARAM_ADDR	0x003e885c
	uint32_t *wptr;

	if (!is_qla8022(ha))
		return;

	wptr = (uint32_t *)ha->request_ring;
	qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
				   QLA82XX_IDC_PARAM_ADDR, 8);

	if (*wptr == cpu_to_le32(0xffffffff)) {
		ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
		ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
	} else {
		ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
		ha->nx_reset_timeout = le32_to_cpu(*wptr);
	}

	DEBUG2(ql4_printk(KERN_DEBUG, ha,
			  "ha->nx_dev_init_timeout = %d\n",
			  ha->nx_dev_init_timeout));
	DEBUG2(ql4_printk(KERN_DEBUG, ha,
			  "ha->nx_reset_timeout = %d\n", ha->nx_reset_timeout));
}

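/**
 * qla4_82xx_queue_mbox_cmd - load mailbox registers and signal firmware
 * @ha: pointer to host adapter structure.
 * @mbx_cmd: array of mailbox command values.
 * @in_count: number of mailbox registers to load.
 *
 * Loads mailboxes 1..in_count-1 first, then writes mailbox 0 and the
 * host-interface hint register, since writing mailbox 0 wakes the firmware.
 **/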
void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
			      int in_count)
{
	int i;

	/* Load all mailbox registers, except mailbox 0. */
	for (i = 1; i < in_count; i++)
		writel(mbx_cmd[i], &ha->qla4_82xx_reg->mailbox_in[i]);

	/* Wakeup firmware */
	writel(mbx_cmd[0], &ha->qla4_82xx_reg->mailbox_in[0]);
	readl(&ha->qla4_82xx_reg->mailbox_in[0]);
	writel(HINT_MBX_INT_PENDING, &ha->qla4_82xx_reg->hint);
	readl(&ha->qla4_82xx_reg->hint);
}

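/**
 * qla4_82xx_process_mbox_intr - service a mailbox completion interrupt
 * @ha: pointer to host adapter structure.
 * @out_count: number of status mailboxes expected from the firmware.
 *
 * Runs the interrupt service routine for a pending RISC interrupt and,
 * when legacy INTx is in use, unmasks the interrupt again afterwards.
 **/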
void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
{
	int intr_status;

	intr_status = readl(&ha->qla4_82xx_reg->host_int);
	if (intr_status & ISRX_82XX_RISC_INT) {
		ha->mbox_status_count = out_count;
		intr_status = readl(&ha->qla4_82xx_reg->host_status);
		ha->isp_ops->interrupt_service_routine(ha, intr_status);

		if (test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
		    (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled))
			qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
					0xfbff);
	}
}

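/**
 * qla4_8xxx_get_flash_info - locate and parse flash layout information
 * @ha: pointer to host adapter structure.
 *
 * Finds the FLT start, parses it, and loads the chip-specific FDT and
 * IDC parameters.
 **/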
int
qla4_8xxx_get_flash_info(struct scsi_qla_host *ha)
{
	int ret;
	uint32_t flt_addr;

	ret = qla4_8xxx_find_flt_start(ha, &flt_addr);
	if (ret != QLA_SUCCESS)
		return ret;

	qla4_8xxx_get_flt_info(ha, flt_addr);
	if (is_qla8022(ha)) {
		qla4_82xx_get_fdt_info(ha);
		qla4_82xx_get_idc_param(ha);
	} else if (is_qla8032(ha) || is_qla8042(ha)) {
		qla4_83xx_get_idc_param(ha);
	}

	return QLA_SUCCESS;
}

/**
 * qla4_8xxx_stop_firmware - stops firmware on specified adapter instance
 * @ha: pointer to host adapter structure.
 *
 * Remarks:
 *    For iSCSI, throws away all I/O and AENs into bit bucket, so they will
 *    not be available after successful return. Driver must cleanup potential
 *    outstanding I/O's after calling this function.
 **/
int
qla4_8xxx_stop_firmware(struct scsi_qla_host *ha)
{
	int status;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_STOP_FW;
	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1,
					 &mbox_cmd[0], &mbox_sts[0]);

	DEBUG2(printk("scsi%ld: %s: status = %d\n", ha->host_no,
		      __func__, status));
	return status;
}

/**
 * qla4_82xx_isp_reset - Resets ISP and aborts all outstanding commands.
 * @ha: pointer to host adapter structure.
 **/
int
qla4_82xx_isp_reset(struct scsi_qla_host *ha)
{
	int rval;
	uint32_t dev_state;

	qla4_82xx_idc_lock(ha);
	dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);

	if (dev_state == QLA8XXX_DEV_READY) {
		ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
		qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
				QLA8XXX_DEV_NEED_RESET);
		set_bit(AF_8XXX_RST_OWNER, &ha->flags);
	} else
		ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");

	qla4_82xx_idc_unlock(ha);

	rval = qla4_8xxx_device_state_handler(ha);

	qla4_82xx_idc_lock(ha);
	qla4_8xxx_clear_rst_ready(ha);
	qla4_82xx_idc_unlock(ha);

	if (rval == QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "Clearing AF_FW_RECOVERY in qla4_82xx_isp_reset\n");
		clear_bit(AF_FW_RECOVERY, &ha->flags);
	}

	return rval;
}

/**
 * qla4_8xxx_get_sys_info - get adapter MAC address(es) and serial number
 * @ha: pointer to host adapter structure.
 **/
int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	struct mbx_sys_info *sys_info;
	dma_addr_t sys_info_dma;
	int status = QLA_ERROR;

	sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
				      &sys_info_dma, GFP_KERNEL);
	if (sys_info == NULL) {
		DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
			      ha->host_no, __func__));
		return status;
	}

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_SYS_INFO;
	mbox_cmd[1] = LSDW(sys_info_dma);
	mbox_cmd[2] = MSDW(sys_info_dma);
	mbox_cmd[4] = sizeof(*sys_info);

	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 6, &mbox_cmd[0],
				    &mbox_sts[0]) != QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO failed\n",
			      ha->host_no, __func__));
		goto exit_validate_mac82;
	}

	/* Make sure we receive the minimum required data to cache internally */
	if (((is_qla8032(ha) || is_qla8042(ha)) ? mbox_sts[3] : mbox_sts[4]) <
	    offsetof(struct mbx_sys_info, reserved)) {
		DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive"
			      " error (%x)\n", ha->host_no, __func__,
			      mbox_sts[4]));
		goto exit_validate_mac82;
	}

	/* Save M.A.C. address & serial_number */
	ha->port_num = sys_info->port_num;
	memcpy(ha->my_mac, &sys_info->mac_addr[0],
	       min(sizeof(ha->my_mac), sizeof(sys_info->mac_addr)));
	memcpy(ha->serial_number, &sys_info->serial_number,
	       min(sizeof(ha->serial_number), sizeof(sys_info->serial_number)));
	memcpy(ha->model_name, &sys_info->board_id_str,
	       min(sizeof(ha->model_name), sizeof(sys_info->board_id_str)));
	ha->phy_port_cnt = sys_info->phys_port_cnt;
	ha->phy_port_num = sys_info->port_num;
	ha->iscsi_pci_func_cnt = sys_info->iscsi_pci_func_cnt;

	DEBUG2(printk("scsi%ld: %s: mac %pM serial %s\n",
		      ha->host_no, __func__, ha->my_mac, ha->serial_number));

	status = QLA_SUCCESS;

exit_validate_mac82:
	dma_free_coherent(&ha->pdev->dev, sizeof(*sys_info), sys_info,
			  sys_info_dma);
	return status;
}

/* Interrupt handling helpers. */

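/**
 * qla4_8xxx_intr_enable - ask the firmware to enable interrupt delivery
 * @ha: pointer to host adapter structure.
 *
 * Issues the ENABLE_INTRS mailbox command with INTR_ENABLE.
 **/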
int qla4_8xxx_intr_enable(struct scsi_qla_host *ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s\n", __func__));

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	mbox_cmd[0] = MBOX_CMD_ENABLE_INTRS;
	mbox_cmd[1] = INTR_ENABLE;
	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
				    &mbox_sts[0]) != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: MBOX_CMD_ENABLE_INTRS failed (0x%04x)\n",
				  __func__, mbox_sts[0]));
		return QLA_ERROR;
	}
	return QLA_SUCCESS;
}

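/**
 * qla4_8xxx_intr_disable - ask the firmware to disable interrupt delivery
 * @ha: pointer to host adapter structure.
 *
 * Issues the ENABLE_INTRS mailbox command with INTR_DISABLE.
 **/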
int qla4_8xxx_intr_disable(struct scsi_qla_host *ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s\n", __func__));

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	mbox_cmd[0] = MBOX_CMD_ENABLE_INTRS;
	mbox_cmd[1] = INTR_DISABLE;
	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
				    &mbox_sts[0]) != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: MBOX_CMD_ENABLE_INTRS failed (0x%04x)\n",
				  __func__, mbox_sts[0]));
		return QLA_ERROR;
	}

	return QLA_SUCCESS;
}

void
qla4_82xx_enable_intrs(struct scsi_qla_host *ha)
{
	qla4_8xxx_intr_enable(ha);

	spin_lock_irq(&ha->hardware_lock);
	/* BIT 10 - reset */
	qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
	spin_unlock_irq(&ha->hardware_lock);
	set_bit(AF_INTERRUPTS_ON, &ha->flags);
}

void
qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
{
	if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
		qla4_8xxx_intr_disable(ha);

	spin_lock_irq(&ha->hardware_lock);
	/* BIT 10 - set */
	qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
	spin_unlock_irq(&ha->hardware_lock);
}

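/**
 * qla4_8xxx_enable_msix - allocate MSI-X vectors and request both IRQs
 * @ha: pointer to host adapter structure.
 *
 * Vector 0 handles default events, vector 1 handles the response queue.
 * Returns 0 on success or a negative errno on failure.
 **/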
int
qla4_8xxx_enable_msix(struct scsi_qla_host *ha)
{
	int ret;

	ret = pci_alloc_irq_vectors(ha->pdev, QLA_MSIX_ENTRIES,
				    QLA_MSIX_ENTRIES, PCI_IRQ_MSIX);
	if (ret < 0) {
		ql4_printk(KERN_WARNING, ha,
			   "MSI-X: Failed to enable support -- %d/%d\n",
			   QLA_MSIX_ENTRIES, ret);
		return ret;
	}

	ret = request_irq(pci_irq_vector(ha->pdev, 0),
			  qla4_8xxx_default_intr_handler, 0, "qla4xxx (default)",
			  ha);
	if (ret)
		goto out_free_vectors;

	ret = request_irq(pci_irq_vector(ha->pdev, 1),
			  qla4_8xxx_msix_rsp_q, 0, "qla4xxx (rsp_q)", ha);
	if (ret)
		goto out_free_default_irq;

	return 0;

out_free_default_irq:
	free_irq(pci_irq_vector(ha->pdev, 0), ha);
out_free_vectors:
	pci_free_irq_vectors(ha->pdev);
	return ret;
}

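/**
 * qla4_8xxx_check_init_adapter_retry - decide whether init may be retried
 * @ha: pointer to host adapter structure.
 *
 * Returns QLA_ERROR if IRQ allocation failed, since retrying cannot help;
 * otherwise frees the IRQs so adapter initialization can be retried.
 **/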
int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha)
{
	int status = QLA_SUCCESS;

	/* Don't retry adapter initialization if IRQ allocation failed */
	if (!test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
		ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization as IRQs are not attached\n",
			   __func__);
		status = QLA_ERROR;
		goto exit_init_adapter_failure;
	}

	/* Since interrupts are registered in start_firmware for
	 * 8xxx, release them here if initialize_adapter fails
	 * and retry adapter initialization */
	qla4xxx_free_irqs(ha);

exit_init_adapter_failure:
	return status;
}
