1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2015 QLogic Corporation. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
29 *
30 * ***********************************************************************
31 * * **
32 * * NOTICE **
33 * * COPYRIGHT (C) 1996-2015 QLOGIC CORPORATION **
34 * * ALL RIGHTS RESERVED **
35 * * **
36 * ***********************************************************************
37 *
38 */
39
40 #include <ql_apps.h>
41 #include <ql_api.h>
42 #include <ql_debug.h>
43 #include <ql_init.h>
44 #include <ql_mbx.h>
45 #include <ql_nx.h>
46
47 /*
48 * Local Function Prototypes.
49 */
50 static void ql_crb_addr_transform_setup(ql_adapter_state_t *);
51 static void ql_8021_pci_set_crbwindow_2M(ql_adapter_state_t *, uint64_t *);
52 static int ql_8021_crb_win_lock(ql_adapter_state_t *);
53 static void ql_8021_crb_win_unlock(ql_adapter_state_t *);
54 static int ql_8021_pci_get_crb_addr_2M(ql_adapter_state_t *, uint64_t *);
55 static uint32_t ql_8021_pci_mem_bound_check(ql_adapter_state_t *, uint64_t,
56 uint32_t);
57 static uint64_t ql_8021_pci_set_window(ql_adapter_state_t *, uint64_t);
58 static int ql_8021_pci_is_same_window(ql_adapter_state_t *, uint64_t);
59 static int ql_8021_pci_mem_read_direct(ql_adapter_state_t *, uint64_t, void *,
60 uint32_t);
61 static int ql_8021_pci_mem_write_direct(ql_adapter_state_t *, uint64_t, void *,
62 uint32_t);
63 static int ql_8021_pci_mem_read_2M(ql_adapter_state_t *, uint64_t, void *,
64 uint32_t);
65 static int ql_8021_pci_mem_write_2M(ql_adapter_state_t *, uint64_t, void *,
66 uint32_t);
67 static uint32_t ql_8021_decode_crb_addr(ql_adapter_state_t *, uint32_t);
68 static int ql_8021_rom_lock(ql_adapter_state_t *);
69 static void ql_8021_rom_unlock(ql_adapter_state_t *);
70 static int ql_8021_wait_rom_done(ql_adapter_state_t *);
71 static int ql_8021_wait_flash_done(ql_adapter_state_t *);
72 static int ql_8021_do_rom_fast_read(ql_adapter_state_t *, uint32_t, uint32_t *);
73 static int ql_8021_rom_fast_read(ql_adapter_state_t *, uint32_t, uint32_t *);
74 static int ql_8021_do_rom_write(ql_adapter_state_t *, uint32_t, uint32_t);
75 static int ql_8021_do_rom_erase(ql_adapter_state_t *, uint32_t);
76 static int ql_8021_phantom_init(ql_adapter_state_t *);
77 static int ql_8021_pinit_from_rom(ql_adapter_state_t *);
78 static int ql_8021_load_from_flash(ql_adapter_state_t *);
79 static int ql_8021_load_firmware(ql_adapter_state_t *);
80 static int ql_8021_reset_hw(ql_adapter_state_t *, int);
81 static int ql_8021_init_p3p(ql_adapter_state_t *);
82 static int ql_8021_hw_lock(ql_adapter_state_t *, uint32_t);
83 static void ql_8021_hw_unlock(ql_adapter_state_t *);
84 static void ql_8021_need_reset_handler(ql_adapter_state_t *);
85 static int ql_8021_load_fw(ql_adapter_state_t *);
86 static uint32_t ql_8021_check_fw_alive(ql_adapter_state_t *);
87 static int ql_8021_get_fw_dump(ql_adapter_state_t *);
88 static void ql_8021_md_parse_template(ql_adapter_state_t *, caddr_t, caddr_t,
89 uint32_t, uint32_t);
90 static int ql_8021_md_rdcrb(ql_adapter_state_t *, md_entry_rdcrb_t *,
91 uint32_t *);
92 static int ql_8021_md_L2Cache(ql_adapter_state_t *, md_entry_cache_t *,
93 uint32_t *);
94 static int ql_8021_md_L1Cache(ql_adapter_state_t *, md_entry_cache_t *,
95 uint32_t *);
96 static int ql_8021_md_rdocm(ql_adapter_state_t *, md_entry_rdocm_t *,
97 uint32_t *);
98 static int ql_8021_md_rdmem(ql_adapter_state_t *, md_entry_rdmem_t *,
99 uint32_t *);
100 static int ql_8021_md_rdrom(ql_adapter_state_t *, md_entry_rdrom_t *,
101 uint32_t *);
102 static int ql_8021_md_rdmux(ql_adapter_state_t *, md_entry_mux_t *,
103 uint32_t *);
104 static int ql_8021_md_rdqueue(ql_adapter_state_t *, md_entry_queue_t *,
105 uint32_t *);
106 static int ql_8021_md_cntrl(ql_adapter_state_t *, md_template_hdr_t *,
107 md_entry_cntrl_t *);
108 static void ql_8021_md_entry_err_chk(ql_adapter_state_t *, md_entry_t *,
109 uint32_t, int);
110 static uint32_t ql_8021_md_template_checksum(ql_adapter_state_t *);
111 static uint32_t ql_8021_read_reg(ql_adapter_state_t *, uint32_t);
112 static void ql_8021_write_reg(ql_adapter_state_t *, uint32_t, uint32_t);
113 static uint32_t ql_8021_read_ocm(ql_adapter_state_t *, uint32_t);
114
115 /*
116 * Local Data.
117 */
118 static uint32_t crb_addr_xform[MAX_CRB_XFORM];
119 static int crb_table_initialized = 0;
120 static int pci_set_window_warning_count = 0;
121
122 static struct legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;
123
124 static crb_128M_2M_block_map_t crb_128M_2M_map[64] = {
125 {{{0, 0, 0, 0}}}, /* 0: PCI */
126 {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
127 {1, 0x0110000, 0x0120000, 0x130000},
128 {1, 0x0120000, 0x0122000, 0x124000},
129 {1, 0x0130000, 0x0132000, 0x126000},
130 {1, 0x0140000, 0x0142000, 0x128000},
131 {1, 0x0150000, 0x0152000, 0x12a000},
132 {1, 0x0160000, 0x0170000, 0x110000},
133 {1, 0x0170000, 0x0172000, 0x12e000},
134 {0, 0x0000000, 0x0000000, 0x000000},
135 {0, 0x0000000, 0x0000000, 0x000000},
136 {0, 0x0000000, 0x0000000, 0x000000},
137 {0, 0x0000000, 0x0000000, 0x000000},
138 {0, 0x0000000, 0x0000000, 0x000000},
139 {0, 0x0000000, 0x0000000, 0x000000},
140 {1, 0x01e0000, 0x01e0800, 0x122000},
141 {0, 0x0000000, 0x0000000, 0x000000}}},
142 {{{1, 0x0200000, 0x0210000, 0x180000}}}, /* 2: MN */
143 {{{0, 0, 0, 0}}}, /* 3: */
144 {{{1, 0x0400000, 0x0401000, 0x169000}}}, /* 4: P2NR1 */
145 {{{1, 0x0500000, 0x0510000, 0x140000}}}, /* 5: SRE */
146 {{{1, 0x0600000, 0x0610000, 0x1c0000}}}, /* 6: NIU */
147 {{{1, 0x0700000, 0x0704000, 0x1b8000}}}, /* 7: QM */
148 {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
149 {0, 0x0000000, 0x0000000, 0x000000},
150 {0, 0x0000000, 0x0000000, 0x000000},
151 {0, 0x0000000, 0x0000000, 0x000000},
152 {0, 0x0000000, 0x0000000, 0x000000},
153 {0, 0x0000000, 0x0000000, 0x000000},
154 {0, 0x0000000, 0x0000000, 0x000000},
155 {0, 0x0000000, 0x0000000, 0x000000},
156 {0, 0x0000000, 0x0000000, 0x000000},
157 {0, 0x0000000, 0x0000000, 0x000000},
158 {0, 0x0000000, 0x0000000, 0x000000},
159 {0, 0x0000000, 0x0000000, 0x000000},
160 {0, 0x0000000, 0x0000000, 0x000000},
161 {0, 0x0000000, 0x0000000, 0x000000},
162 {0, 0x0000000, 0x0000000, 0x000000},
163 {1, 0x08f0000, 0x08f2000, 0x172000}}},
164 {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1 */
165 {0, 0x0000000, 0x0000000, 0x000000},
166 {0, 0x0000000, 0x0000000, 0x000000},
167 {0, 0x0000000, 0x0000000, 0x000000},
168 {0, 0x0000000, 0x0000000, 0x000000},
169 {0, 0x0000000, 0x0000000, 0x000000},
170 {0, 0x0000000, 0x0000000, 0x000000},
171 {0, 0x0000000, 0x0000000, 0x000000},
172 {0, 0x0000000, 0x0000000, 0x000000},
173 {0, 0x0000000, 0x0000000, 0x000000},
174 {0, 0x0000000, 0x0000000, 0x000000},
175 {0, 0x0000000, 0x0000000, 0x000000},
176 {0, 0x0000000, 0x0000000, 0x000000},
177 {0, 0x0000000, 0x0000000, 0x000000},
178 {0, 0x0000000, 0x0000000, 0x000000},
179 {1, 0x09f0000, 0x09f2000, 0x176000}}},
180 {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2 */
181 {0, 0x0000000, 0x0000000, 0x000000},
182 {0, 0x0000000, 0x0000000, 0x000000},
183 {0, 0x0000000, 0x0000000, 0x000000},
184 {0, 0x0000000, 0x0000000, 0x000000},
185 {0, 0x0000000, 0x0000000, 0x000000},
186 {0, 0x0000000, 0x0000000, 0x000000},
187 {0, 0x0000000, 0x0000000, 0x000000},
188 {0, 0x0000000, 0x0000000, 0x000000},
189 {0, 0x0000000, 0x0000000, 0x000000},
190 {0, 0x0000000, 0x0000000, 0x000000},
191 {0, 0x0000000, 0x0000000, 0x000000},
192 {0, 0x0000000, 0x0000000, 0x000000},
193 {0, 0x0000000, 0x0000000, 0x000000},
194 {0, 0x0000000, 0x0000000, 0x000000},
195 {1, 0x0af0000, 0x0af2000, 0x17a000}}},
196 {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3 */
197 {0, 0x0000000, 0x0000000, 0x000000},
198 {0, 0x0000000, 0x0000000, 0x000000},
199 {0, 0x0000000, 0x0000000, 0x000000},
200 {0, 0x0000000, 0x0000000, 0x000000},
201 {0, 0x0000000, 0x0000000, 0x000000},
202 {0, 0x0000000, 0x0000000, 0x000000},
203 {0, 0x0000000, 0x0000000, 0x000000},
204 {0, 0x0000000, 0x0000000, 0x000000},
205 {0, 0x0000000, 0x0000000, 0x000000},
206 {0, 0x0000000, 0x0000000, 0x000000},
207 {0, 0x0000000, 0x0000000, 0x000000},
208 {0, 0x0000000, 0x0000000, 0x000000},
209 {0, 0x0000000, 0x0000000, 0x000000},
210 {0, 0x0000000, 0x0000000, 0x000000},
211 {1, 0x0bf0000, 0x0bf2000, 0x17e000}}},
212 {{{1, 0x0c00000, 0x0c04000, 0x1d4000}}}, /* 12: I2Q */
213 {{{1, 0x0d00000, 0x0d04000, 0x1a4000}}}, /* 13: TMR */
214 {{{1, 0x0e00000, 0x0e04000, 0x1a0000}}}, /* 14: ROMUSB */
215 {{{1, 0x0f00000, 0x0f01000, 0x164000}}}, /* 15: PEG4 */
216 {{{0, 0x1000000, 0x1004000, 0x1a8000}}}, /* 16: XDMA */
217 {{{1, 0x1100000, 0x1101000, 0x160000}}}, /* 17: PEG0 */
218 {{{1, 0x1200000, 0x1201000, 0x161000}}}, /* 18: PEG1 */
219 {{{1, 0x1300000, 0x1301000, 0x162000}}}, /* 19: PEG2 */
220 {{{1, 0x1400000, 0x1401000, 0x163000}}}, /* 20: PEG3 */
221 {{{1, 0x1500000, 0x1501000, 0x165000}}}, /* 21: P2ND */
222 {{{1, 0x1600000, 0x1601000, 0x166000}}}, /* 22: P2NI */
223 {{{0, 0, 0, 0}}}, /* 23: */
224 {{{0, 0, 0, 0}}}, /* 24: */
225 {{{0, 0, 0, 0}}}, /* 25: */
226 {{{0, 0, 0, 0}}}, /* 26: */
227 {{{0, 0, 0, 0}}}, /* 27: */
228 {{{0, 0, 0, 0}}}, /* 28: */
229 {{{1, 0x1d00000, 0x1d10000, 0x190000}}}, /* 29: MS */
230 {{{1, 0x1e00000, 0x1e01000, 0x16a000}}}, /* 30: P2NR2 */
231 {{{1, 0x1f00000, 0x1f10000, 0x150000}}}, /* 31: EPG */
232 {{{0}}}, /* 32: PCI */
233 {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
234 {1, 0x2110000, 0x2120000, 0x130000},
235 {1, 0x2120000, 0x2122000, 0x124000},
236 {1, 0x2130000, 0x2132000, 0x126000},
237 {1, 0x2140000, 0x2142000, 0x128000},
238 {1, 0x2150000, 0x2152000, 0x12a000},
239 {1, 0x2160000, 0x2170000, 0x110000},
240 {1, 0x2170000, 0x2172000, 0x12e000},
241 {0, 0x0000000, 0x0000000, 0x000000},
242 {0, 0x0000000, 0x0000000, 0x000000},
243 {0, 0x0000000, 0x0000000, 0x000000},
244 {0, 0x0000000, 0x0000000, 0x000000},
245 {0, 0x0000000, 0x0000000, 0x000000},
246 {0, 0x0000000, 0x0000000, 0x000000},
247 {0, 0x0000000, 0x0000000, 0x000000},
248 {0, 0x0000000, 0x0000000, 0x000000}}},
249 {{{1, 0x2200000, 0x2204000, 0x1b0000}}}, /* 34: CAM */
250 {{{0}}}, /* 35: */
251 {{{0}}}, /* 36: */
252 {{{0}}}, /* 37: */
253 {{{0}}}, /* 38: */
254 {{{0}}}, /* 39: */
255 {{{1, 0x2800000, 0x2804000, 0x1a4000}}}, /* 40: TMR */
256 {{{1, 0x2900000, 0x2901000, 0x16b000}}}, /* 41: P2NR3 */
257 {{{1, 0x2a00000, 0x2a00400, 0x1ac400}}}, /* 42: RPMX1 */
258 {{{1, 0x2b00000, 0x2b00400, 0x1ac800}}}, /* 43: RPMX2 */
259 {{{1, 0x2c00000, 0x2c00400, 0x1acc00}}}, /* 44: RPMX3 */
260 {{{1, 0x2d00000, 0x2d00400, 0x1ad000}}}, /* 45: RPMX4 */
261 {{{1, 0x2e00000, 0x2e00400, 0x1ad400}}}, /* 46: RPMX5 */
262 {{{1, 0x2f00000, 0x2f00400, 0x1ad800}}}, /* 47: RPMX6 */
263 {{{1, 0x3000000, 0x3000400, 0x1adc00}}}, /* 48: RPMX7 */
264 {{{0, 0x3100000, 0x3104000, 0x1a8000}}}, /* 49: XDMA */
265 {{{1, 0x3200000, 0x3204000, 0x1d4000}}}, /* 50: I2Q */
266 {{{1, 0x3300000, 0x3304000, 0x1a0000}}}, /* 51: ROMUSB */
267 {{{0}}}, /* 52: */
268 {{{1, 0x3500000, 0x3500400, 0x1ac000}}}, /* 53: RPMX0 */
269 {{{1, 0x3600000, 0x3600400, 0x1ae000}}}, /* 54: RPMX8 */
270 {{{1, 0x3700000, 0x3700400, 0x1ae400}}}, /* 55: RPMX9 */
271 {{{1, 0x3800000, 0x3804000, 0x1d0000}}}, /* 56: OCM0 */
272 {{{1, 0x3900000, 0x3904000, 0x1b4000}}}, /* 57: CRYPTO */
273 {{{1, 0x3a00000, 0x3a04000, 0x1d8000}}}, /* 58: SMB */
274 {{{0}}}, /* 59: I2C0 */
275 {{{0}}}, /* 60: I2C1 */
276 {{{1, 0x3d00000, 0x3d04000, 0x1dc000}}}, /* 61: LPC */
277 {{{1, 0x3e00000, 0x3e01000, 0x167000}}}, /* 62: P2NC */
278 {{{1, 0x3f00000, 0x3f01000, 0x168000}}} /* 63: P2NR0 */
279 };
280
281 /*
282 * top 12 bits of crb internal address (hub, agent)
283 */
284 static uint32_t crb_hub_agt[64] = {
285 0,
286 UNM_HW_CRB_HUB_AGT_ADR_PS,
287 UNM_HW_CRB_HUB_AGT_ADR_MN,
288 UNM_HW_CRB_HUB_AGT_ADR_MS,
289 0,
290 UNM_HW_CRB_HUB_AGT_ADR_SRE,
291 UNM_HW_CRB_HUB_AGT_ADR_NIU,
292 UNM_HW_CRB_HUB_AGT_ADR_QMN,
293 UNM_HW_CRB_HUB_AGT_ADR_SQN0,
294 UNM_HW_CRB_HUB_AGT_ADR_SQN1,
295 UNM_HW_CRB_HUB_AGT_ADR_SQN2,
296 UNM_HW_CRB_HUB_AGT_ADR_SQN3,
297 UNM_HW_CRB_HUB_AGT_ADR_I2Q,
298 UNM_HW_CRB_HUB_AGT_ADR_TIMR,
299 UNM_HW_CRB_HUB_AGT_ADR_ROMUSB,
300 UNM_HW_CRB_HUB_AGT_ADR_PGN4,
301 UNM_HW_CRB_HUB_AGT_ADR_XDMA,
302 UNM_HW_CRB_HUB_AGT_ADR_PGN0,
303 UNM_HW_CRB_HUB_AGT_ADR_PGN1,
304 UNM_HW_CRB_HUB_AGT_ADR_PGN2,
305 UNM_HW_CRB_HUB_AGT_ADR_PGN3,
306 UNM_HW_CRB_HUB_AGT_ADR_PGND,
307 UNM_HW_CRB_HUB_AGT_ADR_PGNI,
308 UNM_HW_CRB_HUB_AGT_ADR_PGS0,
309 UNM_HW_CRB_HUB_AGT_ADR_PGS1,
310 UNM_HW_CRB_HUB_AGT_ADR_PGS2,
311 UNM_HW_CRB_HUB_AGT_ADR_PGS3,
312 0,
313 UNM_HW_CRB_HUB_AGT_ADR_PGSI,
314 UNM_HW_CRB_HUB_AGT_ADR_SN,
315 0,
316 UNM_HW_CRB_HUB_AGT_ADR_EG,
317 0,
318 UNM_HW_CRB_HUB_AGT_ADR_PS,
319 UNM_HW_CRB_HUB_AGT_ADR_CAM,
320 0,
321 0,
322 0,
323 0,
324 0,
325 UNM_HW_CRB_HUB_AGT_ADR_TIMR,
326 0,
327 UNM_HW_CRB_HUB_AGT_ADR_RPMX1,
328 UNM_HW_CRB_HUB_AGT_ADR_RPMX2,
329 UNM_HW_CRB_HUB_AGT_ADR_RPMX3,
330 UNM_HW_CRB_HUB_AGT_ADR_RPMX4,
331 UNM_HW_CRB_HUB_AGT_ADR_RPMX5,
332 UNM_HW_CRB_HUB_AGT_ADR_RPMX6,
333 UNM_HW_CRB_HUB_AGT_ADR_RPMX7,
334 UNM_HW_CRB_HUB_AGT_ADR_XDMA,
335 UNM_HW_CRB_HUB_AGT_ADR_I2Q,
336 UNM_HW_CRB_HUB_AGT_ADR_ROMUSB,
337 0,
338 UNM_HW_CRB_HUB_AGT_ADR_RPMX0,
339 UNM_HW_CRB_HUB_AGT_ADR_RPMX8,
340 UNM_HW_CRB_HUB_AGT_ADR_RPMX9,
341 UNM_HW_CRB_HUB_AGT_ADR_OCM0,
342 0,
343 UNM_HW_CRB_HUB_AGT_ADR_SMB,
344 UNM_HW_CRB_HUB_AGT_ADR_I2C0,
345 UNM_HW_CRB_HUB_AGT_ADR_I2C1,
346 0,
347 UNM_HW_CRB_HUB_AGT_ADR_PGNC,
348 0,
349 };
350
351 /* ARGSUSED */
352 static void
353 ql_crb_addr_transform_setup(ql_adapter_state_t *ha)
354 {
355 crb_addr_transform(XDMA);
356 crb_addr_transform(TIMR);
357 crb_addr_transform(SRE);
358 crb_addr_transform(SQN3);
359 crb_addr_transform(SQN2);
360 crb_addr_transform(SQN1);
361 crb_addr_transform(SQN0);
362 crb_addr_transform(SQS3);
363 crb_addr_transform(SQS2);
364 crb_addr_transform(SQS1);
365 crb_addr_transform(SQS0);
366 crb_addr_transform(RPMX7);
367 crb_addr_transform(RPMX6);
368 crb_addr_transform(RPMX5);
369 crb_addr_transform(RPMX4);
370 crb_addr_transform(RPMX3);
371 crb_addr_transform(RPMX2);
372 crb_addr_transform(RPMX1);
373 crb_addr_transform(RPMX0);
374 crb_addr_transform(ROMUSB);
375 crb_addr_transform(SN);
376 crb_addr_transform(QMN);
377 crb_addr_transform(QMS);
378 crb_addr_transform(PGNI);
379 crb_addr_transform(PGND);
380 crb_addr_transform(PGN3);
381 crb_addr_transform(PGN2);
382 crb_addr_transform(PGN1);
383 crb_addr_transform(PGN0);
384 crb_addr_transform(PGSI);
385 crb_addr_transform(PGSD);
386 crb_addr_transform(PGS3);
387 crb_addr_transform(PGS2);
388 crb_addr_transform(PGS1);
389 crb_addr_transform(PGS0);
390 crb_addr_transform(PS);
391 crb_addr_transform(PH);
392 crb_addr_transform(NIU);
393 crb_addr_transform(I2Q);
394 crb_addr_transform(EG);
395 crb_addr_transform(MN);
396 crb_addr_transform(MS);
397 crb_addr_transform(CAS2);
398 crb_addr_transform(CAS1);
399 crb_addr_transform(CAS0);
400 crb_addr_transform(CAM);
401 crb_addr_transform(C2C1);
402 crb_addr_transform(C2C0);
403 crb_addr_transform(SMB);
404 crb_addr_transform(OCM0);
405 /*
406	 * Used only in P3; just define it for P2 also.
407 */
408 crb_addr_transform(I2C0);
409
410 crb_table_initialized = 1;
411 }
412
413 /*
414 * In: 'off' is offset from CRB space in 128M pci map
415 * Out: 'off' is 2M pci map addr
416 * side effect: lock crb window
417 */
418 static void
419 ql_8021_pci_set_crbwindow_2M(ql_adapter_state_t *ha, uint64_t *off)
420 {
421 uint32_t win_read, crb_win;
422
423 crb_win = (uint32_t)CRB_HI(*off);
424 WRT_REG_DWORD(ha, CRB_WINDOW_2M + ha->nx_pcibase, crb_win);
425
426 /*
427 * Read back value to make sure write has gone through before trying
428 * to use it.
429 */
430 win_read = RD_REG_DWORD(ha, CRB_WINDOW_2M + ha->nx_pcibase);
431 if (win_read != crb_win) {
432 EL(ha, "Written crbwin (0x%x) != Read crbwin (0x%x), "
433 "off=0x%llx\n", crb_win, win_read, *off);
434 }
435 *off = (*off & MASK(16)) + CRB_INDIRECT_2M + (uintptr_t)ha->nx_pcibase;
436 }
437
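/*
 * Write a 32-bit value to a CRB register.  The offset is translated to
 * the 2M PCI map; if it is not directly mapped, the CRB window lock is
 * taken around the access.
 */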
438 void
439 ql_8021_wr_32(ql_adapter_state_t *ha, uint64_t off, uint32_t data)
440 {
441 int rv;
442
443 rv = ql_8021_pci_get_crb_addr_2M(ha, &off);
444 if (rv == -1) {
445 cmn_err(CE_PANIC, "ql_8021_wr_32, ql_8021_pci_get_crb_addr_"
446 "2M=-1\n");
447 }
448 if (rv == 1) {
449 (void) ql_8021_crb_win_lock(ha);
450 ql_8021_pci_set_crbwindow_2M(ha, &off);
451 }
452
453 WRT_REG_DWORD(ha, (uintptr_t)off, data);
454
455 if (rv == 1) {
456 ql_8021_crb_win_unlock(ha);
457 }
458 }
459
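/*
 * Read a 32-bit CRB register.  Translation and window locking mirror
 * ql_8021_wr_32(); a NULL data pointer discards the value read.
 */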
460 void
461 ql_8021_rd_32(ql_adapter_state_t *ha, uint64_t off, uint32_t *data)
462 {
463 int rv;
464 uint32_t n;
465
466 rv = ql_8021_pci_get_crb_addr_2M(ha, &off);
467 if (rv == -1) {
468 cmn_err(CE_PANIC, "ql_8021_rd_32, ql_8021_pci_get_crb_addr_"
469 "2M=-1\n");
470 }
471
472 if (rv == 1) {
473 (void) ql_8021_crb_win_lock(ha);
474 ql_8021_pci_set_crbwindow_2M(ha, &off);
475 }
476 n = RD_REG_DWORD(ha, (uintptr_t)off);
477
478 if (data != NULL) {
479 *data = n;
480 }
481
482 if (rv == 1) {
483 ql_8021_crb_win_unlock(ha);
484 }
485 }
486
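/*
 * Acquire the CRB window semaphore (PCIe semaphore 7), polling up to
 * CRB_WIN_LOCK_TIMEOUT and recording the owning PCI function.
 */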
487 static int
488 ql_8021_crb_win_lock(ql_adapter_state_t *ha)
489 {
490 uint32_t done = 0, timeout = 0;
491
492 while (!done) {
493		/* acquire semaphore7 from PCI HW block */
494 ql_8021_rd_32(ha, UNM_PCIE_REG(PCIE_SEM7_LOCK), &done);
495 if (done == 1) {
496 break;
497 }
498 if (timeout >= CRB_WIN_LOCK_TIMEOUT) {
499 EL(ha, "timeout\n");
500 return (-1);
501 }
502 timeout++;
503
504 /* Yield CPU */
505 delay(1);
506 }
507 ql_8021_wr_32(ha, UNM_CRB_WIN_LOCK_ID, ha->pci_function_number);
508
509 return (0);
510 }
511
512 static void
513 ql_8021_crb_win_unlock(ql_adapter_state_t *ha)
514 {
515 ql_8021_rd_32(ha, UNM_PCIE_REG(PCIE_SEM7_UNLOCK), NULL);
516 }
517
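/*
 * Translate a CRB offset to a 2M map address.  Returns 0 if the offset
 * is directly mapped (off is updated in place), 1 if the CRB window
 * must be used, or -1 if the offset is out of range.
 */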
518 static int
519 ql_8021_pci_get_crb_addr_2M(ql_adapter_state_t *ha, uint64_t *off)
520 {
521 crb_128M_2M_sub_block_map_t *m;
522
523 if (*off >= UNM_CRB_MAX) {
524 EL(ha, "%llx >= %llx\n", *off, UNM_CRB_MAX);
525 return (-1);
526 }
527
528 if (*off >= UNM_PCI_CAMQM && (*off < UNM_PCI_CAMQM_2M_END)) {
529 *off = (*off - UNM_PCI_CAMQM) + UNM_PCI_CAMQM_2M_BASE +
530 (uintptr_t)ha->nx_pcibase;
531 return (0);
532 }
533
534 if (*off < UNM_PCI_CRBSPACE) {
535 EL(ha, "%llx < %llx\n", *off, UNM_PCI_CRBSPACE);
536 return (-1);
537 }
538
539 *off -= UNM_PCI_CRBSPACE;
540 /*
541 * Try direct map
542 */
543
544 m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
545
546 if (m->valid && ((uint64_t)m->start_128M <= *off) &&
547 ((uint64_t)m->end_128M > *off)) {
548 *off = (uint64_t)(*off + m->start_2M - m->start_128M +
549 (uintptr_t)ha->nx_pcibase);
550 return (0);
551 }
552
553 /*
554 * Not in direct map, use crb window
555 */
556 return (1);
557 }
558
559 /*
560	 * Check memory access boundary.
561	 * Used by the test agent; supports DDR access only for now.
562 */
563 /* ARGSUSED */
564 static uint32_t
565 ql_8021_pci_mem_bound_check(ql_adapter_state_t *ha, uint64_t addr,
566 uint32_t size)
567 {
568 /*LINTED suspicious 0 comparison*/
569 if (!QL_8021_ADDR_IN_RANGE(addr, UNM_ADDR_DDR_NET,
570 UNM_ADDR_DDR_NET_MAX) ||
571 /*LINTED suspicious 0 comparison*/
572 !QL_8021_ADDR_IN_RANGE(addr + size - 1, UNM_ADDR_DDR_NET,
573 UNM_ADDR_DDR_NET_MAX) ||
574 ((size != 1) && (size != 2) && (size != 4) && (size != 8))) {
575 return (0);
576 }
577
578 return (1);
579 }
580
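/*
 * Map an agent memory address (DDR, OCM0 or QDR) into the 2M PCI
 * window.  Returns the translated address, or -1UL for unknown or
 * unhandled ranges.
 */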
581 static uint64_t
582 ql_8021_pci_set_window(ql_adapter_state_t *ha, uint64_t addr)
583 {
584 uint32_t window, win_read;
585
586 /*LINTED suspicious 0 comparison*/
587 if (QL_8021_ADDR_IN_RANGE(addr, UNM_ADDR_DDR_NET,
588 UNM_ADDR_DDR_NET_MAX)) {
589 /* DDR network side */
590 window = (uint32_t)MN_WIN(addr);
591 ql_8021_wr_32(ha, UNM_PCI_CRBSPACE, window);
592 ql_8021_rd_32(ha, UNM_PCI_CRBSPACE, &win_read);
593 if ((win_read << 17) != window) {
594 EL(ha, "Warning, Written MNwin (0x%x) != Read MNwin "
595 "(0x%x)\n", window, win_read);
596 }
597 addr = GET_MEM_OFFS_2M(addr) + UNM_PCI_DDR_NET;
598 } else if (QL_8021_ADDR_IN_RANGE(addr, UNM_ADDR_OCM0,
599 UNM_ADDR_OCM0_MAX)) {
600 uint32_t temp1;
601
602 if ((addr & 0x00ff800) == 0xff800) {
603 /* if bits 19:18&17:11 are on */
604 EL(ha, "QM access not handled\n");
605 addr = -1UL;
606 }
607
608 window = (uint32_t)OCM_WIN(addr);
609 ql_8021_wr_32(ha, UNM_PCI_CRBSPACE, window);
610 ql_8021_rd_32(ha, UNM_PCI_CRBSPACE, &win_read);
611 temp1 = ((window & 0x1FF) << 7) |
612 ((window & 0x0FFFE0000) >> 17);
613 if (win_read != temp1) {
614 EL(ha, "Written OCMwin (0x%x) != Read OCMwin (0x%x)\n",
615 temp1, win_read);
616 }
617 addr = GET_MEM_OFFS_2M(addr) + UNM_PCI_OCM0_2M;
618 } else if (QL_8021_ADDR_IN_RANGE(addr, UNM_ADDR_QDR_NET,
619 NX_P3_ADDR_QDR_NET_MAX)) {
620 /* QDR network side */
621 window = (uint32_t)MS_WIN(addr);
622 ha->qdr_sn_window = window;
623 ql_8021_wr_32(ha, UNM_PCI_CRBSPACE, window);
624 ql_8021_rd_32(ha, UNM_PCI_CRBSPACE, &win_read);
625 if (win_read != window) {
626 EL(ha, "Written MSwin (0x%x) != Read MSwin (0x%x)\n",
627 window, win_read);
628 }
629 addr = GET_MEM_OFFS_2M(addr) + UNM_PCI_QDR_NET;
630 } else {
631		 * The peg gdb frequently accesses memory that doesn't exist;
632		 * this limits the chatter so debugging isn't slowed down.
633 * this limits the chit chat so debugging isn't slowed down.
634 */
635 if ((pci_set_window_warning_count++ < 8) ||
636 (pci_set_window_warning_count % 64 == 0)) {
637 EL(ha, "Unknown address range\n");
638 }
639 addr = -1UL;
640 }
641
642 return (addr);
643 }
644
645 /* check if address is in the same window as the previous access */
646 static int
647 ql_8021_pci_is_same_window(ql_adapter_state_t *ha, uint64_t addr)
648 {
649 uint32_t window;
650 uint64_t qdr_max;
651
652 qdr_max = NX_P3_ADDR_QDR_NET_MAX;
653
654 /*LINTED suspicious 0 comparison*/
655 if (QL_8021_ADDR_IN_RANGE(addr, UNM_ADDR_DDR_NET,
656 UNM_ADDR_DDR_NET_MAX)) {
657 /* DDR network side */
658 EL(ha, "DDR network side\n");
659 return (0); /* MN access can not come here */
660 } else if (QL_8021_ADDR_IN_RANGE(addr, UNM_ADDR_OCM0,
661 UNM_ADDR_OCM0_MAX)) {
662 return (1);
663 } else if (QL_8021_ADDR_IN_RANGE(addr, UNM_ADDR_OCM1,
664 UNM_ADDR_OCM1_MAX)) {
665 return (1);
666 } else if (QL_8021_ADDR_IN_RANGE(addr, UNM_ADDR_QDR_NET, qdr_max)) {
667 /* QDR network side */
668 window = (uint32_t)(((addr - UNM_ADDR_QDR_NET) >> 22) & 0x3f);
669 if (ha->qdr_sn_window == window) {
670 return (1);
671 }
672 }
673
674 return (0);
675 }
676
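/*
 * Direct window memory access: ql_8021_pci_mem_read_direct() and
 * ql_8021_pci_mem_write_direct() map the address with
 * ql_8021_pci_set_window() and access it through the BAR, rejecting
 * accesses that straddle hardware windows.
 */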
677 static int
678 ql_8021_pci_mem_read_direct(ql_adapter_state_t *ha, uint64_t off, void *data,
679 uint32_t size)
680 {
681 void *addr;
682 int ret = 0;
683 uint64_t start;
684
685 /*
686 * If attempting to access unknown address or straddle hw windows,
687 * do not access.
688 */
689 if (((start = ql_8021_pci_set_window(ha, off)) == -1UL) ||
690 (ql_8021_pci_is_same_window(ha, off + size - 1) == 0)) {
691 EL(ha, "out of bound pci memory access. offset is 0x%llx\n",
692 off);
693 return (-1);
694 }
695
696 addr = (void *)((uint8_t *)ha->nx_pcibase + start);
697
698 switch (size) {
699 case 1:
700 *(uint8_t *)data = RD_REG_BYTE(ha, addr);
701 break;
702 case 2:
703 *(uint16_t *)data = RD_REG_WORD(ha, addr);
704 break;
705 case 4:
706 *(uint32_t *)data = RD_REG_DWORD(ha, addr);
707 break;
708 case 8:
709 *(uint64_t *)data = RD_REG_DDWORD(ha, addr);
710 break;
711 default:
712 EL(ha, "invalid size=%x\n", size);
713 ret = -1;
714 break;
715 }
716
717 return (ret);
718 }
719
720 static int
721 ql_8021_pci_mem_write_direct(ql_adapter_state_t *ha, uint64_t off, void *data,
722 uint32_t size)
723 {
724 void *addr;
725 int ret = 0;
726 uint64_t start;
727
728 /*
729 * If attempting to access unknown address or straddle hw windows,
730 * do not access.
731 */
732 if (((start = ql_8021_pci_set_window(ha, off)) == -1UL) ||
733	    (ql_8021_pci_is_same_window(ha, off + size - 1) == 0)) {
734 EL(ha, "out of bound pci memory access. offset is 0x%llx\n",
735 off);
736 return (-1);
737 }
738
739 addr = (void *)((uint8_t *)ha->nx_pcibase + start);
740
741 switch (size) {
742 case 1:
743 WRT_REG_BYTE(ha, addr, *(uint8_t *)data);
744 break;
745 case 2:
746 WRT_REG_WORD(ha, addr, *(uint16_t *)data);
747 break;
748 case 4:
749 WRT_REG_DWORD(ha, addr, *(uint32_t *)data);
750 break;
751 case 8:
752 WRT_REG_DDWORD(ha, addr, *(uint64_t *)data);
753 break;
754 default:
755 EL(ha, "invalid size=%x\n", size);
756 ret = -1;
757 break;
758 }
759
760 return (ret);
761 }
762
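/*
 * Read 1, 2, 4 or 8 bytes of adapter memory.  QDR and in-bounds DDR
 * addresses are read through the MIU test agent; anything else falls
 * back to ql_8021_pci_mem_read_direct().
 */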
763 static int
764 ql_8021_pci_mem_read_2M(ql_adapter_state_t *ha, uint64_t off, void *data,
765 uint32_t size)
766 {
767 int j = 0;
768 uint32_t i, temp, sz[2], loop, shift_amount;
769 uint64_t start, end, k;
770 uint64_t off8, off0[2], val, mem_crb, word[2] = {0, 0};
771
772 /*
773 * If not MN, go check for MS or invalid.
774 */
775
776 if (off >= UNM_ADDR_QDR_NET && off <= NX_P3_ADDR_QDR_NET_MAX) {
777 mem_crb = UNM_CRB_QDR_NET;
778 } else {
779 mem_crb = UNM_CRB_DDR_NET;
780 if (ql_8021_pci_mem_bound_check(ha, off, size) == 0) {
781 return (ql_8021_pci_mem_read_direct(ha, off, data,
782 size));
783 }
784 }
785
786 if (NX_IS_REVISION_P3PLUS(ha->rev_id)) {
787 off8 = off & 0xfffffff0;
788 off0[0] = off & 0xf;
789 sz[0] = (uint32_t)(((uint64_t)size < (16 - off0[0])) ? size :
790 (16 - off0[0]));
791 shift_amount = 4;
792 } else {
793 off8 = off & 0xfffffff8;
794 off0[0] = off & 0x7;
795 sz[0] = (uint32_t)(((uint64_t)size < (8 - off0[0])) ? size :
796 (8 - off0[0]));
797 shift_amount = 3;
798 }
799 loop = (uint32_t)(((off0[0] + size - 1) >> shift_amount) + 1);
800 off0[1] = 0;
801 sz[1] = size - sz[0];
802
803 /*
804	 * don't lock here - write_wx gets the lock each time
805 * write_lock_irqsave(&adapter->adapter_lock, flags);
806 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
807 */
808
809 for (i = 0; i < loop; i++) {
810 temp = (uint32_t)(off8 + (i << shift_amount));
811 ql_8021_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
812 temp = 0;
813 ql_8021_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
814 temp = MIU_TA_CTL_ENABLE;
815 ql_8021_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
816 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
817 ql_8021_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
818
819 for (j = 0; j < MAX_CTL_CHECK; j++) {
820 ql_8021_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL, &temp);
821 if ((temp & MIU_TA_CTL_BUSY) == 0) {
822 break;
823 }
824 }
825
826 if (j >= MAX_CTL_CHECK) {
827 EL(ha, "failed to read through agent\n");
828 break;
829 }
830
831 start = off0[i] >> 2;
832 end = (off0[i] + sz[i] - 1) >> 2;
833 for (k = start; k <= end; k++) {
834 ql_8021_rd_32(ha, mem_crb + MIU_TEST_AGT_RDDATA(k),
835 &temp);
836 word[i] |= ((uint64_t)temp << (32 * (k & 1)));
837 }
838 }
839
840 /*
841 * netxen_nic_pci_change_crbwindow_128M(adapter, 1);
842 * write_unlock_irqrestore(&adapter->adapter_lock, flags);
843 */
844
845 if (j >= MAX_CTL_CHECK) {
846 return (-1);
847 }
848
849 if ((off0[0] & 7) == 0) {
850 val = word[0];
851 } else {
852 val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
853 ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
854 }
855
856 switch (size) {
857 case 1:
858 *(uint8_t *)data = (uint8_t)val;
859 break;
860 case 2:
861 *(uint16_t *)data = (uint16_t)val;
862 break;
863 case 4:
864 *(uint32_t *)data = (uint32_t)val;
865 break;
866 case 8:
867 *(uint64_t *)data = val;
868 break;
869 }
870
871 return (0);
872 }
873
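/*
 * Write 1, 2, 4 or 8 bytes of adapter memory through the MIU test
 * agent, performing a read-modify-write for unaligned or partial
 * writes.  Non-agent addresses fall back to
 * ql_8021_pci_mem_write_direct().
 */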
874 static int
875 ql_8021_pci_mem_write_2M(ql_adapter_state_t *ha, uint64_t off, void *data,
876 uint32_t size)
877 {
878 int j, ret = 0;
879 uint32_t i, temp, loop, sz[2];
880 uint32_t scale, shift_amount, p3p, startword;
881 uint64_t off8, off0, mem_crb, tmpw, word[2] = {0, 0};
882
883 /*
884 * If not MN, go check for MS or invalid.
885 */
886 if (off >= UNM_ADDR_QDR_NET && off <= NX_P3_ADDR_QDR_NET_MAX) {
887 mem_crb = UNM_CRB_QDR_NET;
888 } else {
889 mem_crb = UNM_CRB_DDR_NET;
890 if (ql_8021_pci_mem_bound_check(ha, off, size) == 0) {
891 return (ql_8021_pci_mem_write_direct(ha, off, data,
892 size));
893 }
894 }
895
896 off0 = off & 0x7;
897 sz[0] = (uint32_t)(((uint64_t)size < (8 - off0)) ? size : (8 - off0));
898 sz[1] = size - sz[0];
899
900 if (NX_IS_REVISION_P3PLUS(ha->rev_id)) {
901 off8 = off & 0xfffffff0;
902 loop = (uint32_t)((((off & 0xf) + size - 1) >> 4) + 1);
903 shift_amount = 4;
904 scale = 2;
905 p3p = 1;
906 startword = (uint32_t)((off & 0xf) / 8);
907 } else {
908 off8 = off & 0xfffffff8;
909 loop = (uint32_t)(((off0 + size - 1) >> 3) + 1);
910 shift_amount = 3;
911 scale = 1;
912 p3p = 0;
913 startword = 0;
914 }
915
916 if (p3p || (size != 8) || (off0 != 0)) {
917 for (i = 0; i < loop; i++) {
918 if (ql_8021_pci_mem_read_2M(ha, off8 +
919 (i << shift_amount), &word[i * scale], 8)) {
920 EL(ha, "8021_pci_mem_read_2M != 0\n");
921 return (-1);
922 }
923 }
924 }
925
926 switch (size) {
927 case 1:
928 tmpw = (uint64_t)(*((uint8_t *)data));
929 break;
930 case 2:
931 tmpw = (uint64_t)(*((uint16_t *)data));
932 break;
933 case 4:
934 tmpw = (uint64_t)(*((uint32_t *)data));
935 break;
936 case 8:
937 default:
938 tmpw = *((uint64_t *)data);
939 break;
940 }
941
942 if (p3p) {
943 if (sz[0] == 8) {
944 word[startword] = tmpw;
945 } else {
946 word[startword] &= ~((~(~0ULL << (sz[0] * 8))) <<
947 (off0 * 8));
948 word[startword] |= tmpw << (off0 * 8);
949 }
950 if (sz[1] != 0) {
951 word[startword + 1] &= ~(~0ULL << (sz[1] * 8));
952 word[startword + 1] |= tmpw >> (sz[0] * 8);
953 }
954 } else {
955 word[startword] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
956 word[startword] |= tmpw << (off0 * 8);
957
958 if (loop == 2) {
959 word[1] &= ~(~0ULL << (sz[1] * 8));
960 word[1] |= tmpw >> (sz[0] * 8);
961 }
962 }
963
964 /*
965	 * don't lock here - write_wx gets the lock each time
966 * write_lock_irqsave(&adapter->adapter_lock, flags);
967 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
968 */
969
970 for (i = 0; i < loop; i++) {
971 temp = (uint32_t)(off8 + (i << shift_amount));
972 ql_8021_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
973 temp = 0;
974 ql_8021_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
975 temp = (uint32_t)(word[i * scale] & 0xffffffff);
976 ql_8021_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_LO, temp);
977 temp = (uint32_t)((word[i * scale] >> 32) & 0xffffffff);
978 ql_8021_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_HI, temp);
979 if (p3p) {
980 temp = (uint32_t)(word[i * scale + 1] & 0xffffffff);
981 ql_8021_wr_32(ha,
982 mem_crb + MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
983 temp = (uint32_t)((word[i * scale + 1] >> 32) &
984 0xffffffff);
985 ql_8021_wr_32(ha,
986 mem_crb + MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
987 }
988 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
989 ql_8021_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
990 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
991 ql_8021_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
992
993 for (j = 0; j < MAX_CTL_CHECK; j++) {
994 ql_8021_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL, &temp);
995 if ((temp & MIU_TA_CTL_BUSY) == 0)
996 break;
997 }
998
999 if (j >= MAX_CTL_CHECK) {
1000 EL(ha, "failed to write through agent\n");
1001 ret = -1;
1002 break;
1003 }
1004 }
1005
1006 return (ret);
1007 }
1008
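/*
 * Convert a CRB address from the flash crb_init area to its PCI CRB
 * offset using the crb_addr_xform table; returns ADDR_ERROR if the
 * base is unknown.
 */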
1009 static uint32_t
1010 ql_8021_decode_crb_addr(ql_adapter_state_t *ha, uint32_t addr)
1011 {
1012 int i;
1013 uint32_t base_addr, offset, pci_base;
1014
1015 if (!crb_table_initialized) {
1016 ql_crb_addr_transform_setup(ha);
1017 }
1018
1019 pci_base = ADDR_ERROR;
1020 base_addr = addr & 0xfff00000;
1021 offset = addr & 0x000fffff;
1022
1023 for (i = 0; i < MAX_CRB_XFORM; i++) {
1024 if (crb_addr_xform[i] == base_addr) {
1025 pci_base = i << 20;
1026 break;
1027 }
1028 }
1029 if (pci_base == ADDR_ERROR) {
1030 return (pci_base);
1031 } else {
1032 return (pci_base + offset);
1033 }
1034 }
1035
1036 static int
1037 ql_8021_hw_lock(ql_adapter_state_t *ha, uint32_t timer)
1038 {
1039 uint32_t done = 0, timeout = 0;
1040
1041 while (!done) {
1042 /* acquire semaphore5 from PCI HW block */
1043 ql_8021_rd_32(ha, UNM_PCIE_REG(PCIE_SEM5_LOCK), &done);
1044 if (done == 1) {
1045 break;
1046 }
1047 if (timeout >= timer) {
1048 EL(ha, "timeout\n");
1049 return (-1);
1050 }
1051 timeout++;
1052
1053 /*
1054 * Yield CPU
1055 */
1056 delay(1);
1057 }
1058
1059 return (0);
1060 }
1061
1062 static void
1063 ql_8021_hw_unlock(ql_adapter_state_t *ha)
1064 {
1065 ql_8021_rd_32(ha, UNM_PCIE_REG(PCIE_SEM5_UNLOCK), NULL);
1066 }
1067
1068 static int
1069 ql_8021_rom_lock(ql_adapter_state_t *ha)
1070 {
1071 uint32_t done = 0, timeout = 0;
1072
1073 while (!done) {
1074 /* acquire semaphore2 from PCI HW block */
1075 ql_8021_rd_32(ha, UNM_PCIE_REG(PCIE_SEM2_LOCK), &done);
1076 if (done == 1) {
1077 break;
1078 }
1079 if (timeout >= ROM_LOCK_TIMEOUT) {
1080 EL(ha, "timeout\n");
1081 return (-1);
1082 }
1083 timeout++;
1084
1085 /*
1086 * Yield CPU
1087 */
1088 delay(1);
1089 }
1090 ql_8021_wr_32(ha, UNM_ROM_LOCK_ID, ROM_LOCK_DRIVER);
1091
1092 return (0);
1093 }
1094
1095 static void
1096 ql_8021_rom_unlock(ql_adapter_state_t *ha)
1097 {
1098 ql_8021_rd_32(ha, UNM_PCIE_REG(PCIE_SEM2_UNLOCK), NULL);
1099 }
1100
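/*
 * Poll the ROMUSB glue status register until the ROM done bit (bit 1)
 * is set, or fail after ROM_MAX_TIMEOUT polls.
 */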
1101 static int
1102 ql_8021_wait_rom_done(ql_adapter_state_t *ha)
1103 {
1104 uint32_t timeout = 0, done = 0;
1105
1106 while (done == 0) {
1107 ql_8021_rd_32(ha, UNM_ROMUSB_GLB_STATUS, &done);
1108 done &= 2;
1109 timeout++;
1110 if (timeout >= ROM_MAX_TIMEOUT) {
1111 EL(ha, "Timeout reached waiting for rom done\n");
1112 return (-1);
1113 }
1114 }
1115
1116 return (0);
1117 }
1118
1119 static int
1120 ql_8021_wait_flash_done(ql_adapter_state_t *ha)
1121 {
1122 clock_t timer;
1123 uint32_t status;
1124
1125 for (timer = 500000; timer; timer--) {
1126 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_ABYTE_CNT, 0);
1127 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_INSTR_OPCODE,
1128 UNM_ROMUSB_ROM_RDSR_INSTR);
1129 if (ql_8021_wait_rom_done(ha)) {
1130 EL(ha, "Error waiting for rom done2\n");
1131 return (-1);
1132 }
1133
1134 /* Get status. */
1135 ql_8021_rd_32(ha, UNM_ROMUSB_ROM_RDATA, &status);
1136 if (!(status & BIT_0)) {
1137 return (0);
1138 }
1139 drv_usecwait(10);
1140 }
1141
1142 EL(ha, "timeout status=%x\n", status);
1143 return (-1);
1144 }
1145
1146 static int
1147 ql_8021_do_rom_fast_read(ql_adapter_state_t *ha, uint32_t addr, uint32_t *valp)
1148 {
1149 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_ADDRESS, addr);
1150 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
1151 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_ABYTE_CNT, 3);
1152 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_INSTR_OPCODE,
1153 UNM_ROMUSB_ROM_FAST_RD_INSTR);
1154 if (ql_8021_wait_rom_done(ha)) {
1155 EL(ha, "Error waiting for rom done\n");
1156 return (-1);
1157 }
1158 /* reset abyte_cnt and dummy_byte_cnt */
1159 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
1160 drv_usecwait(10);
1161 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_ABYTE_CNT, 0);
1162
1163 ql_8021_rd_32(ha, UNM_ROMUSB_ROM_RDATA, valp);
1164
1165 return (0);
1166 }
1167
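/*
 * Locked wrapper around ql_8021_do_rom_fast_read(); retries the ROM
 * semaphore before performing the read and releases it afterwards.
 */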
1168 int
1169 ql_8021_rom_fast_read(ql_adapter_state_t *ha, uint32_t addr, uint32_t *valp)
1170 {
1171 int ret, loops = 0;
1172
1173 while ((ql_8021_rom_lock(ha) != 0) && (loops < 500000)) {
1174 drv_usecwait(10);
1175 loops++;
1176 }
1177	if (loops >= 500000) {
1178 EL(ha, "rom_lock failed\n");
1179 return (-1);
1180 }
1181 ret = ql_8021_do_rom_fast_read(ha, addr, valp);
1182 ql_8021_rom_unlock(ha);
1183
1184 return (ret);
1185 }
1186
1187 static int
1188 ql_8021_do_rom_write(ql_adapter_state_t *ha, uint32_t addr, uint32_t data)
1189 {
1190 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_ABYTE_CNT, 0);
1191 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_INSTR_OPCODE,
1192 UNM_ROMUSB_ROM_WREN_INSTR);
1193 if (ql_8021_wait_rom_done(ha)) {
1194 EL(ha, "Error waiting for rom done\n");
1195 return (-1);
1196 }
1197
1198 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_WDATA, data);
1199 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_ADDRESS, addr);
1200 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_ABYTE_CNT, 3);
1201 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_INSTR_OPCODE,
1202 UNM_ROMUSB_ROM_PP_INSTR);
1203 if (ql_8021_wait_rom_done(ha)) {
1204 EL(ha, "Error waiting for rom done1\n");
1205 return (-1);
1206 }
1207
1208 if (ql_8021_wait_flash_done(ha)) {
1209 EL(ha, "Error waiting for flash done\n");
1210 return (-1);
1211 }
1212
1213 return (0);
1214 }
1215
1216 static int
1217 ql_8021_do_rom_erase(ql_adapter_state_t *ha, uint32_t addr)
1218 {
1219 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_ABYTE_CNT, 0);
1220 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_INSTR_OPCODE,
1221 UNM_ROMUSB_ROM_WREN_INSTR);
1222 if (ql_8021_wait_rom_done(ha)) {
1223 EL(ha, "Error waiting for rom done\n");
1224 return (-1);
1225 }
1226
1227 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_ADDRESS, addr);
1228 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_ABYTE_CNT, 3);
1229 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_INSTR_OPCODE,
1230 UNM_ROMUSB_ROM_SE_INSTR);
1231 if (ql_8021_wait_rom_done(ha)) {
1232 EL(ha, "Error waiting for rom done1\n");
1233 return (-1);
1234 }
1235
1236 if (ql_8021_wait_flash_done(ha)) {
1237 EL(ha, "Error waiting for flash done\n");
1238 return (-1);
1239 }
1240
1241 return (0);
1242 }
1243
1244 int
1245 ql_8021_rom_read(ql_adapter_state_t *ha, uint32_t addr, uint32_t *bp)
1246 {
1247 int ret;
1248
1249 ret = ql_8021_rom_fast_read(ha, addr << 2, bp) == 0 ? QL_SUCCESS :
1250 QL_FUNCTION_FAILED;
1251
1252 return (ret);
1253 }
1254
1255 int
1256 ql_8021_rom_write(ql_adapter_state_t *ha, uint32_t addr, uint32_t data)
1257 {
1258 int ret, loops = 0;
1259
1260 while ((ql_8021_rom_lock(ha) != 0) && (loops < 500000)) {
1261 drv_usecwait(10);
1262 loops++;
1263 }
1264	if (loops >= 500000) {
1265 EL(ha, "rom_lock failed\n");
1266 ret = QL_FUNCTION_TIMEOUT;
1267 } else {
1268 ret = ql_8021_do_rom_write(ha, addr << 2, data) == 0 ?
1269 QL_SUCCESS : QL_FUNCTION_FAILED;
1270 ql_8021_rom_unlock(ha);
1271 }
1272
1273 return (ret);
1274 }
1275
1276 int
1277 ql_8021_rom_erase(ql_adapter_state_t *ha, uint32_t addr)
1278 {
1279 int ret, loops = 0;
1280
1281 while ((ql_8021_rom_lock(ha) != 0) && (loops < 500000)) {
1282 drv_usecwait(10);
1283 loops++;
1284 }
1285	if (loops >= 500000) {
1286 EL(ha, "rom_lock failed\n");
1287 ret = QL_FUNCTION_TIMEOUT;
1288 } else {
1289 ret = ql_8021_do_rom_erase(ha, addr << 2) == 0 ? QL_SUCCESS :
1290 QL_FUNCTION_FAILED;
1291 ql_8021_rom_unlock(ha);
1292 }
1293
1294 return (ret);
1295 }
1296
1297 int
1298 ql_8021_rom_wrsr(ql_adapter_state_t *ha, uint32_t data)
1299 {
1300 int ret = QL_SUCCESS, loops = 0;
1301
1302 while ((ql_8021_rom_lock(ha) != 0) && (loops < 500000)) {
1303 drv_usecwait(10);
1304 loops++;
1305 }
1306	if (loops >= 500000) {
1307 EL(ha, "rom_lock failed\n");
1308 ret = QL_FUNCTION_TIMEOUT;
1309 } else {
1310 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_ABYTE_CNT, 0);
1311 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_INSTR_OPCODE,
1312 UNM_ROMUSB_ROM_WREN_INSTR);
1313 if (ql_8021_wait_rom_done(ha)) {
1314 EL(ha, "Error waiting for rom done\n");
1315 ret = QL_FUNCTION_FAILED;
1316 } else {
1317 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_WDATA, data);
1318 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_ABYTE_CNT, 0);
1319 ql_8021_wr_32(ha, UNM_ROMUSB_ROM_INSTR_OPCODE,
1320 UNM_ROMUSB_ROM_WRSR_INSTR);
1321 if (ql_8021_wait_rom_done(ha)) {
1322 EL(ha, "Error waiting for rom done1\n");
1323 ret = QL_FUNCTION_FAILED;
1324 } else if (ql_8021_wait_flash_done(ha)) {
1325 EL(ha, "Error waiting for flash done\n");
1326 ret = QL_FUNCTION_FAILED;
1327 }
1328 }
1329 ql_8021_rom_unlock(ha);
1330 }
1331
1332 return (ret);
1333 }
1334
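/*
 * Wait for the firmware command peg to report initialization complete
 * (or ACK) in CRB_CMDPEG_STATE, polling every 500 msec for up to 60
 * retries.
 */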
1335 static int
1336 ql_8021_phantom_init(ql_adapter_state_t *ha)
1337 {
1338 uint32_t val = 0, err = 0;
1339 int retries = 60;
1340
1341 do {
1342 ql_8021_rd_32(ha, CRB_CMDPEG_STATE, &val);
1343
1344 switch (val) {
1345 case PHAN_INITIALIZE_COMPLETE:
1346 case PHAN_INITIALIZE_ACK:
1347 EL(ha, "success=%xh\n", val);
1348 return (0);
1349 case PHAN_INITIALIZE_FAILED:
1350 EL(ha, "PHAN_INITIALIZE_FAILED\n");
1351 err = 1;
1352 break;
1353 default:
1354 break;
1355 }
1356
1357 if (err) {
1358 break;
1359 }
1360 /* 500 msec wait */
1361 delay(50);
1362
1363 } while (--retries);
1364
1365 if (!err) {
1366 ql_8021_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
1367 }
1368
1369 EL(ha, "firmware init failed=%x\n", val);
1370 return (-1);
1371 }
1372
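/*
 * Issue a global software reset and then replay the CRB initialization
 * (address, data) pairs stored in flash, skipping registers that must
 * not be overwritten (PCI setup, clocks, cold-boot magic, etc.).
 */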
1373 static int
1374 ql_8021_pinit_from_rom(ql_adapter_state_t *ha)
1375 {
1376 int init_delay = 0;
1377 struct crb_addr_pair *buf;
1378 uint32_t offset, off, i, n, addr, val;
1379
1380 /* Grab the lock so that no one can read flash when we reset the chip */
1381 (void) ql_8021_rom_lock(ha);
1382 ql_8021_wr_32(ha, UNM_ROMUSB_GLB_SW_RESET, 0xffffffff);
1383 /* Just in case it was held when we reset the chip */
1384 ql_8021_rom_unlock(ha);
1385 delay(100);
1386
1387 if (ql_8021_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafe ||
1388 ql_8021_rom_fast_read(ha, 4, &n) != 0) {
1389 EL(ha, "ERROR Reading crb_init area: n: %08x\n", n);
1390 return (-1);
1391 }
1392 offset = n & 0xffff;
1393 n = (n >> 16) & 0xffff;
1394 if (n >= 1024) {
1395 EL(ha, "n=0x%x Error! NetXen card flash not initialized\n", n);
1396 return (-1);
1397 }
1398
1399 buf = kmem_zalloc(n * sizeof (struct crb_addr_pair), KM_SLEEP);
1400 if (buf == NULL) {
1401 EL(ha, "Unable to zalloc memory\n");
1402 return (-1);
1403 }
1404
1405 for (i = 0; i < n; i++) {
1406 if (ql_8021_rom_fast_read(ha, 8 * i + 4 * offset, &val) != 0 ||
1407 ql_8021_rom_fast_read(ha, 8 * i + 4 * offset + 4, &addr) !=
1408 0) {
1409 kmem_free(buf, n * sizeof (struct crb_addr_pair));
1410			EL(ha, "ql_8021_rom_fast_read != 0\n");
1411 return (-1);
1412 }
1413
1414 buf[i].addr = addr;
1415 buf[i].data = val;
1416 }
1417
1418 for (i = 0; i < n; i++) {
1419 off = ql_8021_decode_crb_addr(ha, buf[i].addr);
1420 if (off == ADDR_ERROR) {
1421 EL(ha, "Err: Unknown addr: 0x%lx\n", buf[i].addr);
1422 continue;
1423 }
1424 off += UNM_PCI_CRBSPACE;
1425
1426 if (off & 1) {
1427 continue;
1428 }
1429
1430 /* skipping cold reboot MAGIC */
1431 if (off == UNM_RAM_COLD_BOOT) {
1432 continue;
1433 }
1434 if (off == (UNM_CRB_I2C0 + 0x1c)) {
1435 continue;
1436 }
1437 /* do not reset PCI */
1438 if (off == (ROMUSB_GLB + 0xbc)) {
1439 continue;
1440 }
1441 if (off == (ROMUSB_GLB + 0xa8)) {
1442 continue;
1443 }
1444 if (off == (ROMUSB_GLB + 0xc8)) { /* core clock */
1445 continue;
1446 }
1447 if (off == (ROMUSB_GLB + 0x24)) { /* MN clock */
1448 continue;
1449 }
1450 if (off == (ROMUSB_GLB + 0x1c)) { /* MS clock */
1451 continue;
1452 }
1453 if ((off & 0x0ff00000) == UNM_CRB_DDR_NET) {
1454 continue;
1455 }
1456 if (off == (UNM_CRB_PEG_NET_1 + 0x18) &&
1457 !NX_IS_REVISION_P3PLUS(ha->rev_id)) {
1458 buf[i].data = 0x1020;
1459 }
1460 /* skip the function enable register */
1461 if (off == UNM_PCIE_REG(PCIE_SETUP_FUNCTION)) {
1462 continue;
1463 }
1464 if (off == UNM_PCIE_REG(PCIE_SETUP_FUNCTION2)) {
1465 continue;
1466 }
1467 if ((off & 0x0ff00000) == UNM_CRB_SMB) {
1468 continue;
1469 }
1470
1471 /* After writing this register, HW needs time for CRB */
1472 /* to quiet down (else crb_window returns 0xffffffff) */
1473 init_delay = 1;
1474 if (off == UNM_ROMUSB_GLB_SW_RESET) {
1475 init_delay = 100; /* Sleep 1000 msecs */
1476 }
1477
1478 ql_8021_wr_32(ha, off, buf[i].data);
1479
1480 delay(init_delay);
1481 }
1482 kmem_free(buf, n * sizeof (struct crb_addr_pair));
1483
1484 /* disable_peg_cache_all */
1485
1486 /* p2dn replyCount */
1487 ql_8021_wr_32(ha, UNM_CRB_PEG_NET_D + 0xec, 0x1e);
1488 /* disable_peg_cache 0 */
1489 ql_8021_wr_32(ha, UNM_CRB_PEG_NET_D + 0x4c, 8);
1490 /* disable_peg_cache 1 */
1491 ql_8021_wr_32(ha, UNM_CRB_PEG_NET_I + 0x4c, 8);
1492
1493 /* peg_clr_all */
1494 /* peg_clr 0 */
1495 ql_8021_wr_32(ha, UNM_CRB_PEG_NET_0 + 0x8, 0);
1496 ql_8021_wr_32(ha, UNM_CRB_PEG_NET_0 + 0xc, 0);
1497 /* peg_clr 1 */
1498 ql_8021_wr_32(ha, UNM_CRB_PEG_NET_1 + 0x8, 0);
1499 ql_8021_wr_32(ha, UNM_CRB_PEG_NET_1 + 0xc, 0);
1500 /* peg_clr 2 */
1501 ql_8021_wr_32(ha, UNM_CRB_PEG_NET_2 + 0x8, 0);
1502 ql_8021_wr_32(ha, UNM_CRB_PEG_NET_2 + 0xc, 0);
1503 /* peg_clr 3 */
1504 ql_8021_wr_32(ha, UNM_CRB_PEG_NET_3 + 0x8, 0);
1505 ql_8021_wr_32(ha, UNM_CRB_PEG_NET_3 + 0xc, 0);
1506
1507 return (0);
1508 }
1509
1510 static int
1511 ql_8021_load_from_flash(ql_adapter_state_t *ha)
1512 {
1513 int i;
1514 uint32_t flashaddr, memaddr;
1515 uint32_t high, low, size;
1516 uint64_t data;
1517
1518 size = ha->bootloader_size / 2;
1519 memaddr = flashaddr = ha->bootloader_addr << 2;
1520
1521 for (i = 0; i < size; i++) {
1522 if ((ql_8021_rom_fast_read(ha, flashaddr, &low)) ||
1523 (ql_8021_rom_fast_read(ha, flashaddr + 4, &high))) {
1524 EL(ha, "ql_8021_rom_fast_read != 0\n");
1525 return (-1);
1526 }
1527 data = ((uint64_t)high << 32) | low;
1528 if (ql_8021_pci_mem_write_2M(ha, memaddr, &data, 8)) {
1529 EL(ha, "qla_fc_8021_pci_mem_write_2M != 0\n");
1530 return (-1);
1531 }
1532 flashaddr += 8;
1533 memaddr += 8;
1534
1535 /* Allow other system activity. */
1536 if (i % 0x1000 == 0) {
1537 /* Delay for 1 tick (10ms). */
1538 delay(1);
1539 }
1540 }
1541
1542 #if 0
1543 /* Allow other system activity, delay for 1 tick (10ms). */
1544 delay(1);
1545
1546 size = ha->flash_fw_size / 2;
1547 memaddr = flashaddr = ha->flash_fw_addr << 2;
1548
1549 for (i = 0; i < size; i++) {
1550 if ((ql_8021_rom_fast_read(ha, flashaddr, &low)) ||
1551 (ql_8021_rom_fast_read(ha, flashaddr + 4, &high))) {
1552 EL(ha, "ql_8021_rom_fast_read3 != 0\n");
1553 return (-1);
1554 }
1555 data = ((uint64_t)high << 32) | low;
1556 (void) ql_8021_pci_mem_write_2M(ha, memaddr, &data, 8);
1557 flashaddr += 8;
1558 memaddr += 8;
1559
1560 /* Allow other system activity. */
1561 if (i % 0x1000 == 0) {
1562 /* Delay for 1 tick (10ms). */
1563 delay(1);
1564 }
1565 }
1566 #endif
1567 return (0);
1568 }
1569
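/*
 * Load the bootloader and firmware images from the driver-supplied
 * risc_fw[0].code buffer into adapter memory, 8 bytes at a time, via
 * ql_8021_pci_mem_write_2M().
 */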
1570 static int
1571 ql_8021_load_firmware(ql_adapter_state_t *ha)
1572 {
1573 uint64_t data;
1574 uint32_t i, flashaddr, size;
1575 uint8_t *bp, n, *dp;
1576
1577 bp = (uint8_t *)(ha->risc_fw[0].code);
1578 dp = (uint8_t *)&size;
1579 for (n = 0; n < 4; n++) {
1580 dp[n] = *bp++;
1581 }
1582 LITTLE_ENDIAN_32(&size);
1583 EL(ha, "signature=%x\n", size);
1584
1585 size = ha->bootloader_size / 2;
1586 flashaddr = ha->bootloader_addr << 2;
1587
1588 bp = (uint8_t *)(ha->risc_fw[0].code + flashaddr);
1589 dp = (uint8_t *)&data;
1590 for (i = 0; i < size; i++) {
1591 for (n = 0; n < 8; n++) {
1592 dp[n] = *bp++;
1593 }
1594 LITTLE_ENDIAN_64(&data);
1595 if (ql_8021_pci_mem_write_2M(ha, flashaddr, &data, 8)) {
1596 EL(ha, "qla_fc_8021_pci_mem_write_2M != 0\n");
1597 return (-1);
1598 }
1599 flashaddr += 8;
1600 }
1601
1602 bp = (uint8_t *)(ha->risc_fw[0].code + FW_SIZE_OFFSET);
1603 dp = (uint8_t *)&size;
1604 for (n = 0; n < 4; n++) {
1605 dp[n] = *bp++;
1606 }
1607 LITTLE_ENDIAN_32(&size);
1608 EL(ha, "IMAGE_START size=%llx\n", size);
1609 size = (size + 7) / 8;
1610
1611 flashaddr = ha->flash_fw_addr << 2;
1612 bp = (uint8_t *)(ha->risc_fw[0].code + flashaddr);
1613
1614 dp = (uint8_t *)&data;
1615 for (i = 0; i < size; i++) {
1616 for (n = 0; n < 8; n++) {
1617 dp[n] = *bp++;
1618 }
1619 LITTLE_ENDIAN_64(&data);
1620 if (ql_8021_pci_mem_write_2M(ha, flashaddr, &data, 8)) {
1621 EL(ha, "qla_fc_8021_pci_mem_write_2M != 0\n");
1622 return (-1);
1623 }
1624 flashaddr += 8;
1625 }
1626
1627 return (0);
1628 }
1629
1630 static int
1631 ql_8021_init_p3p(ql_adapter_state_t *ha)
1632 {
1633 uint32_t data;
1634
1635 /* ??? */
1636 ql_8021_wr_32(ha, UNM_PORT_MODE_ADDR, UNM_PORT_MODE_AUTO_NEG);
1637 delay(drv_usectohz(1000000));
1638
1639 /* CAM RAM Cold Boot Register */
1640 ql_8021_rd_32(ha, UNM_RAM_COLD_BOOT, &data);
1641 if (data == 0x55555555) {
1642 ql_8021_rd_32(ha, UNM_ROMUSB_GLB_SW_RESET, &data);
1643 if (data != 0x80000f) {
1644 EL(ha, "CRB_UNM_GLB_SW_RST=%x exit\n", data);
1645 return (-1);
1646 }
1647 ql_8021_wr_32(ha, UNM_RAM_COLD_BOOT, 0);
1648 }
1649 ql_8021_rd_32(ha, UNM_ROMUSB_GLB_PEGTUNE_DONE, &data);
1650 data |= 1;
1651 ql_8021_wr_32(ha, UNM_ROMUSB_GLB_PEGTUNE_DONE, data);
1652
1653 /*
1654 * ???
1655 * data = ha->pci_bus_addr | BIT_31;
1656 * ql_8021_wr_32(ha, UNM_BUS_DEV_NO, data);
1657 */
1658
1659 return (0);
1660 }
1661
1662 /* ARGSUSED */
1663 void
1664 ql_8021_reset_chip(ql_adapter_state_t *ha)
1665 {
1666 /*
1667	 * Disabling interrupts does not work on a per-function basis;
1668	 * leave them enabled.
1669 */
1670 ql_8021_enable_intrs(ha);
1671
1672 ADAPTER_STATE_LOCK(ha);
1673 ha->flags |= INTERRUPTS_ENABLED;
1674 ADAPTER_STATE_UNLOCK(ha);
1675 if (!(ha->task_daemon_flags & ISP_ABORT_NEEDED)) {
1676 (void) ql_stop_firmware(ha);
1677 }
1678 }
1679
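/*
 * Quiesce the hardware blocks, reset the ASIC and re-run the CRB init
 * from ROM, then bring up firmware according to 'type' (0 = p3p/BIOS
 * init, 1 = load from flash, 2 = load the driver image) and wait for
 * the command peg to initialize.
 */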
1680 static int
1681 ql_8021_reset_hw(ql_adapter_state_t *ha, int type)
1682 {
1683 int ret;
1684 uint32_t rst;
1685
1686 /* scrub dma mask expansion register */
1687 ql_8021_wr_32(ha, CRB_DMA_SHIFT, 0x55555555);
1688
1689 /* Overwrite stale initialization register values */
1690 ql_8021_wr_32(ha, CRB_CMDPEG_STATE, 0);
1691 ql_8021_wr_32(ha, CRB_RCVPEG_STATE, 0);
1692 ql_8021_wr_32(ha, UNM_PEG_HALT_STATUS1, 0);
1693 ql_8021_wr_32(ha, UNM_PEG_HALT_STATUS2, 0);
1694
1695 /*
1696 * This reset sequence is to provide a graceful shutdown of the
1697 * different hardware blocks prior to performing an ASIC Reset,
1698 * has to be done before writing 0xffffffff to ASIC_RESET.
1699 */
1700 ql_8021_wr_32(ha, UNM_CRB_I2Q + 0x10, 0);
1701 ql_8021_wr_32(ha, UNM_CRB_I2Q + 0x14, 0);
1702 ql_8021_wr_32(ha, UNM_CRB_I2Q + 0x18, 0);
1703 ql_8021_wr_32(ha, UNM_CRB_I2Q + 0x1c, 0);
1704 ql_8021_wr_32(ha, UNM_CRB_I2Q + 0x20, 0);
1705 ql_8021_wr_32(ha, UNM_CRB_I2Q + 0x24, 0);
1706 ql_8021_wr_32(ha, UNM_CRB_NIU + 0x40, 0xff);
1707 ql_8021_wr_32(ha, UNM_CRB_NIU + 0x70000, 0x0);
1708 ql_8021_wr_32(ha, UNM_CRB_NIU + 0x80000, 0x0);
1709 ql_8021_wr_32(ha, UNM_CRB_NIU + 0x90000, 0x0);
1710 ql_8021_wr_32(ha, UNM_CRB_NIU + 0xa0000, 0x0);
1711 ql_8021_wr_32(ha, UNM_CRB_NIU + 0xb0000, 0x0);
1712 ql_8021_wr_32(ha, UNM_CRB_SRE + 0x1000, 0x28ff000c);
1713 ql_8021_wr_32(ha, UNM_CRB_EPG + 0x1300, 0x1);
1714 ql_8021_wr_32(ha, UNM_CRB_TIMER + 0x0, 0x0);
1715 ql_8021_wr_32(ha, UNM_CRB_TIMER + 0x8, 0x0);
1716 ql_8021_wr_32(ha, UNM_CRB_TIMER + 0x10, 0x0);
1717 ql_8021_wr_32(ha, UNM_CRB_TIMER + 0x18, 0x0);
1718 ql_8021_wr_32(ha, UNM_CRB_TIMER + 0x100, 0x0);
1719 ql_8021_wr_32(ha, UNM_CRB_TIMER + 0x200, 0x0);
1720 ql_8021_wr_32(ha, UNM_CRB_PEG_NET_0 + 0x3C, 0x1);
1721 ql_8021_wr_32(ha, UNM_CRB_PEG_NET_1 + 0x3C, 0x1);
1722 ql_8021_wr_32(ha, UNM_CRB_PEG_NET_2 + 0x3C, 0x1);
1723 ql_8021_wr_32(ha, UNM_CRB_PEG_NET_3 + 0x3C, 0x1);
1724 ql_8021_wr_32(ha, UNM_CRB_PEG_NET_4 + 0x3C, 0x1);
1725 delay(1);
1726
1727 ret = ql_8021_pinit_from_rom(ha);
1728 if (ret) {
1729 EL(ha, "pinit_from_rom ret=%d\n", ret);
1730 return (ret);
1731 }
1732 delay(1);
1733
1734 /* Bring QM and CAMRAM out of reset */
1735 ql_8021_rd_32(ha, UNM_ROMUSB_GLB_SW_RESET, &rst);
1736 rst &= ~((1 << 28) | (1 << 24));
1737 ql_8021_wr_32(ha, UNM_ROMUSB_GLB_SW_RESET, rst);
1738
1739 switch (type) {
1740 case 0:
1741 ret = ql_8021_init_p3p(ha);
1742 break;
1743 case 1:
1744 ret = ql_8021_load_from_flash(ha);
1745 break;
1746 case 2:
1747 ret = ql_8021_load_firmware(ha);
1748 break;
1749 }
1750 delay(1);
1751
1752 ql_8021_wr_32(ha, UNM_CRB_PEG_NET_0 + 0x18, 0x1020);
1753 ql_8021_wr_32(ha, UNM_ROMUSB_GLB_SW_RESET, 0x80001e);
1754
1755 if (ret) {
1756 EL(ha, "type=%d, ret=%d\n", type, ret);
1757 } else {
1758 ret = ql_8021_phantom_init(ha);
1759 }
1760 return (ret);
1761 }
1762
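/*
 * Select the firmware source (driver image if present, otherwise
 * flash), reset and reload the hardware, then clear mailbox state,
 * re-enable interrupts and record the running firmware version.
 */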
1763 static int
1764 ql_8021_load_fw(ql_adapter_state_t *ha)
1765 {
1766 int rv = 0;
1767
1768 GLOBAL_HW_LOCK();
1769 if (ha->risc_fw[0].code) {
1770 EL(ha, "from driver\n");
1771 rv = ql_8021_reset_hw(ha, 2);
1772 } else {
1773 /*
1774 * BIOS method
1775 * ql_8021_reset_hw(ha, 0)
1776 */
1777 EL(ha, "from flash\n");
1778 rv = ql_8021_reset_hw(ha, 1);
1779 }
1780 if (rv == 0) {
1781 ql_8021_wr_32(ha, CRB_DMA_SHIFT, 0x55555555);
1782 ql_8021_wr_32(ha, UNM_PEG_HALT_STATUS1, 0x0);
1783 ql_8021_wr_32(ha, UNM_PEG_HALT_STATUS2, 0x0);
1784
1785 GLOBAL_HW_UNLOCK();
1786
1787 ADAPTER_STATE_LOCK(ha);
1788 ha->flags &= ~INTERRUPTS_ENABLED;
1789 ADAPTER_STATE_UNLOCK(ha);
1790
1791 /* clear the mailbox command pointer. */
1792 INTR_LOCK(ha);
1793 ha->mcp = NULL;
1794 INTR_UNLOCK(ha);
1795
1796 MBX_REGISTER_LOCK(ha);
1797 ha->mailbox_flags = (uint8_t)(ha->mailbox_flags &
1798 ~(MBX_BUSY_FLG | MBX_WANT_FLG | MBX_ABORT | MBX_INTERRUPT));
1799 MBX_REGISTER_UNLOCK(ha);
1800
1801 (void) ql_8021_enable_intrs(ha);
1802
1803 ADAPTER_STATE_LOCK(ha);
1804 ha->flags |= INTERRUPTS_ENABLED;
1805 ADAPTER_STATE_UNLOCK(ha);
1806 } else {
1807 GLOBAL_HW_UNLOCK();
1808 }
1809
1810 if (rv == 0) {
1811 ql_8021_rd_32(ha, UNM_FW_VERSION_MAJOR, &ha->fw_major_version);
1812 ql_8021_rd_32(ha, UNM_FW_VERSION_MINOR, &ha->fw_minor_version);
1813 ql_8021_rd_32(ha, UNM_FW_VERSION_SUB, &ha->fw_subminor_version);
1814 EL(ha, "fw v%d.%02d.%02d\n", ha->fw_major_version,
1815 ha->fw_minor_version, ha->fw_subminor_version);
1816 } else {
1817 EL(ha, "status = -1\n");
1818 }
1819
1820 return (rv);
1821 }
1822
1823 void
1824 ql_8021_clr_hw_intr(ql_adapter_state_t *ha)
1825 {
1826 ql_8021_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
1827 ql_8021_rd_32(ha, ISR_INT_VECTOR, NULL);
1828 ql_8021_rd_32(ha, ISR_INT_VECTOR, NULL);
1829 }
1830
1831 void
1832 ql_8021_clr_fw_intr(ql_adapter_state_t *ha)
1833 {
1834 WRT32_IO_REG(ha, nx_risc_int, 0);
1835 ql_8021_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xfbff);
1836 }
1837
1838 void
1839 ql_8021_enable_intrs(ql_adapter_state_t *ha)
1840 {
1841 GLOBAL_HW_LOCK();
1842 ql_8021_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
1843 GLOBAL_HW_UNLOCK();
1844 if (!(ha->task_daemon_flags & ISP_ABORT_NEEDED)) {
1845 (void) ql_toggle_interrupt(ha, 1);
1846 }
1847 }
1848
1849 void
1850 ql_8021_disable_intrs(ql_adapter_state_t *ha)
1851 {
1852 (void) ql_toggle_interrupt(ha, 0);
1853 GLOBAL_HW_LOCK();
1854 ql_8021_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
1855 GLOBAL_HW_UNLOCK();
1856 }
1857
1858 void
1859 ql_8021_update_crb_int_ptr(ql_adapter_state_t *ha)
1860 {
1861 struct legacy_intr_set *nx_legacy_intr;
1862
1863 ha->qdr_sn_window = (uint32_t)-1;
1864 nx_legacy_intr = &legacy_intr[ha->pci_function_number];
1865
1866 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
1867 ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
1868 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
1869 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
1870 }
1871
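/*
 * CRB_DRV_ACTIVE and CRB_DRV_STATE give each PCI function its own
 * nibble (pci_function_number * 4): the "1 <<" bit below marks the
 * driver active (or, in CRB_DRV_STATE, reset-acknowledged), and the
 * "2 <<" bit written by the IDC handler marks quiescence acknowledged.
 */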
1872 void
1873 ql_8021_set_drv_active(ql_adapter_state_t *ha)
1874 {
1875 uint32_t val;
1876
1877 if (ql_8021_hw_lock(ha, IDC_LOCK_TIMEOUT)) {
1878 return;
1879 }
1880
1881 ql_8021_rd_32(ha, CRB_DRV_ACTIVE, &val);
1882 if (val == 0xffffffff) {
1883 val = (1 << (ha->pci_function_number * 4));
1884 } else {
1885 val |= (1 << (ha->pci_function_number * 4));
1886 }
1887 ql_8021_wr_32(ha, CRB_DRV_ACTIVE, val);
1888
1889 ql_8021_hw_unlock(ha);
1890 }
1891
1892 void
1893 ql_8021_clr_drv_active(ql_adapter_state_t *ha)
1894 {
1895 uint32_t val;
1896
1897 if (ql_8021_hw_lock(ha, IDC_LOCK_TIMEOUT)) {
1898 return;
1899 }
1900
1901 ql_8021_rd_32(ha, CRB_DRV_ACTIVE, &val);
1902 val &= ~(1 << (ha->pci_function_number * 4));
1903 ql_8021_wr_32(ha, CRB_DRV_ACTIVE, val);
1904
1905 ql_8021_hw_unlock(ha);
1906 }
1907
1908 static void
1909 ql_8021_need_reset_handler(ql_adapter_state_t *ha)
1910 {
1911 uint32_t drv_state, drv_active, cnt;
1912
1913 ql_8021_rd_32(ha, CRB_DRV_STATE, &drv_state);
1914 if (drv_state == 0xffffffff) {
1915 drv_state = 0;
1916 }
1917 if (!(ha->ql_dump_state & QL_DUMPING)) {
1918 drv_state |= (1 << (ha->pci_function_number * 4));
1919 }
1920 ql_8021_wr_32(ha, CRB_DRV_STATE, drv_state);
1921
1922 for (cnt = 60; cnt; cnt--) {
1923 ql_8021_hw_unlock(ha);
1924 delay(100);
1925 (void) ql_8021_hw_lock(ha, IDC_LOCK_TIMEOUT);
1926
1927 ql_8021_rd_32(ha, CRB_DRV_STATE, &drv_state);
1928 ql_8021_rd_32(ha, CRB_DRV_ACTIVE, &drv_active);
1929 if (ha->ql_dump_state & QL_DUMPING) {
1930 drv_state |= (1 << (ha->pci_function_number * 4));
1931 }
1932 if (drv_state == drv_active) {
1933 if (ha->ql_dump_state & QL_DUMPING) {
1934 ql_8021_wr_32(ha, CRB_DRV_STATE, drv_state);
1935 }
1936 break;
1937 }
1938 }
1939 }
1940
1941 int
1942 ql_8021_fw_reload(ql_adapter_state_t *ha)
1943 {
1944 int rval;
1945
1946 (void) ql_stall_driver(ha, BIT_0);
1947
1948 (void) ql_8021_hw_lock(ha, IDC_LOCK_TIMEOUT);
1949 ql_8021_wr_32(ha, CRB_DEV_STATE, NX_DEV_INITIALIZING);
1950 ql_8021_hw_unlock(ha);
1951
1952 rval = ql_8021_load_fw(ha) == 0 ? NX_DEV_READY : NX_DEV_FAILED;
1953
1954 (void) ql_8021_hw_lock(ha, IDC_LOCK_TIMEOUT);
1955 ql_8021_wr_32(ha, CRB_DEV_STATE, rval);
1956 ql_8021_hw_unlock(ha);
1957
1958 TASK_DAEMON_LOCK(ha);
1959 ha->task_daemon_flags &= ~(TASK_DAEMON_STALLED_FLG | DRIVER_STALL);
1960 TASK_DAEMON_UNLOCK(ha);
1961
1962 if (rval != NX_DEV_READY) {
1963 EL(ha, "status=%xh\n", QL_FUNCTION_FAILED);
1964 return (QL_FUNCTION_FAILED);
1965 }
1966 return (QL_SUCCESS);
1967 }
1968
1969 void
1970 ql_8021_idc_poll(ql_adapter_state_t *ha)
1971 {
1972 uint32_t new_state;
1973
1974 if (ha->ql_dump_state & QL_DUMPING) {
1975 return;
1976 }
1977 new_state = ql_8021_check_fw_alive(ha);
1978
1979 if (new_state == NX_DEV_NEED_RESET &&
1980 !(ha->ql_dump_state & QL_DUMPING ||
1981 (ha->ql_dump_state & QL_DUMP_VALID &&
1982 !(ha->ql_dump_state & QL_DUMP_UPLOADED)))) {
1983 (void) ql_dump_firmware(ha);
1984 } else {
1985 (void) ql_8021_idc_handler(ha, new_state);
1986 }
1987 }
1988
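/*
 * ql_8021_idc_handler
 *	Drives the inter-driver coordination (IDC) state machine:
 *	polls CRB_DEV_STATE under the IDC hardware lock for up to
 *	180 seconds, loading firmware when the device is cold,
 *	acknowledging reset/quiescent requests, and stalling the
 *	driver while another function initializes the device.
 */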
1989 int
1990 ql_8021_idc_handler(ql_adapter_state_t *ha, uint32_t new_state)
1991 {
1992 int rval = NX_DEV_POLL;
1993 uint32_t dev_state, drv_state, loop;
1994 ql_mbx_data_t mr;
1995 boolean_t stalled = B_FALSE, reset_needed = B_FALSE;
1996 boolean_t force_load = B_FALSE;
1997
1998 (void) ql_8021_hw_lock(ha, IDC_LOCK_TIMEOUT);
1999
2000 /* wait for 180 seconds for device to go ready */
2001 for (loop = 180; loop; loop--) {
2002 if (new_state != NX_DEV_POLL) {
2003 ql_8021_wr_32(ha, CRB_DEV_STATE, new_state);
2004 dev_state = new_state;
2005 new_state = NX_DEV_POLL;
2006 } else {
2007 ql_8021_rd_32(ha, CRB_DEV_STATE, &dev_state);
2008 }
2009
2010 switch (dev_state) {
2011 case 0xffffffff:
2012 case NX_DEV_COLD:
2013 if (ha->dev_state != dev_state) {
2014 EL(ha, "dev_state=NX_DEV_COLD\n");
2015 }
2016 rval = NX_DEV_COLD;
2017 ql_8021_wr_32(ha, CRB_DEV_STATE, NX_DEV_INITIALIZING);
2018 ql_8021_wr_32(ha, CRB_DRV_IDC_VERSION, NX_IDC_VERSION);
2019 ql_8021_hw_unlock(ha);
2020 if (!force_load &&
2021 ql_get_fw_version(ha, &mr, 2) == QL_SUCCESS &&
2022 (mr.mb[1] | mr.mb[2] | mr.mb[3])) {
2023 ql_8021_rd_32(ha, UNM_FW_VERSION_MAJOR,
2024 &ha->fw_major_version);
2025 ql_8021_rd_32(ha, UNM_FW_VERSION_MINOR,
2026 &ha->fw_minor_version);
2027 ql_8021_rd_32(ha, UNM_FW_VERSION_SUB,
2028 &ha->fw_subminor_version);
2029 rval = NX_DEV_READY;
2030 } else {
2031 if (!stalled) {
2032 TASK_DAEMON_LOCK(ha);
2033 ha->task_daemon_flags |=
2034 TASK_DAEMON_STALLED_FLG;
2035 TASK_DAEMON_UNLOCK(ha);
2036 ql_abort_queues(ha);
2037 stalled = B_TRUE;
2038 }
2039 if (ha->ql_dump_state & QL_DUMPING) {
2040 (void) ql_8021_get_fw_dump(ha);
2041 }
2042 rval = ql_8021_load_fw(ha) == 0 ?
2043 NX_DEV_READY : NX_DEV_FAILED;
2044 }
2045 (void) ql_8021_hw_lock(ha, IDC_LOCK_TIMEOUT);
2046 ql_8021_wr_32(ha, CRB_DEV_STATE, rval);
2047 break;
2048 case NX_DEV_READY:
2049 if (ha->dev_state != dev_state) {
2050 EL(ha, "dev_state=NX_DEV_READY\n");
2051 }
2052 rval = NX_DEV_READY;
2053 loop = 1;
2054 break;
2055 case NX_DEV_FAILED:
2056 if (ha->dev_state != dev_state) {
2057 EL(ha, "dev_state=NX_DEV_FAILED\n");
2058 }
2059 rval = NX_DEV_FAILED;
2060 loop = 1;
2061 break;
2062 case NX_DEV_NEED_RESET:
2063 if (ha->dev_state != dev_state) {
2064 EL(ha, "dev_state=NX_DEV_NEED_RESET\n");
2065 }
2066 rval = NX_DEV_NEED_RESET;
2067 ql_8021_need_reset_handler(ha);
2068 /*
2069 * Force to DEV_COLD unless someone else is starting
2070 * a reset
2071 */
2072 ql_8021_rd_32(ha, CRB_DEV_STATE, &dev_state);
2073 if (dev_state == NX_DEV_NEED_RESET) {
2074 EL(ha, "HW State: COLD/RE-INIT\n");
2075 ql_8021_wr_32(ha, CRB_DEV_STATE, NX_DEV_COLD);
2076 force_load = B_TRUE;
2077 }
2078 reset_needed = B_TRUE;
2079 break;
2080 case NX_DEV_NEED_QUIESCENT:
2081 if (ha->dev_state != dev_state) {
2082 EL(ha, "dev_state=NX_DEV_NEED_QUIESCENT\n");
2083 }
2084 ql_8021_rd_32(ha, CRB_DRV_STATE, &drv_state);
2085 drv_state |= (2 << (ha->pci_function_number * 4));
2086 ql_8021_wr_32(ha, CRB_DRV_STATE, drv_state);
2087 ql_8021_hw_unlock(ha);
2088 if (!stalled) {
2089 TASK_DAEMON_LOCK(ha);
2090 ha->task_daemon_flags |=
2091 TASK_DAEMON_STALLED_FLG;
2092 TASK_DAEMON_UNLOCK(ha);
2093 (void) ql_stall_driver(ha, BIT_0);
2094 stalled = B_TRUE;
2095 }
2096 (void) ql_8021_hw_lock(ha, IDC_LOCK_TIMEOUT);
2097 break;
2098 case NX_DEV_INITIALIZING:
2099 if (ha->dev_state != dev_state) {
2100 EL(ha, "dev_state=NX_DEV_INITIALIZING\n");
2101 }
2102 ql_8021_hw_unlock(ha);
2103 if (!stalled) {
2104 TASK_DAEMON_LOCK(ha);
2105 ha->task_daemon_flags |=
2106 TASK_DAEMON_STALLED_FLG;
2107 TASK_DAEMON_UNLOCK(ha);
2108 ql_awaken_task_daemon(ha, NULL,
2109 DRIVER_STALL, 0);
2110 stalled = B_TRUE;
2111 ql_requeue_all_cmds(ha);
2112 ADAPTER_STATE_LOCK(ha);
2113 ha->flags &= ~INTERRUPTS_ENABLED;
2114 ADAPTER_STATE_UNLOCK(ha);
2115 }
2116 delay(100);
2117 (void) ql_8021_hw_lock(ha, IDC_LOCK_TIMEOUT);
2118 reset_needed = B_TRUE;
2119 break;
2120 case NX_DEV_QUIESCENT:
2121 if (ha->dev_state != dev_state) {
2122 EL(ha, "dev_state=NX_DEV_QUIESCENT\n");
2123 }
2124 ql_8021_hw_unlock(ha);
2125 delay(100);
2126 (void) ql_8021_hw_lock(ha, IDC_LOCK_TIMEOUT);
2127 break;
2128 default:
2129 if (ha->dev_state != dev_state) {
2130 EL(ha, "dev_state=%x, default\n", dev_state);
2131 }
2132 ql_8021_hw_unlock(ha);
2133 delay(100);
2134 (void) ql_8021_hw_lock(ha, IDC_LOCK_TIMEOUT);
2135 break;
2136 }
2137 ha->dev_state = dev_state;
2138 }
2139
2140 /* Clear reset ready and quiescent flags. */
2141 ql_8021_rd_32(ha, CRB_DRV_STATE, &drv_state);
2142 drv_state &= ~(1 << (ha->pci_function_number * 4));
2143 drv_state &= ~(2 << (ha->pci_function_number * 4));
2144 ql_8021_wr_32(ha, CRB_DRV_STATE, drv_state);
2145
2146 ql_8021_hw_unlock(ha);
2147 if (reset_needed && ha->flags & ONLINE &&
2148 !(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
2149 delay(100);
2150 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
2151 }
2152 if (stalled) {
2153 TASK_DAEMON_LOCK(ha);
2154 ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;
2155 TASK_DAEMON_UNLOCK(ha);
2156 ql_restart_driver(ha);
2157 }
2158 return (rval);
2159 }
2160
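/*
 * Request queue in-pointer doorbell. The written value packs the new
 * request-in index into the upper 16 bits with the PCI function number
 * at bit 5; the low bits (the constant 4) appear to select the request
 * queue doorbell type (an assumption based on the encoding below, not
 * on documentation).
 */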
2161 void
2162 ql_8021_wr_req_in(ql_adapter_state_t *ha, uint32_t index)
2163 {
2164 index = index << 16 | ha->pci_function_number << 5 | 4;
2165
2166 if (NX_IS_REVISION_P3PLUS_B0(ha->rev_id)) {
2167 uint64_t addr;
2168
2169 addr = ha->function_number ? (uint64_t)CRB_PORT_1_REQIN :
2170 (uint64_t)CRB_PORT_0_REQIN;
2171 ql_8021_wr_32(ha, addr, index);
2172 } else {
2173 do {
2174 ddi_put32(ha->db_dev_handle, ha->nx_req_in, index);
2175 } while (RD_REG_DWORD(ha, ha->db_read) != index);
2176 }
2177 }
2178
2179 /* Called every 2 seconds */
2180 static uint32_t
2181 ql_8021_check_fw_alive(ql_adapter_state_t *ha)
2182 {
2183 uint32_t dev_state, fw_heartbeat_counter, cnt, data[7];
2184 uint32_t new_state = NX_DEV_POLL;
2185
2186 ql_8021_rd_32(ha, CRB_DEV_STATE, &dev_state);
2187 if (dev_state != NX_DEV_READY) {
2188 return (new_state);
2189 }
2190
2191 ql_8021_rd_32(ha, UNM_PEG_ALIVE_COUNTER, &fw_heartbeat_counter);
2192
2193 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
2194 ha->seconds_since_last_heartbeat++;
2195 /* FW not alive after 6 seconds */
2196 if (ha->seconds_since_last_heartbeat == 3) {
2197 ha->seconds_since_last_heartbeat = 0;
2198 /* Recheck the heartbeat for up to 5 milliseconds. */
2199 for (cnt = 5; cnt; cnt--) {
2200 ql_8021_rd_32(ha, UNM_PEG_ALIVE_COUNTER,
2201 &fw_heartbeat_counter);
2202 if (ha->fw_heartbeat_counter !=
2203 fw_heartbeat_counter) {
2204 break;
2205 }
2206 drv_usecwait(1000);
2207 }
2208 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
2209 EL(ha, "nx_dev_need_reset\n");
2210 ql_8021_rd_32(ha, UNM_PEG_HALT_STATUS1,
2211 &data[0]);
2212 ql_8021_rd_32(ha, UNM_PEG_HALT_STATUS2,
2213 &data[1]);
2214 ql_8021_rd_32(ha, UNM_CRB_PEG_NET_0 + 0x3C,
2215 &data[2]);
2216 ql_8021_rd_32(ha, UNM_CRB_PEG_NET_1 + 0x3C,
2217 &data[3]);
2218 ql_8021_rd_32(ha, UNM_CRB_PEG_NET_2 + 0x3C,
2219 &data[4]);
2220 ql_8021_rd_32(ha, UNM_CRB_PEG_NET_3 + 0x3C,
2221 &data[5]);
2222 ql_8021_rd_32(ha, UNM_CRB_PEG_NET_4 + 0x3C,
2223 &data[6]);
2224 EL(ha, "halt_status1=%xh, halt_status2=%xh,\n"
2225 "peg_pc0=%xh, peg_pc1=%xh, peg_pc2=%xh, "
2226 "peg_pc3=%xh, peg_pc4=%xh\n", data[0],
2227 data[1], data[2], data[3], data[4],
2228 data[5], data[6]);
2229 new_state = NX_DEV_NEED_RESET;
2230 }
2231 }
2232 } else {
2233 ha->seconds_since_last_heartbeat = 0;
2234 }
2235
2236 ha->fw_heartbeat_counter = fw_heartbeat_counter;
2237 return (new_state);
2238 }
2239
2240 int
2241 ql_8021_reset_fw(ql_adapter_state_t *ha)
2242 {
2243 return (ql_8021_idc_handler(ha, NX_DEV_NEED_RESET));
2244 }
2245
2246 int
2247 ql_8021_fw_chk(ql_adapter_state_t *ha)
2248 {
2249 uint32_t dev_state, new_state = NX_DEV_POLL;
2250 int rval;
2251 ql_mbx_data_t mr;
2252
2253 ql_8021_rd_32(ha, CRB_DEV_STATE, &dev_state);
2254 switch (dev_state) {
2255 case 0xffffffff:
2256 case NX_DEV_COLD:
2257 case NX_DEV_NEED_RESET:
2258 case NX_DEV_NEED_QUIESCENT:
2259 case NX_DEV_INITIALIZING:
2260 case NX_DEV_QUIESCENT:
2261 case NX_DEV_BADOBADO:
2262 break;
2263 case NX_DEV_READY:
2264 if (ql_get_fw_version(ha, &mr, 2) != QL_SUCCESS ||
2265 (mr.mb[1] | mr.mb[2] | mr.mb[3]) == 0) {
2266 EL(ha, "version check needs reset\n");
2267 new_state = NX_DEV_NEED_RESET;
2268 }
2269 break;
2270 case NX_DEV_FAILED:
2271 EL(ha, "device needs reset\n");
2272 new_state = NX_DEV_NEED_RESET;
2273 break;
2274 default:
2275 EL(ha, "state=%xh needs reset\n", dev_state);
2276 new_state = NX_DEV_COLD;
2277 break;
2278 }
2279
2280 /* Test for firmware running. */
2281 rval = ql_8021_idc_handler(ha, new_state) == NX_DEV_READY ?
2282 QL_SUCCESS : QL_FUNCTION_FAILED;
2283
2284 return (rval);
2285 }
2286
2287 /* ****************************************************************** */
2288 /* ***************** NetXen MiniDump Functions ********************** */
2289 /* ****************************************************************** */
2290
2291 /*
2292 * ql_8021_get_fw_dump
2293 *
2294 * Input:
2295 * ha: adapter state pointer.
2296 *
2297 * Returns:
2298 * qla driver local function return status codes
2299 *
2300 * Context:
2301 * Interrupt or Kernel context, no mailbox commands allowed.
2302 */
2303 static int
2304 ql_8021_get_fw_dump(ql_adapter_state_t *ha)
2305 {
2306 uint32_t tsize, cnt, *dp, *bp;
2307
2308 QL_PRINT_10(ha, "started\n");
2309
2310 tsize = ha->dmp_template.size;
2311 cnt = (uint32_t)(tsize / sizeof (uint32_t));
2312 dp = (uint32_t *)ha->ql_dump_ptr;
2313 bp = (uint32_t *)ha->dmp_template.bp;
2314 while (cnt--) {
2315 *dp++ = ddi_get32(ha->dmp_template.acc_handle, bp++);
2316 }
2317 ql_8021_md_parse_template(ha, ha->ql_dump_ptr, (caddr_t)dp,
2318 ha->md_capture_size - tsize, ha->md_capture_mask);
2319
2320 #ifdef _BIG_ENDIAN
2321 cnt = (uint32_t)(ha->ql_dump_size / sizeof (uint32_t));
2322 dp = (uint32_t *)ha->ql_dump_ptr;
2323 while (cnt--) {
2324 ql_chg_endian((uint8_t *)dp, 4);
2325 dp++;
2326 }
2327 #endif
2328 QL_PRINT_10(ha, "done\n");
2329 return (QL_SUCCESS);
2330 }
2331
2332 /*
2333 * ql_8021_get_md_template
2334 * Get mini-dump template
2335 *
2336 * Input:
2337 * ha: adapter state pointer.
2338 *
2339 * Returns:
2340 * ql local function return status code.
2341 *
2342 * Context:
2343 * Kernel context.
2344 */
2345 int
2346 ql_8021_get_md_template(ql_adapter_state_t *ha)
2347 {
2348 ql_mbx_data_t mr;
2349 uint32_t tsize = 0, chksum = 0;
2350 int rval;
2351
2352 rval = ql_get_md_template(ha, NULL, &mr, 0, GTO_TEMPLATE_SIZE);
2353 if (rval != QL_SUCCESS ||
2354 (tsize = SHORT_TO_LONG(mr.mb[2], mr.mb[3])) == 0) {
2355 EL(ha, "size=%xh status=%xh\n", tsize, rval);
2356 ha->md_capture_size = 0;
2357 ql_free_phys(ha, &ha->dmp_template);
2358 return (rval);
2359 }
2360 if (ha->dmp_template.dma_handle && ha->dmp_template.size != tsize) {
2361 ql_free_phys(ha, &ha->dmp_template);
2362 }
2363 ha->md_capture_mask = 0x1f;
2364 ha->md_capture_size = SHORT_TO_LONG(mr.mb[4], mr.mb[5]) +
2365 SHORT_TO_LONG(mr.mb[6], mr.mb[7]) +
2366 SHORT_TO_LONG(mr.mb[8], mr.mb[9]) +
2367 SHORT_TO_LONG(mr.mb[10], mr.mb[11]) + tsize;
2368 /*
2369 * Determine ascii dump file size
2370 * 2 ascii bytes per binary byte + a space and
2371 * a newline every 16 binary bytes
2372 */
2373 ha->risc_dump_size = ha->md_capture_size << 1;
2374 ha->risc_dump_size += ha->md_capture_size;
2375 ha->risc_dump_size += ha->md_capture_size / 16 + 1;
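/*
 * Illustrative example only: a 1 MB (1048576 byte) capture yields
 * 2097152 hex digits + 1048576 spaces + 65537 newlines, or about
 * 3.06 MB of ascii dump.
 */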
2376
2377 /* Allocate template buffer. */
2378 if (ha->dmp_template.dma_handle == NULL) {
2379 rval = ql_get_dma_mem(ha, &ha->dmp_template, tsize,
2380 LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN);
2381 if (rval != QL_SUCCESS) {
2382 EL(ha, "unable to allocate template buffer, "
2383 "status=%xh\n", rval);
2384 ha->md_capture_size = 0;
2385 ql_free_phys(ha, &ha->dmp_template);
2386 return (rval);
2387 }
2388 }
2389 rval = ql_get_md_template(ha, &ha->dmp_template, &mr, 0, GTO_TEMPLATE);
2390 if (rval != QL_SUCCESS ||
2391 (chksum = ql_8021_md_template_checksum(ha))) {
2392 EL(ha, "status=%xh, chksum=%xh\n", rval, chksum);
2393 if (rval == QL_SUCCESS) {
2394 rval = QL_FUNCTION_FAILED;
2395 }
2396 ql_free_phys(ha, &ha->dmp_template);
2397 ha->md_capture_size = 0;
2398 }
2399
2400 return (rval);
2401 }
2402
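/*
 * ql_8021_md_parse_template
 *	Walks the minidump template entry list and captures each entry
 *	that matches the capture mask into the dump buffer; entries
 *	that do not match the mask or are unknown are flagged as
 *	skipped, and entries that would overflow the buffer are
 *	skipped without capture.
 */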
2403 static void
2404 ql_8021_md_parse_template(ql_adapter_state_t *ha, caddr_t template_buff,
2405 caddr_t dump_buff, uint32_t buff_size, uint32_t capture_mask)
2406 {
2407 int e_cnt, buff_level, esize;
2408 uint32_t num_of_entries;
2409 time_t time;
2410 caddr_t dbuff;
2411 int sane_start = 0, sane_end = 0;
2412 md_template_hdr_t *template_hdr;
2413 md_entry_t *entry;
2414
2415 if ((capture_mask & 0x3) != 0x3) {
2416 EL(ha, "capture mask %02xh below minimum needed for valid "
2417 "dump\n", capture_mask);
2418 return;
2419 }
2420 /* Setup parameters */
2421 template_hdr = (md_template_hdr_t *)template_buff;
2422 if (template_hdr->entry_type == TLHDR) {
2423 sane_start = 1;
2424 }
2425 (void) drv_getparm(TIME, &time);
2426 template_hdr->driver_timestamp = LSD(time);
2427 template_hdr->driver_capture_mask = capture_mask;
2428 num_of_entries = template_hdr->num_of_entries;
2429 entry = (md_entry_t *)((caddr_t)template_buff +
2430 template_hdr->first_entry_offset);
2431 for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
2432 /*
2433 * If the capture_mask of the entry does not match capture mask
2434 * skip the entry after marking the driver_flags indicator.
2435 */
2436 if (!(entry->h.a.ecw.entry_capture_mask & capture_mask)) {
2437 entry->h.a.ecw.driver_flags = (uint8_t)
2438 (entry->h.a.ecw.driver_flags |
2439 QL_DBG_SKIPPED_FLAG);
2440 entry = (md_entry_t *)((char *)entry +
2441 entry->h.entry_size);
2442 continue;
2443 }
2444 /*
2445 * This is only needed in implementations where the allocated
2446 * capture buffer is too small to capture all of the entries
2447 * required by the given capture mask. Ideally the buffer
2448 * contents would be emptied to a file before processing the
2449 * next entry; as written, any entry whose capture data would
2450 * not fit in the remaining buffer space is simply skipped
2451 * and the walk continues with the next entry in the
2452 * template.
2453 */
2454 if (entry->h.entry_capture_size != 0) {
2455 if ((buff_level + entry->h.entry_capture_size) >
2456 buff_size) {
2457 entry = (md_entry_t *)((char *)entry +
2458 entry->h.entry_size);
2459 continue;
2460 }
2461 }
2462 /*
2463 * Decode the entry type and process it accordingly
2464 */
2465 switch (entry->h.entry_type) {
2466 case RDNOP:
2467 break;
2468 case RDEND:
2469 sane_end += 1;
2470 break;
2471 case RDCRB:
2472 dbuff = dump_buff + buff_level;
2473 esize = ql_8021_md_rdcrb(ha, (void *)entry,
2474 (void *)dbuff);
2475 ql_8021_md_entry_err_chk(ha, entry, esize, e_cnt);
2476 buff_level += esize;
2477 break;
2478 case L2ITG:
2479 case L2DTG:
2480 case L2DAT:
2481 case L2INS:
2482 dbuff = dump_buff + buff_level;
2483 esize = ql_8021_md_L2Cache(ha, (void *)entry,
2484 (void *)dbuff);
2485 if (esize == -1) {
2486 entry->h.a.ecw.driver_flags = (uint8_t)
2487 (entry->h.a.ecw.driver_flags |
2488 QL_DBG_SKIPPED_FLAG);
2489 } else {
2490 ql_8021_md_entry_err_chk(ha, entry, esize,
2491 e_cnt);
2492 buff_level += esize;
2493 }
2494 break;
2495 case L1DAT:
2496 case L1INS:
2497 dbuff = dump_buff + buff_level;
2498 esize = ql_8021_md_L1Cache(ha, (void *)entry,
2499 (void *)dbuff);
2500 ql_8021_md_entry_err_chk(ha, entry, esize, e_cnt);
2501 buff_level += esize;
2502 break;
2503 case RDOCM:
2504 dbuff = dump_buff + buff_level;
2505 esize = ql_8021_md_rdocm(ha, (void *)entry,
2506 (void *)dbuff);
2507 ql_8021_md_entry_err_chk(ha, entry, esize, e_cnt);
2508 buff_level += esize;
2509 break;
2510 case RDMEM:
2511 dbuff = dump_buff + buff_level;
2512 esize = ql_8021_md_rdmem(ha, (void *)entry,
2513 (void *)dbuff);
2514 ql_8021_md_entry_err_chk(ha, entry, esize, e_cnt);
2515 buff_level += esize;
2516 break;
2517 case BOARD:
2518 case RDROM:
2519 dbuff = dump_buff + buff_level;
2520 esize = ql_8021_md_rdrom(ha, (void *)entry,
2521 (void *)dbuff);
2522 ql_8021_md_entry_err_chk(ha, entry, esize, e_cnt);
2523 buff_level += esize;
2524 break;
2525 case RDMUX:
2526 dbuff = dump_buff + buff_level;
2527 esize = ql_8021_md_rdmux(ha, (void *)entry,
2528 (void *)dbuff);
2529 ql_8021_md_entry_err_chk(ha, entry, esize, e_cnt);
2530 buff_level += esize;
2531 break;
2532 case QUEUE:
2533 dbuff = dump_buff + buff_level;
2534 esize = ql_8021_md_rdqueue(ha, (void *)entry,
2535 (void *)dbuff);
2536 ql_8021_md_entry_err_chk(ha, entry, esize, e_cnt);
2537 buff_level += esize;
2538 break;
2539 case CNTRL:
2540 if (ql_8021_md_cntrl(ha, template_hdr,
2541 (void *)entry)) {
2542 entry->h.a.ecw.driver_flags = (uint8_t)
2543 (entry->h.a.ecw.driver_flags |
2544 QL_DBG_SKIPPED_FLAG);
2545 EL(ha, "Entry ID=%d, entry_type=%d non zero "
2546 "status\n", e_cnt, entry->h.entry_type);
2547 }
2548 break;
2549 default:
2550 entry->h.a.ecw.driver_flags = (uint8_t)
2551 (entry->h.a.ecw.driver_flags |
2552 QL_DBG_SKIPPED_FLAG);
2553 EL(ha, "Entry ID=%d, entry_type=%d unknown\n", e_cnt,
2554 entry->h.entry_type);
2555 break;
2556 }
2557 /* next entry in the template */
2558 entry = (md_entry_t *)((caddr_t)entry + entry->h.entry_size);
2559 }
2560 if (!sane_start || (sane_end > 1)) {
2561 EL(ha, "Template configuration error. Check Template\n");
2562 }
2563 QL_PRINT_10(ha, "Minidump num of entries=%d\n",
2564 template_hdr->num_of_entries);
2565 }
2566
2567 /*
2568 * Read CRB operation.
2569 */
2570 static int
2571 ql_8021_md_rdcrb(ql_adapter_state_t *ha, md_entry_rdcrb_t *crbEntry,
2572 uint32_t *data_buff)
2573 {
2574 uint32_t loop_cnt, op_count, addr, stride, value;
2575 int i;
2576
2577 addr = crbEntry->addr;
2578 op_count = crbEntry->op_count;
2579 stride = crbEntry->a.ac.addr_stride;
2580
2581 for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
2582 value = ql_8021_read_reg(ha, addr);
2583 *data_buff++ = addr;
2584 *data_buff++ = value;
2585 addr = addr + stride;
2586 }
2587
2588 /*
2589 * Return the number of bytes written to the dump buffer.
2590 */
2591 i = (int)(loop_cnt * (2 * sizeof (uint32_t)));
2592
2593 return (i);
2594 }
2595
2596 /*
2597 * Handle L2 Cache.
2598 */
2599 static int
2600 ql_8021_md_L2Cache(ql_adapter_state_t *ha, md_entry_cache_t *cacheEntry,
2601 uint32_t *data_buff)
2602 {
2603 int i, k, tflag;
2604 uint32_t read_value, loop_cnt, read_cnt;
2605 uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
2606 uint32_t cntl_value_w, tag_value, tag_value_stride;
2607 volatile uint8_t cntl_value_r;
2608 clock_t timeout, elapsed;
2609
2610 read_addr = cacheEntry->read_addr;
2611 loop_cnt = cacheEntry->op_count;
2612 cntrl_addr = cacheEntry->control_addr;
2613 cntl_value_w = CHAR_TO_SHORT(cacheEntry->b.cv.write_value[0],
2614 cacheEntry->b.cv.write_value[1]);
2615 tag_reg_addr = cacheEntry->tag_reg_addr;
2616 tag_value = CHAR_TO_SHORT(cacheEntry->a.sac.init_tag_value[0],
2617 cacheEntry->a.sac.init_tag_value[1]);
2618 tag_value_stride = CHAR_TO_SHORT(cacheEntry->a.sac.tag_value_stride[0],
2619 cacheEntry->a.sac.tag_value_stride[1]);
2620 read_cnt = cacheEntry->c.rac.read_addr_cnt;
2621
2622 for (i = 0; i < loop_cnt; i++) {
2623 ql_8021_write_reg(ha, tag_value, tag_reg_addr);
2624 if (cntl_value_w) {
2625 ql_8021_write_reg(ha, cntl_value_w, cntrl_addr);
2626 }
2627 if (cacheEntry->b.cv.poll_wait) {
2628 (void) drv_getparm(LBOLT, &timeout);
2629 timeout += drv_usectohz(cacheEntry->b.cv.poll_wait *
2630 1000) + 1;
2631 cntl_value_r = (uint8_t)ql_8021_read_reg(ha,
2632 cntrl_addr);
2633 tflag = 0;
2634 while (!tflag && ((cntl_value_r &
2635 cacheEntry->b.cv.poll_mask) != 0)) {
2636 (void) drv_getparm(LBOLT, &elapsed);
2637 if (elapsed > timeout) {
2638 tflag = 1;
2639 }
2640 cntl_value_r = (uint8_t)ql_8021_read_reg(ha,
2641 cntrl_addr);
2642 }
2643 if (tflag) {
2644 /*
2645 * Report a timeout error; the core
2646 * dump capture failed. Skip the
2647 * remaining entries and write the
2648 * buffer out to a file, using the
2649 * driver-specific fields in the
2650 * template header to report this
2651 * error.
2652 */
2653 EL(ha, "timeout\n");
2654 return (-1);
2655 }
2656 }
2657 addr = read_addr;
2658 for (k = 0; k < read_cnt; k++) {
2659 read_value = ql_8021_read_reg(ha, addr);
2660 *data_buff++ = read_value;
2661 addr += cacheEntry->c.rac.read_addr_stride;
2662 }
2663 tag_value += tag_value_stride;
2664 }
2665 i = (int)(read_cnt * loop_cnt * sizeof (uint32_t));
2666
2667 return (i);
2668 }
2669
2670 /*
2671 * Handle L1 Cache.
2672 */
2673 static int
2674 ql_8021_md_L1Cache(ql_adapter_state_t *ha, md_entry_cache_t *cacheEntry,
2675 uint32_t *data_buff)
2676 {
2677 int i, k;
2678 uint32_t read_value, tag_value, tag_value_stride;
2679 uint32_t read_cnt, loop_cnt;
2680 uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
2681 volatile uint32_t cntl_value_w;
2682
2683 read_addr = cacheEntry->read_addr;
2684 loop_cnt = cacheEntry->op_count;
2685 cntrl_addr = cacheEntry->control_addr;
2686 cntl_value_w = CHAR_TO_SHORT(cacheEntry->b.cv.write_value[0],
2687 cacheEntry->b.cv.write_value[1]);
2688 tag_reg_addr = cacheEntry->tag_reg_addr;
2689 tag_value = CHAR_TO_SHORT(cacheEntry->a.sac.init_tag_value[0],
2690 cacheEntry->a.sac.init_tag_value[1]);
2691 tag_value_stride = CHAR_TO_SHORT(cacheEntry->a.sac.tag_value_stride[0],
2692 cacheEntry->a.sac.tag_value_stride[1]);
2693 read_cnt = cacheEntry->c.rac.read_addr_cnt;
2694
2695 for (i = 0; i < loop_cnt; i++) {
2696 ql_8021_write_reg(ha, tag_value, tag_reg_addr);
2697 ql_8021_write_reg(ha, cntl_value_w, cntrl_addr);
2698 addr = read_addr;
2699 for (k = 0; k < read_cnt; k++) {
2700 read_value = ql_8021_read_reg(ha, addr);
2701 *data_buff++ = read_value;
2702 addr += cacheEntry->c.rac.read_addr_stride;
2703 }
2704 tag_value += tag_value_stride;
2705 }
2706 i = (int)(read_cnt * loop_cnt * sizeof (uint32_t));
2707
2708 return (i);
2709 }
2710
2711 /*
2712 * Reading OCM memory
2713 */
2714 static int
2715 ql_8021_md_rdocm(ql_adapter_state_t *ha, md_entry_rdocm_t *ocmEntry,
2716 uint32_t *data_buff)
2717 {
2718 int i;
2719 uint32_t addr, value, loop_cnt;
2720
2721 addr = ocmEntry->read_addr;
2722 loop_cnt = ocmEntry->op_count;
2723
2724 for (i = 0; i < loop_cnt; i++) {
2725 value = ql_8021_read_ocm(ha, addr);
2726 *data_buff++ = value;
2727 addr += ocmEntry->read_addr_stride;
2728 }
2729 i = (int)(loop_cnt * sizeof (value));
2730
2731 return (i);
2732 }
2733
2734 /*
2735 * Read memory
2736 */
2737 static int
2738 ql_8021_md_rdmem(ql_adapter_state_t *ha, md_entry_rdmem_t *memEntry,
2739 uint32_t *data_buff)
2740 {
2741 int i, k;
2742 uint32_t addr, value, loop_cnt;
2743
2744 addr = memEntry->read_addr;
2745 loop_cnt = (uint32_t)(memEntry->read_data_size /
2746 (sizeof (uint32_t) * 4)); /* size in bytes / 16 */
2747
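/*
 * Each pass captures one 16-byte line through the MIU test agent:
 * program the target address, start the access, poll the busy bit,
 * then read the four 32-bit data registers.
 */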
2748 ql_8021_write_reg(ha, 0, MD_MIU_TEST_AGT_ADDR_HI);
2749 for (i = 0; i < loop_cnt; i++) {
2750 /*
2751 * Write address
2752 */
2753 ql_8021_write_reg(ha, addr, MD_MIU_TEST_AGT_ADDR_LO);
2754 ql_8021_write_reg(ha, MD_TA_CTL_ENABLE,
2755 MD_MIU_TEST_AGT_CTRL);
2756 ql_8021_write_reg(ha, (MD_TA_CTL_START | MD_TA_CTL_ENABLE),
2757 MD_MIU_TEST_AGT_CTRL);
2758 /*
2759 * Check busy bit.
2760 */
2761 for (k = 0; k < MD_TA_CTL_CHECK; k++) {
2762 value = ql_8021_read_reg(ha, MD_MIU_TEST_AGT_CTRL);
2763 if ((value & MD_TA_CTL_BUSY) == 0) {
2764 break;
2765 }
2766 }
2767 if (k == MD_TA_CTL_CHECK) {
2768 i = (int)((uint_t)i * (sizeof (uint32_t) * 4));
2769 EL(ha, "failed to read=%xh\n", i);
2770 return (i);
2771 }
2772 /*
2773 * Read data
2774 */
2775 value = ql_8021_read_reg(ha, MD_MIU_TEST_AGT_RDDATA_0_31);
2776 *data_buff++ = value;
2777 value = ql_8021_read_reg(ha, MD_MIU_TEST_AGT_RDDATA_32_63);
2778 *data_buff++ = value;
2779 value = ql_8021_read_reg(ha, MD_MIU_TEST_AGT_RDDATA_64_95);
2780 *data_buff++ = value;
2781 value = ql_8021_read_reg(ha, MD_MIU_TEST_AGT_RDDATA_96_127);
2782 *data_buff++ = value;
2783 /*
2784 * next address to read
2785 */
2786 addr = (uint32_t)(addr + (sizeof (uint32_t) * 4));
2787 }
2788 i = (int)(loop_cnt * (sizeof (uint32_t) * 4));
2789
2790 return (i);
2791 }
2792
2793 /*
2794 * Read Rom
2795 */
2796 static int
2797 ql_8021_md_rdrom(ql_adapter_state_t *ha, md_entry_rdrom_t *romEntry,
2798 uint32_t *data_buff)
2799 {
2800 int i;
2801 uint32_t addr, waddr, raddr, value, loop_cnt;
2802
2803 addr = romEntry->read_addr;
2804 loop_cnt = romEntry->read_data_size; /* This is size in bytes */
2805 loop_cnt = (uint32_t)(loop_cnt / sizeof (value));
2806
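/*
 * Each word is read by selecting the 64KB flash window (upper 16
 * address bits) and reading through the direct-read base plus the
 * window offset, under the ROM lock.
 */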
2807 for (i = 0; i < loop_cnt; i++) {
2808 waddr = addr & 0xFFFF0000;
2809 (void) ql_8021_rom_lock(ha);
2810 ql_8021_write_reg(ha, waddr, MD_DIRECT_ROM_WINDOW);
2811 raddr = MD_DIRECT_ROM_READ_BASE + (addr & 0x0000FFFF);
2812 value = ql_8021_read_reg(ha, raddr);
2813 ql_8021_rom_unlock(ha);
2814 *data_buff++ = value;
2815 addr = (uint32_t)(addr + sizeof (value));
2816 }
2817 i = (int)(loop_cnt * sizeof (value));
2818
2819 return (i);
2820 }
2821
2822 /*
2823 * Read MUX data
2824 */
2825 static int
2826 ql_8021_md_rdmux(ql_adapter_state_t *ha, md_entry_mux_t *muxEntry,
2827 uint32_t *data_buff)
2828 {
2829 uint32_t read_value, sel_value, loop_cnt;
2830 uint32_t read_addr, select_addr;
2831 int i;
2832
2833 select_addr = muxEntry->select_addr;
2834 sel_value = muxEntry->select_value;
2835 read_addr = muxEntry->read_addr;
2836
2837 for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
2838 ql_8021_write_reg(ha, sel_value, select_addr);
2839 read_value = ql_8021_read_reg(ha, read_addr);
2840 *data_buff++ = sel_value;
2841 *data_buff++ = read_value;
2842 sel_value += muxEntry->select_value_stride;
2843 }
2844 i = (int)(loop_cnt * (2 * sizeof (uint32_t)));
2845
2846 return (i);
2847 }
2848
2849 /*
2850 * Handling Queue State Reads.
2851 */
2852 static int
2853 ql_8021_md_rdqueue(ql_adapter_state_t *ha, md_entry_queue_t *queueEntry,
2854 uint32_t *data_buff)
2855 {
2856 int k;
2857 uint32_t read_value, read_addr, read_stride, select_addr;
2858 uint32_t queue_id, loop_cnt, read_cnt;
2859
2860 read_cnt = queueEntry->b.rac.read_addr_cnt;
2861 read_stride = queueEntry->b.rac.read_addr_stride;
2862 select_addr = queueEntry->select_addr;
2863
2864 for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
2865 loop_cnt++) {
2866 ql_8021_write_reg(ha, queue_id, select_addr);
2867 read_addr = queueEntry->read_addr;
2868 for (k = 0; k < read_cnt; k++) {
2869 read_value = ql_8021_read_reg(ha, read_addr);
2870 *data_buff++ = read_value;
2871 read_addr += read_stride;
2872 }
2873 queue_id += CHAR_TO_SHORT(queueEntry->a.sac.queue_id_stride[0],
2874 queueEntry->a.sac.queue_id_stride[1]);
2875 }
2876 k = (int)(loop_cnt * (read_cnt * sizeof (uint32_t)));
2877
2878 return (k);
2879 }
2880
2881 /*
2882 * Handling control entries.
2883 */
2884 static int
2885 ql_8021_md_cntrl(ql_adapter_state_t *ha, md_template_hdr_t *template_hdr,
2886 md_entry_cntrl_t *crbEntry)
2887 {
2888 int tflag;
2889 uint32_t opcode, read_value, addr, entry_addr, loop_cnt;
2890 clock_t timeout, elapsed;
2891
2892 entry_addr = crbEntry->addr;
2893
2894 for (loop_cnt = 0; loop_cnt < crbEntry->op_count; loop_cnt++) {
2895 opcode = crbEntry->b.cv.opcode;
2896 if (opcode & QL_DBG_OPCODE_WR) {
2897 ql_8021_write_reg(ha, crbEntry->value_1, entry_addr);
2898 opcode &= ~QL_DBG_OPCODE_WR;
2899 }
2900 if (opcode & QL_DBG_OPCODE_RW) {
2901 read_value = ql_8021_read_reg(ha, entry_addr);
2902 ql_8021_write_reg(ha, read_value, entry_addr);
2903 opcode &= ~QL_DBG_OPCODE_RW;
2904 }
2905 if (opcode & QL_DBG_OPCODE_AND) {
2906 read_value = ql_8021_read_reg(ha, entry_addr);
2907 read_value &= crbEntry->value_2;
2908 opcode &= ~QL_DBG_OPCODE_AND;
2909
2910 /* Checking for OR here to avoid extra register write */
2911 if (opcode & QL_DBG_OPCODE_OR) {
2912 read_value |= crbEntry->value_3;
2913 opcode &= ~QL_DBG_OPCODE_OR;
2914 }
2915 ql_8021_write_reg(ha, read_value, entry_addr);
2916 }
2917 if (opcode & QL_DBG_OPCODE_OR) {
2918 read_value = ql_8021_read_reg(ha, entry_addr);
2919 read_value |= crbEntry->value_3;
2920 ql_8021_write_reg(ha, read_value, entry_addr);
2921 opcode &= ~QL_DBG_OPCODE_OR;
2922 }
2923 if (opcode & QL_DBG_OPCODE_POLL) {
2924 opcode &= ~QL_DBG_OPCODE_POLL;
2925 (void) drv_getparm(LBOLT, &timeout);
2926 timeout += drv_usectohz(
2927 CHAR_TO_SHORT(crbEntry->a.ac.poll_timeout[0],
2928 crbEntry->a.ac.poll_timeout[1]) * 1000) + 1;
2929 addr = entry_addr;
2930 read_value = ql_8021_read_reg(ha, addr);
2931 tflag = 0;
2932 while (!tflag && ((read_value & crbEntry->value_2) !=
2933 crbEntry->value_1)) {
2934 (void) drv_getparm(LBOLT, &elapsed);
2935 if (elapsed > timeout) {
2936 tflag = 1;
2937 }
2938 read_value = ql_8021_read_reg(ha, addr);
2939 }
2940 if (tflag) {
2941 /*
2942 * Report a timeout error; the core dump capture
2943 * failed. Skip the remaining entries and write the
2944 * buffer out to a file, using the driver-specific
2945 * fields in the template header to report this error.
2946 */
2947 EL(ha, "timeout\n");
2948 return (-1);
2949 }
2950 }
2951 if (opcode & QL_DBG_OPCODE_RDSTATE) {
2952 /*
2953 * decide which address to use.
2954 */
2955 if (crbEntry->a.ac.state_index_a) {
2956 addr = template_hdr->saved_state_array[
2957 crbEntry->a.ac.state_index_a];
2958 } else {
2959 addr = entry_addr;
2960 }
2961 read_value = ql_8021_read_reg(ha, addr);
2962 template_hdr->saved_state_array[
2963 crbEntry->b.cv.state_index_v] = read_value;
2964 opcode &= ~QL_DBG_OPCODE_RDSTATE;
2965 }
2966 if (opcode & QL_DBG_OPCODE_WRSTATE) {
2967 /*
2968 * decide which value to use.
2969 */
2970 if (crbEntry->b.cv.state_index_v) {
2971 read_value = template_hdr->saved_state_array[
2972 crbEntry->b.cv.state_index_v];
2973 } else {
2974 read_value = crbEntry->value_1;
2975 }
2976 /*
2977 * decide which address to use.
2978 */
2979 if (crbEntry->a.ac.state_index_a) {
2980 addr = template_hdr->saved_state_array[
2981 crbEntry->a.ac.state_index_a];
2982 } else {
2983 addr = entry_addr;
2984 }
2985 ql_8021_write_reg(ha, read_value, addr);
2986 opcode &= ~QL_DBG_OPCODE_WRSTATE;
2987 }
2988 if (opcode & QL_DBG_OPCODE_MDSTATE) {
2989 /* Read value from saved state using index */
2990 read_value = template_hdr->saved_state_array[
2991 crbEntry->b.cv.state_index_v];
2992 /* Shift left operation */
2993 read_value <<= crbEntry->b.cv.shl;
2994 /* Shift right operation */
2995 read_value >>= crbEntry->b.cv.shr;
2996 /* check if AND mask is provided */
2997 if (crbEntry->value_2) {
2998 read_value &= crbEntry->value_2;
2999 }
3000 read_value |= crbEntry->value_3; /* OR operation */
3001 read_value += crbEntry->value_1; /* inc operation */
3002 /* Write value back to state area. */
3003 template_hdr->saved_state_array[
3004 crbEntry->b.cv.state_index_v] = read_value;
3005 opcode &= ~QL_DBG_OPCODE_MDSTATE;
3006 }
3007 entry_addr += crbEntry->a.ac.addr_stride;
3008 }
3009
3010 return (0);
3011 }
3012
3013 /*
3014 * Error Checking routines for template consistency.
3015 *
3016 * We catch the case where the driver does not read as much
3017 * data from an entry as the template expects.
3018 */
3019 static void
3020 ql_8021_md_entry_err_chk(ql_adapter_state_t *ha, md_entry_t *entry,
3021 uint32_t esize, int e_cnt)
3022 {
3023 if (esize != entry->h.entry_capture_size) {
3024 EL(ha, "%d %04x Dump write count = %d did not match entry "
3025 "capture size = %d entry_count = %d\n", entry->h.entry_type,
3026 entry->h.a.ecw.entry_capture_mask, esize,
3027 entry->h.entry_capture_size, e_cnt);
3028 entry->h.entry_capture_size = esize;
3029 entry->h.a.ecw.driver_flags = (uint8_t)
3030 (entry->h.a.ecw.driver_flags | QL_DBG_SKIPPED_FLAG);
3031 }
3032 }
3033
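/*
 * Sums the template as 32-bit words into a 64-bit accumulator, folds
 * the carries back in, and returns the one's complement; a template
 * with a valid checksum word returns zero here (any non-zero value is
 * treated as a checksum failure by ql_8021_get_md_template).
 */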
3034 static uint32_t
3035 ql_8021_md_template_checksum(ql_adapter_state_t *ha)
3036 {
3037 uint64_t sum = 0;
3038 uint32_t cnt, *bp;
3039
3040 cnt = (uint32_t)(ha->dmp_template.size / sizeof (uint32_t));
3041 bp = ha->dmp_template.bp;
3042 while (cnt-- > 0) {
3043 sum += ddi_get32(ha->dmp_template.acc_handle, bp++);
3044 }
3045 while (sum >> 32) {
3046 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
3047 }
3048 cnt = (uint32_t)~sum;
3049
3050 return (cnt);
3051 }
3052
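/*
 * Side-band CRB register access used by the minidump code: the upper
 * 16 bits of the CRB address are written to the window register at
 * PCI offset 0x00130060, and the register is then accessed through
 * the 64KB window at offset 0x001E0000 plus the low 16 address bits.
 */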
3053 static uint32_t
3054 ql_8021_read_reg(ql_adapter_state_t *ha, uint32_t addr)
3055 {
3056 uint32_t addr0, addr1, value;
3057
3058 (void) ql_8021_crb_win_lock(ha);
3059
3060 addr0 = addr & 0xFFFF0000;
3061 WRT_REG_DWORD(ha, (ha->nx_pcibase + 0x00130060), addr0);
3062 /* PCI posting read */
3063 (void) RD_REG_DWORD(ha, (ha->nx_pcibase + 0x00130060));
3064 addr1 = addr & 0x0000FFFF;
3065 value = RD_REG_DWORD(ha, (ha->nx_pcibase + 0x001E0000 + addr1));
3066
3067 ql_8021_crb_win_unlock(ha);
3068
3069 return (value);
3070 }
3071
3072 static void
3073 ql_8021_write_reg(ql_adapter_state_t *ha, uint32_t value, uint32_t addr)
3074 {
3075 uint32_t addr0, addr1;
3076
3077 (void) ql_8021_crb_win_lock(ha);
3078
3079 addr0 = addr & 0xFFFF0000;
3080 WRT_REG_DWORD(ha, (ha->nx_pcibase + 0x00130060), addr0);
3081 /* PCI posting read */
3082 (void) RD_REG_DWORD(ha, (ha->nx_pcibase + 0x00130060));
3083 addr1 = addr & 0x0000FFFF;
3084 WRT_REG_DWORD(ha, (ha->nx_pcibase + 0x001E0000 + addr1), value);
3085 /* PCI posting read */
3086 (void) RD_REG_DWORD(ha, (ha->nx_pcibase + 0x001E0000 + addr1));
3087
3088 ql_8021_crb_win_unlock(ha);
3089 }
3090
3091 static uint32_t
3092 ql_8021_read_ocm(ql_adapter_state_t *ha, uint32_t addr)
3093 {
3094 uint32_t value;
3095
3096 value = RD_REG_DWORD(ha, (ha->nx_pcibase + addr));
3097
3098 return (value);
3099 }
3100