/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

BFA_TRC_FILE(CNA, IOC_CT);

#define bfa_ioc_ct_sync_pos(__ioc)      \
		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH    16
#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)       (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
			(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
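
/*
 * ioc_fail_sync register layout, as implied by the macros above: bits
 * [15:0] carry per-PCI-fn "sync acked" flags and bits [31:16] the
 * matching "sync required" flags. E.g. for PCI fn 2, sync_pos is 0x4
 * and sync_reqd_pos is 0x40000.
 */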

/*
 * forward declarations
 */
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_set_cur_ioc_fwstate(
			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_set_alt_ioc_fwstate(
			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc);

static struct bfa_ioc_hwif_s hwif_ct;
static struct bfa_ioc_hwif_s hwif_ct2;

/*
 * Return true if the firmware of the current driver matches the running
 * firmware, taking a firmware use-count reference on success.
 */
static bfa_boolean_t
bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr_s fwhdr;

	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/*
	 * If usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		writel(1, ioc->ioc_regs.ioc_usage_reg);
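		/*
		 * The read flushes the use-count update and confirms the
		 * usage semaphore is held; writing 1 then releases it
		 * (cf. bfa_ioc_ct_ownership_reset()).
		 */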
		readl(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		bfa_trc(ioc, usecnt);
		return BFA_TRUE;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
	bfa_trc(ioc, ioc_fwstate);

	/*
	 * The use count cannot be non-zero while the chip is in the
	 * uninitialized state.
	 */
	WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);

	/*
	 * Check if another driver with a different firmware is active
	 */
	bfa_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
		readl(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		bfa_trc(ioc, usecnt);
		return BFA_FALSE;
	}

	/*
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
	bfa_trc(ioc, usecnt);
	return BFA_TRUE;
}

static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
{
	u32 usecnt;

	/*
	 * decrement usage count
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	WARN_ON(usecnt == 0);

	usecnt--;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_trc(ioc, usecnt);

	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
}

/*
 * Notify other functions on heartbeat failure.
 */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_is_cna(ioc)) {
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
		/* Wait for halt to take effect */
		readl(ioc->ioc_regs.ll_halt);
		readl(ioc->ioc_regs.alt_ll_halt);
	} else {
		writel(~0U, ioc->ioc_regs.err_set);
		readl(ioc->ioc_regs.err_set);
	}
}

/*
 * Host to LPU mailbox message addresses
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};
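
/*
 * Note: going by the register names, even-numbered PCI functions use the
 * *_MBOX0_0 mailbox offsets and odd-numbered ones the *_MBOX0_8 offsets
 * within each LPU's mailbox space.
 */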

/*
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct { u32 hfn, lpu; } ct_p0reg[] = {
	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/*
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct { u32 hfn, lpu; } ct_p1reg[] = {
	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; }
	ct2_reg[] = {
	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU0_READ_STAT},
	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU1_READ_STAT},
};

static void
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
{
	void __iomem *rb;
	int		pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg: used to notify heartbeat failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

static void
bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
{
	void __iomem *rb;
	int	port = bfa_ioc_portid(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

	if (port == 0) {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg: used to notify heartbeat failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

/*
 * Initialize IOC to port mapping.
 */

#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
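/*
 * FNC_PERS_REG packs one byte of personality per PCI function (hence the
 * "* 8" shift above); the masks used here and in bfa_ioc_ct_isr_mode_set()
 * (__F0_PORT_MAP_MK, __F0_INTX_STATUS) select fields within that byte.
 */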
static void
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	/*
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}

static void
bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
{
	void __iomem	*rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
	ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}

/*
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32, mode;

	r32 = readl(rb + FNC_PERS_REG);
	bfa_trc(ioc, r32);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/*
	 * If already in desired mode, do not change anything
	 */
	if ((!msix && mode) || (msix && !mode))
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	bfa_trc(ioc, r32);

	writel(r32, rb + FNC_PERS_REG);
}

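/*
 * Check and acknowledge the LPU read-status event for this function.
 * The status register appears to be write-1-to-clear: a non-zero read
 * means an event is pending, and writing 1 acknowledges it.
 */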
bfa_boolean_t
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_read_stat);
	if (r32) {
		writel(1, ioc->ioc_regs.lpu_read_stat);
		return BFA_TRUE;
	}

	return BFA_FALSE;
}

/*
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
{
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(0, ioc->ioc_regs.ioc_usage_reg);
	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);

	writel(0, ioc->ioc_regs.ioc_fail_sync);
	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static bfa_boolean_t
bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time. If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}

/*
 * Synchronized IOC failure processing routines
 */
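/*
 * As implemented below: a failing function first "joins" by setting its
 * sync-required bit, then "acks" by setting its sync-acked bit; the last
 * function whose ack completes the set clears the ack bits and moves both
 * fwstate registers to BFI_IOC_FAIL, and a function "leaves" by clearing
 * both of its bits.
 */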
static void
bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
					bfa_ioc_ct_sync_pos(ioc);

	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);

	writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
		ioc->ioc_regs.ioc_fail_sync);
}

static bfa_boolean_t
bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	uint32_t tmp_ackd;

	if (sync_ackd == 0)
		return BFA_TRUE;

	/*
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
		!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
			ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	/*
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for the hw sem, the sync_ackd bit for
	 * this IOC needs to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return BFA_FALSE;
}

/*
 * Map the hwif calls common to the CT and CT2 ASICs.
 */
static void
bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
{
	hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
	hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
	hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
	hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
	hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
	hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
	hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
	hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
	hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
	hwif->ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate;
	hwif->ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate;
	hwif->ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate;
	hwif->ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate;
}

/*
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
{
	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);

	hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
	ioc->ioc_hwif = &hwif_ct;
}

/*
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
{
	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);

	hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init;
	hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
	hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
	hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat;
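	/* ct2 has no FNC_PERS-style INTX/MSIX personality switch to program */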
	hwif_ct2.ioc_isr_mode_set = NULL;
	ioc->ioc_hwif = &hwif_ct2;
}

/*
 * Workaround for MSI-X resource allocation for catapult-2 with no asic block
 */
#define HOSTFN_MSIX_DEFAULT		64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
#define __MSIX_VT_NUMVT__MK		0x003ff800
#define __MSIX_VT_NUMVT__SH		11
#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_			0x000007ff
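/*
 * Per the masks above, bits [10:0] of HOSTFN_MSIX_VT_OFST_NUMVT hold the
 * first vector index for the function and bits [21:11] hold (numvt - 1).
 * With the default of 64 vectors, PCI fn 1 would be programmed with
 * offset 64 and __MSIX_VT_NUMVT_(63).
 */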
void
bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	if (r32 & __MSIX_VT_NUMVT__MK) {
		writel(r32 & __MSIX_VT_OFST_,
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
		return;
	}

	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
		HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}

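/*
 * PLL init for the original catapult ASIC: program sclk/lclk with logic
 * soft reset asserted, enable the PLLs, wait ~2 ms, release the soft
 * resets, then run the memory BIST.
 */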
bfa_status_t
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	u32	pll_sclk, pll_fclk, r32;
	bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC);

	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
			 __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
	}
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
			rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
			rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
		__APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
		__APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);

	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);
	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}

static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put s_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * Ignore the mode and program for the max clock (which is FC16).
	 * Firmware/NFC will do the PLL init appropriately.
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * while doing PLL init, don't clock gate the ethernet subsystem
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));

	r32 = readl((rb + CT2_PCIE_MISC_REG));
	writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));

	/*
	 * set sclk value
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);
}

static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put l_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set LPU speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32, (rb + CT2_CHIP_MISC_PRG));

	/*
	 * set LPU half speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set lclk for mode (set for FC16)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	r32 |= 0x20c1731b;
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * poll for l_clk lock or delay 1ms
	 */
	udelay(1000);
}

static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
	u32	r32;

	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
	udelay(1000);
	writel(0, (rb + CT2_MBIST_CTL_REG));
}

void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
	/* put port0, port1 MAC & AHB in reset */
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
		rb + CT2_CSI_MAC_CONTROL_REG(0));
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
		rb + CT2_CSI_MAC_CONTROL_REG(1));
}

static void
bfa_ioc_ct2_enable_flash(void __iomem *rb)
{
	u32 r32;

	r32 = readl((rb + PSS_GPIO_OUT_REG));
	writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
	r32 = readl((rb + PSS_GPIO_OE_REG));
	writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
}

#define CT2_NFC_MAX_DELAY	1000
#define CT2_NFC_PAUSE_MAX_DELAY 4000
#define CT2_NFC_VER_VALID	0x147
#define CT2_NFC_STATE_RUNNING   0x20000001
#define BFA_IOC_PLL_POLL	1000000
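
/*
 * The NFC halt/resume loops below pair these counts with udelay(1000),
 * giving roughly a 1 s timeout for CT2_NFC_MAX_DELAY and 4 s for
 * CT2_NFC_PAUSE_MAX_DELAY; BFA_IOC_PLL_POLL bounds the tight readl
 * polling loops in bfa_ioc_ct2_nfc_clk_reset().
 */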

static bfa_boolean_t
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
{
	u32	r32;

	r32 = readl(rb + CT2_NFC_CSR_SET_REG);
	if (r32 & __NFC_CONTROLLER_HALTED)
		return BFA_TRUE;

	return BFA_FALSE;
}

static void
bfa_ioc_ct2_nfc_halt(void __iomem *rb)
{
	int	i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		if (bfa_ioc_ct2_nfc_halted(rb))
			break;
		udelay(1000);
	}
	WARN_ON(!bfa_ioc_ct2_nfc_halted(rb));
}

static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
	u32	r32;
	int i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
		if (!(r32 & __NFC_CONTROLLER_HALTED))
			return;
		udelay(1000);
	}
	WARN_ON(1);
}

static void
bfa_ioc_ct2_clk_reset(void __iomem *rb)
{
	u32 r32;

	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
			(rb + CT2_APP_PLL_SCLK_CTL_REG));

	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
			(rb + CT2_APP_PLL_LCLK_CTL_REG));
}

static void
bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb)
{
	u32 r32, i;

	r32 = readl((rb + PSS_CTL_REG));
	r32 |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
	writel(r32, (rb + PSS_CTL_REG));

	writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG);

	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);

		if ((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
			break;
	}
	WARN_ON(!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));

	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);

		if (!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
			break;
	}
	WARN_ON((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));

	r32 = readl(rb + CT2_CSI_FW_CTL_REG);
	WARN_ON((r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
}

static void
bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb)
{
	u32 r32;
	int i;

	if (bfa_ioc_ct2_nfc_halted(rb))
		bfa_ioc_ct2_nfc_resume(rb);
	for (i = 0; i < CT2_NFC_PAUSE_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_STS_REG);
		if (r32 == CT2_NFC_STATE_RUNNING)
			return;
		udelay(1000);
	}

	r32 = readl(rb + CT2_NFC_STS_REG);
	WARN_ON(r32 != CT2_NFC_STATE_RUNNING);
}

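/*
 * PLL init for CT2: the path taken depends on how the chip comes up --
 * flash good (WGN ready), a valid NFC running (resume it and reset the
 * clocks through it), or no usable NFC (halt it and drive the clock and
 * MAC resets directly).
 */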
bfa_status_t
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	u32 wgn, r32, nfc_ver;

	wgn = readl(rb + CT2_WGN_STATUS);

	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
		/*
		 * If flash is corrupted, enable flash explicitly
		 */
		bfa_ioc_ct2_clk_reset(rb);
		bfa_ioc_ct2_enable_flash(rb);

		bfa_ioc_ct2_mac_reset(rb);

		bfa_ioc_ct2_clk_reset(rb);
		bfa_ioc_ct2_enable_flash(rb);
	} else {
		nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

		if ((nfc_ver >= CT2_NFC_VER_VALID) &&
		    (wgn == (__A2T_AHB_LOAD | __WGN_READY))) {
			bfa_ioc_ct2_wait_till_nfc_running(rb);
			bfa_ioc_ct2_nfc_clk_reset(rb);
		} else {
			bfa_ioc_ct2_nfc_halt(rb);

			bfa_ioc_ct2_clk_reset(rb);
			bfa_ioc_ct2_mac_reset(rb);
			bfa_ioc_ct2_clk_reset(rb);
		}
	}
	/*
	 * The very first PCIe DMA read done by the LPU fails with a fatal
	 * error when the Address Translation Cache (ATC) has been enabled
	 * by the system BIOS.
	 *
	 * Workaround:
	 * Disable the Invalidated Tag Match Enable capability by setting
	 * bit 26 of CHIP_MISC_PRG to 0 (by default it is set to 1).
	 */
	r32 = readl(rb + CT2_CHIP_MISC_PRG);
	writel((r32 & 0xfbffffff), (rb + CT2_CHIP_MISC_PRG));

	/*
	 * Mask the interrupts and clear any
	 * pending interrupts left by BIOS/EFI
	 */
	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

	/* For first time initialization, no need to clear interrupts */
	r32 = readl(rb + HOST_SEM5_REG);
	if (r32 & 0x1) {
		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		}
		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		}
	}

	bfa_ioc_ct2_mem_init(rb);

	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));

	return BFA_STATUS_OK;
}

static void
bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc,
		enum bfi_ioc_state fwstate)
{
	writel(fwstate, ioc->ioc_regs.ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc)
{
	return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
}

static void
bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc,
		enum bfi_ioc_state fwstate)
{
	writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc)
{
	return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate);
}