xref: /freebsd/sys/dev/smartpqi/smartpqi_init.c (revision 1e4896b176ff664dc9c2fce5426bf2fdf8017a7d)
1 /*-
2  * Copyright (c) 2018 Microsemi Corporation.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /* $FreeBSD$ */
28 
29 #include "smartpqi_includes.h"
30 
31 /*
32  * Request the supported PQI capabilities from the adapter.
33  */
34 static int pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
35 {
36 	int ret = PQI_STATUS_SUCCESS;
37 
38 	DBG_FUNC("IN\n");
39 
40 	gen_adm_req_iu_t	admin_req;
41 	gen_adm_resp_iu_t 	admin_resp;
42 	dma_mem_t		pqi_cap_dma_buf;
43 	pqi_dev_cap_t 		*capability = NULL;
44 	pqi_iu_layer_desc_t	*iu_layer_desc = NULL;
45 
46 	/* Allocate Non DMA memory */
47 	capability = os_mem_alloc(softs, sizeof(*capability));
48 	if (!capability) {
49 		DBG_ERR("Failed to allocate memory for capability\n");
50 		ret = PQI_STATUS_FAILURE;
51 		goto err_out;
52 	}
53 
54 	memset(&admin_req, 0, sizeof(admin_req));
55 	memset(&admin_resp, 0, sizeof(admin_resp));
56 
57 	memset(&pqi_cap_dma_buf, 0, sizeof(struct dma_mem));
58 	pqi_cap_dma_buf.tag = "pqi_cap_buf";
59 	pqi_cap_dma_buf.size = REPORT_PQI_DEV_CAP_DATA_BUF_SIZE;
60 	pqi_cap_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
61 
62 	ret = os_dma_mem_alloc(softs, &pqi_cap_dma_buf);
63 	if (ret) {
64 		DBG_ERR("Failed to allocate capability DMA buffer : %d\n", ret);
65 		goto err_dma_alloc;
66 	}
67 
68 	admin_req.fn_code = PQI_FUNCTION_REPORT_DEV_CAP;
69 	admin_req.req_type.general_func.buf_size = pqi_cap_dma_buf.size;
70 	admin_req.req_type.general_func.sg_desc.length = pqi_cap_dma_buf.size;
71 	admin_req.req_type.general_func.sg_desc.addr = pqi_cap_dma_buf.dma_addr;
72 	admin_req.req_type.general_func.sg_desc.type =	SGL_DESCRIPTOR_CODE_DATA_BLOCK;
73 
74 	ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
75 	if (PQI_STATUS_SUCCESS == ret) {
76 		memcpy(capability,
77 			pqi_cap_dma_buf.virt_addr,
78 			pqi_cap_dma_buf.size);
79 	} else {
80 		DBG_ERR("Failed to send admin req report pqi device capability\n");
81 		goto err_admin_req;
82 
83 	}
84 
85 	softs->pqi_dev_cap.max_iqs = capability->max_iqs;
86 	softs->pqi_dev_cap.max_iq_elements = capability->max_iq_elements;
87 	softs->pqi_dev_cap.max_iq_elem_len = capability->max_iq_elem_len;
88 	softs->pqi_dev_cap.min_iq_elem_len = capability->min_iq_elem_len;
89 	softs->pqi_dev_cap.max_oqs = capability->max_oqs;
90 	softs->pqi_dev_cap.max_oq_elements = capability->max_oq_elements;
91 	softs->pqi_dev_cap.max_oq_elem_len = capability->max_oq_elem_len;
92 	softs->pqi_dev_cap.intr_coales_time_granularity = capability->intr_coales_time_granularity;
93 
94 	iu_layer_desc = &capability->iu_layer_desc[PQI_PROTOCOL_SOP];
95 	softs->max_ib_iu_length_per_fw = iu_layer_desc->max_ib_iu_len;
96 	softs->ib_spanning_supported = iu_layer_desc->ib_spanning_supported;
97 	softs->ob_spanning_supported = iu_layer_desc->ob_spanning_supported;
98 
99 	DBG_INIT("softs->pqi_dev_cap.max_iqs: %d\n", softs->pqi_dev_cap.max_iqs);
100 	DBG_INIT("softs->pqi_dev_cap.max_iq_elements: %d\n", softs->pqi_dev_cap.max_iq_elements);
101 	DBG_INIT("softs->pqi_dev_cap.max_iq_elem_len: %d\n", softs->pqi_dev_cap.max_iq_elem_len);
102 	DBG_INIT("softs->pqi_dev_cap.min_iq_elem_len: %d\n", softs->pqi_dev_cap.min_iq_elem_len);
103 	DBG_INIT("softs->pqi_dev_cap.max_oqs: %d\n", softs->pqi_dev_cap.max_oqs);
104 	DBG_INIT("softs->pqi_dev_cap.max_oq_elements: %d\n", softs->pqi_dev_cap.max_oq_elements);
105 	DBG_INIT("softs->pqi_dev_cap.max_oq_elem_len: %d\n", softs->pqi_dev_cap.max_oq_elem_len);
106 	DBG_INIT("softs->pqi_dev_cap.intr_coales_time_granularity: %d\n", softs->pqi_dev_cap.intr_coales_time_granularity);
107 	DBG_INIT("softs->max_ib_iu_length_per_fw: %d\n", softs->max_ib_iu_length_per_fw);
108 	DBG_INIT("softs->ib_spanning_supported: %d\n", softs->ib_spanning_supported);
109 	DBG_INIT("softs->ob_spanning_supported: %d\n", softs->ob_spanning_supported);
110 
111 	os_mem_free(softs, (void *)capability,
112 		    REPORT_PQI_DEV_CAP_DATA_BUF_SIZE);
113 	os_dma_mem_free(softs, &pqi_cap_dma_buf);
114 
115 	DBG_FUNC("OUT\n");
116 	return ret;
117 
118 err_admin_req:
119 	os_dma_mem_free(softs, &pqi_cap_dma_buf);
120 err_dma_alloc:
121 	if (capability)
122 		os_mem_free(softs, (void *)capability,
123 			    REPORT_PQI_DEV_CAP_DATA_BUF_SIZE);
124 err_out:
125 	DBG_FUNC("failed OUT\n");
126 	return PQI_STATUS_FAILURE;
127 }
128 
129 /*
130  * Function used to deallocate the used rcb.
131  */
132 void pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count)
133 {
134 
135 	uint32_t num_req;
136 	size_t size;
137 	int i;
138 
139 	DBG_FUNC("IN\n");
140 	num_req = softs->max_outstanding_io + 1;
141 	size = num_req * sizeof(rcb_t);
142 	for (i = 1; i < req_count; i++)
143 		os_dma_mem_free(softs, &softs->sg_dma_desc[i]);
144 	os_mem_free(softs, (void *)softs->rcb, size);
145 	softs->rcb = NULL;
146 	DBG_FUNC("OUT\n");
147 }
148 
149 /*
150  * Allocate memory for rcb and SG descriptors.
151  */
152 static int pqisrc_allocate_rcb(pqisrc_softstate_t *softs)
153 {
154 	int ret = PQI_STATUS_SUCCESS;
155 	int i = 0;
156 	uint32_t num_req = 0;
157 	uint32_t sg_buf_size = 0;
158 	uint64_t alloc_size = 0;
159 	rcb_t *rcb = NULL;
160 	rcb_t *prcb = NULL;
161 	DBG_FUNC("IN\n");
162 
163 	/* Set maximum outstanding requests */
164 	/* The valid tag values are from 1, 2, ..., softs->max_outstanding_io
165 	 * The rcb will be accessed by using the tag as index
166  * As the 0 tag index is not used, we need to allocate one extra.
167 	 */
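	/*
	 * e.g. with max_outstanding_io = 1024 (an illustrative value), 1025
	 * rcb entries are allocated so that rcb[1]..rcb[1024] can be indexed
	 * directly by tag.
	 */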
168 	softs->max_outstanding_io = softs->pqi_cap.max_outstanding_io;
169 	num_req = softs->max_outstanding_io + 1;
170 	DBG_INIT("Max Outstanding IO reset to %d\n", num_req);
171 
172 	alloc_size = num_req * sizeof(rcb_t);
173 
174 	/* Allocate Non DMA memory */
175 	rcb = os_mem_alloc(softs, alloc_size);
176 	if (!rcb) {
177 		DBG_ERR("Failed to allocate memory for rcb\n");
178 		ret = PQI_STATUS_FAILURE;
179 		goto err_out;
180 	}
181 	softs->rcb = rcb;
182 
183 	/* Allocate sg dma memory for sg chain  */
184 	sg_buf_size = softs->pqi_cap.max_sg_elem *
185 			sizeof(sgt_t);
186 
187 	prcb = &softs->rcb[1];
188 	/* Initialize rcb */
189 	for(i=1; i < num_req; i++) {
190 		char tag[15];
191 		sprintf(tag, "sg_dma_buf%d", i);
192 		softs->sg_dma_desc[i].tag = tag;
193 		softs->sg_dma_desc[i].size = sg_buf_size;
194 		softs->sg_dma_desc[i].align = PQISRC_DEFAULT_DMA_ALIGN;
195 
196 		ret = os_dma_mem_alloc(softs, &softs->sg_dma_desc[i]);
197 		if (ret) {
198 			DBG_ERR("Failed to Allocate sg desc %d\n", ret);
199 			ret = PQI_STATUS_FAILURE;
200 			goto error;
201 		}
202 		prcb->sg_chain_virt = (sgt_t *)(softs->sg_dma_desc[i].virt_addr);
203 		prcb->sg_chain_dma = (dma_addr_t)(softs->sg_dma_desc[i].dma_addr);
204 		prcb ++;
205 	}
206 
207 	DBG_FUNC("OUT\n");
208 	return ret;
209 error:
210 	pqisrc_free_rcb(softs, i);
211 err_out:
212 	DBG_FUNC("failed OUT\n");
213 	return ret;
214 }
215 
216 /*
217  * Function used to decide the operational queue configuration params
218  * - no of ibq/obq, shared/non-shared interrupt resource, IU spanning support
219  */
220 void pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
221 {
222 	uint16_t total_iq_elements;
223 
224 	DBG_FUNC("IN\n");
225 
226 	DBG_INIT("softs->intr_count : %d  softs->num_cpus_online : %d\n",
227 		softs->intr_count, softs->num_cpus_online);
228 
229 	if (softs->intr_count == 1 || softs->num_cpus_online == 1) {
230 		/* Share the event and Operational queue. */
231 		softs->num_op_obq = 1;
232 		softs->share_opq_and_eventq = true;
233 	}
234 	else {
235 		/* Note :  One OBQ (OBQ0) reserved for event queue */
236 		softs->num_op_obq = MIN(softs->num_cpus_online,
237 					softs->intr_count) - 1;
238 		softs->num_op_obq = softs->intr_count - 1;
239 		softs->share_opq_and_eventq = false;
240 	}
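	/*
	 * Illustrative example (hypothetical values): with 16 online CPUs and
	 * 16 interrupt vectors, the else-branch above yields 15 operational
	 * OBQs, while OBQ0 stays reserved for the event queue.
	 */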
241 
242 	/*
243 	 * softs->num_cpus_online is set to the number of physical CPUs,
244 	 * so we can have more queues/interrupts.
245 	 */
246 	if (softs->intr_count > 1)
247 		softs->share_opq_and_eventq = false;
248 
249 	DBG_INIT("softs->num_op_obq : %d\n",softs->num_op_obq);
250 
251 	softs->num_op_raid_ibq = softs->num_op_obq;
252 	softs->num_op_aio_ibq = softs->num_op_raid_ibq;
253 	softs->ibq_elem_size =  softs->pqi_dev_cap.max_iq_elem_len * 16;
254 	softs->obq_elem_size = softs->pqi_dev_cap.max_oq_elem_len * 16;
255 	if (softs->max_ib_iu_length_per_fw == 256 &&
256 	    softs->ob_spanning_supported) {
257 		/* older f/w that doesn't actually support spanning. */
258 		softs->max_ib_iu_length = softs->ibq_elem_size;
259 	} else {
260 		/* max. inbound IU length is a multiple of our inbound element size. */
261 		softs->max_ib_iu_length =
262 			(softs->max_ib_iu_length_per_fw / softs->ibq_elem_size) *
263 			 softs->ibq_elem_size;
264 
265 	}
266 	/* The total number of IQ elements needed is the max. outstanding IO
267 		count multiplied by the number of elements each (possibly
268 		spanned) IU occupies. */
269 	total_iq_elements = (softs->max_outstanding_io *
270 		(softs->max_ib_iu_length / softs->ibq_elem_size));
271 
272 	softs->num_elem_per_op_ibq = total_iq_elements / softs->num_op_raid_ibq;
273 	softs->num_elem_per_op_ibq = MIN(softs->num_elem_per_op_ibq,
274 		softs->pqi_dev_cap.max_iq_elements);
275 
276 	softs->num_elem_per_op_obq = softs->max_outstanding_io / softs->num_op_obq;
277 	softs->num_elem_per_op_obq = MIN(softs->num_elem_per_op_obq,
278 		softs->pqi_dev_cap.max_oq_elements);
279 
280 	softs->max_sg_per_iu = ((softs->max_ib_iu_length -
281 				softs->ibq_elem_size) /
282 				sizeof(sgt_t)) +
283 				MAX_EMBEDDED_SG_IN_FIRST_IU;
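	/*
	 * Worked example with purely illustrative numbers: if the firmware
	 * reports max_iq_elem_len = 8 (converted above to ibq_elem_size =
	 * 8 * 16 = 128 bytes) and max_ib_iu_length_per_fw = 1024 with
	 * spanning, then max_ib_iu_length = (1024 / 128) * 128 = 1024, each
	 * spanned IU occupies 8 IQ elements, and with max_outstanding_io =
	 * 1024 the total_iq_elements above is 1024 * 8 = 8192, which is then
	 * divided across the RAID IB queues and capped at max_iq_elements.
	 * max_sg_per_iu is likewise ((1024 - 128) / sizeof(sgt_t)) +
	 * MAX_EMBEDDED_SG_IN_FIRST_IU.
	 */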
284 
285 	DBG_INIT("softs->max_ib_iu_length: %d\n", softs->max_ib_iu_length);
286 	DBG_INIT("softs->num_elem_per_op_ibq: %d\n", softs->num_elem_per_op_ibq);
287 	DBG_INIT("softs->num_elem_per_op_obq: %d\n", softs->num_elem_per_op_obq);
288 	DBG_INIT("softs->max_sg_per_iu: %d\n", softs->max_sg_per_iu);
289 
290 	DBG_FUNC("OUT\n");
291 }
292 
293 /*
294  * Configure the operational queue parameters.
295  */
296 int pqisrc_configure_op_queues(pqisrc_softstate_t *softs)
297 {
298 	int ret = PQI_STATUS_SUCCESS;
299 
300 	/* Get the PQI capabilities by issuing a
301 		REPORT PQI DEVICE CAPABILITY admin request */
302 	ret = pqisrc_report_pqi_capability(softs);
303 	if (ret) {
304 		DBG_ERR("Failed to send report pqi dev capability request : %d\n",
305 				ret);
306 		goto err_out;
307 	}
308 
309 	/* Reserve the required number of slots for internal requests */
310 	softs->max_io_for_scsi_ml = softs->max_outstanding_io - PQI_RESERVED_IO_SLOTS_CNT;
311 
312 	/* Decide the Op queue configuration */
313 	pqisrc_decide_opq_config(softs);
314 
315 	DBG_FUNC("OUT\n");
316 	return ret;
317 
318 err_out:
319 	DBG_FUNC("OUT failed\n");
320 	return ret;
321 }
322 
323 /*
324  * Validate the PQI mode of adapter.
325  */
326 int pqisrc_check_pqimode(pqisrc_softstate_t *softs)
327 {
328 	int ret = PQI_STATUS_FAILURE;
329 	int tmo = 0;
330 	uint64_t signature = 0;
331 
332 	DBG_FUNC("IN\n");
333 
334 	/* Check the PQI device signature */
335 	tmo = PQISRC_PQIMODE_READY_TIMEOUT;
336 	do {
337 		signature = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->signature, PQI_SIGNATURE));
338 
339 		if (memcmp(&signature, PQISRC_PQI_DEVICE_SIGNATURE,
340 				sizeof(uint64_t)) == 0) {
341 			ret = PQI_STATUS_SUCCESS;
342 			break;
343 		}
344 		OS_SLEEP(PQISRC_MODE_READY_POLL_INTERVAL);
345 	} while (tmo--);
346 
347 	PRINT_PQI_SIGNATURE(signature);
348 
349 	if (tmo <= 0) {
350 		DBG_ERR("PQI Signature is invalid\n");
351 		ret = PQI_STATUS_TIMEOUT;
352 		goto err_out;
353 	}
354 
355 	tmo = PQISRC_PQIMODE_READY_TIMEOUT;
356 	/* Check function and status code for the device */
357 	COND_WAIT((PCI_MEM_GET64(softs, &softs->pqi_reg->admin_q_config,
358 		PQI_ADMINQ_CONFIG) == PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE), tmo);
359 	if (!tmo) {
360 		DBG_ERR("PQI device is not in IDLE state\n");
361 		ret = PQI_STATUS_TIMEOUT;
362 		goto err_out;
363 	}
364 
365 	tmo = PQISRC_PQIMODE_READY_TIMEOUT;
366 	/* Check the PQI device status register */
367 	COND_WAIT(LE_32(PCI_MEM_GET32(softs, &softs->pqi_reg->pqi_dev_status, PQI_DEV_STATUS)) &
368 				PQI_DEV_STATE_AT_INIT, tmo);
369 	if (!tmo) {
370 		DBG_ERR("PQI Registers are not ready\n");
371 		ret = PQI_STATUS_TIMEOUT;
372 		goto err_out;
373 	}
374 
375 	DBG_FUNC("OUT\n");
376 	return ret;
377 err_out:
378 	DBG_FUNC("OUT failed\n");
379 	return ret;
380 }
381 
382 /*
383  * Get the PQI configuration table parameters.
384  * Currently used for the heart-beat counter scratch-pad register.
385  */
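/*
 * Rough sketch of the table walked below: a pqi_conf_table header carrying a
 * signature (verified with memcmp) and first_section_off, followed by a chain
 * of pqi_conf_table_section_header entries, each identified by section_id and
 * linked through next_section_off (an offset of 0 ends the walk).
 */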
386 int pqisrc_process_config_table(pqisrc_softstate_t *softs)
387 {
388 	int ret = PQI_STATUS_FAILURE;
389 	uint32_t config_table_size;
390 	uint32_t section_off;
391 	uint8_t *config_table_abs_addr;
392 	struct pqi_conf_table *conf_table;
393 	struct pqi_conf_table_section_header *section_hdr;
394 
395 	config_table_size = softs->pqi_cap.conf_tab_sz;
396 
397 	if (config_table_size < sizeof(*conf_table) ||
398 		config_table_size > PQI_CONF_TABLE_MAX_LEN) {
399 		DBG_ERR("Invalid PQI conf table length of %u\n",
400 			config_table_size);
401 		return ret;
402 	}
403 
404 	conf_table = os_mem_alloc(softs, config_table_size);
405 	if (!conf_table) {
406 		DBG_ERR("Failed to allocate memory for PQI conf table\n");
407 		return ret;
408 	}
409 
410 	config_table_abs_addr = (uint8_t *)(softs->pci_mem_base_vaddr +
411 					softs->pqi_cap.conf_tab_off);
412 
413 	PCI_MEM_GET_BUF(softs, config_table_abs_addr,
414 			softs->pqi_cap.conf_tab_off,
415 			(uint8_t*)conf_table, config_table_size);
416 
417 	if (memcmp(conf_table->sign, PQI_CONF_TABLE_SIGNATURE,
418 			sizeof(conf_table->sign)) != 0) {
419 		DBG_ERR("Invalid PQI config signature\n");
420 		goto out;
421 	}
422 
423 	section_off = LE_32(conf_table->first_section_off);
424 
425 	while (section_off) {
426 		if (section_off + sizeof(*section_hdr) >= config_table_size) {
427 			DBG_ERR("PQI config table section offset (%u) beyond "
428 				"end of config table (config table length: %u)\n",
429 				section_off, config_table_size);
430 			break;
431 		}
432 
433 		section_hdr = (struct pqi_conf_table_section_header *)((uint8_t *)conf_table + section_off);
434 
435 		switch (LE_16(section_hdr->section_id)) {
436 		case PQI_CONF_TABLE_SECTION_GENERAL_INFO:
437 		case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES:
438 		case PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA:
439 		case PQI_CONF_TABLE_SECTION_DEBUG:
440 			break;
441 		case PQI_CONF_TABLE_SECTION_HEARTBEAT:
442 			softs->heartbeat_counter_off = softs->pqi_cap.conf_tab_off +
443 				section_off +
444 				offsetof(struct pqi_conf_table_heartbeat,
445 					heartbeat_counter);
446 			softs->heartbeat_counter_abs_addr = (uint64_t *)(softs->pci_mem_base_vaddr +
447 				softs->heartbeat_counter_off);
448 			ret = PQI_STATUS_SUCCESS;
449 			break;
450 		default:
451 			DBG_INFO("unrecognized PQI config table section ID: 0x%x\n",
452 				LE_16(section_hdr->section_id));
453 			break;
454 		}
455 		section_off = LE_16(section_hdr->next_section_off);
456 	}
457 out:
458 	os_mem_free(softs, (void *)conf_table, config_table_size);
459 	return ret;
460 }
461 
462 /* Wait for PQI reset completion for the adapter */
463 int pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs)
464 {
465 	int ret = PQI_STATUS_SUCCESS;
466 	pqi_reset_reg_t reset_reg;
467 	int pqi_reset_timeout = 0;
468 	uint64_t val = 0;
469 	uint32_t max_timeout = 0;
470 
471 	val = PCI_MEM_GET64(softs, &softs->pqi_reg->pqi_dev_adminq_cap, PQI_ADMINQ_CAP);
472 
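	/*
	 * Bits 47:32 of the admin queue capability register are treated here
	 * as the maximum reset-completion wait time, counted in 100 msec poll
	 * intervals (see the OS_SLEEP below).
	 */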
473 	max_timeout = (val & 0xFFFF00000000) >> 32;
474 
475 	DBG_INIT("max_timeout for PQI reset completion in 100 msec units = %u\n", max_timeout);
476 
477 	while(1) {
478 		if (pqi_reset_timeout++ == max_timeout) {
479 			return PQI_STATUS_TIMEOUT;
480 		}
481 		OS_SLEEP(PQI_RESET_POLL_INTERVAL);/* 100 msec */
482 		reset_reg.all_bits = PCI_MEM_GET32(softs,
483 			&softs->pqi_reg->dev_reset, PQI_DEV_RESET);
484 		if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
485 			break;
486 	}
487 
488 	return ret;
489 }
490 
491 /*
492  * Function used to perform PQI hard reset.
493  */
494 int pqi_reset(pqisrc_softstate_t *softs)
495 {
496 	int ret = PQI_STATUS_SUCCESS;
497 	uint32_t val = 0;
498 	pqi_reset_reg_t pqi_reset_reg;
499 
500 	DBG_FUNC("IN\n");
501 
502 	if (true == softs->ctrl_in_pqi_mode) {
503 
504 		if (softs->pqi_reset_quiesce_allowed) {
505 			val = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
506 					LEGACY_SIS_IDBR);
507 			val |= SIS_PQI_RESET_QUIESCE;
508 			PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
509 					LEGACY_SIS_IDBR, LE_32(val));
510 			ret = pqisrc_sis_wait_for_db_bit_to_clear(softs, SIS_PQI_RESET_QUIESCE);
511 			if (ret) {
512 				DBG_ERR("failed with error %d during quiesce\n", ret);
513 				return ret;
514 			}
515 		}
516 
517 		pqi_reset_reg.all_bits = 0;
518 		pqi_reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
519 		pqi_reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
520 
521 		PCI_MEM_PUT32(softs, &softs->pqi_reg->dev_reset, PQI_DEV_RESET,
522 			LE_32(pqi_reset_reg.all_bits));
523 
524 		ret = pqisrc_wait_for_pqi_reset_completion(softs);
525 		if (ret) {
526 			DBG_ERR("PQI reset timed out: ret = %d!\n", ret);
527 			return ret;
528 		}
529 	}
530 	softs->ctrl_in_pqi_mode = false;
531 	DBG_FUNC("OUT\n");
532 	return ret;
533 }
534 
535 /*
536  * Initialize the adapter with supported PQI configuration.
537  */
538 int pqisrc_pqi_init(pqisrc_softstate_t *softs)
539 {
540 	int ret = PQI_STATUS_SUCCESS;
541 
542 	DBG_FUNC("IN\n");
543 
544 	/* Check the PQI signature */
545 	ret = pqisrc_check_pqimode(softs);
546 	if (ret) {
547 		DBG_ERR("Failed to switch to PQI mode\n");
548 		goto err_out;
549 	}
550 
551 	PQI_SAVE_CTRL_MODE(softs, CTRL_PQI_MODE);
552 	softs->ctrl_in_pqi_mode = true;
553 
554 	/* Get the number of online CPUs and NUMA/processor config from the OS */
555 	ret = os_get_processor_config(softs);
556 	if (ret) {
557 		DBG_ERR("Failed to get processor config from OS %d\n",
558 			ret);
559 		goto err_out;
560 	}
561 
562 	softs->intr_type = INTR_TYPE_NONE;
563 
564 	/* Get the interrupt count, type, priority available from OS */
565 	ret = os_get_intr_config(softs);
566 	if (ret) {
567 		DBG_ERR("Failed to get interrupt config from OS %d\n",
568 			ret);
569 		goto err_out;
570 	}
571 
572 	/* Set the PQI legacy INTx interrupt mask clear register (i.e. enable
573 	 * INTx) if the allocated interrupt is of legacy type.
574 	 */
575 	if (INTR_TYPE_FIXED == softs->intr_type) {
576 		pqisrc_configure_legacy_intx(softs, true);
577 		sis_enable_intx(softs);
578 	}
579 
580 	/* Create Admin Queue pair*/
581 	ret = pqisrc_create_admin_queue(softs);
582 	if (ret) {
583 		DBG_ERR("Failed to configure admin queue\n");
584 		goto err_admin_queue;
585 	}
586 
587 	/* Creating the event and IO operational queues requires submitting
588 	   admin IU requests, so allocate resources for submitting IUs. */
589 
590 	/* Allocate the request container block (rcb) */
591 	ret = pqisrc_allocate_rcb(softs);
592 	if (ret == PQI_STATUS_FAILURE) {
593 		DBG_ERR("Failed to allocate rcb\n");
594 		goto err_rcb;
595 	}
596 
597 	/* Allocate & initialize request id queue */
598 	ret = pqisrc_init_taglist(softs,&softs->taglist,
599 				softs->max_outstanding_io);
600 	if (ret) {
601 		DBG_ERR("Failed to allocate memory for request id q : %d\n",
602 			ret);
603 		goto err_taglist;
604 	}
605 
606 	ret = pqisrc_configure_op_queues(softs);
607 	if (ret) {
608 		DBG_ERR("Failed to configure op queue\n");
609 		goto err_config_opq;
610 	}
611 
612 	/* Create Operational queues */
613 	ret = pqisrc_create_op_queues(softs);
614 	if (ret) {
615 		DBG_ERR("Failed to create op queue\n");
616 		ret = PQI_STATUS_FAILURE;
617 		goto err_create_opq;
618 	}
619 
620 	softs->ctrl_online = true;
621 
622 	DBG_FUNC("OUT\n");
623 	return ret;
624 
625 err_create_opq:
626 err_config_opq:
627 	pqisrc_destroy_taglist(softs,&softs->taglist);
628 err_taglist:
629 	pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
630 err_rcb:
631 	pqisrc_destroy_admin_queue(softs);
632 err_admin_queue:
633 	os_free_intr_config(softs);
634 err_out:
635 	DBG_FUNC("OUT failed\n");
636 	return PQI_STATUS_FAILURE;
637 }
638 
639 /* Force the controller back to SIS mode. */
640 int pqisrc_force_sis(pqisrc_softstate_t *softs)
641 {
642 	int ret = PQI_STATUS_SUCCESS;
643 
644 	if (SIS_IS_KERNEL_PANIC(softs)) {
645 		DBG_INIT("Controller FW is not running");
646 		return PQI_STATUS_FAILURE;
647 	}
648 
649 	if (PQI_GET_CTRL_MODE(softs) == CTRL_SIS_MODE) {
650 		return ret;
651 	}
652 
653 	if (SIS_IS_KERNEL_UP(softs)) {
654 		PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
655 		return ret;
656 	}
657 	/* Disable interrupts */
658 	sis_disable_interrupt(softs);
659 
660 	/* reset pqi, this will delete queues */
661 	ret = pqi_reset(softs);
662 	if (ret) {
663 		return ret;
664 	}
665 	/* Re-enable SIS */
666 	ret = pqisrc_reenable_sis(softs);
667 	if (ret) {
668 		return ret;
669 	}
670 
671 	PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
672 
673 	return ret;
674 }
675 
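/*
 * Wait for outstanding commands to drain: the taglist holds the free request
 * ids, so once its element count reaches max_outstanding_io no command is
 * still in flight.
 */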
676 int pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *softs)
677 {
678 	int ret = PQI_STATUS_SUCCESS;
679 	int tmo = PQI_CMND_COMPLETE_TMO;
680 
681 	COND_WAIT((softs->taglist.num_elem == softs->max_outstanding_io), tmo);
682 	if (!tmo) {
683 		DBG_ERR("Pending commands %x!!!",softs->taglist.num_elem);
684 		ret = PQI_STATUS_TIMEOUT;
685 	}
686 	return ret;
687 }
688 
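/*
 * Mark any still-pending internal (driver-generated) requests as failed so
 * that whatever is waiting on them can make progress during uninit.
 */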
689 void pqisrc_complete_internal_cmds(pqisrc_softstate_t *softs)
690 {
691 	int tag = 0;
692 	rcb_t *rcb;
693 
694 	for (tag = 1; tag <= softs->max_outstanding_io; tag++) {
695 		rcb = &softs->rcb[tag];
696 		if(rcb->req_pending && is_internal_req(rcb)) {
697 			rcb->status = REQUEST_FAILED;
698 			rcb->req_pending = false;
699 		}
700 	}
701 }
702 
703 /*
704  * Uninitialize the resources used during PQI initialization.
705  */
706 void pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
707 {
708 	int i, ret;
709 
710 	DBG_FUNC("IN\n");
711 
712 	/* Wait for any rescan to finish */
713 	pqisrc_wait_for_rescan_complete(softs);
714 
715 	/* Wait for commands to complete */
716 	ret = pqisrc_wait_for_cmnd_complete(softs);
717 
718 	/* Complete all pending commands. */
719 	if(ret != PQI_STATUS_SUCCESS) {
720 		pqisrc_complete_internal_cmds(softs);
721 		os_complete_outstanding_cmds_nodevice(softs);
722 	}
723 
724 	if (softs->devlist_lockcreated == true) {
725 		os_uninit_spinlock(&softs->devlist_lock);
726 		softs->devlist_lockcreated = false;
727 	}
728 
729 	for (i = 0; i < softs->num_op_raid_ibq; i++) {
730 		/* OP RAID IB Q */
731 		if (softs->op_raid_ib_q[i].lockcreated == true) {
732 			OS_UNINIT_PQILOCK(&softs->op_raid_ib_q[i].lock);
733 			softs->op_raid_ib_q[i].lockcreated = false;
734 		}
735 
736 		/* OP AIO IB Q */
737 		if (softs->op_aio_ib_q[i].lockcreated == true) {
738 			OS_UNINIT_PQILOCK(&softs->op_aio_ib_q[i].lock);
739 			softs->op_aio_ib_q[i].lockcreated = false;
740 		}
741 	}
742 
743 	/* Free Op queues */
744 	os_dma_mem_free(softs, &softs->op_ibq_dma_mem);
745 	os_dma_mem_free(softs, &softs->op_obq_dma_mem);
746 	os_dma_mem_free(softs, &softs->event_q_dma_mem);
747 
748 	/* Free  rcb */
749 	pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
750 
751 	/* Free request id lists */
752 	pqisrc_destroy_taglist(softs,&softs->taglist);
753 
754 	if (softs->admin_ib_queue.lockcreated == true) {
755 		OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock);
756 		softs->admin_ib_queue.lockcreated = false;
757 	}
758 
759 	/* Free Admin Queue */
760 	os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
761 
762 	/* Switch back to SIS mode */
763 	if (pqisrc_force_sis(softs)) {
764 		DBG_ERR("Failed to switch back the adapter to SIS mode!\n");
765 	}
766 
767 	DBG_FUNC("OUT\n");
768 }
769 
770 /*
771  * Function to initialize the adapter settings.
772  */
773 int pqisrc_init(pqisrc_softstate_t *softs)
774 {
775 	int ret = 0;
776 	int i = 0, j = 0;
777 
778 	DBG_FUNC("IN\n");
779 
780 	check_struct_sizes();
781 
782 	/* Init the Sync interface */
783 	ret = pqisrc_sis_init(softs);
784 	if (ret) {
785 		DBG_ERR("SIS Init failed with error %d\n", ret);
786 		goto err_out;
787 	}
788 
789 	ret = os_create_semaphore("scan_lock", 1, &softs->scan_lock);
790 	if(ret != PQI_STATUS_SUCCESS){
791 		DBG_ERR(" Failed to initialize scan lock\n");
792 		goto err_scan_lock;
793 	}
794 
795 	/* Init the PQI interface */
796 	ret = pqisrc_pqi_init(softs);
797 	if (ret) {
798 		DBG_ERR("PQI Init failed with error %d\n", ret);
799 		goto err_pqi;
800 	}
801 
802 	/* Setup interrupt */
803 	ret = os_setup_intr(softs);
804 	if (ret) {
805 		DBG_ERR("Interrupt setup failed with error %d\n", ret);
806 		goto err_intr;
807 	}
808 
809 	/* Report event configuration */
810 	ret = pqisrc_report_event_config(softs);
811 	if (ret) {
812 		DBG_ERR("Failed to configure Report events\n");
813 		goto err_event;
814 	}
815 
816 	/* Set event configuration */
817 	ret = pqisrc_set_event_config(softs);
818 	if (ret) {
819 		DBG_ERR("Failed to configure Set events\n");
820 		goto err_event;
821 	}
822 
823 	/* Check for PQI spanning */
824 	ret = pqisrc_get_ctrl_fw_version(softs);
825 	if (ret) {
826 		DBG_ERR("Failed to get ctrl fw version\n");
827 		goto err_fw_version;
828 	}
829 
830 	/* Update the driver version in the FW */
831 	ret = pqisrc_write_driver_version_to_host_wellness(softs);
832 	if (ret) {
833 		DBG_ERR("Failed to update driver version in the FW\n");
834 		goto err_host_wellness;
835 	}
836 
837 
838 	os_strlcpy(softs->devlist_lock_name, "devlist_lock", LOCKNAME_SIZE);
839 	ret = os_init_spinlock(softs, &softs->devlist_lock, softs->devlist_lock_name);
840 	if(ret){
841 		DBG_ERR(" Failed to initialize devlist_lock\n");
842 		softs->devlist_lockcreated=false;
843 		goto err_lock;
844 	}
845 	softs->devlist_lockcreated = true;
846 
847 	OS_ATOMIC64_SET(softs, num_intrs, 0);
848 	softs->prev_num_intrs = softs->num_intrs;
849 
850 	/* Get the PQI configuration table to read the heart-beat counter */
851 	if (PQI_NEW_HEARTBEAT_MECHANISM(softs)) {
852 		ret = pqisrc_process_config_table(softs);
853 		if (ret) {
854 			DBG_ERR("Failed to process PQI configuration table %d\n", ret);
855 			goto err_config_tab;
856 		}
857 	}
858 
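	/*
	 * Presumably this primes the first heartbeat check: starting one
	 * timer interval behind the current counter lets the initial
	 * comparison see forward progress.
	 */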
859 	if (PQI_NEW_HEARTBEAT_MECHANISM(softs))
860 		softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs) - OS_FW_HEARTBEAT_TIMER_INTERVAL;
861 
862 	/* Init device list */
863 	for(i = 0; i < PQI_MAX_DEVICES; i++)
864 		for(j = 0; j < PQI_MAX_MULTILUN; j++)
865 			softs->device_list[i][j] = NULL;
866 
867 	pqisrc_init_targetid_pool(softs);
868 
869 	DBG_FUNC("OUT\n");
870 	return ret;
871 
872 err_config_tab:
873 	if(softs->devlist_lockcreated==true){
874 		os_uninit_spinlock(&softs->devlist_lock);
875 		softs->devlist_lockcreated = false;
876 	}
877 err_lock:
878 err_fw_version:
879 err_event:
880 err_host_wellness:
881 	os_destroy_intr(softs);
882 err_intr:
883 	pqisrc_pqi_uninit(softs);
884 err_pqi:
885 	os_destroy_semaphore(&softs->scan_lock);
886 err_scan_lock:
887 	pqisrc_sis_uninit(softs);
888 err_out:
889 	DBG_FUNC("OUT failed\n");
890 	return ret;
891 }
892 
893 /*
894  * Write all data in the adapter's battery-backed cache to
895  * storage.
896  */
897 int pqisrc_flush_cache( pqisrc_softstate_t *softs,
898 			enum pqisrc_flush_cache_event_type event_type)
899 {
900 	int rval = PQI_STATUS_SUCCESS;
901 	pqisrc_raid_req_t request;
902 	pqisrc_bmic_flush_cache_t *flush_buff = NULL;
903 
904 	DBG_FUNC("IN\n");
905 
906 	if (pqisrc_ctrl_offline(softs))
907 		return PQI_STATUS_FAILURE;
908 
909 	flush_buff = os_mem_alloc(softs, sizeof(pqisrc_bmic_flush_cache_t));
910 	if (!flush_buff) {
911 		DBG_ERR("Failed to allocate memory for flush cache params\n");
912 		rval = PQI_STATUS_FAILURE;
913 		return rval;
914 	}
915 
916 	flush_buff->halt_event = event_type;
917 
918 	memset(&request, 0, sizeof(request));
919 
920 	rval = pqisrc_build_send_raid_request(softs, &request, flush_buff,
921 			sizeof(*flush_buff), SA_CACHE_FLUSH, 0,
922 			(uint8_t *)RAID_CTLR_LUNID, NULL);
923 	if (rval) {
924 		DBG_ERR("error in build send raid req ret=%d\n", rval);
925 	}
926 
927 	if (flush_buff)
928 		os_mem_free(softs, (void *)flush_buff,
929 			sizeof(pqisrc_bmic_flush_cache_t));
930 
931 	DBG_FUNC("OUT\n");
932 
933 	return rval;
934 }
935 
936 /*
937  * Uninitialize the adapter.
938  */
939 void pqisrc_uninit(pqisrc_softstate_t *softs)
940 {
941 	DBG_FUNC("IN\n");
942 
943 	pqisrc_pqi_uninit(softs);
944 
945 	pqisrc_sis_uninit(softs);
946 
947 	os_destroy_semaphore(&softs->scan_lock);
948 
949 	os_destroy_intr(softs);
950 
951 	pqisrc_cleanup_devices(softs);
952 
953 	DBG_FUNC("OUT\n");
954 }
955