/*-
 * Copyright (c) 2018 Microsemi Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */

#include "smartpqi_includes.h"

/*
 * Request the adapter to report the PQI capabilities it supports.
 */
static int pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_SUCCESS;
	gen_adm_req_iu_t	admin_req;
	gen_adm_resp_iu_t	admin_resp;
	dma_mem_t		pqi_cap_dma_buf;
	pqi_dev_cap_t		*capability = NULL;
	pqi_iu_layer_desc_t	*iu_layer_desc = NULL;

	DBG_FUNC("IN\n");

	/* Allocate non-DMA memory for the capability structure */
	capability = os_mem_alloc(softs, sizeof(*capability));
	if (!capability) {
		DBG_ERR("Failed to allocate memory for capability\n");
		ret = PQI_STATUS_FAILURE;
		goto err_out;
	}

	memset(&admin_req, 0, sizeof(admin_req));
	memset(&admin_resp, 0, sizeof(admin_resp));

	memset(&pqi_cap_dma_buf, 0, sizeof(struct dma_mem));
	pqi_cap_dma_buf.tag = "pqi_cap_buf";
	pqi_cap_dma_buf.size = REPORT_PQI_DEV_CAP_DATA_BUF_SIZE;
	pqi_cap_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;

	ret = os_dma_mem_alloc(softs, &pqi_cap_dma_buf);
	if (ret) {
		DBG_ERR("Failed to allocate capability DMA buffer: %d\n", ret);
		goto err_dma_alloc;
	}

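	/*
	 * The REPORT PQI DEVICE CAPABILITY admin IU carries a single
	 * DATA_BLOCK SGL descriptor, so the entire capability page is
	 * returned into the one contiguous DMA buffer allocated above.
	 */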
	admin_req.fn_code = PQI_FUNCTION_REPORT_DEV_CAP;
	admin_req.req_type.general_func.buf_size = pqi_cap_dma_buf.size;
	admin_req.req_type.general_func.sg_desc.length = pqi_cap_dma_buf.size;
	admin_req.req_type.general_func.sg_desc.addr = pqi_cap_dma_buf.dma_addr;
	admin_req.req_type.general_func.sg_desc.type = SGL_DESCRIPTOR_CODE_DATA_BLOCK;

	ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
	if (ret == PQI_STATUS_SUCCESS) {
		memcpy(capability,
			pqi_cap_dma_buf.virt_addr,
			pqi_cap_dma_buf.size);
	} else {
		DBG_ERR("Failed to send REPORT PQI DEVICE CAPABILITY admin request\n");
		goto err_admin_req;
	}

	softs->pqi_dev_cap.max_iqs = capability->max_iqs;
	softs->pqi_dev_cap.max_iq_elements = capability->max_iq_elements;
	softs->pqi_dev_cap.max_iq_elem_len = capability->max_iq_elem_len;
	softs->pqi_dev_cap.min_iq_elem_len = capability->min_iq_elem_len;
	softs->pqi_dev_cap.max_oqs = capability->max_oqs;
	softs->pqi_dev_cap.max_oq_elements = capability->max_oq_elements;
	softs->pqi_dev_cap.max_oq_elem_len = capability->max_oq_elem_len;
	softs->pqi_dev_cap.intr_coales_time_granularity = capability->intr_coales_time_granularity;

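	/*
	 * Only the SOP protocol layer descriptor is consumed here; its
	 * maximum inbound IU length and spanning flags drive the queue
	 * element sizing later performed in pqisrc_decide_opq_config().
	 */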
	iu_layer_desc = &capability->iu_layer_desc[PQI_PROTOCOL_SOP];
	softs->max_ib_iu_length_per_fw = iu_layer_desc->max_ib_iu_len;
	softs->ib_spanning_supported = iu_layer_desc->ib_spanning_supported;
	softs->ob_spanning_supported = iu_layer_desc->ob_spanning_supported;

	DBG_INIT("softs->pqi_dev_cap.max_iqs: %d\n", softs->pqi_dev_cap.max_iqs);
	DBG_INIT("softs->pqi_dev_cap.max_iq_elements: %d\n", softs->pqi_dev_cap.max_iq_elements);
	DBG_INIT("softs->pqi_dev_cap.max_iq_elem_len: %d\n", softs->pqi_dev_cap.max_iq_elem_len);
	DBG_INIT("softs->pqi_dev_cap.min_iq_elem_len: %d\n", softs->pqi_dev_cap.min_iq_elem_len);
	DBG_INIT("softs->pqi_dev_cap.max_oqs: %d\n", softs->pqi_dev_cap.max_oqs);
	DBG_INIT("softs->pqi_dev_cap.max_oq_elements: %d\n", softs->pqi_dev_cap.max_oq_elements);
	DBG_INIT("softs->pqi_dev_cap.max_oq_elem_len: %d\n", softs->pqi_dev_cap.max_oq_elem_len);
	DBG_INIT("softs->pqi_dev_cap.intr_coales_time_granularity: %d\n", softs->pqi_dev_cap.intr_coales_time_granularity);
	DBG_INIT("softs->max_ib_iu_length_per_fw: %d\n", softs->max_ib_iu_length_per_fw);
	DBG_INIT("softs->ib_spanning_supported: %d\n", softs->ib_spanning_supported);
	DBG_INIT("softs->ob_spanning_supported: %d\n", softs->ob_spanning_supported);

	os_mem_free(softs, (void *)capability, sizeof(*capability));
	os_dma_mem_free(softs, &pqi_cap_dma_buf);

	DBG_FUNC("OUT\n");
	return ret;

err_admin_req:
	os_dma_mem_free(softs, &pqi_cap_dma_buf);
err_dma_alloc:
	if (capability)
		os_mem_free(softs, (void *)capability, sizeof(*capability));
err_out:
	DBG_FUNC("failed OUT\n");
	return PQI_STATUS_FAILURE;
}

/*
 * Deallocate the rcb array and the per-request SG chain DMA buffers.
 */
void pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count)
{
	uint32_t num_req;
	size_t size;
	int i;

	DBG_FUNC("IN\n");
	num_req = softs->max_outstanding_io + 1;
	size = num_req * sizeof(rcb_t);
	for (i = 1; i < req_count; i++)
		os_dma_mem_free(softs, &softs->sg_dma_desc[i]);
	os_mem_free(softs, (void *)softs->rcb, size);
	softs->rcb = NULL;
	DBG_FUNC("OUT\n");
}


/*
 * Allocate memory for the rcb array and the SG chain descriptors.
 */
static int pqisrc_allocate_rcb(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_SUCCESS;
	int i = 0;
	uint32_t num_req = 0;
	uint32_t sg_buf_size = 0;
	uint64_t alloc_size = 0;
	rcb_t *rcb = NULL;
	rcb_t *prcb = NULL;

	DBG_FUNC("IN\n");

	/* Set the maximum number of outstanding requests */
	/*
	 * Valid tag values are 1, 2, ..., softs->max_outstanding_io and the
	 * rcb array is indexed by tag.  Since tag 0 is never used, allocate
	 * one extra element.
	 */
	softs->max_outstanding_io = softs->pqi_cap.max_outstanding_io;
	num_req = softs->max_outstanding_io + 1;
	DBG_INIT("Max Outstanding IO reset to %d\n", num_req);

	alloc_size = num_req * sizeof(rcb_t);

	/* Allocate non-DMA memory for the rcb array */
	rcb = os_mem_alloc(softs, alloc_size);
	if (!rcb) {
		DBG_ERR("Failed to allocate memory for rcb\n");
		ret = PQI_STATUS_FAILURE;
		goto err_out;
	}
	softs->rcb = rcb;

	/* Allocate SG DMA memory for the SG chains */
	sg_buf_size = softs->pqi_cap.max_sg_elem *
			sizeof(sgt_t);

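	/*
	 * Each rcb gets its own DMA-able chain buffer, sized to hold up to
	 * max_sg_elem SG descriptors; the loop below allocates one buffer
	 * per tag and records its virtual and bus addresses in the rcb so a
	 * request whose SG list does not fit in the IU can chain into it.
	 */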
	prcb = &softs->rcb[1];
	/* Initialize the rcbs */
	for (i = 1; i < num_req; i++) {
		char tag[15];

		/*
		 * NOTE: tag points at a stack buffer; this assumes
		 * os_dma_mem_alloc() only uses (or copies) the tag string
		 * during the allocation call itself.
		 */
		snprintf(tag, sizeof(tag), "sg_dma_buf%d", i);
		softs->sg_dma_desc[i].tag = tag;
		softs->sg_dma_desc[i].size = sg_buf_size;
		softs->sg_dma_desc[i].align = PQISRC_DEFAULT_DMA_ALIGN;

		ret = os_dma_mem_alloc(softs, &softs->sg_dma_desc[i]);
		if (ret) {
			DBG_ERR("Failed to allocate sg desc %d\n", ret);
			ret = PQI_STATUS_FAILURE;
			goto error;
		}
		prcb->sg_chain_virt = (sgt_t *)(softs->sg_dma_desc[i].virt_addr);
		prcb->sg_chain_dma = (dma_addr_t)(softs->sg_dma_desc[i].dma_addr);
		prcb++;
	}

	DBG_FUNC("OUT\n");
	return ret;
error:
	pqisrc_free_rcb(softs, i);
err_out:
	DBG_FUNC("failed OUT\n");
	return ret;
}

/*
 * Decide the operational queue configuration parameters:
 * number of IBQs/OBQs, shared vs. non-shared interrupt resources,
 * and IU spanning support.
 */
void pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
{
	uint16_t total_iq_elements;

	DBG_FUNC("IN\n");

	DBG_INIT("softs->intr_count : %d  softs->num_cpus_online : %d\n",
		softs->intr_count, softs->num_cpus_online);

	if (softs->intr_count == 1 || softs->num_cpus_online == 1) {
		/* Share the event and operational queues. */
		softs->num_op_obq = 1;
		softs->share_opq_and_eventq = true;
	}
	else {
		/* Note: one OBQ (OBQ0) is reserved for the event queue. */
		softs->num_op_obq = MIN(softs->num_cpus_online,
					softs->intr_count) - 1;
		softs->num_op_obq = softs->intr_count - 1;
		softs->share_opq_and_eventq = false;
	}

	/*
	 * softs->num_cpus_online counts physical CPUs, so more
	 * queues/interrupts than that may be available.
	 */
	if (softs->intr_count > 1)
		softs->share_opq_and_eventq = false;

	DBG_INIT("softs->num_op_obq : %d\n", softs->num_op_obq);

	softs->num_op_raid_ibq = softs->num_op_obq;
	softs->num_op_aio_ibq = softs->num_op_raid_ibq;
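
	/*
	 * Queue element lengths reported by the controller are expressed in
	 * 16-byte units; convert them to bytes here.  For example, a
	 * max_iq_elem_len of 8 corresponds to 128-byte inbound elements.
	 */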
	softs->ibq_elem_size = softs->pqi_dev_cap.max_iq_elem_len * 16;
	softs->obq_elem_size = softs->pqi_dev_cap.max_oq_elem_len * 16;
	if (softs->max_ib_iu_length_per_fw == 256 &&
	    softs->ob_spanning_supported) {
		/* Older f/w that doesn't actually support spanning. */
		softs->max_ib_iu_length = softs->ibq_elem_size;
	} else {
		/* Max. inbound IU length is a multiple of our inbound element size. */
		softs->max_ib_iu_length =
			(softs->max_ib_iu_length_per_fw / softs->ibq_elem_size) *
			 softs->ibq_elem_size;
	}
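
	/*
	 * Example (illustrative values): with 128-byte inbound elements and
	 * max_ib_iu_length_per_fw = 1024, the rounding above gives
	 * max_ib_iu_length = 1024, i.e. a spanned IU may occupy up to
	 * 1024 / 128 = 8 consecutive queue elements.
	 */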
	/*
	 * The total number of inbound queue elements needed is the maximum
	 * outstanding I/O count multiplied by the number of elements each
	 * (possibly spanned) IU can occupy.
	 */
	total_iq_elements = (softs->max_outstanding_io *
		(softs->max_ib_iu_length / softs->ibq_elem_size));

	softs->num_elem_per_op_ibq = total_iq_elements / softs->num_op_raid_ibq;
	softs->num_elem_per_op_ibq = MIN(softs->num_elem_per_op_ibq,
		softs->pqi_dev_cap.max_iq_elements);

	softs->num_elem_per_op_obq = softs->max_outstanding_io / softs->num_op_obq;
	softs->num_elem_per_op_obq = MIN(softs->num_elem_per_op_obq,
		softs->pqi_dev_cap.max_oq_elements);

	softs->max_sg_per_iu = ((softs->max_ib_iu_length -
				softs->ibq_elem_size) /
				sizeof(sgt_t)) +
				MAX_EMBEDDED_SG_IN_FIRST_IU;
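
	/*
	 * Example (illustrative values): with max_ib_iu_length = 1024,
	 * ibq_elem_size = 128 and 16-byte SG descriptors, the spanned part
	 * of an IU holds (1024 - 128) / 16 = 56 SG entries, on top of the
	 * MAX_EMBEDDED_SG_IN_FIRST_IU entries carried in the first element.
	 */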

	DBG_INIT("softs->max_ib_iu_length: %d\n", softs->max_ib_iu_length);
	DBG_INIT("softs->num_elem_per_op_ibq: %d\n", softs->num_elem_per_op_ibq);
	DBG_INIT("softs->num_elem_per_op_obq: %d\n", softs->num_elem_per_op_obq);
	DBG_INIT("softs->max_sg_per_iu: %d\n", softs->max_sg_per_iu);

	DBG_FUNC("OUT\n");
}

/*
 * Configure the operational queue parameters.
 */
int pqisrc_configure_op_queues(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_SUCCESS;

	/* Get the PQI capability via a REPORT PQI DEVICE CAPABILITY request */
	ret = pqisrc_report_pqi_capability(softs);
	if (ret) {
		DBG_ERR("Failed to send report pqi dev capability request : %d\n",
				ret);
		goto err_out;
	}

	/* Reserve the required number of slots for internal requests */
	softs->max_io_for_scsi_ml = softs->max_outstanding_io - PQI_RESERVED_IO_SLOTS_CNT;

	/* Decide the operational queue configuration */
	pqisrc_decide_opq_config(softs);

	DBG_FUNC("OUT\n");
	return ret;

err_out:
	DBG_FUNC("OUT failed\n");
	return ret;
}

/*
 * Validate the PQI mode of the adapter.
 */
int pqisrc_check_pqimode(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_FAILURE;
	int tmo = 0;
	uint64_t signature = 0;

	DBG_FUNC("IN\n");

	/* Check the PQI device signature */
	tmo = PQISRC_PQIMODE_READY_TIMEOUT;
	do {
		signature = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->signature, PQI_SIGNATURE));

		if (memcmp(&signature, PQISRC_PQI_DEVICE_SIGNATURE,
				sizeof(uint64_t)) == 0) {
			ret = PQI_STATUS_SUCCESS;
			break;
		}
		OS_SLEEP(PQISRC_MODE_READY_POLL_INTERVAL);
	} while (tmo--);

	PRINT_PQI_SIGNATURE(signature);

	if (tmo <= 0) {
		DBG_ERR("PQI signature is invalid\n");
		ret = PQI_STATUS_TIMEOUT;
		goto err_out;
	}

	tmo = PQISRC_PQIMODE_READY_TIMEOUT;
	/* Check the function and status code for the device */
	COND_WAIT((PCI_MEM_GET64(softs, &softs->pqi_reg->admin_q_config,
		PQI_ADMINQ_CONFIG) == PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE), tmo);
	if (!tmo) {
		DBG_ERR("PQI device is not in IDLE state\n");
		ret = PQI_STATUS_TIMEOUT;
		goto err_out;
	}

	tmo = PQISRC_PQIMODE_READY_TIMEOUT;
	/* Check the PQI device status register */
	COND_WAIT(LE_32(PCI_MEM_GET32(softs, &softs->pqi_reg->pqi_dev_status, PQI_DEV_STATUS)) &
				PQI_DEV_STATE_AT_INIT, tmo);
	if (!tmo) {
		DBG_ERR("PQI registers are not ready\n");
		ret = PQI_STATUS_TIMEOUT;
		goto err_out;
	}

	DBG_FUNC("OUT\n");
	return ret;
err_out:
	DBG_FUNC("OUT failed\n");
	return ret;
}

/*
 * Get the PQI configuration table parameters.
 * Currently used for the heart-beat counter scratch-pad register.
 */
int pqisrc_process_config_table(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_FAILURE;
	uint32_t config_table_size;
	uint32_t section_off;
	uint8_t *config_table_abs_addr;
	struct pqi_conf_table *conf_table;
	struct pqi_conf_table_section_header *section_hdr;

	config_table_size = softs->pqi_cap.conf_tab_sz;

	if (config_table_size < sizeof(*conf_table) ||
		config_table_size > PQI_CONF_TABLE_MAX_LEN) {
		DBG_ERR("Invalid PQI conf table length of %u\n",
			config_table_size);
		return ret;
	}

	conf_table = os_mem_alloc(softs, config_table_size);
	if (!conf_table) {
		DBG_ERR("Failed to allocate memory for PQI conf table\n");
		return ret;
	}

	config_table_abs_addr = (uint8_t *)(softs->pci_mem_base_vaddr +
					softs->pqi_cap.conf_tab_off);

	PCI_MEM_GET_BUF(softs, config_table_abs_addr,
			softs->pqi_cap.conf_tab_off,
			(uint8_t*)conf_table, config_table_size);

	if (memcmp(conf_table->sign, PQI_CONF_TABLE_SIGNATURE,
			sizeof(conf_table->sign)) != 0) {
		DBG_ERR("Invalid PQI config signature\n");
		goto out;
	}

	section_off = LE_32(conf_table->first_section_off);

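	/*
	 * Walk the singly linked list of config table sections: each section
	 * header carries its ID and the offset of the next section header,
	 * and a next offset of zero terminates the list.
	 */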
	while (section_off) {

		if (section_off + sizeof(*section_hdr) >= config_table_size) {
			DBG_ERR("PQI config table section offset (%u) beyond "
				"end of config table (config table length: %u)\n",
				section_off, config_table_size);
			break;
		}

		section_hdr = (struct pqi_conf_table_section_header *)((uint8_t *)conf_table + section_off);

		switch (LE_16(section_hdr->section_id)) {
		case PQI_CONF_TABLE_SECTION_GENERAL_INFO:
		case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES:
		case PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA:
		case PQI_CONF_TABLE_SECTION_DEBUG:
			break;
		case PQI_CONF_TABLE_SECTION_HEARTBEAT:
			softs->heartbeat_counter_off = softs->pqi_cap.conf_tab_off +
						section_off +
						offsetof(struct pqi_conf_table_heartbeat,
						heartbeat_counter);
			softs->heartbeat_counter_abs_addr = (uint64_t *)(softs->pci_mem_base_vaddr +
							softs->heartbeat_counter_off);
			ret = PQI_STATUS_SUCCESS;
			break;
		default:
			DBG_INFO("unrecognized PQI config table section ID: 0x%x\n",
				LE_16(section_hdr->section_id));
			break;
		}
		section_off = LE_16(section_hdr->next_section_off);
	}
out:
	os_mem_free(softs, (void *)conf_table, config_table_size);
	return ret;
}

/* Wait for PQI reset completion for the adapter. */
int pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_SUCCESS;
	pqi_reset_reg_t reset_reg;
	int pqi_reset_timeout = 0;
	uint64_t val = 0;
	uint32_t max_timeout = 0;

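	/*
	 * Bits 47:32 of the PQI admin queue capability register hold the
	 * maximum time to wait for a PQI reset to complete, expressed in
	 * 100 millisecond units (the same granularity as the poll below).
	 */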
	val = PCI_MEM_GET64(softs, &softs->pqi_reg->pqi_dev_adminq_cap, PQI_ADMINQ_CAP);

	max_timeout = (val & 0xFFFF00000000) >> 32;

	DBG_INIT("max_timeout for PQI reset completion in 100 msec units = %u\n", max_timeout);

	while (1) {
		if (pqi_reset_timeout++ == max_timeout) {
			return PQI_STATUS_TIMEOUT;
		}
		OS_SLEEP(PQI_RESET_POLL_INTERVAL); /* 100 msec */
		reset_reg.all_bits = PCI_MEM_GET32(softs,
			&softs->pqi_reg->dev_reset, PQI_DEV_RESET);
		if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
			break;
	}

	return ret;
}

/*
 * Perform a PQI hard reset.
 */
int pqi_reset(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_SUCCESS;
	uint32_t val = 0;
	pqi_reset_reg_t pqi_reset_reg;

	DBG_FUNC("IN\n");

	if (true == softs->ctrl_in_pqi_mode) {

		if (softs->pqi_reset_quiesce_allowed) {
			val = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
					LEGACY_SIS_IDBR);
			val |= SIS_PQI_RESET_QUIESCE;
			PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
					LEGACY_SIS_IDBR, LE_32(val));
			ret = pqisrc_sis_wait_for_db_bit_to_clear(softs, SIS_PQI_RESET_QUIESCE);
			if (ret) {
				DBG_ERR("failed with error %d during quiesce\n", ret);
				return ret;
			}
		}

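		/*
		 * Start a hard reset by writing the reset type/action pair to
		 * the PQI device reset register; completion is detected when
		 * the action field reads back as PQI_RESET_ACTION_COMPLETED
		 * in pqisrc_wait_for_pqi_reset_completion().
		 */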
		pqi_reset_reg.all_bits = 0;
		pqi_reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
		pqi_reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;

		PCI_MEM_PUT32(softs, &softs->pqi_reg->dev_reset, PQI_DEV_RESET,
			LE_32(pqi_reset_reg.all_bits));

		ret = pqisrc_wait_for_pqi_reset_completion(softs);
		if (ret) {
			DBG_ERR("PQI reset timed out: ret = %d!\n", ret);
			return ret;
		}
	}
	softs->ctrl_in_pqi_mode = false;
	DBG_FUNC("OUT\n");
	return ret;
}

/*
 * Initialize the adapter with the supported PQI configuration.
 */
int pqisrc_pqi_init(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN\n");

	/* Check the PQI signature */
	ret = pqisrc_check_pqimode(softs);
	if (ret) {
		DBG_ERR("failed to switch to pqi\n");
		goto err_out;
	}

	PQI_SAVE_CTRL_MODE(softs, CTRL_PQI_MODE);
	softs->ctrl_in_pqi_mode = true;

	/* Get the number of online CPUs and the NUMA/processor config from the OS */
	ret = os_get_processor_config(softs);
	if (ret) {
		DBG_ERR("Failed to get processor config from OS %d\n",
			ret);
		goto err_out;
	}

	softs->intr_type = INTR_TYPE_NONE;

	/* Get the interrupt count, type and priority available from the OS */
	ret = os_get_intr_config(softs);
	if (ret) {
		DBG_ERR("Failed to get interrupt config from OS %d\n",
			ret);
		goto err_out;
	}

	/*
	 * Enable legacy INTx interrupts (via the PQI legacy INTx mask-clear
	 * register) if the allocated interrupt is of legacy type.
	 */
	if (INTR_TYPE_FIXED == softs->intr_type) {
		pqisrc_configure_legacy_intx(softs, true);
		sis_enable_intx(softs);
	}

	/* Create the admin queue pair */
	ret = pqisrc_create_admin_queue(softs);
	if (ret) {
		DBG_ERR("Failed to configure admin queue\n");
		goto err_admin_queue;
	}

	/*
	 * Creating the event and I/O operational queues requires submitting
	 * admin IU requests, so allocate the resources for submitting IUs.
	 */

	/* Allocate the request container blocks (rcb) */
	ret = pqisrc_allocate_rcb(softs);
	if (ret == PQI_STATUS_FAILURE) {
		DBG_ERR("Failed to allocate rcb\n");
		goto err_rcb;
	}

	/* Allocate & initialize the request id queue */
	ret = pqisrc_init_taglist(softs, &softs->taglist,
				softs->max_outstanding_io);
	if (ret) {
		DBG_ERR("Failed to allocate memory for request id q : %d\n",
			ret);
		goto err_taglist;
	}

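	/*
	 * With the rcbs and request-id pool in place, size and create the
	 * operational queues.  pqisrc_configure_op_queues() first issues the
	 * REPORT PQI DEVICE CAPABILITY request used for the sizing.
	 */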
	ret = pqisrc_configure_op_queues(softs);
	if (ret) {
		DBG_ERR("Failed to configure op queue\n");
		goto err_config_opq;
	}

	/* Create the operational queues */
	ret = pqisrc_create_op_queues(softs);
	if (ret) {
		DBG_ERR("Failed to create op queue\n");
		ret = PQI_STATUS_FAILURE;
		goto err_create_opq;
	}

	softs->ctrl_online = true;

	DBG_FUNC("OUT\n");
	return ret;

err_create_opq:
err_config_opq:
	pqisrc_destroy_taglist(softs, &softs->taglist);
err_taglist:
	pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
err_rcb:
	pqisrc_destroy_admin_queue(softs);
err_admin_queue:
	os_free_intr_config(softs);
err_out:
	DBG_FUNC("OUT failed\n");
	return PQI_STATUS_FAILURE;
}

/* Force the controller out of PQI mode back into SIS mode. */
int pqisrc_force_sis(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_SUCCESS;

	if (SIS_IS_KERNEL_PANIC(softs)) {
		DBG_INIT("Controller FW is not running\n");
		return PQI_STATUS_FAILURE;
	}

	if (PQI_GET_CTRL_MODE(softs) == CTRL_SIS_MODE) {
		return ret;
	}

	if (SIS_IS_KERNEL_UP(softs)) {
		PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
		return ret;
	}

	/* Disable SIS interrupts */
	sis_disable_interrupt(softs);

	/* Reset PQI; this deletes the queues */
	ret = pqi_reset(softs);
	if (ret) {
		return ret;
	}

	/* Re-enable SIS */
	ret = pqisrc_reenable_sis(softs);
	if (ret) {
		return ret;
	}

	PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);

	return ret;
}

/* Wait for all outstanding commands to complete. */
int pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_SUCCESS;
	int tmo = PQI_CMND_COMPLETE_TMO;

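	/*
	 * The tag list holds one entry per free request slot, so it is full
	 * (num_elem == max_outstanding_io) only when no commands remain
	 * outstanding.
	 */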
	COND_WAIT((softs->taglist.num_elem == softs->max_outstanding_io), tmo);
	if (!tmo) {
		DBG_ERR("Outstanding commands did not complete; free tags: 0x%x\n",
			softs->taglist.num_elem);
		ret = PQI_STATUS_TIMEOUT;
	}
	return ret;
}

/* Fail any driver-internal requests that are still marked pending. */
void pqisrc_complete_internal_cmds(pqisrc_softstate_t *softs)
{
	int tag = 0;
	rcb_t *rcb;

	for (tag = 1; tag <= softs->max_outstanding_io; tag++) {
		rcb = &softs->rcb[tag];
		if (rcb->req_pending && is_internal_req(rcb)) {
			rcb->status = REQUEST_FAILED;
			rcb->req_pending = false;
		}
	}
}

/*
 * Uninitialize the resources used during PQI initialization.
 */
void pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
{
	int i, ret;

	DBG_FUNC("IN\n");

	/* Wait for any rescan to finish */
	pqisrc_wait_for_rescan_complete(softs);

	/* Wait for commands to complete */
	ret = pqisrc_wait_for_cmnd_complete(softs);

	/* Complete all pending commands */
	if (ret != PQI_STATUS_SUCCESS) {
		pqisrc_complete_internal_cmds(softs);
		os_complete_outstanding_cmds_nodevice(softs);
	}

	if (softs->devlist_lockcreated == true) {
		os_uninit_spinlock(&softs->devlist_lock);
		softs->devlist_lockcreated = false;
	}

	for (i = 0; i < softs->num_op_raid_ibq; i++) {
		/* OP RAID IB Q */
		if (softs->op_raid_ib_q[i].lockcreated == true) {
			OS_UNINIT_PQILOCK(&softs->op_raid_ib_q[i].lock);
			softs->op_raid_ib_q[i].lockcreated = false;
		}

		/* OP AIO IB Q */
		if (softs->op_aio_ib_q[i].lockcreated == true) {
			OS_UNINIT_PQILOCK(&softs->op_aio_ib_q[i].lock);
			softs->op_aio_ib_q[i].lockcreated = false;
		}
	}

	/* Free the operational queues */
	os_dma_mem_free(softs, &softs->op_ibq_dma_mem);
	os_dma_mem_free(softs, &softs->op_obq_dma_mem);
	os_dma_mem_free(softs, &softs->event_q_dma_mem);

	/* Free the rcbs */
	pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);

	/* Free the request id lists */
	pqisrc_destroy_taglist(softs, &softs->taglist);

	if (softs->admin_ib_queue.lockcreated == true) {
		OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock);
		softs->admin_ib_queue.lockcreated = false;
	}

	/* Free the admin queue */
	os_dma_mem_free(softs, &softs->admin_queue_dma_mem);

	/* Switch back to SIS mode */
	if (pqisrc_force_sis(softs)) {
		DBG_ERR("Failed to switch back the adapter to SIS mode!\n");
	}

	DBG_FUNC("OUT\n");
}

/*
 * Initialize the adapter settings.
 */
int pqisrc_init(pqisrc_softstate_t *softs)
{
	int ret = 0;
	int i = 0, j = 0;

	DBG_FUNC("IN\n");

	check_struct_sizes();

	/* Init the sync (SIS) interface */
	ret = pqisrc_sis_init(softs);
	if (ret) {
		DBG_ERR("SIS init failed with error %d\n", ret);
		goto err_out;
	}

	ret = os_create_semaphore("scan_lock", 1, &softs->scan_lock);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Failed to initialize scan lock\n");
		goto err_scan_lock;
	}

	/* Init the PQI interface */
	ret = pqisrc_pqi_init(softs);
	if (ret) {
		DBG_ERR("PQI init failed with error %d\n", ret);
		goto err_pqi;
	}

	/* Set up the interrupt handlers */
	ret = os_setup_intr(softs);
	if (ret) {
		DBG_ERR("Interrupt setup failed with error %d\n", ret);
		goto err_intr;
	}

	/* Report event configuration */
	ret = pqisrc_report_event_config(softs);
	if (ret) {
		DBG_ERR("Failed to configure report events\n");
		goto err_event;
	}

	/* Set event configuration */
	ret = pqisrc_set_event_config(softs);
	if (ret) {
		DBG_ERR("Failed to configure set events\n");
		goto err_event;
	}

	/* Check for PQI spanning */
	ret = pqisrc_get_ctrl_fw_version(softs);
	if (ret) {
		DBG_ERR("Failed to get ctrl fw version\n");
		goto err_fw_version;
	}

	/* Update the driver version in the FW */
	ret = pqisrc_write_driver_version_to_host_wellness(softs);
	if (ret) {
		DBG_ERR("Failed to update driver version in the FW\n");
		goto err_host_wellness;
	}

	os_strlcpy(softs->devlist_lock_name, "devlist_lock", LOCKNAME_SIZE);
	ret = os_init_spinlock(softs, &softs->devlist_lock, softs->devlist_lock_name);
	if (ret) {
		DBG_ERR("Failed to initialize devlist_lock\n");
		softs->devlist_lockcreated = false;
		goto err_lock;
	}
	softs->devlist_lockcreated = true;

	OS_ATOMIC64_SET(softs, num_intrs, 0);
	softs->prev_num_intrs = softs->num_intrs;

	/* Get the PQI configuration table to read the heart-beat counter */
	if (PQI_NEW_HEARTBEAT_MECHANISM(softs)) {
		ret = pqisrc_process_config_table(softs);
		if (ret) {
			DBG_ERR("Failed to process PQI configuration table %d\n", ret);
			goto err_config_tab;
		}
	}
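
	/*
	 * Seed the saved heartbeat value one interval behind the current
	 * controller count so the first periodic check sees forward progress
	 * rather than falsely flagging a stalled firmware heartbeat.
	 */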
	if (PQI_NEW_HEARTBEAT_MECHANISM(softs))
		softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs) - OS_FW_HEARTBEAT_TIMER_INTERVAL;

	/* Init the device list */
	for (i = 0; i < PQI_MAX_DEVICES; i++)
		for (j = 0; j < PQI_MAX_MULTILUN; j++)
			softs->device_list[i][j] = NULL;

	pqisrc_init_targetid_pool(softs);

	DBG_FUNC("OUT\n");
	return ret;

err_config_tab:
	if (softs->devlist_lockcreated == true) {
		os_uninit_spinlock(&softs->devlist_lock);
		softs->devlist_lockcreated = false;
	}
err_lock:
err_fw_version:
err_event:
err_host_wellness:
	os_destroy_intr(softs);
err_intr:
	pqisrc_pqi_uninit(softs);
err_pqi:
	os_destroy_semaphore(&softs->scan_lock);
err_scan_lock:
	pqisrc_sis_uninit(softs);
err_out:
	DBG_FUNC("OUT failed\n");
	return ret;
}

/*
 * Write all data in the adapter's battery-backed cache to
 * storage.
 */
int pqisrc_flush_cache(pqisrc_softstate_t *softs,
			enum pqisrc_flush_cache_event_type event_type)
{
	int rval = PQI_STATUS_SUCCESS;
	pqisrc_raid_req_t request;
	pqisrc_bmic_flush_cache_t *flush_buff = NULL;

	DBG_FUNC("IN\n");

	if (pqisrc_ctrl_offline(softs))
		return PQI_STATUS_FAILURE;

	flush_buff = os_mem_alloc(softs, sizeof(pqisrc_bmic_flush_cache_t));
	if (!flush_buff) {
		DBG_ERR("Failed to allocate memory for flush cache params\n");
		rval = PQI_STATUS_FAILURE;
		return rval;
	}

	flush_buff->halt_event = event_type;

	memset(&request, 0, sizeof(request));

	rval = pqisrc_build_send_raid_request(softs, &request, flush_buff,
			sizeof(*flush_buff), SA_CACHE_FLUSH, 0,
			(uint8_t *)RAID_CTLR_LUNID, NULL);
	if (rval) {
		DBG_ERR("error in build send raid req ret=%d\n", rval);
	}

	if (flush_buff)
		os_mem_free(softs, (void *)flush_buff,
			sizeof(pqisrc_bmic_flush_cache_t));

	DBG_FUNC("OUT\n");

	return rval;
}

/*
 * Uninitialize the adapter.
 */
void pqisrc_uninit(pqisrc_softstate_t *softs)
{
	DBG_FUNC("IN\n");

	pqisrc_pqi_uninit(softs);

	pqisrc_sis_uninit(softs);

	os_destroy_semaphore(&softs->scan_lock);

	os_destroy_intr(softs);

	pqisrc_cleanup_devices(softs);

	DBG_FUNC("OUT\n");
}
961