xref: /freebsd/sys/dev/smartpqi/smartpqi_init.c (revision 7ea28254ec5376b5deb86c136e1838d0134dbb22)
1 /*-
2  * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 
27 #include "smartpqi_includes.h"
28 
29 /*
30  * Request the adapter to get PQI capabilities supported.
31  */
32 static int
pqisrc_report_pqi_capability(pqisrc_softstate_t * softs)33 pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
34 {
35 	int ret = PQI_STATUS_SUCCESS;
36 
37 	DBG_FUNC("IN\n");
38 
39 	gen_adm_req_iu_t	admin_req;
40 	gen_adm_resp_iu_t 	admin_resp;
41 	dma_mem_t		pqi_cap_dma_buf;
42 	pqi_dev_cap_t 		*capability = NULL;
43 	pqi_iu_layer_desc_t	*iu_layer_desc = NULL;
44 
45 	/* Allocate Non DMA memory */
46 	capability = os_mem_alloc(softs, sizeof(*capability));
47 	if (!capability) {
48 		DBG_ERR("Failed to allocate memory for capability\n");
49 		goto err_out;
50 	}
51 
52 	memset(&admin_req, 0, sizeof(admin_req));
53 	memset(&admin_resp, 0, sizeof(admin_resp));
54 
55 	memset(&pqi_cap_dma_buf, 0, sizeof(struct dma_mem));
56 	os_strlcpy(pqi_cap_dma_buf.tag, "pqi_cap_buf", sizeof(pqi_cap_dma_buf.tag));
57 	pqi_cap_dma_buf.size = REPORT_PQI_DEV_CAP_DATA_BUF_SIZE;
58 	pqi_cap_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
59 
60 	ret = os_dma_mem_alloc(softs, &pqi_cap_dma_buf);
61 	if (ret) {
62 		DBG_ERR("Failed to allocate capability DMA buffer : %d\n", ret);
63 		goto err_dma_alloc;
64 	}
65 
66 	admin_req.fn_code = PQI_FUNCTION_REPORT_DEV_CAP;
67 	admin_req.req_type.general_func.buf_size = pqi_cap_dma_buf.size;
68 	admin_req.req_type.general_func.sg_desc.length = pqi_cap_dma_buf.size;
69 	admin_req.req_type.general_func.sg_desc.addr = pqi_cap_dma_buf.dma_addr;
70 	admin_req.req_type.general_func.sg_desc.type =	SGL_DESCRIPTOR_CODE_DATA_BLOCK;
71 
72 	ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
73 	if( PQI_STATUS_SUCCESS == ret) {
74                 memcpy(capability,
75 			pqi_cap_dma_buf.virt_addr,
76 			pqi_cap_dma_buf.size);
77 	} else {
78 		DBG_ERR("Failed to send admin req report pqi device capability\n");
79 		goto err_admin_req;
80 
81 	}
82 
83 	softs->pqi_dev_cap.max_iqs = capability->max_iqs;
84 	softs->pqi_dev_cap.max_iq_elements = capability->max_iq_elements;
85 	softs->pqi_dev_cap.max_iq_elem_len = capability->max_iq_elem_len;
86 	softs->pqi_dev_cap.min_iq_elem_len = capability->min_iq_elem_len;
87 	softs->pqi_dev_cap.max_oqs = capability->max_oqs;
88 	softs->pqi_dev_cap.max_oq_elements = capability->max_oq_elements;
89 	softs->pqi_dev_cap.max_oq_elem_len = capability->max_oq_elem_len;
90 	softs->pqi_dev_cap.intr_coales_time_granularity = capability->intr_coales_time_granularity;
91 
92 	iu_layer_desc = &capability->iu_layer_desc[PQI_PROTOCOL_SOP];
93 	softs->max_ib_iu_length_per_fw = iu_layer_desc->max_ib_iu_len;
94 	softs->ib_spanning_supported = iu_layer_desc->ib_spanning_supported;
95 	softs->ob_spanning_supported = iu_layer_desc->ob_spanning_supported;
96 
97 	DBG_INIT("softs->pqi_dev_cap.max_iqs: %d\n", softs->pqi_dev_cap.max_iqs);
98 	DBG_INIT("softs->pqi_dev_cap.max_iq_elements: %d\n", softs->pqi_dev_cap.max_iq_elements);
99 	DBG_INIT("softs->pqi_dev_cap.max_iq_elem_len: %d\n", softs->pqi_dev_cap.max_iq_elem_len);
100 	DBG_INIT("softs->pqi_dev_cap.min_iq_elem_len: %d\n", softs->pqi_dev_cap.min_iq_elem_len);
101 	DBG_INIT("softs->pqi_dev_cap.max_oqs: %d\n", softs->pqi_dev_cap.max_oqs);
102 	DBG_INIT("softs->pqi_dev_cap.max_oq_elements: %d\n", softs->pqi_dev_cap.max_oq_elements);
103 	DBG_INIT("softs->pqi_dev_cap.max_oq_elem_len: %d\n", softs->pqi_dev_cap.max_oq_elem_len);
104 	DBG_INIT("softs->pqi_dev_cap.intr_coales_time_granularity: %d\n", softs->pqi_dev_cap.intr_coales_time_granularity);
105 	DBG_INIT("softs->max_ib_iu_length_per_fw: %d\n", softs->max_ib_iu_length_per_fw);
106 	DBG_INIT("softs->ib_spanning_supported: %d\n", softs->ib_spanning_supported);
107 	DBG_INIT("softs->ob_spanning_supported: %d\n", softs->ob_spanning_supported);
108 
109 	/* Not expecting these to change, could cause problems if they do */
110 	ASSERT(softs->pqi_dev_cap.max_iq_elem_len == PQISRC_OP_MAX_ELEM_SIZE);
111 	ASSERT(softs->pqi_dev_cap.min_iq_elem_len == PQISRC_OP_MIN_ELEM_SIZE);
112 	ASSERT(softs->max_ib_iu_length_per_fw == PQISRC_MAX_SPANNING_IU_LENGTH);
113 	ASSERT(softs->ib_spanning_supported == true);
114 
115 
116 	os_mem_free(softs, (void *)capability,
117 		    REPORT_PQI_DEV_CAP_DATA_BUF_SIZE);
118 	os_dma_mem_free(softs, &pqi_cap_dma_buf);
119 
120 	DBG_FUNC("OUT\n");
121 	return ret;
122 
123 err_admin_req:
124 	os_dma_mem_free(softs, &pqi_cap_dma_buf);
125 err_dma_alloc:
126 	if (capability)
127 		os_mem_free(softs, (void *)capability,
128 			    REPORT_PQI_DEV_CAP_DATA_BUF_SIZE);
129 err_out:
130 	DBG_FUNC("failed OUT\n");
131 	return PQI_STATUS_FAILURE;
132 }
133 
134 /*
135  * Function used to deallocate the used rcb.
136  */
137 void
pqisrc_free_rcb(pqisrc_softstate_t * softs,int req_count)138 pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count)
139 {
140 
141 	uint32_t num_req;
142 	size_t size;
143 	int i;
144 
145 	DBG_FUNC("IN\n");
146 	num_req = softs->max_outstanding_io + 1;
147 	size = num_req * sizeof(rcb_t);
148 	for (i = 1; i < req_count; i++)
149 		os_dma_mem_free(softs, &softs->sg_dma_desc[i]);
150 	os_mem_free(softs, (void *)softs->rcb, size);
151 	softs->rcb = NULL;
152 	DBG_FUNC("OUT\n");
153 }
154 
155 
156 /*
157  * Allocate memory for rcb and SG descriptors.
158  * TODO : Sg list should be created separately
159  */
160 static int
pqisrc_allocate_rcb(pqisrc_softstate_t * softs)161 pqisrc_allocate_rcb(pqisrc_softstate_t *softs)
162 {
163 	int ret = PQI_STATUS_SUCCESS;
164 	int i = 0;
165 	uint32_t num_req = 0;
166 	uint32_t sg_buf_size = 0;
167 	uint64_t alloc_size = 0;
168 	rcb_t *rcb = NULL;
169 	rcb_t *prcb = NULL;
170 	DBG_FUNC("IN\n");
171 
172 	/* Set maximum outstanding requests */
173 	/* The valid tag values are from 1, 2, ..., softs->max_outstanding_io
174 	 * The rcb will be accessed by using the tag as index
175      * As 0 tag index is not used, we need to allocate one extra.
176 	 */
177 	softs->max_outstanding_io = softs->pqi_cap.max_outstanding_io;
178 	num_req = softs->max_outstanding_io + 1;
179 	DBG_INIT("Max Outstanding IO reset to %u\n", num_req);
180 
181 	alloc_size = num_req * sizeof(rcb_t);
182 
183 	/* Allocate Non DMA memory */
184 	rcb = os_mem_alloc(softs, alloc_size);
185 	if (!rcb) {
186 		DBG_ERR("Failed to allocate memory for rcb\n");
187 		ret = PQI_STATUS_FAILURE;
188 		goto err_out;
189 	}
190 	softs->rcb = rcb;
191 
192 	/* Allocate sg dma memory for sg chain  */
193 	sg_buf_size = softs->pqi_cap.max_sg_elem *
194 			sizeof(sgt_t);
195 
196 	prcb = &softs->rcb[1];
197 	/* Initialize rcb */
198 	for(i=1; i < num_req; i++) {
199 		/* TODO:Here tag is local variable */
200 		char tag[15];
201 		sprintf(tag, "sg_dma_buf%d", i);
202 		os_strlcpy(softs->sg_dma_desc[i].tag, tag, sizeof(softs->sg_dma_desc[i].tag));
203 		softs->sg_dma_desc[i].size = sg_buf_size;
204 		softs->sg_dma_desc[i].align = PQISRC_DEFAULT_DMA_ALIGN;
205 
206 		ret = os_dma_mem_alloc(softs, &softs->sg_dma_desc[i]);
207 		if (ret) {
208 			DBG_ERR("Failed to Allocate sg desc %d\n", ret);
209 			ret = PQI_STATUS_FAILURE;
210 			goto error;
211 		}
212 		prcb->sg_chain_virt = (sgt_t *)(softs->sg_dma_desc[i].virt_addr);
213 		prcb->sg_chain_dma = (dma_addr_t)(softs->sg_dma_desc[i].dma_addr);
214 		prcb ++;
215 	}
216 
217 	DBG_FUNC("OUT\n");
218 	return ret;
219 error:
220 	pqisrc_free_rcb(softs, i);
221 err_out:
222 	DBG_FUNC("failed OUT\n");
223 	return ret;
224 }
225 
226 /*
227  * Function used to decide the operational queue configuration params
228  * - no of ibq/obq, shared/non-shared interrupt resource, IU spanning support
229  */
void
pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
{
	uint16_t total_iq_elements;

	DBG_FUNC("IN\n");

	DBG_INIT("softs->intr_count : %d  softs->num_cpus_online : %d",
		softs->intr_count, softs->num_cpus_online);

	/* TODO : Get the number of IB and OB queues from OS layer */

	if (softs->intr_count == 1 || softs->num_cpus_online == 1) {
		/* Share the event and Operational queue. */
		softs->num_op_obq = 1;
		softs->share_opq_and_eventq = true;
	}
	else {
		/* Note :  One OBQ (OBQ0) reserved for event queue */
		softs->num_op_obq = MIN(softs->num_cpus_online,
					softs->intr_count) - 1;
		softs->share_opq_and_eventq = false;
	}
	/* If the available interrupt count is more than one,
	we don't need to share the interrupt for IO and event queue */
	if (softs->intr_count > 1)
		softs->share_opq_and_eventq = false;

	DBG_INIT("softs->num_op_obq : %u\n",softs->num_op_obq);

	/* TODO : Reset the interrupt count based on number of queues*/

	/* One inbound queue (RAID and AIO each) per outbound queue. */
	softs->num_op_raid_ibq = softs->num_op_obq;
	softs->num_op_aio_ibq = softs->num_op_raid_ibq;
	/* Device capability element lengths are in 16-byte units. */
	softs->max_ibq_elem_size =  softs->pqi_dev_cap.max_iq_elem_len * 16;
	softs->max_obq_elem_size = softs->pqi_dev_cap.max_oq_elem_len * 16;
	if (softs->max_ib_iu_length_per_fw == 256 &&
	    softs->ob_spanning_supported) {
		/* older f/w that doesn't actually support spanning. */
		softs->max_ib_iu_length = softs->max_ibq_elem_size;
	} else {
		/* max. inbound IU length is a multiple of our inbound element size. */
		softs->max_ib_iu_length = PQISRC_ROUND_DOWN(softs->max_ib_iu_length_per_fw,
			softs->max_ibq_elem_size);
	}

	/* If Max. Outstanding IO came with Max. Spanning element count then,
		needed elements per IO are multiplication of
		Max.Outstanding IO and  Max.Spanning element */
	total_iq_elements = (softs->max_outstanding_io *
		(softs->max_ib_iu_length / softs->max_ibq_elem_size));

	/* Spread inbound elements over the IBQs, clamped to the device cap. */
	softs->num_elem_per_op_ibq = total_iq_elements / softs->num_op_raid_ibq;
	softs->num_elem_per_op_ibq = MIN(softs->num_elem_per_op_ibq,
		softs->pqi_dev_cap.max_iq_elements);

	/* Likewise for outbound elements per OBQ. */
	softs->num_elem_per_op_obq = softs->max_outstanding_io / softs->num_op_obq;
	softs->num_elem_per_op_obq = MIN(softs->num_elem_per_op_obq,
		softs->pqi_dev_cap.max_oq_elements);

	/* spanning elements should be 9 (1152/128) */
	softs->max_spanning_elems = softs->max_ib_iu_length/softs->max_ibq_elem_size;
	ASSERT(softs->max_spanning_elems == PQISRC_MAX_SPANNING_ELEMS);

	/* max SGs should be 8 (128/16) */
	softs->max_sg_per_single_iu_element = softs->max_ibq_elem_size / sizeof(sgt_t);
	ASSERT(softs->max_sg_per_single_iu_element == MAX_EMBEDDED_SG_IN_IU);

	/* max SGs for spanning cmd should be 68*/
	softs->max_sg_per_spanning_cmd = (softs->max_spanning_elems - 1) * softs->max_sg_per_single_iu_element;
	softs->max_sg_per_spanning_cmd += MAX_EMBEDDED_SG_IN_FIRST_IU_DEFAULT;

	DBG_INIT("softs->max_ib_iu_length: %d\n", softs->max_ib_iu_length);       /* 1152 per FW advertisement */
	DBG_INIT("softs->num_elem_per_op_ibq: %u\n", softs->num_elem_per_op_ibq); /* 32 for xcal */
	DBG_INIT("softs->num_elem_per_op_obq: %u\n", softs->num_elem_per_op_obq); /* 256 for xcal */
	DBG_INIT("softs->max_spanning_elems: %d\n", softs->max_spanning_elems);   /* 9 */
	DBG_INIT("softs->max_sg_per_spanning_cmd: %u\n", softs->max_sg_per_spanning_cmd); /* 68 until we add AIO writes */

	DBG_FUNC("OUT\n");
}
310 
311 /*
312  * Configure the operational queue parameters.
313  */
314 int
pqisrc_configure_op_queues(pqisrc_softstate_t * softs)315 pqisrc_configure_op_queues(pqisrc_softstate_t *softs)
316 {
317 	int ret = PQI_STATUS_SUCCESS;
318 
319 	/* Get the PQI capability,
320 		REPORT PQI DEVICE CAPABILITY request */
321 	ret = pqisrc_report_pqi_capability(softs);
322 	if (ret) {
323 		DBG_ERR("Failed to send report pqi dev capability request : %d\n",
324 				ret);
325 		goto err_out;
326 	}
327 
328 	/* Reserve required no of slots for internal requests */
329 	softs->max_io_for_scsi_ml = softs->max_outstanding_io - PQI_RESERVED_IO_SLOTS_CNT;
330 
331 	/* Decide the Op queue configuration */
332 	pqisrc_decide_opq_config(softs);
333 
334 	DBG_FUNC("OUT\n");
335 	return ret;
336 
337 err_out:
338 	DBG_FUNC("OUT failed\n");
339 	return ret;
340 }
341 
342 /*
343  * Validate the PQI mode of adapter.
344  */
345 int
pqisrc_check_pqimode(pqisrc_softstate_t * softs)346 pqisrc_check_pqimode(pqisrc_softstate_t *softs)
347 {
348 	int ret = PQI_STATUS_FAILURE;
349 	int tmo = 0;
350 	uint64_t signature = 0;
351 
352 	DBG_FUNC("IN\n");
353 
354 	/* Check the PQI device signature */
355 	tmo = PQISRC_PQIMODE_READY_TIMEOUT;
356 	do {
357 		signature = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->signature, PQI_SIGNATURE));
358 
359 		if (memcmp(&signature, PQISRC_PQI_DEVICE_SIGNATURE,
360 				sizeof(uint64_t)) == 0) {
361 			ret = PQI_STATUS_SUCCESS;
362 			break;
363 		}
364 		OS_SLEEP(PQISRC_MODE_READY_POLL_INTERVAL);
365 	} while (tmo--);
366 
367 	PRINT_PQI_SIGNATURE(signature);
368 
369 	if (tmo <= 0) {
370 		DBG_ERR("PQI Signature is invalid\n");
371 		ret = PQI_STATUS_TIMEOUT;
372 		goto err_out;
373 	}
374 
375 	tmo = PQISRC_PQIMODE_READY_TIMEOUT;
376 	/* Check function and status code for the device */
377 	COND_WAIT((PCI_MEM_GET64(softs, &softs->pqi_reg->admin_q_config,
378 		PQI_ADMINQ_CONFIG) == PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE), tmo);
379 	if (!tmo) {
380 		DBG_ERR("PQI device is not in IDLE state\n");
381 		ret = PQI_STATUS_TIMEOUT;
382 		goto err_out;
383 	}
384 
385 
386 	tmo = PQISRC_PQIMODE_READY_TIMEOUT;
387 	/* Check the PQI device status register */
388 	COND_WAIT(LE_32(PCI_MEM_GET32(softs, &softs->pqi_reg->pqi_dev_status, PQI_DEV_STATUS)) &
389 				PQI_DEV_STATE_AT_INIT, tmo);
390 	if (!tmo) {
391 		DBG_ERR("PQI Registers are not ready\n");
392 		ret = PQI_STATUS_TIMEOUT;
393 		goto err_out;
394 	}
395 
396 	DBG_FUNC("OUT\n");
397 	return ret;
398 err_out:
399 	DBG_FUNC("OUT failed\n");
400 	return ret;
401 }
402 
403 /* Wait for PQI reset completion for the adapter*/
404 int
pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t * softs)405 pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs)
406 {
407 	int ret = PQI_STATUS_SUCCESS;
408 	pqi_reset_reg_t reset_reg;
409 	int pqi_reset_timeout = 0;
410 	uint64_t val = 0;
411 	uint32_t max_timeout = 0;
412 
413 	val = PCI_MEM_GET64(softs, &softs->pqi_reg->pqi_dev_adminq_cap, PQI_ADMINQ_CAP);
414 
415 	max_timeout = (val & 0xFFFF00000000) >> 32;
416 
417 	DBG_INIT("max_timeout for PQI reset completion in 100 msec units = %u\n", max_timeout);
418 
419 	while(1) {
420 		if (pqi_reset_timeout++ == max_timeout) {
421 			return PQI_STATUS_TIMEOUT;
422 		}
423 		OS_SLEEP(PQI_RESET_POLL_INTERVAL);/* 100 msec */
424 		reset_reg.all_bits = PCI_MEM_GET32(softs,
425 			&softs->pqi_reg->dev_reset, PQI_DEV_RESET);
426 		if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
427 			break;
428 	}
429 
430 	return ret;
431 }
432 
433 /*
434  * Function used to perform PQI hard reset.
435  */
int
pqi_reset(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_SUCCESS;
	uint32_t val = 0;
	pqi_reset_reg_t pqi_reset_reg;

	DBG_FUNC("IN\n");

	/* Reset is only meaningful while the controller is in PQI mode. */
	if (true == softs->ctrl_in_pqi_mode) {

		if (softs->pqi_reset_quiesce_allowed) {
			/* Request firmware quiesce via the SIS host-to-IOA
			 * doorbell before issuing the PQI reset, and wait for
			 * the firmware to acknowledge by clearing the bit. */
			val = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
					LEGACY_SIS_IDBR);
			val |= SIS_PQI_RESET_QUIESCE;
			PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
					LEGACY_SIS_IDBR, LE_32(val));
			OS_SLEEP(1000);     /* 1 ms delay for PCI W/R ordering issue */
			ret = pqisrc_sis_wait_for_db_bit_to_clear(softs, SIS_PQI_RESET_QUIESCE);
			if (ret) {
				DBG_ERR("failed with error %d during quiesce\n", ret);
				return ret;
			}
		}

		/* Issue a hard reset through the PQI device reset register. */
		pqi_reset_reg.all_bits = 0;
		pqi_reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
		pqi_reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;

		PCI_MEM_PUT32(softs, &softs->pqi_reg->dev_reset, PQI_DEV_RESET,
			LE_32(pqi_reset_reg.all_bits));
		OS_SLEEP(1000);     /* 1 ms delay for PCI W/R ordering issue */

		/* Poll for reset completion (device-advertised timeout). */
		ret = pqisrc_wait_for_pqi_reset_completion(softs);
		if (ret) {
			DBG_ERR("PQI reset timed out: ret = %d!\n", ret);
			return ret;
		}
	}
	/* Controller is back in SIS mode after a successful reset. */
	softs->ctrl_in_pqi_mode = false;
	DBG_FUNC("OUT\n");
	return ret;
}
479 
480 /*
481  * Initialize the adapter with supported PQI configuration.
482  */
int
pqisrc_pqi_init(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN\n");

	/* Check the PQI signature */
	ret = pqisrc_check_pqimode(softs);
	if(ret) {
		DBG_ERR("failed to switch to pqi\n");
		goto err_out;
	}

	/* Record that the controller is now operating in PQI mode. */
	PQI_SAVE_CTRL_MODE(softs, CTRL_PQI_MODE);
	softs->ctrl_in_pqi_mode = true;

	/* Get the No. of Online CPUs,NUMA/Processor config from OS */
	ret = os_get_processor_config(softs);
	if (ret) {
		DBG_ERR("Failed to get processor config from OS %d\n",
			ret);
		goto err_out;
	}

	softs->intr_type = INTR_TYPE_NONE;

	/* Get the interrupt count, type, priority available from OS */
	ret = os_get_intr_config(softs);
	if (ret) {
		DBG_ERR("Failed to get interrupt config from OS %d\n",
			ret);
		goto err_out;
	}

	/*Enable/Set Legacy INTx Interrupt mask clear pqi register,
	 *if allocated interrupt is legacy type.
	 */
	if (INTR_TYPE_FIXED == softs->intr_type) {
		pqisrc_configure_legacy_intx(softs, true);
		sis_enable_intx(softs);
	}

	/* Create Admin Queue pair*/
	ret = pqisrc_create_admin_queue(softs);
	if(ret) {
		DBG_ERR("Failed to configure admin queue\n");
		goto err_admin_queue;
	}

	/* For creating event and IO operational queues we have to submit
	   admin IU requests.So Allocate resources for submitting IUs */

	/* Allocate the request container block (rcb) */
	ret = pqisrc_allocate_rcb(softs);
	if (ret == PQI_STATUS_FAILURE) {
		DBG_ERR("Failed to allocate rcb \n");
		goto err_rcb;
	}

	/* Allocate & initialize request id queue */
	ret = pqisrc_init_taglist(softs,&softs->taglist,
				softs->max_outstanding_io);
	if (ret) {
		DBG_ERR("Failed to allocate memory for request id q : %d\n",
			ret);
		goto err_taglist;
	}

	/* Read device capabilities and decide the op queue layout. */
	ret = pqisrc_configure_op_queues(softs);
	if (ret) {
			DBG_ERR("Failed to configure op queue\n");
			goto err_config_opq;
	}

	/* Create Operational queues */
	ret = pqisrc_create_op_queues(softs);
	if(ret) {
		DBG_ERR("Failed to create op queue\n");
		goto err_create_opq;
	}

	softs->ctrl_online = true;

	DBG_FUNC("OUT\n");
	return ret;

	/* Error unwind: tear down in the reverse order of setup above. */
err_create_opq:
err_config_opq:
	pqisrc_destroy_taglist(softs,&softs->taglist);
err_taglist:
	pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
err_rcb:
	pqisrc_destroy_admin_queue(softs);
err_admin_queue:
	os_free_intr_config(softs);
err_out:
	DBG_FUNC("OUT failed\n");
	return PQI_STATUS_FAILURE;
}
583 
584 /* */
585 int
pqisrc_force_sis(pqisrc_softstate_t * softs)586 pqisrc_force_sis(pqisrc_softstate_t *softs)
587 {
588 	int ret = PQI_STATUS_SUCCESS;
589 
590 	if (SIS_IS_KERNEL_PANIC(softs)) {
591 		DBG_ERR("Controller FW is not running\n");
592 		return PQI_STATUS_FAILURE;
593 	}
594 
595 	if (PQI_GET_CTRL_MODE(softs) == CTRL_SIS_MODE) {
596 		return ret;
597 	}
598 
599 	if (SIS_IS_KERNEL_UP(softs)) {
600 		PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
601 		return ret;
602 	}
603 	/* Disable interrupts ? */
604 	sis_disable_interrupt(softs);
605 
606 	/* reset pqi, this will delete queues */
607 	ret = pqi_reset(softs);
608 	if (ret) {
609 		return ret;
610 	}
611 	/* Re enable SIS */
612 	ret = pqisrc_reenable_sis(softs);
613 	if (ret) {
614 		return ret;
615 	}
616 
617 	PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
618 
619 	return ret;
620 }
621 
622 /* 5 mins timeout for quiesce */
623 #define PQI_QUIESCE_TIMEOUT	300000
624 
625 int
pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t * softs)626 pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *softs)
627 {
628 
629 	int count = 0;
630 	int ret = PQI_STATUS_SUCCESS;
631 
632 	DBG_NOTE("softs->taglist.num_elem : %u",softs->taglist.num_elem);
633 
634 	if (softs->taglist.num_elem == softs->max_outstanding_io)
635 		return ret;
636 	else {
637 		DBG_WARN("%u commands pending\n",
638 		softs->max_outstanding_io - softs->taglist.num_elem);
639 
640 		while(1) {
641 
642 			/* Since heartbeat timer stopped ,check for firmware status*/
643 			if (SIS_IS_KERNEL_PANIC(softs)) {
644 				DBG_ERR("Controller FW is not running\n");
645 				return PQI_STATUS_FAILURE;
646 			}
647 
648 			if (softs->taglist.num_elem != softs->max_outstanding_io) {
649 				/* Sleep for 1 msec */
650 				OS_SLEEP(1000);
651 				count++;
652 				if(count % 1000 == 0) {
653 					DBG_WARN("Waited for %d seconds", count/1000);
654 				}
655 				if (count >= PQI_QUIESCE_TIMEOUT) {
656 					return PQI_STATUS_FAILURE;
657 				}
658 				continue;
659 			}
660 			break;
661 		}
662 	}
663 	return ret;
664 }
665 
666 void
pqisrc_complete_internal_cmds(pqisrc_softstate_t * softs)667 pqisrc_complete_internal_cmds(pqisrc_softstate_t *softs)
668 {
669 
670 	int tag = 0;
671 	rcb_t *rcb;
672 
673 	for (tag = 1; tag <= softs->max_outstanding_io; tag++) {
674 		rcb = &softs->rcb[tag];
675 		if(rcb->req_pending && is_internal_req(rcb)) {
676 			rcb->status = PQI_STATUS_TIMEOUT;
677 			rcb->req_pending = false;
678 		}
679 	}
680 }
681 
682 
683 /*
684  * Uninitialize the resources used during PQI initialization.
685  */
void
pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
{
	int ret;

	DBG_FUNC("IN\n");

	/* Wait for any rescan to finish */
	pqisrc_wait_for_rescan_complete(softs);

	/* Wait for commands to complete; ret records whether the quiesce
	 * drained everything or timed out / firmware panicked. */
	ret = pqisrc_wait_for_cmnd_complete(softs);

	/* disable and free the interrupt resources */
	os_destroy_intr(softs);

	/* Quiesce failed: forcibly complete whatever is still pending
	 * (internal requests and OS-layer commands). */
	if(ret != PQI_STATUS_SUCCESS) {
		pqisrc_complete_internal_cmds(softs);
		os_complete_outstanding_cmds_nodevice(softs);
	}

	if(softs->devlist_lockcreated==true){
		os_uninit_spinlock(&softs->devlist_lock);
		softs->devlist_lockcreated = false;
	}

	/* Free all queues */
	pqisrc_destroy_op_ib_queues(softs);
	pqisrc_destroy_op_ob_queues(softs);
	pqisrc_destroy_event_queue(softs);

	/* Free  rcb */
	pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);

	/* Free request id lists */
	pqisrc_destroy_taglist(softs,&softs->taglist);

	/* Free Admin Queue */
	pqisrc_destroy_admin_queue(softs);

	/* Switch back to SIS mode */
	if (pqisrc_force_sis(softs)) {
		DBG_ERR("Failed to switch back the adapter to SIS mode!\n");
	}

	DBG_FUNC("OUT\n");
}
734 
735 
736 /*
737  * Function to do any sanity checks for OS macros
738  */
739 void
sanity_check_os_behavior(pqisrc_softstate_t * softs)740 sanity_check_os_behavior(pqisrc_softstate_t *softs)
741 {
742 #ifdef OS_ATOMIC64_INC
743 	OS_ATOMIC64_T atomic_test_var = 0;
744 	OS_ATOMIC64_T atomic_ret = 0;
745 
746 	atomic_ret = OS_ATOMIC64_INC(&atomic_test_var);
747 	ASSERT(atomic_ret == 1);
748 
749 	atomic_ret = OS_ATOMIC64_INC(&atomic_test_var);
750 	ASSERT(atomic_ret == 2);
751 
752 	atomic_ret = OS_ATOMIC64_DEC(&atomic_test_var);
753 	ASSERT(atomic_ret == 1);
754 #else
755 	DBG_INIT("OS needs to define/implement atomic macros\n");
756 #endif
757 }
758 
759 /*
760  * Function to initialize the adapter settings.
761  */
int
pqisrc_init(pqisrc_softstate_t *softs)
{
	int ret = 0;
	uint32_t	ctrl_type;

	DBG_FUNC("IN\n");

	/* Verify the OS-layer atomic macros behave as expected. */
	sanity_check_os_behavior(softs);

	check_struct_sizes();

	/*Get verbose flags, defined in OS code XX_debug.h or so*/
#ifdef DISABLE_ERR_RESP_VERBOSE
	softs->err_resp_verbose = false;
#else
	softs->err_resp_verbose = true;
#endif

	/* prevent attachment of revA hardware. */
	ctrl_type = PQI_GET_CTRL_TYPE(softs);
	if (ctrl_type == PQI_CTRL_PRODUCT_ID_GEN2_REV_A) {
		DBG_ERR("adapter at B.D.F=%u.%u.%u: unsupported RevA card.\n",
			softs->bus_id, softs->device_id, softs->func_id);
		ret = PQI_STATUS_FAILURE;
		goto err_out;
	}

	/* Increment the global adapter ID and tie it to this BDF */
#ifdef OS_ATOMIC64_INC
	static OS_ATOMIC64_T g_adapter_cnt = 0;
	softs->adapter_num = (uint8_t)OS_ATOMIC64_INC(&g_adapter_cnt);
#else
	/* Non-atomic fallback; assumes adapters attach serially. */
	static uint64_t g_adapter_cnt = 0;
	softs->adapter_num = (uint8_t)++g_adapter_cnt;
#endif
	DBG_NOTE("Initializing adapter %u\n", (uint32_t)softs->adapter_num);

	ret = os_create_semaphore("scan_lock", 1, &softs->scan_lock);
	if(ret != PQI_STATUS_SUCCESS){
		DBG_ERR(" Failed to initialize scan lock\n");
		goto err_out;
	}

	/* Init the Sync interface */
	ret = pqisrc_sis_init(softs);
	if (ret) {
		DBG_ERR("SIS Init failed with error %d\n", ret);
		goto err_sis;
	}

	/* Init the PQI interface */
	ret = pqisrc_pqi_init(softs);
	if (ret) {
		DBG_ERR("PQI Init failed with error %d\n", ret);
		goto err_pqi;
	}

	/* Setup interrupt */
	ret = os_setup_intr(softs);
	if (ret) {
		DBG_ERR("Interrupt setup failed with error %d\n", ret);
		goto err_intr;
	}

	/* Report event configuration */
	ret = pqisrc_report_event_config(softs);
	if(ret){
		DBG_ERR(" Failed to configure Report events\n");
		goto err_event;
	}

	/* Set event configuration*/
	ret = pqisrc_set_event_config(softs);
	if(ret){
		DBG_ERR(" Failed to configure Set events\n");
		goto err_event;
	}

	/* Check for For PQI spanning */
	ret = pqisrc_get_ctrl_fw_version(softs);
	if(ret){
		DBG_ERR(" Failed to get ctrl fw version\n");
		goto err_fw_version;
	}

	/* update driver version in to FW */
	ret = pqisrc_write_driver_version_to_host_wellness(softs);
	if (ret) {
		DBG_ERR(" Failed to update driver version in to FW");
		goto err_host_wellness;
	}

	/* Setup sense features */
	ret = pqisrc_QuerySenseFeatures(softs);
	if (ret) {
		DBG_ERR("Failed to get sense features\n");
		goto err_sense;
	}

	os_strlcpy(softs->devlist_lock_name, "devlist_lock", LOCKNAME_SIZE);
	ret = os_init_spinlock(softs, &softs->devlist_lock, softs->devlist_lock_name);
	if(ret){
		DBG_ERR(" Failed to initialize devlist_lock\n");
		softs->devlist_lockcreated=false;
		goto err_lock;
	}
	softs->devlist_lockcreated = true;

	/* Get the PQI configuration table to read heart-beat counter*/
	ret = pqisrc_process_config_table(softs);
	if (ret) {
		DBG_ERR("Failed to process PQI configuration table %d\n", ret);
		goto err_config_tab;
	}

	/* Seed the previous heartbeat so the first timer check passes. */
	softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs) - OS_FW_HEARTBEAT_TIMER_INTERVAL;

	/* Start with an empty device list and bitmap. */
	memset(softs->dev_list, 0, sizeof(*softs->dev_list));
	pqisrc_init_bitmap(softs);

	DBG_FUNC("OUT\n");
	return ret;

	/* Error unwind; labels below err_lock all fall through into the
	 * common PQI/SIS teardown. */
err_config_tab:
	if(softs->devlist_lockcreated==true){
		os_uninit_spinlock(&softs->devlist_lock);
		softs->devlist_lockcreated = false;
	}
err_lock:
err_fw_version:
err_event:
err_host_wellness:
err_intr:
err_sense:
	pqisrc_pqi_uninit(softs);
err_pqi:
	pqisrc_sis_uninit(softs);
err_sis:
	os_destroy_semaphore(&softs->scan_lock);
err_out:
	DBG_FUNC("OUT failed\n");
	return ret;
}
907 
908 /*
909  * Write all data in the adapter's battery-backed cache to
910  * storage.
911  */
912 int
pqisrc_flush_cache(pqisrc_softstate_t * softs,enum pqisrc_flush_cache_event_type event_type)913 pqisrc_flush_cache( pqisrc_softstate_t *softs,
914 			enum pqisrc_flush_cache_event_type event_type)
915 {
916 	int rval = PQI_STATUS_SUCCESS;
917 	pqisrc_raid_req_t request;
918 	pqisrc_bmic_flush_cache_t *flush_buff = NULL;
919 
920 	DBG_FUNC("IN\n");
921 
922 	if (pqisrc_ctrl_offline(softs))
923 		return PQI_STATUS_FAILURE;
924 
925 	flush_buff = os_mem_alloc(softs, sizeof(pqisrc_bmic_flush_cache_t));
926 	if (!flush_buff) {
927 		DBG_ERR("Failed to allocate memory for flush cache params\n");
928 		rval = PQI_STATUS_FAILURE;
929 		return rval;
930 	}
931 
932 	flush_buff->halt_event = event_type;
933 
934 	memset(&request, 0, sizeof(request));
935 
936 	request.data_direction = SOP_DATA_DIR_FROM_DEVICE;
937 	request.cmd.bmic_cdb.op_code = BMIC_WRITE;
938 	request.cmd.bmic_cdb.cmd = BMIC_CACHE_FLUSH;
939 	request.cmd.bmic_cdb.xfer_len = BE_16(sizeof(*flush_buff));
940 
941 	rval = pqisrc_prepare_send_ctrlr_request(softs, &request, flush_buff, sizeof(*flush_buff));
942 
943 	if (rval) {
944 		DBG_ERR("error in build send raid req ret=%d\n", rval);
945 	}
946 
947 	os_mem_free(softs, (void *)flush_buff, sizeof(pqisrc_bmic_flush_cache_t));
948 
949 	DBG_FUNC("OUT\n");
950 
951 	return rval;
952 }
953 
954 /*
955  * Uninitialize the adapter.
956  */
957 void
pqisrc_uninit(pqisrc_softstate_t * softs)958 pqisrc_uninit(pqisrc_softstate_t *softs)
959 {
960 	DBG_FUNC("IN\n");
961 
962 	pqisrc_pqi_uninit(softs);
963 
964 	pqisrc_sis_uninit(softs);
965 
966 	os_destroy_semaphore(&softs->scan_lock);
967 
968 	pqisrc_cleanup_devices(softs);
969 
970 	DBG_FUNC("OUT\n");
971 }
972