/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2018 Nexenta Systems, Inc.
 */

/*
 * This file registers the driver's interrupts with the DDI framework and
 * processes interrupts delivered by the hardware.
 */

/* ---- Driver specific header ---- */
#include <smartpqi.h>

/* ---- Forward declarations of private methods ---- */
static int add_intrs(pqi_state_t s, int type);
static uint_t intr_handler(caddr_t arg1, caddr_t arg2);
static void sync_error(pqi_state_t s, pqi_io_request_t *io,
    pqi_io_response_t *rsp);
static void process_raid_io_error(pqi_io_request_t *io);
static void process_aio_io_error(pqi_io_request_t *io);
static void disable_aio_path(pqi_io_request_t *io);

/*
 * smartpqi_register_intrs -- Figure out which interrupt type to use and
 *			      register the handlers with the framework.
 */
int
smartpqi_register_intrs(pqi_state_t s)
{
	int	intr_types;

	/* ---- Get supported interrupt types ---- */
	if (ddi_intr_get_supported_types(s->s_dip, &intr_types) !=
	    DDI_SUCCESS) {
		dev_err(s->s_dip, CE_NOTE,
		    "failed to get supported intr types");
		return (FALSE);
	}

	if (intr_types & DDI_INTR_TYPE_MSIX) {
		if (add_intrs(s, DDI_INTR_TYPE_MSIX) == TRUE) {
			s->s_intr_type = DDI_INTR_TYPE_MSIX;
			return (TRUE);
		}
	} else if (intr_types & DDI_INTR_TYPE_MSI) {
		if (add_intrs(s, DDI_INTR_TYPE_MSI) == TRUE) {
			s->s_intr_type = DDI_INTR_TYPE_MSI;
			return (TRUE);
		}
	} else if (intr_types & DDI_INTR_TYPE_FIXED) {
		if (add_intrs(s, DDI_INTR_TYPE_FIXED) == TRUE) {
			s->s_intr_type = DDI_INTR_TYPE_FIXED;
			return (TRUE);
		}
	} else {
		/* ---- Warning since it's a DDI framework error ---- */
		dev_err(s->s_dip, CE_WARN,
		    "ddi_intr_get_supported_types returned bogus type of 0x%x",
		    intr_types);
	}

	return (FALSE);
}

/*
 * smartpqi_unregister_intrs -- Disable and remove interrupt handlers
 */
void
smartpqi_unregister_intrs(pqi_state_t s)
{
	int	i;

	/* ---- First disable the interrupts ---- */
	if (s->s_intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(s->s_itable, s->s_intr_cnt);
	} else {
		for (i = 0; i < s->s_intr_cnt; i++) {
			(void) ddi_intr_disable(s->s_itable[i]);
		}
	}

	/* ---- Next remove the interrupt handlers ---- */
	for (i = 0; i < s->s_intr_cnt; i++) {
		(void) ddi_intr_remove_handler(s->s_itable[i]);
		(void) ddi_intr_free(s->s_itable[i]);
	}

	kmem_free(s->s_itable, s->s_intr_size);
	/* ---- Just in case ---- */
	s->s_itable = NULL;
	s->s_intr_size = 0;
}

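/*
 * pqi_process_io_intr -- Drain the completion queue for one queue group,
 *			  run each finished request's callback, and update
 *			  the consumer index on the HBA.
 */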
void
pqi_process_io_intr(pqi_state_t s, pqi_queue_group_t *qg)
{
	pqi_index_t		oq_pi;
	pqi_index_t		oq_ci;
	pqi_io_request_t	*io;
	pqi_io_response_t	*rsp;
	uint16_t		rqst_id;
	int			response_cnt = 0;
	int			qnotify;

	oq_ci = qg->oq_ci_copy;
	atomic_inc_32(&s->s_intr_count);

	mutex_enter(&s->s_intr_mutex);
	for (;;) {
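		/* ---- Refresh the producer index from the DMA buffer ---- */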
		(void) ddi_dma_sync(s->s_queue_dma->handle,
		    (uintptr_t)qg->oq_pi -
		    (uintptr_t)s->s_queue_dma->alloc_memory,
		    sizeof (oq_pi), DDI_DMA_SYNC_FORCPU);

		oq_pi = *qg->oq_pi;
		if (oq_pi == oq_ci)
			break;

		rsp = (pqi_io_response_t *)(qg->oq_element_array +
		    (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH));
		(void) ddi_dma_sync(s->s_queue_dma->handle,
		    (uintptr_t)rsp - (uintptr_t)s->s_queue_dma->alloc_memory,
		    sizeof (*rsp), DDI_DMA_SYNC_FORCPU);
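		/* ---- request_id maps back to the original I/O slot ---- */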
		rqst_id = rsp->request_id;
		ASSERT(rqst_id < s->s_max_io_slots);
		io = &s->s_io_rqst_pool[rqst_id];

		ASSERT(io->io_refcount == 1);

		if (io->io_cmd != NULL) {
			pqi_cmd_t	cmd = io->io_cmd;

			mutex_enter(&cmd->pc_device->pd_mutex);
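			/* ---- Skip commands that have been aborted ---- */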
			if (cmd->pc_flags & PQI_FLAG_ABORTED) {
				mutex_exit(&cmd->pc_device->pd_mutex);
				response_cnt++;
				oq_ci = (oq_ci + 1) % s->s_num_elements_per_oq;
				continue;
			}
			cmd->pc_flags |= PQI_FLAG_FINISHING;
			mutex_exit(&cmd->pc_device->pd_mutex);
		}

		io->io_iu_type = rsp->header.iu_type;
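		/* ---- Decode the response based on its IU type ---- */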
		switch (rsp->header.iu_type) {
		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
			io->io_status = PQI_DATA_IN_OUT_GOOD;
			break;
		case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
			io->io_status = PQI_DATA_IN_OUT_ERROR;
			sync_error(s, io, rsp);
			process_raid_io_error(io);
			break;
		case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
			io->io_status = PQI_DATA_IN_OUT_ERROR;
			sync_error(s, io, rsp);
			process_aio_io_error(io);
			break;
		case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
			io->io_status = PQI_DATA_IN_OUT_PROTOCOL_ERROR;
			disable_aio_path(io);
			break;

		default:
			ASSERT(0);
			break;
		}
		io->io_cb(io, io->io_context);
		response_cnt++;
		oq_ci = (oq_ci + 1) % s->s_num_elements_per_oq;
	}

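	/* ---- Let the HBA know how far the queue has been consumed ---- */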
	if (response_cnt) {
		qg->cmplt_count += response_cnt;
		qg->oq_ci_copy = oq_ci;
		ddi_put32(s->s_datap, qg->oq_ci, oq_ci);
	}
	mutex_exit(&s->s_intr_mutex);

	mutex_enter(&s->s_mutex);
	qnotify = HBA_QUIESCED_PENDING(s);
	mutex_exit(&s->s_mutex);

	if (qnotify)
		pqi_quiesced_notify(s);
}

/*
 * add_intrs -- Allocate interrupts of the given type, install the handler,
 *		and enable them.
 */
static int
add_intrs(pqi_state_t s, int type)
{
	dev_info_t	*dip	= s->s_dip;
	int		avail;
	int		actual;
	int		count	= 0;
	int		i;
	int		ret;

	/* ---- Get number of interrupts ---- */
	ret = ddi_intr_get_nintrs(dip, type, &count);
	if (ret != DDI_SUCCESS || count <= 0) {
		dev_err(s->s_dip, CE_NOTE, "ddi_intr_get_nintrs failed, "
		    "ret=%d, count=%d", ret, count);
		return (FALSE);
	}

	/* ---- Get number of available interrupts ---- */
	ret = ddi_intr_get_navail(dip, type, &avail);
	if (ret != DDI_SUCCESS || avail == 0) {
		dev_err(s->s_dip, CE_NOTE, "ddi_intr_get_navail failed, "
		    "ret=%d, avail=%d", ret, avail);
		return (FALSE);
	}

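	/* ---- Only a single vector is used for MSI/MSI-X ---- */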
	if (type != DDI_INTR_TYPE_FIXED)
		count = 1;

	s->s_intr_size = count * sizeof (ddi_intr_handle_t);
	s->s_itable = kmem_zalloc(s->s_intr_size, KM_SLEEP);
	ret = ddi_intr_alloc(dip, s->s_itable, type, 0, count, &actual,
	    DDI_INTR_ALLOC_NORMAL);
	if (ret != DDI_SUCCESS || actual == 0) {
		dev_err(s->s_dip, CE_NOTE, "ddi_intr_alloc failed, ret=%d",
		    ret);
		/* ---- Nothing was added yet; just release the table ---- */
		kmem_free(s->s_itable, s->s_intr_size);
		s->s_itable = NULL;
		s->s_intr_size = 0;
		return (FALSE);
	}

	/* ---- Fewer vectors than requested; note it and use what we got ---- */
	if (actual < count) {
		dev_err(s->s_dip, CE_NOTE,
		    "interrupts: requested=%d, received=%d",
		    count, actual);
	}
	s->s_intr_cnt = actual;

	/* ---- Get priority for first intr, assume rest are the same ---- */
	if ((ret = ddi_intr_get_pri(s->s_itable[0], &s->s_intr_pri)) !=
	    DDI_SUCCESS) {
		dev_err(s->s_dip, CE_NOTE, "ddi_intr_get_pri failed, ret=%d",
		    ret);
		goto failure;
	}

	/* ---- Test for high level mutex ---- */
	if (s->s_intr_pri >= ddi_intr_get_hilevel_pri()) {
		dev_err(s->s_dip, CE_NOTE, "Hi level interrupts not supported");
		goto failure;
	}

	/* ---- Install interrupt handler ---- */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(s->s_itable[i], intr_handler,
		    (caddr_t)s, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
			dev_err(s->s_dip, CE_NOTE,
			    "ddi_intr_add_handler failed, index=%d, ret=%d",
			    i, ret);
			goto failure;
		}
	}

	if ((ret = ddi_intr_get_cap(s->s_itable[0], &s->s_intr_cap))
	    != DDI_SUCCESS) {
		dev_err(s->s_dip, CE_NOTE, "ddi_intr_get_cap failed, ret=%d",
		    ret);
		goto failure;
	}

	/* ---- Enable interrupts ---- */
	if (s->s_intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_enable(s->s_itable, s->s_intr_cnt);
	} else {
		/* ---- Enable interrupts for either MSI or FIXED ---- */
		for (i = 0; i < actual; i++)
			(void) ddi_intr_enable(s->s_itable[i]);
	}

	return (TRUE);

failure:
	/* ---- Free allocated interrupt handles ---- */
	for (i = 0; i < actual; i++)
		(void) ddi_intr_free(s->s_itable[i]);
	kmem_free(s->s_itable, s->s_intr_size);
	s->s_itable = NULL;
	s->s_intr_size = 0;
	return (FALSE);
}

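/*
 * disable_aio_path -- Turn off the AIO path for the device that issued
 *		       this request.
 */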
static void
disable_aio_path(pqi_io_request_t *io)
{
	pqi_device_t	devp;

	devp = io->io_cmd->pc_device;
	devp->pd_aio_enabled = 0;
}

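/*
 * process_raid_io_error -- Translate firmware error information into SCSI
 *			    packet status and auto request sense data.
 */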
static void
process_raid_io_error(pqi_io_request_t *io)
{
	pqi_raid_error_info_t	ei;
	pqi_cmd_t		cmd;
	int			sense_len;
	int			statusbuf_len;
	int			sense_len_to_copy;
	struct scsi_arq_status	*arq;
	struct scsi_pkt		*pkt;

	if ((ei = io->io_error_info) != NULL) {
		io->io_status = ei->data_out_result;
		if ((cmd = io->io_cmd) == NULL)
			return;

		pkt = cmd->pc_pkt;
		pkt->pkt_resid -= ei->data_out_transferred;
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		arq = (struct scsi_arq_status *)pkt->pkt_scbp;
		*((uchar_t *)&arq->sts_status) = ei->status;
		*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
		arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS |
		    STATE_ARQ_DONE;

		sense_len = ei->sense_data_length;
		if (sense_len == 0)
			sense_len = ei->response_data_length;

		if (sense_len == 0) {
			/* ---- auto request sense failed ---- */
			arq->sts_rqpkt_status.sts_chk = 1;
			arq->sts_rqpkt_resid = cmd->pc_statuslen;
			return;
		} else if (sense_len < cmd->pc_statuslen) {
			/* ---- auto request sense short ---- */
			arq->sts_rqpkt_resid = cmd->pc_statuslen -
			    sense_len;
		} else {
			/* ---- auto request sense complete ---- */
			arq->sts_rqpkt_resid = 0;
		}
		arq->sts_rqpkt_statistics = 0;
		pkt->pkt_state |= STATE_ARQ_DONE;
		if (cmd->pc_statuslen > PQI_ARQ_STATUS_NOSENSE_LEN) {
			statusbuf_len = cmd->pc_statuslen -
			    PQI_ARQ_STATUS_NOSENSE_LEN;
		} else {
			statusbuf_len = 0;
		}

		if (sense_len > sizeof (ei->data))
			sense_len = sizeof (ei->data);
		sense_len_to_copy = min(sense_len, statusbuf_len);

		if (sense_len_to_copy) {
			(void) memcpy(&arq->sts_sensedata, ei->data,
			    sense_len_to_copy);
		}
	} else {
		/*
		 * sync_error() is called before this function and sets
		 * io_error_info, so the pointer must be non-NULL here.
		 */
		ASSERT(0);
	}
}

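/*
 * process_aio_io_error -- Currently a no-op placeholder for AIO path error
 *			   handling.
 */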
/*ARGSUSED*/
static void
process_aio_io_error(pqi_io_request_t *io)
{
}

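/*
 * sync_error -- Sync the error buffer entry for this response and point
 *		 io_error_info at it.
 */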
static void
sync_error(pqi_state_t s, pqi_io_request_t *io, pqi_io_response_t *rsp)
{
	(void) ddi_dma_sync(s->s_error_dma->handle,
	    rsp->error_index * PQI_ERROR_BUFFER_ELEMENT_LENGTH,
	    PQI_ERROR_BUFFER_ELEMENT_LENGTH, DDI_DMA_SYNC_FORCPU);

	io->io_error_info = s->s_error_dma->alloc_memory +
	    (rsp->error_index * PQI_ERROR_BUFFER_ELEMENT_LENGTH);
}

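/*
 * process_event_intr -- Drain the event queue, record any events the
 *			 firmware asks us to acknowledge, and dispatch the
 *			 event worker.
 */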
static void
process_event_intr(pqi_state_t s)
{
	pqi_event_queue_t	*q = &s->s_event_queue;
	pqi_event_response_t	*rsp;
	int			idx;
	int			num_events	= 0;
	pqi_event_t		e;
	pqi_index_t		oq_ci;
	pqi_index_t		oq_pi;

	oq_ci = q->oq_ci_copy;

	mutex_enter(&s->s_intr_mutex);
	for (;;) {
		(void) ddi_dma_sync(s->s_queue_dma->handle,
		    (uintptr_t)q->oq_pi -
		    (uintptr_t)s->s_queue_dma->alloc_memory,
		    sizeof (oq_pi), DDI_DMA_SYNC_FORCPU);
		oq_pi = *q->oq_pi;

		if (oq_pi == oq_ci)
			break;

		num_events++;
		(void) ddi_dma_sync(s->s_queue_dma->handle,
		    (uintptr_t)q->oq_element_array +
		    (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH) -
		    (uintptr_t)s->s_queue_dma->alloc_memory,
		    sizeof (*rsp),
		    DDI_DMA_SYNC_FORCPU);
		rsp = (pqi_event_response_t *)((uintptr_t)q->oq_element_array +
		    (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH));
		idx = pqi_map_event(rsp->event_type);

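		/* ---- Record events the firmware wants acknowledged ---- */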
		if (idx != -1 && rsp->request_acknowlege) {
			e = &s->s_events[idx];
			e->ev_pending = B_TRUE;
			e->ev_type = rsp->event_type;
			e->ev_id = rsp->event_id;
			e->ev_additional = rsp->additional_event_id;
		}
		oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
	}

	if (num_events != 0) {
		q->oq_ci_copy = oq_ci;
		ddi_put32(s->s_datap, q->oq_ci, oq_ci);
		(void) ddi_taskq_dispatch(s->s_events_taskq, pqi_event_worker,
		    s, 0);
	}
	mutex_exit(&s->s_intr_mutex);
}

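/*
 * intr_handler -- Common handler for all vectors; processes I/O completions
 *		   for the queue group, handles events when this is the event
 *		   vector, and restarts any queued I/O.
 */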
static uint_t
intr_handler(caddr_t arg1, caddr_t arg2)
{
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	pqi_state_t		s = (pqi_state_t)arg1;
	int			queue_group_idx = (int)(intptr_t)arg2;
	pqi_queue_group_t	*qg;

	if (s->s_intr_ready == 0)
		return (DDI_INTR_CLAIMED);

	qg = &s->s_queue_groups[queue_group_idx];
	pqi_process_io_intr(s, qg);
	if (queue_group_idx == s->s_event_queue.int_msg_num)
		process_event_intr(s);

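	/* ---- Start any queued I/O on both the RAID and AIO paths ---- */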
	pqi_start_io(s, qg, RAID_PATH, NULL);
	pqi_start_io(s, qg, AIO_PATH, NULL);

	return (DDI_INTR_CLAIMED);
}