/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2023 Tintri by DDN, Inc. All rights reserved.
 */

/*
 * This file registers the driver's interrupt handlers with the system and
 * processes interrupts raised by the hardware.
 */

/* ---- Driver specific header ---- */
#include <smartpqi.h>

/* ---- Forward declarations of private methods ---- */
static int add_intrs(pqi_state_t *s, int type);
static uint_t intr_handler(caddr_t arg1, caddr_t arg2);
static void sync_error(pqi_state_t *s, pqi_io_request_t *io,
    pqi_io_response_t *rsp);
static void process_raid_io_error(pqi_io_request_t *io);
static void process_aio_io_error(pqi_io_request_t *io);
static void disable_aio_path(pqi_io_request_t *io);

/*
 * smartpqi_register_intrs -- Figure out which interrupt type the device
 * supports and register handlers for it with the DDI framework.
 */
int
smartpqi_register_intrs(pqi_state_t *s)
{
	int intr_types;

	/* ---- Get supported interrupt types ---- */
	if (ddi_intr_get_supported_types(s->s_dip, &intr_types) !=
	    DDI_SUCCESS) {
		dev_err(s->s_dip, CE_NOTE,
		    "failed to get supported intr types");
		return (FALSE);
	}

	if (intr_types & DDI_INTR_TYPE_MSIX) {
		if (add_intrs(s, DDI_INTR_TYPE_MSIX) == TRUE) {
			s->s_intr_type = DDI_INTR_TYPE_MSIX;
			return (TRUE);
		}
	} else if (intr_types & DDI_INTR_TYPE_MSI) {
		if (add_intrs(s, DDI_INTR_TYPE_MSI) == TRUE) {
			s->s_intr_type = DDI_INTR_TYPE_MSI;
			return (TRUE);
		}
	} else if (intr_types & DDI_INTR_TYPE_FIXED) {
		if (add_intrs(s, DDI_INTR_TYPE_FIXED) == TRUE) {
			s->s_intr_type = DDI_INTR_TYPE_FIXED;
			return (TRUE);
		}
	} else {
		/* ---- Warning since it's a DDI framework error ---- */
		dev_err(s->s_dip, CE_WARN,
		    "ddi_intr_get_supported_types returned bogus type of 0x%x",
		    intr_types);
	}

	return (FALSE);
}

/*
 * smartpqi_unregister_intrs -- Disable and remove interrupt handlers
 */
void
smartpqi_unregister_intrs(pqi_state_t *s)
{
	int i;

	/* ---- First disable the interrupts ---- */
	if (s->s_intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(s->s_itable, s->s_intr_cnt);
	} else {
		for (i = 0; i < s->s_intr_cnt; i++) {
			(void) ddi_intr_disable(s->s_itable[i]);
		}
	}

	/* ---- Next remove the interrupt handlers ---- */
	for (i = 0; i < s->s_intr_cnt; i++) {
		(void) ddi_intr_remove_handler(s->s_itable[i]);
		(void) ddi_intr_free(s->s_itable[i]);
	}

	kmem_free(s->s_itable, s->s_intr_size);
	/* ---- Just in case ---- */
	s->s_itable = NULL;
	s->s_intr_size = 0;
}

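/*
 * pqi_process_io_intr -- Drain the outbound (completion) queue for the given
 * queue group, matching each response to its I/O request and invoking the
 * request's completion callback.
 */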
void
pqi_process_io_intr(pqi_state_t *s, pqi_queue_group_t *qg)
{
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	pqi_io_request_t *io;
	pqi_io_response_t *rsp;
	uint16_t rqst_ix;
	uint8_t rqst_gen;
	int response_cnt = 0;
	int qnotify;

	oq_ci = qg->oq_ci_copy;
	atomic_inc_32(&s->s_intr_count);

	mutex_enter(&s->s_intr_mutex);
	for (;;) {
		(void) ddi_dma_sync(s->s_queue_dma->handle,
		    (uintptr_t)qg->oq_pi -
		    (uintptr_t)s->s_queue_dma->alloc_memory,
		    sizeof (oq_pi), DDI_DMA_SYNC_FORCPU);

		oq_pi = *qg->oq_pi;
		if (oq_pi == oq_ci)
			break;

		rsp = (pqi_io_response_t *)(qg->oq_element_array +
		    (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH));
		(void) ddi_dma_sync(s->s_queue_dma->handle,
		    (uintptr_t)rsp - (uintptr_t)s->s_queue_dma->alloc_memory,
		    sizeof (*rsp), DDI_DMA_SYNC_FORCPU);
		rqst_ix = PQI_REQID_INDEX(rsp->request_id);
		ASSERT(rqst_ix < s->s_max_io_slots);
		rqst_gen = PQI_REQID_GEN(rsp->request_id);
		io = &s->s_io_rqst_pool[rqst_ix];

		if (!pqi_service_io(io, rqst_gen)) {
			/*
			 * Generation does not match, so this must be a stale
			 * response for a previous (timed-out) I/O request.
			 */
			goto skipto;
		}
		ASSERT(io->io_refcount == 1);

		if (io->io_cmd != NULL) {
			pqi_cmd_t *cmd = io->io_cmd;

			if ((cmd->pc_flags & PQI_FLAG_TIMED_OUT) != 0)
				goto skipto;
		}

		io->io_iu_type = rsp->header.iu_type;
		switch (rsp->header.iu_type) {
		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
		case PQI_RESPONSE_IU_TASK_MANAGEMENT:
			io->io_status = PQI_DATA_IN_OUT_GOOD;
			break;
		case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
			io->io_status = PQI_DATA_IN_OUT_ERROR;
			sync_error(s, io, rsp);
			process_raid_io_error(io);
			break;
		case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
			io->io_status = PQI_DATA_IN_OUT_ERROR;
			sync_error(s, io, rsp);
			process_aio_io_error(io);
			break;
		case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
			io->io_status = PQI_DATA_IN_OUT_PROTOCOL_ERROR;
			disable_aio_path(io);
			break;

		default:
			ASSERT(0);
			break;
		}
		io->io_cb(io, io->io_context);
skipto:
		response_cnt++;
		oq_ci = (oq_ci + 1) % s->s_num_elements_per_oq;
	}

	if (response_cnt) {
		qg->cmplt_count += response_cnt;
		qg->oq_ci_copy = oq_ci;
		ddi_put32(s->s_datap, qg->oq_ci, oq_ci);
	}
	mutex_exit(&s->s_intr_mutex);

	mutex_enter(&s->s_mutex);
	qnotify = HBA_QUIESCED_PENDING(s);
	mutex_exit(&s->s_mutex);

	if (qnotify)
		pqi_quiesced_notify(s);
}

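/*
 * add_intrs -- Allocate and enable interrupt vectors of the requested type,
 * registering intr_handler() for each vector.
 */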
static int
add_intrs(pqi_state_t *s, int type)
{
	dev_info_t *dip = s->s_dip;
	int avail;
	int actual;
	int count = 0;
	int i;
	int ret;

	/* ---- Get number of interrupts ---- */
	ret = ddi_intr_get_nintrs(dip, type, &count);
	if (ret != DDI_SUCCESS || count <= 0) {
		dev_err(s->s_dip, CE_NOTE, "ddi_intr_get_nintrs failed, "
		    "ret=%d, count=%d", ret, count);
		return (FALSE);
	}

	/* ---- Get number of available interrupts ---- */
	ret = ddi_intr_get_navail(dip, type, &avail);
	if (ret != DDI_SUCCESS || avail == 0) {
		dev_err(s->s_dip, CE_NOTE, "ddi_intr_get_navail failed, "
		    "ret=%d, avail=%d", ret, avail);
		return (FALSE);
	}

	if (type != DDI_INTR_TYPE_FIXED)
		count = 1;

	s->s_intr_size = count * sizeof (ddi_intr_handle_t);
	s->s_itable = kmem_zalloc(s->s_intr_size, KM_SLEEP);
	ret = ddi_intr_alloc(dip, s->s_itable, type, 0, count, &actual,
	    DDI_INTR_ALLOC_NORMAL);
	if (ret != DDI_SUCCESS || actual == 0) {
		dev_err(s->s_dip, CE_NOTE, "ddi_intr_alloc failed, ret=%d",
		    ret);
		return (FALSE);
	}

	/* ---- Use actual count returned; at least note the shortfall ---- */
	if (actual < count) {
		dev_err(s->s_dip, CE_NOTE,
		    "interrupts: requested=%d, received=%d",
		    count, actual);
	}
	s->s_intr_cnt = actual;

	/* ---- Get priority for first intr, assume rest are the same ---- */
	if ((ret = ddi_intr_get_pri(s->s_itable[0], &s->s_intr_pri)) !=
	    DDI_SUCCESS) {
		dev_err(s->s_dip, CE_NOTE, "ddi_intr_get_pri failed, ret=%d",
		    ret);
		goto failure;
	}

	/* ---- Test for high level mutex ---- */
	if (s->s_intr_pri >= ddi_intr_get_hilevel_pri()) {
		dev_err(s->s_dip, CE_NOTE, "Hi level interrupts not supported");
		goto failure;
	}

	/* ---- Install interrupt handler ---- */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(s->s_itable[i], intr_handler,
		    (caddr_t)s, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
			dev_err(s->s_dip, CE_NOTE,
			    "ddi_intr_add_handler failed, index=%d, ret=%d",
			    i, ret);
			goto failure;
		}
	}

	if ((ret = ddi_intr_get_cap(s->s_itable[0], &s->s_intr_cap))
	    != DDI_SUCCESS) {
		dev_err(s->s_dip, CE_NOTE, "ddi_intr_get_cap failed, ret=%d",
		    ret);
		goto failure;
	}

	/* ---- Enable interrupts ---- */
	if (s->s_intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_enable(s->s_itable, s->s_intr_cnt);
	} else {
		/* ---- Enable interrupts for either MSI or FIXED ---- */
		for (i = 0; i < actual; i++)
			(void) ddi_intr_enable(s->s_itable[i]);
	}

	return (TRUE);

failure:
	/* ---- Free allocated interrupt pointers ---- */
	for (i = 0; i < actual; i++)
		(void) ddi_intr_free(s->s_itable[i]);
	kmem_free(s->s_itable, s->s_intr_size);
	s->s_itable = NULL;
	s->s_intr_size = 0;
	return (FALSE);
}

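/*
 * disable_aio_path -- Called when the firmware reports the AIO path disabled;
 * clears the device's pd_aio_enabled flag.
 */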
static void
disable_aio_path(pqi_io_request_t *io)
{
	pqi_device_t *devp;

	devp = io->io_cmd->pc_device;
	devp->pd_aio_enabled = 0;
}

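/*
 * process_raid_io_error -- Translate the RAID path error information into
 * SCSI pkt status and auto request sense data for the command.
 */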
static void
process_raid_io_error(pqi_io_request_t *io)
{
	pqi_raid_error_info_t ei;
	pqi_cmd_t *cmd;
	int sense_len;
	int statusbuf_len;
	int sense_len_to_copy;
	struct scsi_arq_status *arq;
	struct scsi_pkt *pkt;

	if ((ei = io->io_error_info) != NULL) {
		io->io_status = ei->data_out_result;
		if ((cmd = io->io_cmd) == NULL || cmd->pc_pkt == NULL)
			return;

		pkt = cmd->pc_pkt;
		pkt->pkt_resid -= ei->data_out_transferred;
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		arq = (struct scsi_arq_status *)pkt->pkt_scbp;
		*((uchar_t *)&arq->sts_status) = ei->status;
		*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
		arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS |
		    STATE_ARQ_DONE;

		sense_len = ei->sense_data_length;
		if (sense_len == 0)
			sense_len = ei->response_data_length;

		if (sense_len == 0) {
			/* ---- auto request sense failed ---- */
			arq->sts_rqpkt_status.sts_chk = 1;
			arq->sts_rqpkt_resid = cmd->pc_statuslen;
			return;
		} else if (sense_len < cmd->pc_statuslen) {
			/* ---- auto request sense short ---- */
			arq->sts_rqpkt_resid = cmd->pc_statuslen -
			    sense_len;
		} else {
			/* ---- auto request sense complete ---- */
			arq->sts_rqpkt_resid = 0;
		}
		arq->sts_rqpkt_statistics = 0;
		pkt->pkt_state |= STATE_ARQ_DONE;
		if (cmd->pc_statuslen > PQI_ARQ_STATUS_NOSENSE_LEN) {
			statusbuf_len = cmd->pc_statuslen -
			    PQI_ARQ_STATUS_NOSENSE_LEN;
		} else {
			statusbuf_len = 0;
		}

		if (sense_len > sizeof (ei->data))
			sense_len = sizeof (ei->data);
		sense_len_to_copy = min(sense_len, statusbuf_len);

		if (sense_len_to_copy) {
			(void) memcpy(&arq->sts_sensedata, ei->data,
			    sense_len_to_copy);
		}
	} else {
		/*
		 * sync_error() is called before this function and sets
		 * io_error_info, so the pointer must be non-NULL here.
		 */
		ASSERT(0);
	}
}

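/*
 * process_aio_io_error -- Currently a no-op; no additional processing is
 * done for AIO path errors beyond what sync_error() records.
 */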
static void
process_aio_io_error(pqi_io_request_t *io __unused)
{
}

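/*
 * sync_error -- Sync the error buffer element referenced by the response and
 * point the request's io_error_info at it.
 */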
static void
sync_error(pqi_state_t *s, pqi_io_request_t *io, pqi_io_response_t *rsp)
{
	(void) ddi_dma_sync(s->s_error_dma->handle,
	    rsp->error_index * PQI_ERROR_BUFFER_ELEMENT_LENGTH,
	    PQI_ERROR_BUFFER_ELEMENT_LENGTH, DDI_DMA_SYNC_FORCPU);

	io->io_error_info = s->s_error_dma->alloc_memory +
	    (rsp->error_index * PQI_ERROR_BUFFER_ELEMENT_LENGTH);
}

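/*
 * process_event_intr -- Drain the event queue, record pending events that
 * request acknowledgement, and dispatch the event worker to handle them.
 */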
static void
process_event_intr(pqi_state_t *s)
{
	pqi_event_queue_t *q = &s->s_event_queue;
	pqi_event_response_t *rsp;
	int idx;
	int num_events = 0;
	pqi_event_t *e;
	pqi_index_t oq_ci;
	pqi_index_t oq_pi;

	oq_ci = q->oq_ci_copy;

	mutex_enter(&s->s_intr_mutex);
	for (;;) {
		(void) ddi_dma_sync(s->s_queue_dma->handle,
		    (uintptr_t)q->oq_pi -
		    (uintptr_t)s->s_queue_dma->alloc_memory,
		    sizeof (oq_pi), DDI_DMA_SYNC_FORCPU);
		oq_pi = *q->oq_pi;

		if (oq_pi == oq_ci)
			break;

		num_events++;
		(void) ddi_dma_sync(s->s_queue_dma->handle,
		    (uintptr_t)q->oq_element_array +
		    (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH) -
		    (uintptr_t)s->s_queue_dma->alloc_memory,
		    sizeof (*rsp),
		    DDI_DMA_SYNC_FORCPU);
		rsp = (pqi_event_response_t *)((uintptr_t)q->oq_element_array +
		    (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH));
		idx = pqi_map_event(rsp->event_type);

		if (idx != -1 && rsp->request_acknowlege) {
			e = &s->s_events[idx];
			e->ev_pending = B_TRUE;
			e->ev_type = rsp->event_type;
			e->ev_id = rsp->event_id;
			e->ev_additional = rsp->additional_event_id;
		}
		oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
	}

	if (num_events != 0) {
		q->oq_ci_copy = oq_ci;
		ddi_put32(s->s_datap, q->oq_ci, oq_ci);
		(void) ddi_taskq_dispatch(s->s_events_taskq, pqi_event_worker,
		    s, 0);
	}
	mutex_exit(&s->s_intr_mutex);
}

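/*
 * intr_handler -- Per-vector interrupt handler; processes I/O completions for
 * the vector's queue group, handles event notifications when this vector owns
 * the event queue, and restarts queued I/O on both the RAID and AIO paths.
 */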
static uint_t
intr_handler(caddr_t arg1, caddr_t arg2)
{
	pqi_state_t *s = (pqi_state_t *)arg1;
	int queue_group_idx = (int)(intptr_t)arg2;
	pqi_queue_group_t *qg;

	if (!s->s_intr_ready)
		return (DDI_INTR_CLAIMED);

	qg = &s->s_queue_groups[queue_group_idx];
	pqi_process_io_intr(s, qg);
	if (queue_group_idx == s->s_event_queue.int_msg_num)
		process_event_intr(s);

	pqi_start_io(s, qg, RAID_PATH, NULL);
	pqi_start_io(s, qg, AIO_PATH, NULL);

	return (DDI_INTR_CLAIMED);
}