1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * tavor_event.c
29 * Tavor Interrupt and Event Processing Routines
30 *
31 * Implements all the routines necessary for allocating, freeing, and
32 * handling all of the various event types that the Tavor hardware can
33 * generate.
34 * These routines include the main Tavor interrupt service routine
35 * (tavor_isr()) as well as all the code necessary to set up and handle
36 * events from each of the many event queues used by the Tavor device.
37 */
38
39 #include <sys/types.h>
40 #include <sys/conf.h>
41 #include <sys/ddi.h>
42 #include <sys/sunddi.h>
43 #include <sys/modctl.h>
44
45 #include <sys/ib/adapters/tavor/tavor.h>
46
47 static void tavor_eq_poll(tavor_state_t *state, tavor_eqhdl_t eq);
48 static void tavor_eq_catastrophic(tavor_state_t *state);
49 static int tavor_eq_alloc(tavor_state_t *state, uint32_t log_eq_size,
50 uint_t intr, tavor_eqhdl_t *eqhdl);
51 static int tavor_eq_free(tavor_state_t *state, tavor_eqhdl_t *eqhdl);
52 static int tavor_eq_handler_init(tavor_state_t *state, tavor_eqhdl_t eq,
53 uint_t evt_type_mask, int (*eqfunc)(tavor_state_t *state,
54 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe));
55 static int tavor_eq_handler_fini(tavor_state_t *state, tavor_eqhdl_t eq);
56 static void tavor_eqe_sync(tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe, uint_t flag,
57 uint_t force_sync);
58 static int tavor_port_state_change_handler(tavor_state_t *state,
59 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
60 static int tavor_comm_estbl_handler(tavor_state_t *state,
61 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
62 static int tavor_local_wq_cat_err_handler(tavor_state_t *state,
63 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
64 static int tavor_invreq_local_wq_err_handler(tavor_state_t *state,
65 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
66 static int tavor_local_acc_vio_wq_err_handler(tavor_state_t *state,
67 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
68 static int tavor_sendq_drained_handler(tavor_state_t *state,
69 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
70 static int tavor_path_mig_handler(tavor_state_t *state,
71 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
72 static int tavor_path_mig_err_handler(tavor_state_t *state,
73 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
74 static int tavor_srq_catastrophic_handler(tavor_state_t *state,
75 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
76 static int tavor_srq_last_wqe_reached_handler(tavor_state_t *state,
77 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
78 static int tavor_ecc_detection_handler(tavor_state_t *state,
79 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
80 static int tavor_no_eqhandler(tavor_state_t *state, tavor_eqhdl_t eq,
81 tavor_hw_eqe_t *eqe);
82
83
84 /*
85 * tavor_eq_init_all
86 * Context: Only called from attach() path context
87 */
88 int
89 tavor_eq_init_all(tavor_state_t *state)
90 {
91 uint_t log_eq_size, intr_num;
92 uint_t num_eq, num_eq_init, num_eq_unmap;
93 int status, i;
94
95 /*
96 * For now, all Event Queues default to the same size (pulled from
97 * the current configuration profile) and are all assigned to the
98 * same interrupt or MSI. In the future we may support assigning
99 * EQs to specific interrupts or MSIs XXX
100 */
101 log_eq_size = state->ts_cfg_profile->cp_log_default_eq_sz;
102
103 /*
104 * If MSI is to be used, then set intr_num to the MSI number
105 * (currently zero because we're using only one) or'd with the
106 * MSI enable flag. Otherwise, for regular (i.e. 'legacy') interrupt,
107 * use the 'inta_pin' value returned by QUERY_ADAPTER.
108 */
109 if (state->ts_intr_type_chosen == DDI_INTR_TYPE_MSI) {
110 intr_num = TAVOR_EQ_MSI_ENABLE_FLAG | 0;
111 } else {
112 intr_num = state->ts_adapter.inta_pin;
113 }
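/*
 * Note that whichever value is chosen here is simply handed to
 * tavor_eq_alloc() for every EQ below, where it is programmed into the
 * "intr" field of each EQ context entry.
 */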
114
115 /*
116 * Total number of supported EQs is hardcoded. Tavor hardware
117 * supports up to 64 EQs. We are currently using 47 of them (EQ0-EQ46).
118 * We will set aside the first 32 for use with Completion Queues (CQ)
119 * and reserve a few of the other 32 for each specific class of event
120 * (see below for more details).
121 */
122 num_eq = TAVOR_NUM_EQ_USED;
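/*
 * For the reader's convenience, the EQ usage established below is:
 *   EQ0-EQ31: Completion Queue events
 *   EQ32:     Completion Queue error events
 *   EQ33:     Port state change events
 *   EQ34:     Communication established events
 *   EQ35:     Command interface completion events
 *   EQ36:     Local WQ catastrophic error events
 *   EQ37:     Invalid request local WQ error events
 *   EQ38:     Local access violation WQ error events
 *   EQ39:     Send queue drained events
 *   EQ40:     Path migration succeeded events
 *   EQ41:     Path migration failed events
 *   EQ42:     Local catastrophic error (no EQEs; polled via CR-Space)
 *   EQ43:     SRQ catastrophic error events
 *   EQ44:     SRQ last WQE reached events
 *   EQ45:     ECC detection events
 *   EQ46:     Catch-all for all other event types
 * The authoritative mapping is the sequence of tavor_eq_handler_init()
 * calls below.
 */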
123
124 /*
125 * The "num_eq_unmap" variable is used in any possible failure
126 * cleanup (below) to indicate which event queues might require
127 * event class unmapping.
128 */
129 num_eq_unmap = 0;
130
131 /*
132 * Allocate and initialize all the Event Queues. If any of these
133 * EQ allocations fail then jump to the end, cleanup what had been
134 * successfully initialized, and return an error.
135 */
136 for (i = 0; i < num_eq; i++) {
137 status = tavor_eq_alloc(state, log_eq_size, intr_num,
138 &state->ts_eqhdl[i]);
139 if (status != DDI_SUCCESS) {
140 num_eq_init = i;
141 goto all_eq_init_fail;
142 }
143 }
144 num_eq_init = num_eq;
145
146 /*
147 * Setup EQ0-EQ31 for use with Completion Queues. Note: We can
148 * cast the return value to void here because, when we use the
149 * TAVOR_EVT_NO_MASK flag, it is not possible for
150 * tavor_eq_handler_init() to return an error.
151 */
152 for (i = 0; i < 32; i++) {
153 (void) tavor_eq_handler_init(state, state->ts_eqhdl[i],
154 TAVOR_EVT_NO_MASK, tavor_cq_handler);
155 }
156 num_eq_unmap = 32;
157
158 /*
159 * Setup EQ32 for handling Completion Queue Error Events.
160 *
161 * These events include things like CQ overflow or CQ access
162 * violation errors. If this setup fails for any reason (which, in
163 * general, it really never should), then jump to the end, cleanup
164 * everything that has been successfully initialized, and return an
165 * error.
166 */
167 status = tavor_eq_handler_init(state, state->ts_eqhdl[32],
168 TAVOR_EVT_MSK_CQ_ERRORS, tavor_cq_err_handler);
169 if (status != DDI_SUCCESS) {
170 goto all_eq_init_fail;
171 }
172 num_eq_unmap = 33;
173
174 /*
175 * Setup EQ33 for handling Port State Change Events
176 *
177 * These events include things like Port Up and Port Down events.
178 * If this setup fails for any reason (which, in general, it really
179 * never should), then undo all previous EQ mapping, jump to the end,
180 * cleanup everything that has been successfully initialized, and
181 * return an error.
182 */
183 status = tavor_eq_handler_init(state, state->ts_eqhdl[33],
184 TAVOR_EVT_MSK_PORT_STATE_CHANGE, tavor_port_state_change_handler);
185 if (status != DDI_SUCCESS) {
186 goto all_eq_init_fail;
187 }
188 num_eq_unmap = 34;
189
190 /*
191 * Setup EQ34 for handling Communication Established Events
192 *
193 * These events correspond to the IB affiliated asynchronous events
194 * that are used for connection management. If this setup fails for
195 * any reason (which, in general, it really never should), then undo
196 * all previous EQ mapping, jump to the end, cleanup everything that
197 * has been successfully initialized, and return an error.
198 */
199 status = tavor_eq_handler_init(state, state->ts_eqhdl[34],
200 TAVOR_EVT_MSK_COMM_ESTABLISHED, tavor_comm_estbl_handler);
201 if (status != DDI_SUCCESS) {
202 goto all_eq_init_fail;
203 }
204 num_eq_unmap = 35;
205
206 /*
207 * Setup EQ35 for handling Command Completion Events
208 *
209 * These events correspond to the Tavor generated events that are used
210 * to indicate Tavor firmware command completion. These events are
211 * only generated when Tavor firmware commands are posted using the
212 * asynchronous completion mechanism. If this setup fails for any
213 * reason (which, in general, it really never should), then undo all
214 * previous EQ mapping, jump to the end, cleanup everything that has
215 * been successfully initialized, and return an error.
216 */
217 status = tavor_eq_handler_init(state, state->ts_eqhdl[35],
218 TAVOR_EVT_MSK_COMMAND_INTF_COMP, tavor_cmd_complete_handler);
219 if (status != DDI_SUCCESS) {
220 goto all_eq_init_fail;
221 }
222 num_eq_unmap = 36;
223
224 /*
225 * Setup EQ36 for handling Local WQ Catastrophic Error Events
226 *
227 * These events correspond to the similarly-named IB affiliated
228 * asynchronous error type. If this setup fails for any reason
229 * (which, in general, it really never should), then undo all previous
230 * EQ mapping, jump to the end, cleanup everything that has been
231 * successfully initialized, and return an error.
232 */
233 status = tavor_eq_handler_init(state, state->ts_eqhdl[36],
234 TAVOR_EVT_MSK_LOCAL_WQ_CAT_ERROR, tavor_local_wq_cat_err_handler);
235 if (status != DDI_SUCCESS) {
236 goto all_eq_init_fail;
237 }
238 num_eq_unmap = 37;
239
240 /*
241 * Setup EQ37 for handling Invalid Req Local WQ Error Events
242 *
243 * These events also correspond to the similarly-named IB affiliated
244 * asynchronous error type. If this setup fails for any reason
245 * (which, in general, it really never should), then undo all previous
246 * EQ mapping, jump to the end, cleanup everything that has been
247 * successfully initialized, and return an error.
248 */
249 status = tavor_eq_handler_init(state, state->ts_eqhdl[37],
250 TAVOR_EVT_MSK_INV_REQ_LOCAL_WQ_ERROR,
251 tavor_invreq_local_wq_err_handler);
252 if (status != DDI_SUCCESS) {
253 goto all_eq_init_fail;
254 }
255 num_eq_unmap = 38;
256
257 /*
258 * Setup EQ38 for handling Local Access Violation WQ Error Events
259 *
260 * These events also correspond to the similarly-named IB affiliated
261 * asynchronous error type. If this setup fails for any reason
262 * (which, in general, it really never should), then undo all previous
263 * EQ mapping, jump to the end, cleanup everything that has been
264 * successfully initialized, and return an error.
265 */
266 status = tavor_eq_handler_init(state, state->ts_eqhdl[38],
267 TAVOR_EVT_MSK_LOCAL_ACC_VIO_WQ_ERROR,
268 tavor_local_acc_vio_wq_err_handler);
269 if (status != DDI_SUCCESS) {
270 goto all_eq_init_fail;
271 }
272 num_eq_unmap = 39;
273
274 /*
275 * Setup EQ39 for handling Send Queue Drained Events
276 *
277 * These events correspond to the IB affiliated asynchronous events
278 * that are used to indicate completion of a Send Queue Drained QP
279 * state transition. If this setup fails for any reason (which, in
280 * general, it really never should), then undo all previous EQ
281 * mapping, jump to the end, cleanup everything that has been
282 * successfully initialized, and return an error.
283 */
284 status = tavor_eq_handler_init(state, state->ts_eqhdl[39],
285 TAVOR_EVT_MSK_SEND_QUEUE_DRAINED, tavor_sendq_drained_handler);
286 if (status != DDI_SUCCESS) {
287 goto all_eq_init_fail;
288 }
289 num_eq_unmap = 40;
290
291 /*
292 * Setup EQ40 for handling Path Migration Succeeded Events
293 *
294 * These events correspond to the IB affiliated asynchronous events
295 * that are used to indicate successful completion of a path
296 * migration. If this setup fails for any reason (which, in general,
297 * it really never should), then undo all previous EQ mapping, jump
298 * to the end, cleanup everything that has been successfully
299 * initialized, and return an error.
300 */
301 status = tavor_eq_handler_init(state, state->ts_eqhdl[40],
302 TAVOR_EVT_MSK_PATH_MIGRATED, tavor_path_mig_handler);
303 if (status != DDI_SUCCESS) {
304 goto all_eq_init_fail;
305 }
306 num_eq_unmap = 41;
307
308 /*
309 * Setup EQ41 for handling Path Migration Failed Events
310 *
311 * These events correspond to the IB affiliated asynchronous events
312 * that are used to indicate that path migration was not successful.
313 * If this setup fails for any reason (which, in general, it really
314 * never should), then undo all previous EQ mapping, jump to the end,
315 * cleanup everything that has been successfully initialized, and
316 * return an error.
317 */
318 status = tavor_eq_handler_init(state, state->ts_eqhdl[41],
319 TAVOR_EVT_MSK_PATH_MIGRATE_FAILED, tavor_path_mig_err_handler);
320 if (status != DDI_SUCCESS) {
321 goto all_eq_init_fail;
322 }
323 num_eq_unmap = 42;
324
325 /*
326 * Setup EQ42 for handling Local Catastrophic Error Events
327 *
328 * These events correspond to the similarly-named IB unaffiliated
329 * asynchronous error type. If this setup fails for any reason
330 * (which, in general, it really never should), then undo all previous
331 * EQ mapping, jump to the end, cleanup everything that has been
332 * successfully initialized, and return an error.
333 *
334 * This error is unique, in that an EQE is not generated if this event
335 * occurs. Instead, an interrupt is raised and we must poll the
336 * Catastrophic Error buffer in CR-Space. This mapping is setup simply
337 * to enable this error reporting. We pass in a NULL handler since it
338 * will never be called.
339 */
340 status = tavor_eq_handler_init(state, state->ts_eqhdl[42],
341 TAVOR_EVT_MSK_LOCAL_CAT_ERROR, NULL);
342 if (status != DDI_SUCCESS) {
343 goto all_eq_init_fail;
344 }
345 num_eq_unmap = 43;
346
347 /*
348 * Setup EQ43 for handling SRQ Catastrophic Error Events
349 *
350 * These events correspond to the similarly-named IB affiliated
351 * asynchronous error type. If this setup fails for any reason
352 * (which, in general, it really never should), then undo all previous
353 * EQ mapping, jump to the end, cleanup everything that has been
354 * successfully initialized, and return an error.
355 */
356 status = tavor_eq_handler_init(state, state->ts_eqhdl[43],
357 TAVOR_EVT_MSK_SRQ_CATASTROPHIC_ERROR,
358 tavor_srq_catastrophic_handler);
359 if (status != DDI_SUCCESS) {
360 goto all_eq_init_fail;
361 }
362 num_eq_unmap = 44;
363
364 /*
365 * Setup EQ44 for handling SRQ Last WQE Reached Events
366 *
367 * These events correspond to the similarly-named IB affiliated
368 * asynchronous event type. If this setup fails for any reason
369 * (which, in general, it really never should), then undo all previous
370 * EQ mapping, jump to the end, cleanup everything that has been
371 * successfully initialized, and return an error.
372 */
373 status = tavor_eq_handler_init(state, state->ts_eqhdl[44],
374 TAVOR_EVT_MSK_SRQ_LAST_WQE_REACHED,
375 tavor_srq_last_wqe_reached_handler);
376 if (status != DDI_SUCCESS) {
377 goto all_eq_init_fail;
378 }
379 num_eq_unmap = 45;
380
381 /*
382 * Setup EQ45 for handling ECC error detection events
383 *
384 * These events correspond to the similarly-named IB affiliated
385 * asynchronous event type. If this setup fails for any reason
386 * (which, in general, it really never should), then undo all previous
387 * EQ mapping, jump to the end, cleanup everything that has been
388 * successfully initialized, and return an error.
389 */
390 status = tavor_eq_handler_init(state, state->ts_eqhdl[45],
391 TAVOR_EVT_MSK_ECC_DETECTION,
392 tavor_ecc_detection_handler);
393 if (status != DDI_SUCCESS) {
394 goto all_eq_init_fail;
395 }
396 num_eq_unmap = 46;
397
398 /*
399 * Setup EQ46 to catch all other types of events. Specifically, we
400 * do not catch the "Local EEC Catastrophic Error Event" because we
401 * should have no EEC (the Tavor driver does not support RD). We also
402 * choose not to handle any of the address translation page fault
403 * event types. Since we are not doing any page fault handling (and
404 * since the Tavor firmware does not currently support any such
405 * handling), we allow these events to go to the catch-all handler.
406 */
407 status = tavor_eq_handler_init(state, state->ts_eqhdl[46],
408 TAVOR_EVT_CATCHALL_MASK, tavor_no_eqhandler);
409 if (status != DDI_SUCCESS) {
410 goto all_eq_init_fail;
411 }
412
413 return (DDI_SUCCESS);
414
415 all_eq_init_fail:
416 /* Unmap any of the partially mapped EQs from above */
417 for (i = 0; i < num_eq_unmap; i++) {
418 (void) tavor_eq_handler_fini(state, state->ts_eqhdl[i]);
419 }
420
421 /* Free up any of the partially allocated EQs from above */
422 for (i = 0; i < num_eq_init; i++) {
423 (void) tavor_eq_free(state, &state->ts_eqhdl[i]);
424 }
425 return (status);
426 }
427
428
429 /*
430 * tavor_eq_fini_all
431 * Context: Only called from attach() and/or detach() path contexts
432 */
433 int
434 tavor_eq_fini_all(tavor_state_t *state)
435 {
436 uint_t num_eq;
437 int status, i;
438
439 /*
440 * Grab the total number of supported EQs again. This is the same
441 * hardcoded value that was used above (during the event queue
442 * initialization.)
443 */
444 num_eq = TAVOR_NUM_EQ_USED;
445
446 /*
447 * For each of the event queues that we initialized and mapped
448 * earlier, attempt to unmap the events from the EQ.
449 */
450 for (i = 0; i < num_eq; i++) {
451 status = tavor_eq_handler_fini(state, state->ts_eqhdl[i]);
452 if (status != DDI_SUCCESS) {
453 return (DDI_FAILURE);
454 }
455 }
456
457 /*
458 * Teardown and free up all the Event Queues that were allocated
459 * earlier.
460 */
461 for (i = 0; i < num_eq; i++) {
462 status = tavor_eq_free(state, &state->ts_eqhdl[i]);
463 if (status != DDI_SUCCESS) {
464 return (DDI_FAILURE);
465 }
466 }
467
468 return (DDI_SUCCESS);
469 }
470
471
472 /*
473 * tavor_eq_arm_all
474 * Context: Only called from attach() and/or detach() path contexts
475 */
476 void
477 tavor_eq_arm_all(tavor_state_t *state)
478 {
479 uint_t num_eq;
480 int i;
481
482 /*
483 * Grab the total number of supported EQs again. This is the same
484 * hardcoded value that was used above (during the event queue
485 * initialization.)
486 */
487 num_eq = TAVOR_NUM_EQ_USED;
488
489 /*
490 * For each of the event queues that we initialized and mapped
491 * earlier, attempt to arm it for event generation.
492 */
493 for (i = 0; i < num_eq; i++) {
494 tavor_eq_doorbell(state, TAVOR_EQDB_REARM_EQ, i, 0);
495 }
496 }
497
498
499 /*
500 * tavor_isr()
501 * Context: Only called from interrupt context (and during panic)
502 */
503 /* ARGSUSED */
504 uint_t
505 tavor_isr(caddr_t arg1, caddr_t arg2)
506 {
507 tavor_state_t *state;
508 uint64_t *ecr, *clr_int;
509 uint64_t ecrreg, int_mask;
510 uint_t status;
511 int i;
512
513 /*
514 * Grab the Tavor softstate pointer from the input parameter
515 */
516 state = (tavor_state_t *)arg1;
517
518 /*
519 * Find the pointers to the ECR and clr_INT registers
520 */
521 ecr = state->ts_cmd_regs.ecr;
522 clr_int = state->ts_cmd_regs.clr_int;
523
524 /*
525 * Read the ECR register. Each of the 64 bits in the ECR register
526 * corresponds to an event queue. If a bit is set, then the
527 * corresponding event queue has fired.
528 */
529 ecrreg = ddi_get64(state->ts_reg_cmdhdl, ecr);
530
531 /*
532 * As long as there are bits set (i.e. as long as there are still
533 * EQs in the "fired" state), call tavor_eq_poll() to process each
534 * fired EQ. If no ECR bits are set, do not claim the interrupt.
535 */
536 status = DDI_INTR_UNCLAIMED;
537 do {
538 i = 0;
539 while (ecrreg != 0x0) {
540 if (ecrreg & 0x1) {
541 tavor_eq_poll(state, state->ts_eqhdl[i]);
542 status = DDI_INTR_CLAIMED;
543 }
544 ecrreg = ecrreg >> 1;
545 i++;
546 }
547
548 /*
549 * Clear the interrupt. Note: Depending on the interrupt type
550 * (legacy interrupt or MSI), we need to use a different
551 * mask to clear the event. In the case of MSI, the bit
552 * to clear corresponds to the MSI number, and for legacy
553 * interrupts the bit corresponds to the value in 'inta_pin'.
554 */
555 if (state->ts_intr_type_chosen == DDI_INTR_TYPE_MSI) {
556 int_mask = ((uint64_t)1 << 0);
557 } else {
558 int_mask = ((uint64_t)1 << state->ts_adapter.inta_pin);
559 }
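/*
 * For example (with a purely hypothetical inta_pin value of 11), a
 * legacy interrupt would be cleared by writing bit 11 below, while the
 * single MSI currently used is always cleared by writing bit 0.
 */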
560 ddi_put64(state->ts_reg_cmdhdl, clr_int, int_mask);
561
562 /* Reread the ECR register */
563 ecrreg = ddi_get64(state->ts_reg_cmdhdl, ecr);
564
565 } while (ecrreg != 0x0);
566
567 return (status);
568 }
569
570
571 /*
572 * tavor_eq_doorbell
573 * Context: Only called from interrupt context
574 */
575 void
576 tavor_eq_doorbell(tavor_state_t *state, uint32_t eq_cmd, uint32_t eqn,
577 uint32_t eq_param)
578 {
579 uint64_t doorbell = 0;
580
581 /* Build the doorbell from the parameters */
582 doorbell = ((uint64_t)eq_cmd << TAVOR_EQDB_CMD_SHIFT) |
583 ((uint64_t)eqn << TAVOR_EQDB_EQN_SHIFT) | eq_param;
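/*
 * The shifts above imply the doorbell layout: the EQ command in the
 * bits at TAVOR_EQDB_CMD_SHIFT, the EQ number in the bits at
 * TAVOR_EQDB_EQN_SHIFT, and the command-specific parameter (e.g. the
 * new consumer index for TAVOR_EQDB_SET_CONSINDX) in the low-order bits.
 */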
584
585 /* Write the doorbell to UAR */
586 TAVOR_UAR_DOORBELL(state, (uint64_t *)&state->ts_uar->eq,
587 doorbell);
588 }
589
590 /*
591 * tavor_eq_poll
592 * Context: Only called from interrupt context (and during panic)
593 */
594 static void
595 tavor_eq_poll(tavor_state_t *state, tavor_eqhdl_t eq)
596 {
597 uint64_t *clr_ecr;
598 tavor_hw_eqe_t *eqe;
599 uint64_t ecr_mask;
600 uint32_t cons_indx, wrap_around_mask;
601 int (*eqfunction)(tavor_state_t *state, tavor_eqhdl_t eq,
602 tavor_hw_eqe_t *eqe);
603
604 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*eq))
605
606 /* Find the pointer to the clr_ECR register */
607 clr_ecr = state->ts_cmd_regs.clr_ecr;
608
609 /*
610 * Check for Local Catastrophic Error. If we have this kind of error,
611 * then we don't need to do anything else here, as this kind of
612 * catastrophic error is handled separately. So we call the
613 * catastrophic handler, clear the ECR and then return.
614 */
615 if (eq->eq_evttypemask == TAVOR_EVT_MSK_LOCAL_CAT_ERROR) {
616 /*
617 * Call Catastrophic Error handler
618 */
619 tavor_eq_catastrophic(state);
620
621 /*
622 * Clear the ECR. Specifically, clear the bit corresponding
623 * to the event queue just processed.
624 */
625 ecr_mask = ((uint64_t)1 << eq->eq_eqnum);
626 ddi_put64(state->ts_reg_cmdhdl, clr_ecr, ecr_mask);
627
628 return;
629 }
630
631 /* Get the consumer pointer index */
632 cons_indx = eq->eq_consindx;
633
634 /*
635 * Calculate the wrap around mask. Note: This operation only works
636 * because all Tavor event queues have power-of-2 sizes
637 */
638 wrap_around_mask = (eq->eq_bufsz - 1);
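/*
 * Illustrative example: a 256-entry EQ yields a mask of 0xFF, so the
 * consumer index increment below ((cons_indx + 1) & wrap_around_mask)
 * wraps from entry 255 back to entry 0.
 */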
639
640 /* Calculate the pointer to the first EQ entry */
641 eqe = &eq->eq_buf[cons_indx];
642 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*eqe))
643
644 /*
645 * Sync the current EQE to read
646 * We need to force a ddi_dma_sync() here (independent of how the
647 * EQ was mapped) because it is possible for us to receive the
648 * interrupt, do a read of the ECR, and have each of these
649 * operations complete successfully even though the hardware's DMA
650 * to the EQ has not yet completed.
651 */
652 tavor_eqe_sync(eq, eqe, DDI_DMA_SYNC_FORCPU, TAVOR_EQ_SYNC_FORCE);
653
654 /*
655 * Pull the handler function for this EQ from the Tavor Event Queue
656 * handle
657 */
658 eqfunction = eq->eq_func;
659
660 /*
661 * Keep pulling entries from the EQ until we find an entry owned by
662 * the hardware. As long as the EQEs are owned by SW, process each
663 * entry by calling its handler function and updating the EQ
664 * consumer index.
665 */
666 do {
667 while (TAVOR_EQE_OWNER_IS_SW(eq, eqe)) {
668 /*
669 * Call the EQ handler function. But only call if we
670 * are not in polled I/O mode (i.e. not processing
671 * because of a system panic). Note: We don't call
672 * the EQ handling functions from a system panic
673 * because we are primarily concerned only with
674 * ensuring that the event queues do not overflow (or,
675 * more specifically, the event queue associated with
676 * the CQ that is being used in the sync/dump process).
677 * Also, we don't want to make any upcalls (to the
678 * IBTF) because we can't guarantee when/if those
679 * calls would ever return. And, if we're in panic,
680 * then we reached here through a PollCQ() call (from
681 * tavor_cq_poll()), and we need to ensure that we
682 * successfully return any work completions to the
683 * caller.
684 */
685 if (ddi_in_panic() == 0) {
686 eqfunction(state, eq, eqe);
687 }
688
689 /* Reset entry to hardware ownership */
690 TAVOR_EQE_OWNER_SET_HW(eq, eqe);
691
692 /* Sync the current EQE for device */
693 tavor_eqe_sync(eq, eqe, DDI_DMA_SYNC_FORDEV,
694 TAVOR_EQ_SYNC_NORMAL);
695
696 /* Increment the consumer index */
697 cons_indx = (cons_indx + 1) & wrap_around_mask;
698
699 /* Update the pointer to the next EQ entry */
700 eqe = &eq->eq_buf[cons_indx];
701
702 /* Sync the next EQE to read */
703 tavor_eqe_sync(eq, eqe, DDI_DMA_SYNC_FORCPU,
704 TAVOR_EQ_SYNC_NORMAL);
705 }
706
707 /*
708 * Clear the ECR. Specifically, clear the bit corresponding
709 * to the event queue just processed.
710 */
711 ecr_mask = ((uint64_t)1 << eq->eq_eqnum);
712 ddi_put64(state->ts_reg_cmdhdl, clr_ecr, ecr_mask);
713
714 /* Write an EQ doorbell to update the consumer index */
715 eq->eq_consindx = cons_indx;
716 tavor_eq_doorbell(state, TAVOR_EQDB_SET_CONSINDX, eq->eq_eqnum,
717 cons_indx);
718
719 /* Write another EQ doorbell to rearm */
720 tavor_eq_doorbell(state, TAVOR_EQDB_REARM_EQ, eq->eq_eqnum, 0);
721
722 /*
723 * NOTE: Due to the nature of Mellanox hardware, we do not have
724 * to do an explicit PIO read to ensure that the doorbell write
725 * has been flushed to the hardware. There is state encoded in
726 * the doorbell information we write which makes this
727 * unnecessary. We can be assured that if an event needs to be
728 * generated, the hardware will make sure that it is, solving
729 * the possible race condition.
730 */
731
732 /* Sync the next EQE to read */
733 tavor_eqe_sync(eq, eqe, DDI_DMA_SYNC_FORCPU,
734 TAVOR_EQ_SYNC_NORMAL);
735
736 } while (TAVOR_EQE_OWNER_IS_SW(eq, eqe));
737 }
738
739
740 /*
741 * tavor_eq_catastrophic
742 * Context: Only called from interrupt context (and during panic)
743 */
744 static void
745 tavor_eq_catastrophic(tavor_state_t *state)
746 {
747 ibt_async_code_t type;
748 ibc_async_event_t event;
749 uint32_t *base_addr;
750 uint32_t buf_size;
751 uint32_t word;
752 uint8_t err_type;
753 uint32_t err_buf;
754 int i;
755
756 bzero(&event, sizeof (ibc_async_event_t));
757
758 base_addr = (uint32_t *)(uintptr_t)(
759 (uintptr_t)state->ts_reg_cmd_baseaddr +
760 state->ts_fw.error_buf_addr);
761 buf_size = state->ts_fw.error_buf_sz;
762
763 word = ddi_get32(state->ts_reg_cmdhdl, base_addr);
764
765 err_type = (word & 0xFF000000) >> 24;
766 type = IBT_ERROR_LOCAL_CATASTROPHIC;
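/*
 * The error type is carried in the most significant byte of the first
 * word of the catastrophic error buffer (hence the mask and shift
 * above); the switch below decodes it only for logging purposes.
 */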
767
768 switch (err_type) {
769 case TAVOR_CATASTROPHIC_INTERNAL_ERROR:
770 cmn_err(CE_WARN, "Catastrophic Internal Error: 0x%02x",
771 err_type);
772
773 break;
774
775 case TAVOR_CATASTROPHIC_UPLINK_BUS_ERROR:
776 cmn_err(CE_WARN, "Catastrophic Uplink Bus Error: 0x%02x",
777 err_type);
778
779 break;
780
781 case TAVOR_CATASTROPHIC_DDR_DATA_ERROR:
782 cmn_err(CE_WARN, "Catastrophic DDR Data Error: 0x%02x",
783 err_type);
784
785 break;
786
787 case TAVOR_CATASTROPHIC_INTERNAL_PARITY_ERROR:
788 cmn_err(CE_WARN, "Catastrophic Internal Parity Error: 0x%02x",
789 err_type);
790
791 break;
792
793 default:
794 /* Unknown type of Catastrophic error */
795 cmn_err(CE_WARN, "Catastrophic Unknown Error: 0x%02x",
796 err_type);
797
798 break;
799 }
800
801 /*
802 * Read in the catastrophic error buffer from the hardware, printing
803 * only to the log file.
804 */
805 for (i = 0; i < buf_size; i += 4) {
806 base_addr = (uint32_t *)((uintptr_t)(state->ts_reg_cmd_baseaddr
807 + state->ts_fw.error_buf_addr + (i * 4)));
808 err_buf = ddi_get32(state->ts_reg_cmdhdl, base_addr);
809 cmn_err(CE_WARN, "catastrophic_error[%02x]: %08X", i, err_buf);
810 }
811
812 /*
813 * We also call the IBTF here to inform it of the catastrophic error.
814 * Note: Since no event information (i.e. QP handles, CQ handles,
815 * etc.) is necessary, we simply pass a pointer to an empty (zeroed)
816 * ibc_async_event_t struct.
817 *
818 * But we also check if "ts_ibtfpriv" is NULL. If it is then it
819 * means that we have either received this event before we
820 * finished attaching to the IBTF or we've received it while we
821 * are in the process of detaching.
822 */
823 if (state->ts_ibtfpriv != NULL) {
824 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
825 }
826 }
827
828
829 /*
830 * tavor_eq_alloc()
831 * Context: Only called from attach() path context
832 */
833 static int
834 tavor_eq_alloc(tavor_state_t *state, uint32_t log_eq_size, uint_t intr,
835 tavor_eqhdl_t *eqhdl)
836 {
837 tavor_rsrc_t *eqc, *rsrc;
838 tavor_hw_eqc_t eqc_entry;
839 tavor_eqhdl_t eq;
840 ibt_mr_attr_t mr_attr;
841 tavor_mr_options_t op;
842 tavor_pdhdl_t pd;
843 tavor_mrhdl_t mr;
844 tavor_hw_eqe_t *buf;
845 uint64_t addr;
846 uint32_t lkey;
847 uint_t dma_xfer_mode;
848 int status, i;
849
850 /* Use the internal protection domain (PD) for setting up EQs */
851 pd = state->ts_pdhdl_internal;
852
853 /* Increment the reference count on the protection domain (PD) */
854 tavor_pd_refcnt_inc(pd);
855
856 /*
857 * Allocate an EQ context entry. This will be filled in with all
858 * the necessary parameters to define the Event Queue. And then
859 * ownership will be passed to the hardware in the final step
860 * below. If we fail here, we must undo the protection domain
861 * reference count.
862 */
863 status = tavor_rsrc_alloc(state, TAVOR_EQC, 1, TAVOR_SLEEP, &eqc);
864 if (status != DDI_SUCCESS) {
865 goto eqalloc_fail1;
866 }
867
868 /*
869 * Allocate the software structure for tracking the event queue (i.e.
870 * the Tavor Event Queue handle). If we fail here, we must undo the
871 * protection domain reference count and the previous resource
872 * allocation.
873 */
874 status = tavor_rsrc_alloc(state, TAVOR_EQHDL, 1, TAVOR_SLEEP, &rsrc);
875 if (status != DDI_SUCCESS) {
876 goto eqalloc_fail2;
877 }
878 eq = (tavor_eqhdl_t)rsrc->tr_addr;
879
880 /*
881 * Allocate the memory for Event Queue. Note: Although we use the
882 * common queue allocation routine, we always specify
883 * TAVOR_QUEUE_LOCATION_NORMAL (i.e. EQ located in system memory)
884 * because it would be inefficient to have EQs located in DDR memory.
885 * This is primarily because EQs are read from (by software) more
886 * than they are written to. Also note that, unlike Tavor QP work
887 * queues, event queues do not have the same strict alignment
888 * requirements. It is sufficient for the EQ memory to be both
889 * aligned to and bound to addresses which are a multiple of EQE size.
890 */
891 eq->eq_eqinfo.qa_size = (1 << log_eq_size) * sizeof (tavor_hw_eqe_t);
892 eq->eq_eqinfo.qa_alloc_align = sizeof (tavor_hw_eqe_t);
893 eq->eq_eqinfo.qa_bind_align = sizeof (tavor_hw_eqe_t);
894 eq->eq_eqinfo.qa_location = TAVOR_QUEUE_LOCATION_NORMAL;
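/*
 * Sizing sketch (illustrative only): with a log_eq_size of 8 and a
 * hypothetical 32-byte EQE, qa_size works out to (1 << 8) * 32 = 8KB,
 * with both the allocation and the binding aligned to the EQE size as
 * described above.
 */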
895 status = tavor_queue_alloc(state, &eq->eq_eqinfo, TAVOR_SLEEP);
896 if (status != DDI_SUCCESS) {
897 goto eqalloc_fail3;
898 }
899 buf = (tavor_hw_eqe_t *)eq->eq_eqinfo.qa_buf_aligned;
900
901 /*
902 * Initialize each of the Event Queue Entries (EQE) by setting their
903 * ownership to hardware ("owner" bit set to HW). This is in
904 * preparation for the final transfer of ownership (below) of the
905 * EQ context itself.
906 */
907 for (i = 0; i < (1 << log_eq_size); i++) {
908 TAVOR_EQE_OWNER_SET_HW(eq, &buf[i]);
909 }
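/*
 * (All entries start out owned by the hardware; an entry is expected
 * to pass back to software ownership once the hardware has written an
 * event into it, at which point tavor_eq_poll() processes it and
 * returns it to the hardware via TAVOR_EQE_OWNER_SET_HW().)
 */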
910
911 /*
912 * Register the memory for the EQ. The memory for the EQ must
913 * be registered in the Tavor TPT tables. This gives us the LKey
914 * to specify in the EQ context below.
915 *
916 * Because we are in the attach path we use NOSLEEP here so that we
917 * SPIN in the HCR since the event queues are not set up yet, and we
918 * cannot NOSPIN at this point in time.
919 */
920 mr_attr.mr_vaddr = (uint64_t)(uintptr_t)buf;
921 mr_attr.mr_len = eq->eq_eqinfo.qa_size;
922 mr_attr.mr_as = NULL;
923 mr_attr.mr_flags = IBT_MR_NOSLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
924 dma_xfer_mode = state->ts_cfg_profile->cp_streaming_consistent;
925 if (dma_xfer_mode == DDI_DMA_STREAMING) {
926 mr_attr.mr_flags |= IBT_MR_NONCOHERENT;
927 }
928 op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
929 op.mro_bind_dmahdl = eq->eq_eqinfo.qa_dmahdl;
930 op.mro_bind_override_addr = 0;
931 status = tavor_mr_register(state, pd, &mr_attr, &mr, &op);
932 if (status != DDI_SUCCESS) {
933 goto eqalloc_fail4;
934 }
935 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr))
936 addr = mr->mr_bindinfo.bi_addr;
937 lkey = mr->mr_lkey;
938
939 /* Determine if later ddi_dma_sync will be necessary */
940 eq->eq_sync = TAVOR_EQ_IS_SYNC_REQ(state, eq->eq_eqinfo);
941
942 /* Sync entire EQ for use by the hardware (if necessary) */
943 if (eq->eq_sync) {
944 (void) ddi_dma_sync(mr->mr_bindinfo.bi_dmahdl, 0,
945 eq->eq_eqinfo.qa_size, DDI_DMA_SYNC_FORDEV);
946 }
947
948 /*
949 * Fill in the EQC entry. This is the final step before passing
950 * ownership of the EQC entry to the Tavor hardware. We use all of
951 * the information collected/calculated above to fill in the
952 * requisite portions of the EQC. Note: We create all EQs in the
953 * "fired" state. We will arm them later (after our interrupt
954 * routine has been registered).
955 */
956 bzero(&eqc_entry, sizeof (tavor_hw_eqc_t));
957 eqc_entry.owner = TAVOR_HW_OWNER;
958 eqc_entry.xlat = TAVOR_VA2PA_XLAT_ENABLED;
959 eqc_entry.state = TAVOR_EQ_FIRED;
960 eqc_entry.start_addr_h = (addr >> 32);
961 eqc_entry.start_addr_l = (addr & 0xFFFFFFFF);
962 eqc_entry.log_eq_sz = log_eq_size;
963 eqc_entry.usr_page = 0;
964 eqc_entry.pd = pd->pd_pdnum;
965 eqc_entry.intr = intr;
966 eqc_entry.lkey = lkey;
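/*
 * Note that the 64-bit registered EQ buffer address is split into its
 * high and low 32-bit halves (start_addr_h/start_addr_l), and the LKey
 * obtained from the registration above is what the EQ context uses to
 * reference that memory.
 */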
967
968 /*
969 * Write the EQC entry to hardware. Lastly, we pass ownership of
970 * the entry to the hardware (using the Tavor SW2HW_EQ firmware
971 * command). Note: in general, this operation shouldn't fail. But
972 * if it does, we have to undo everything we've done above before
973 * returning error.
974 */
975 status = tavor_cmn_ownership_cmd_post(state, SW2HW_EQ, &eqc_entry,
976 sizeof (tavor_hw_eqc_t), eqc->tr_indx, TAVOR_CMD_NOSLEEP_SPIN);
977 if (status != TAVOR_CMD_SUCCESS) {
978 cmn_err(CE_CONT, "Tavor: SW2HW_EQ command failed: %08x\n",
979 status);
980 goto eqalloc_fail5;
981 }
982
983 /*
984 * Fill in the rest of the Tavor Event Queue handle. Having
985 * successfully transferred ownership of the EQC, we can update the
986 * following fields for use in further operations on the EQ.
987 */
988 eq->eq_eqcrsrcp = eqc;
989 eq->eq_rsrcp = rsrc;
990 eq->eq_consindx = 0;
991 eq->eq_eqnum = eqc->tr_indx;
992 eq->eq_buf = buf;
993 eq->eq_bufsz = (1 << log_eq_size);
994 eq->eq_mrhdl = mr;
995 *eqhdl = eq;
996
997 return (DDI_SUCCESS);
998
999 /*
1000 * The following is cleanup for all possible failure cases in this routine
1001 */
1002 eqalloc_fail5:
1003 if (tavor_mr_deregister(state, &mr, TAVOR_MR_DEREG_ALL,
1004 TAVOR_NOSLEEP) != DDI_SUCCESS) {
1005 TAVOR_WARNING(state, "failed to deregister EQ memory");
1006 }
1007 eqalloc_fail4:
1008 tavor_queue_free(state, &eq->eq_eqinfo);
1009 eqalloc_fail3:
1010 tavor_rsrc_free(state, &rsrc);
1011 eqalloc_fail2:
1012 tavor_rsrc_free(state, &eqc);
1013 eqalloc_fail1:
1014 tavor_pd_refcnt_dec(pd);
1015 return (status);
1016 }
1017
1018
1019 /*
1020 * tavor_eq_free()
1021 * Context: Only called from attach() and/or detach() path contexts
1022 */
1023 static int
1024 tavor_eq_free(tavor_state_t *state, tavor_eqhdl_t *eqhdl)
1025 {
1026 tavor_rsrc_t *eqc, *rsrc;
1027 tavor_hw_eqc_t eqc_entry;
1028 tavor_pdhdl_t pd;
1029 tavor_mrhdl_t mr;
1030 tavor_eqhdl_t eq;
1031 uint32_t eqnum;
1032 int status;
1033
1034 /*
1035 * Pull all the necessary information from the Tavor Event Queue
1036 * handle. This is necessary here because the resource for the
1037 * EQ handle is going to be freed up as part of this operation.
1038 */
1039 eq = *eqhdl;
1040 eqc = eq->eq_eqcrsrcp;
1041 rsrc = eq->eq_rsrcp;
1042 pd = state->ts_pdhdl_internal;
1043 mr = eq->eq_mrhdl;
1044 eqnum = eq->eq_eqnum;
1045
1046 /*
1047 * Reclaim EQC entry from hardware (using the Tavor HW2SW_EQ
1048 * firmware command). If the ownership transfer fails for any reason,
1049 * then it is an indication that something (either in HW or SW) has
1050 * gone seriously wrong.
1051 */
1052 status = tavor_cmn_ownership_cmd_post(state, HW2SW_EQ, &eqc_entry,
1053 sizeof (tavor_hw_eqc_t), eqnum, TAVOR_CMD_NOSLEEP_SPIN);
1054 if (status != TAVOR_CMD_SUCCESS) {
1055 TAVOR_WARNING(state, "failed to reclaim EQC ownership");
1056 cmn_err(CE_CONT, "Tavor: HW2SW_EQ command failed: %08x\n",
1057 status);
1058 return (DDI_FAILURE);
1059 }
1060
1061 /*
1062 * Deregister the memory for the Event Queue. If this fails
1063 * for any reason, then it is an indication that something (either
1064 * in HW or SW) has gone seriously wrong. So we print a warning
1065 * message and continue.
1066 */
1067 status = tavor_mr_deregister(state, &mr, TAVOR_MR_DEREG_ALL,
1068 TAVOR_NOSLEEP);
1069 if (status != DDI_SUCCESS) {
1070 TAVOR_WARNING(state, "failed to deregister EQ memory");
1071 }
1072
1073 /* Free the memory for the EQ */
1074 tavor_queue_free(state, &eq->eq_eqinfo);
1075
1076 /* Free the Tavor Event Queue handle */
1077 tavor_rsrc_free(state, &rsrc);
1078
1079 /* Free up the EQC entry resource */
1080 tavor_rsrc_free(state, &eqc);
1081
1082 /* Decrement the reference count on the protection domain (PD) */
1083 tavor_pd_refcnt_dec(pd);
1084
1085 /* Set the eqhdl pointer to NULL and return success */
1086 *eqhdl = NULL;
1087
1088 return (DDI_SUCCESS);
1089 }
1090
1091
1092 /*
1093 * tavor_eq_handler_init
1094 * Context: Only called from attach() path context
1095 */
1096 static int
1097 tavor_eq_handler_init(tavor_state_t *state, tavor_eqhdl_t eq,
1098 uint_t evt_type_mask, int (*eq_func)(tavor_state_t *state,
1099 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe))
1100 {
1101 int status;
1102
1103 /*
1104 * Save away the EQ handler function and the event type mask. These
1105 * will be used later during interrupt and event queue processing.
1106 */
1107 eq->eq_func = eq_func;
1108 eq->eq_evttypemask = evt_type_mask;
1109
1110 /*
1111 * Map the EQ to a specific class of event (or events) depending
1112 * on the mask value passed in. The TAVOR_EVT_NO_MASK means not
1113 * to attempt associating the EQ with any specific class of event.
1114 * This is particularly useful when initializing the event queues
1115 * used for CQ events. The mapping is done using the Tavor MAP_EQ
1116 * firmware command. Note: This command should not, in general, fail.
1117 * If it does, then something (probably HW related) has gone seriously
1118 * wrong.
1119 */
1120 if (evt_type_mask != TAVOR_EVT_NO_MASK) {
1121 status = tavor_map_eq_cmd_post(state,
1122 TAVOR_CMD_MAP_EQ_EVT_MAP, eq->eq_eqnum, evt_type_mask,
1123 TAVOR_CMD_NOSLEEP_SPIN);
1124 if (status != TAVOR_CMD_SUCCESS) {
1125 cmn_err(CE_CONT, "Tavor: MAP_EQ command failed: "
1126 "%08x\n", status);
1127 return (DDI_FAILURE);
1128 }
1129 }
1130
1131 return (DDI_SUCCESS);
1132 }
1133
1134
1135 /*
1136 * tavor_eq_handler_fini
1137 * Context: Only called from attach() and/or detach() path contexts
1138 */
1139 static int
1140 tavor_eq_handler_fini(tavor_state_t *state, tavor_eqhdl_t eq)
1141 {
1142 int status;
1143
1144 /*
1145 * Unmap the EQ from the event class to which it had been previously
1146 * mapped. The unmapping is done using the Tavor MAP_EQ (in much
1147 * the same way that the initial mapping was done). The difference,
1148 * however, is in the TAVOR_CMD_MAP_EQ_EVT_UNMAP flag that is passed to the
1149 * MAP_EQ firmware command. The TAVOR_EVT_NO_MASK (which may have
1150 * been passed in at init time) still means that no association has
1151 * been made between the EQ and any specific class of event (and,
1152 * hence, no unmapping is necessary). Note: This command should not,
1153 * in general, fail. If it does, then something (probably HW related)
1154 * has gone seriously wrong.
1155 */
1156 if (eq->eq_evttypemask != TAVOR_EVT_NO_MASK) {
1157 status = tavor_map_eq_cmd_post(state,
1158 TAVOR_CMD_MAP_EQ_EVT_UNMAP, eq->eq_eqnum,
1159 eq->eq_evttypemask, TAVOR_CMD_NOSLEEP_SPIN);
1160 if (status != TAVOR_CMD_SUCCESS) {
1161 cmn_err(CE_CONT, "Tavor: MAP_EQ command failed: "
1162 "%08x\n", status);
1163 return (DDI_FAILURE);
1164 }
1165 }
1166
1167 return (DDI_SUCCESS);
1168 }
1169
1170
1171 /*
1172 * tavor_eqe_sync()
1173 * Context: Can be called from interrupt or base context.
1174 *
1175 * Typically, this routine does nothing unless the EQ memory is
1176 * mapped as DDI_DMA_STREAMING. However, there is a condition where
1177 * ddi_dma_sync() is necessary even if the memory was mapped in
1178 * consistent mode. The "force_sync" parameter is used here to force
1179 * the call to ddi_dma_sync() independent of how the EQ memory was
1180 * mapped.
1181 */
1182 static void
1183 tavor_eqe_sync(tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe, uint_t flag,
1184 uint_t force_sync)
1185 {
1186 ddi_dma_handle_t dmahdl;
1187 off_t offset;
1188
1189 /* Determine if EQ needs to be synced or not */
1190 if ((eq->eq_sync == 0) && (force_sync == TAVOR_EQ_SYNC_NORMAL)) {
1191 return;
1192 }
1193
1194 /* Get the DMA handle from EQ context */
1195 dmahdl = eq->eq_mrhdl->mr_bindinfo.bi_dmahdl;
1196
1197 /* Calculate offset of next EQE */
1198 offset = (off_t)((uintptr_t)eqe - (uintptr_t)&eq->eq_buf[0]);
1199 (void) ddi_dma_sync(dmahdl, offset, sizeof (tavor_hw_eqe_t), flag);
1200 }
1201
1202
1203 /*
1204 * tavor_port_state_change_handler()
1205 * Context: Only called from interrupt context
1206 */
1207 static int
1208 tavor_port_state_change_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1209 tavor_hw_eqe_t *eqe)
1210 {
1211 ibc_async_event_t event;
1212 ibt_async_code_t type;
1213 uint_t port, subtype;
1214 uint_t eqe_evttype;
1215 char link_msg[24];
1216
1217 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1218
1219 ASSERT(eqe_evttype == TAVOR_EVT_PORT_STATE_CHANGE ||
1220 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1221
1222 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1223 tavor_eq_overflow_handler(state, eq, eqe);
1224
1225 return (DDI_FAILURE);
1226 }
1227
1228 /*
1229 * Depending on the type of Port State Change event, pass the
1230 * appropriate asynch event to the IBTF.
1231 */
1232 port = TAVOR_EQE_PORTNUM_GET(eq, eqe);
1233
1234 /* Check for valid port number in event */
1235 if ((port == 0) || (port > state->ts_cfg_profile->cp_num_ports)) {
1236 TAVOR_WARNING(state, "Unexpected port number in port state "
1237 "change event");
1238 cmn_err(CE_CONT, " Port number: %02x\n", port);
1239 return (DDI_FAILURE);
1240 }
1241
1242 subtype = TAVOR_EQE_EVTSUBTYPE_GET(eq, eqe);
1243 if (subtype == TAVOR_PORT_LINK_ACTIVE) {
1244 event.ev_port = port;
1245 type = IBT_EVENT_PORT_UP;
1246
1247 (void) snprintf(link_msg, 23, "port %d up", port);
1248 ddi_dev_report_fault(state->ts_dip, DDI_SERVICE_RESTORED,
1249 DDI_EXTERNAL_FAULT, link_msg);
1250 } else if (subtype == TAVOR_PORT_LINK_DOWN) {
1251 event.ev_port = port;
1252 type = IBT_ERROR_PORT_DOWN;
1253
1254 (void) snprintf(link_msg, 23, "port %d down", port);
1255 ddi_dev_report_fault(state->ts_dip, DDI_SERVICE_LOST,
1256 DDI_EXTERNAL_FAULT, link_msg);
1257 } else {
1258 TAVOR_WARNING(state, "Unexpected subtype in port state change "
1259 "event");
1260 cmn_err(CE_CONT, " Event type: %02x, subtype: %02x\n",
1261 TAVOR_EQE_EVTTYPE_GET(eq, eqe), subtype);
1262 return (DDI_FAILURE);
1263 }
1264
1265 /*
1266 * Deliver the event to the IBTF. Note: If "ts_ibtfpriv" is NULL,
1267 * then we have either received this event before we finished
1268 * attaching to the IBTF or we've received it while we are in the
1269 * process of detaching.
1270 */
1271 if (state->ts_ibtfpriv != NULL) {
1272 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1273 }
1274
1275 return (DDI_SUCCESS);
1276 }
1277
1278
1279 /*
1280 * tavor_comm_estbl_handler()
1281 * Context: Only called from interrupt context
1282 */
1283 static int
1284 tavor_comm_estbl_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1285 tavor_hw_eqe_t *eqe)
1286 {
1287 tavor_qphdl_t qp;
1288 uint_t qpnum;
1289 ibc_async_event_t event;
1290 ibt_async_code_t type;
1291 uint_t eqe_evttype;
1292
1293 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1294
1295 ASSERT(eqe_evttype == TAVOR_EVT_COMM_ESTABLISHED ||
1296 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1297
1298 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1299 tavor_eq_overflow_handler(state, eq, eqe);
1300
1301 return (DDI_FAILURE);
1302 }
1303
1304 /* Get the QP handle from QP number in event descriptor */
1305 qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1306 qp = tavor_qphdl_from_qpnum(state, qpnum);
1307
1308 /*
1309 * If the QP handle is NULL, this is probably an indication
1310 * that the QP has been freed already. In which case, we
1311 * should not deliver this event.
1312 *
1313 * We also check that the QP number in the handle is the
1314 * same as the QP number in the event queue entry. This
1315 * extra check allows us to handle the case where a QP was
1316 * freed and then allocated again in the time it took to
1317 * handle the event queue processing. By constantly incrementing
1318 * the non-constrained portion of the QP number every time
1319 * a new QP is allocated, we mitigate (somewhat) the chance
1320 * that a stale event could be passed to the client's QP
1321 * handler.
1322 *
1323 * Lastly, we check if "ts_ibtfpriv" is NULL. If it is then it
1324 * means that we have either received this event before we
1325 * finished attaching to the IBTF or we've received it while we
1326 * are in the process of detaching.
1327 */
1328 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1329 (state->ts_ibtfpriv != NULL)) {
1330 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1331 type = IBT_EVENT_COM_EST_QP;
1332
1333 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1334 }
1335
1336 return (DDI_SUCCESS);
1337 }
1338
1339
1340 /*
1341 * tavor_local_wq_cat_err_handler()
1342 * Context: Only called from interrupt context
1343 */
1344 static int
1345 tavor_local_wq_cat_err_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1346 tavor_hw_eqe_t *eqe)
1347 {
1348 tavor_qphdl_t qp;
1349 uint_t qpnum;
1350 ibc_async_event_t event;
1351 ibt_async_code_t type;
1352 uint_t eqe_evttype;
1353
1354 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1355
1356 ASSERT(eqe_evttype == TAVOR_EVT_LOCAL_WQ_CAT_ERROR ||
1357 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1358
1359 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1360 tavor_eq_overflow_handler(state, eq, eqe);
1361
1362 return (DDI_FAILURE);
1363 }
1364
1365 /* Get the QP handle from QP number in event descriptor */
1366 qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1367 qp = tavor_qphdl_from_qpnum(state, qpnum);
1368
1369 /*
1370 * If the QP handle is NULL, this is probably an indication
1371 * that the QP has been freed already. In which case, we
1372 * should not deliver this event.
1373 *
1374 * We also check that the QP number in the handle is the
1375 * same as the QP number in the event queue entry. This
1376 * extra check allows us to handle the case where a QP was
1377 * freed and then allocated again in the time it took to
1378 * handle the event queue processing. By constantly incrementing
1379 * the non-constrained portion of the QP number every time
1380 * a new QP is allocated, we mitigate (somewhat) the chance
1381 * that a stale event could be passed to the client's QP
1382 * handler.
1383 *
1384 * Lastly, we check if "ts_ibtfpriv" is NULL. If it is then it
1385 * means that we have either received this event before we
1386 * finished attaching to the IBTF or we've received it while we
1387 * are in the process of detaching.
1388 */
1389 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1390 (state->ts_ibtfpriv != NULL)) {
1391 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1392 type = IBT_ERROR_CATASTROPHIC_QP;
1393
1394 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1395 }
1396
1397 return (DDI_SUCCESS);
1398 }
1399
1400
1401 /*
1402 * tavor_invreq_local_wq_err_handler()
1403 * Context: Only called from interrupt context
1404 */
1405 static int
1406 tavor_invreq_local_wq_err_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1407 tavor_hw_eqe_t *eqe)
1408 {
1409 tavor_qphdl_t qp;
1410 uint_t qpnum;
1411 ibc_async_event_t event;
1412 ibt_async_code_t type;
1413 uint_t eqe_evttype;
1414
1415 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1416
1417 ASSERT(eqe_evttype == TAVOR_EVT_INV_REQ_LOCAL_WQ_ERROR ||
1418 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1419
1420 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1421 tavor_eq_overflow_handler(state, eq, eqe);
1422
1423 return (DDI_FAILURE);
1424 }
1425
1426 /* Get the QP handle from QP number in event descriptor */
1427 qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1428 qp = tavor_qphdl_from_qpnum(state, qpnum);
1429
1430 /*
1431 * If the QP handle is NULL, this is probably an indication
1432 * that the QP has been freed already. In which case, we
1433 * should not deliver this event.
1434 *
1435 * We also check that the QP number in the handle is the
1436 * same as the QP number in the event queue entry. This
1437 * extra check allows us to handle the case where a QP was
1438 * freed and then allocated again in the time it took to
1439 * handle the event queue processing. By constantly incrementing
1440 * the non-constrained portion of the QP number every time
1441 * a new QP is allocated, we mitigate (somewhat) the chance
1442 * that a stale event could be passed to the client's QP
1443 * handler.
1444 *
1445 * Lastly, we check if "ts_ibtfpriv" is NULL. If it is then it
1446 * means that we have either received this event before we
1447 * finished attaching to the IBTF or we've received it while we
1448 * are in the process of detaching.
1449 */
1450 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1451 (state->ts_ibtfpriv != NULL)) {
1452 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1453 type = IBT_ERROR_INVALID_REQUEST_QP;
1454
1455 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1456 }
1457
1458 return (DDI_SUCCESS);
1459 }
1460
1461
1462 /*
1463 * tavor_local_acc_vio_wq_err_handler()
1464 * Context: Only called from interrupt context
1465 */
1466 static int
1467 tavor_local_acc_vio_wq_err_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1468 tavor_hw_eqe_t *eqe)
1469 {
1470 tavor_qphdl_t qp;
1471 uint_t qpnum;
1472 ibc_async_event_t event;
1473 ibt_async_code_t type;
1474 uint_t eqe_evttype;
1475
1476 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1477
1478 ASSERT(eqe_evttype == TAVOR_EVT_LOCAL_ACC_VIO_WQ_ERROR ||
1479 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1480
1481 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1482 tavor_eq_overflow_handler(state, eq, eqe);
1483
1484 return (DDI_FAILURE);
1485 }
1486
1487 /* Get the QP handle from QP number in event descriptor */
1488 qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1489 qp = tavor_qphdl_from_qpnum(state, qpnum);
1490
1491 /*
1492 * If the QP handle is NULL, this is probably an indication
1493 * that the QP has been freed already. In which case, we
1494 * should not deliver this event.
1495 *
1496 * We also check that the QP number in the handle is the
1497 * same as the QP number in the event queue entry. This
1498 * extra check allows us to handle the case where a QP was
1499 * freed and then allocated again in the time it took to
1500 * handle the event queue processing. By constantly incrementing
1501 * the non-constrained portion of the QP number every time
1502 * a new QP is allocated, we mitigate (somewhat) the chance
1503 * that a stale event could be passed to the client's QP
1504 * handler.
1505 *
1506 * Lastly, we check if "ts_ibtfpriv" is NULL. If it is then it
1507 * means that we have either received this event before we
1508 * finished attaching to the IBTF or we've received it while we
1509 * are in the process of detaching.
1510 */
1511 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1512 (state->ts_ibtfpriv != NULL)) {
1513 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1514 type = IBT_ERROR_ACCESS_VIOLATION_QP;
1515
1516 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1517 }
1518
1519 return (DDI_SUCCESS);
1520 }
1521
1522
1523 /*
1524 * tavor_sendq_drained_handler()
1525 * Context: Only called from interrupt context
1526 */
1527 static int
1528 tavor_sendq_drained_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1529 tavor_hw_eqe_t *eqe)
1530 {
1531 tavor_qphdl_t qp;
1532 uint_t qpnum;
1533 ibc_async_event_t event;
1534 uint_t forward_sqd_event;
1535 ibt_async_code_t type;
1536 uint_t eqe_evttype;
1537
1538 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1539
1540 ASSERT(eqe_evttype == TAVOR_EVT_SEND_QUEUE_DRAINED ||
1541 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1542
1543 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1544 tavor_eq_overflow_handler(state, eq, eqe);
1545
1546 return (DDI_FAILURE);
1547 }
1548
1549 /* Get the QP handle from QP number in event descriptor */
1550 qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1551 qp = tavor_qphdl_from_qpnum(state, qpnum);
1552
1553 /*
1554 * If the QP handle is NULL, this is probably an indication
1555 * that the QP has been freed already. In which case, we
1556 * should not deliver this event.
1557 *
1558 * We also check that the QP number in the handle is the
1559 * same as the QP number in the event queue entry. This
1560 * extra check allows us to handle the case where a QP was
1561 * freed and then allocated again in the time it took to
1562 * handle the event queue processing. By constantly incrementing
1563 * the non-constrained portion of the QP number every time
1564 * a new QP is allocated, we mitigate (somewhat) the chance
1565 * that a stale event could be passed to the client's QP
1566 * handler.
1567 *
1568 * And then we check if "ts_ibtfpriv" is NULL. If it is then it
1569 * means that we have either received this event before we
1570 * finished attaching to the IBTF or we've received it while we
1571 * are in the process of detaching.
1572 */
1573 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1574 (state->ts_ibtfpriv != NULL)) {
1575 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1576 type = IBT_EVENT_SQD;
1577
1578 /*
1579 * Grab the QP lock and update the QP state to reflect that
1580 * the Send Queue Drained event has arrived. Also determine
1581 * whether the event is intended to be forwarded on to the
1582 * consumer or not. This information is used below in
1583 * determining whether or not to call the IBTF.
1584 */
1585 mutex_enter(&qp->qp_lock);
1586 forward_sqd_event = qp->qp_forward_sqd_event;
1587 qp->qp_forward_sqd_event = 0;
1588 qp->qp_sqd_still_draining = 0;
1589 mutex_exit(&qp->qp_lock);
1590
1591 if (forward_sqd_event != 0) {
1592 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1593 }
1594 }
1595
1596 return (DDI_SUCCESS);
1597 }
1598
1599
1600 /*
1601 * tavor_path_mig_handler()
1602 * Context: Only called from interrupt context
1603 */
1604 static int
1605 tavor_path_mig_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1606 tavor_hw_eqe_t *eqe)
1607 {
1608 tavor_qphdl_t qp;
1609 uint_t qpnum;
1610 ibc_async_event_t event;
1611 ibt_async_code_t type;
1612 uint_t eqe_evttype;
1613
1614 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1615
1616 ASSERT(eqe_evttype == TAVOR_EVT_PATH_MIGRATED ||
1617 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1618
1619 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1620 tavor_eq_overflow_handler(state, eq, eqe);
1621
1622 return (DDI_FAILURE);
1623 }
1624
1625 /* Get the QP handle from QP number in event descriptor */
1626 qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1627 qp = tavor_qphdl_from_qpnum(state, qpnum);
1628
1629 /*
1630 * If the QP handle is NULL, this is probably an indication
1631 	 * that the QP has been freed already, in which case we
1632 * should not deliver this event.
1633 *
1634 * We also check that the QP number in the handle is the
1635 * same as the QP number in the event queue entry. This
1636 * extra check allows us to handle the case where a QP was
1637 * freed and then allocated again in the time it took to
1638 * handle the event queue processing. By constantly incrementing
1639 * the non-constrained portion of the QP number every time
1640 * a new QP is allocated, we mitigate (somewhat) the chance
1641 * that a stale event could be passed to the client's QP
1642 * handler.
1643 *
1644 * Lastly, we check if "ts_ibtfpriv" is NULL. If it is then it
1645 	 * means that we have either received this event before we
1646 * finished attaching to the IBTF or we've received it while we
1647 * are in the process of detaching.
1648 */
1649 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1650 (state->ts_ibtfpriv != NULL)) {
1651 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1652 type = IBT_EVENT_PATH_MIGRATED_QP;
1653
1654 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1655 }
1656
1657 return (DDI_SUCCESS);
1658 }
1659
1660
1661 /*
1662 * tavor_path_mig_err_handler()
1663 * Context: Only called from interrupt context
1664 */
1665 static int
1666 tavor_path_mig_err_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1667 tavor_hw_eqe_t *eqe)
1668 {
1669 tavor_qphdl_t qp;
1670 uint_t qpnum;
1671 ibc_async_event_t event;
1672 ibt_async_code_t type;
1673 uint_t eqe_evttype;
1674
1675 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1676
1677 ASSERT(eqe_evttype == TAVOR_EVT_PATH_MIGRATE_FAILED ||
1678 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1679
1680 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1681 tavor_eq_overflow_handler(state, eq, eqe);
1682
1683 return (DDI_FAILURE);
1684 }
1685
1686 /* Get the QP handle from QP number in event descriptor */
1687 qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1688 qp = tavor_qphdl_from_qpnum(state, qpnum);
1689
1690 /*
1691 * If the QP handle is NULL, this is probably an indication
1692 	 * that the QP has been freed already, in which case we
1693 * should not deliver this event.
1694 *
1695 * We also check that the QP number in the handle is the
1696 * same as the QP number in the event queue entry. This
1697 * extra check allows us to handle the case where a QP was
1698 * freed and then allocated again in the time it took to
1699 * handle the event queue processing. By constantly incrementing
1700 * the non-constrained portion of the QP number every time
1701 * a new QP is allocated, we mitigate (somewhat) the chance
1702 * that a stale event could be passed to the client's QP
1703 * handler.
1704 *
1705 * Lastly, we check if "ts_ibtfpriv" is NULL. If it is then it
1706 	 * means that we have either received this event before we
1707 * finished attaching to the IBTF or we've received it while we
1708 * are in the process of detaching.
1709 */
1710 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1711 (state->ts_ibtfpriv != NULL)) {
1712 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1713 type = IBT_ERROR_PATH_MIGRATE_REQ_QP;
1714
1715 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1716 }
1717
1718 return (DDI_SUCCESS);
1719 }
1720
1721
1722 /*
1723 * tavor_srq_catastrophic_handler()
1724 * Context: Only called from interrupt context
1725 */
1726 static int
1727 tavor_srq_catastrophic_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1728 tavor_hw_eqe_t *eqe)
1729 {
1730 tavor_qphdl_t qp;
1731 uint_t qpnum;
1732 ibc_async_event_t event;
1733 ibt_async_code_t type;
1734 uint_t eqe_evttype;
1735
1736 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1737
1738 ASSERT(eqe_evttype == TAVOR_EVT_SRQ_CATASTROPHIC_ERROR ||
1739 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1740
1741 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1742 tavor_eq_overflow_handler(state, eq, eqe);
1743
1744 return (DDI_FAILURE);
1745 }
1746
1747 /* Get the QP handle from QP number in event descriptor */
1748 qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1749 qp = tavor_qphdl_from_qpnum(state, qpnum);
1750
1751 /*
1752 * If the QP handle is NULL, this is probably an indication
1753 	 * that the QP has been freed already, in which case we
1754 * should not deliver this event.
1755 *
1756 * We also check that the QP number in the handle is the
1757 * same as the QP number in the event queue entry. This
1758 * extra check allows us to handle the case where a QP was
1759 * freed and then allocated again in the time it took to
1760 * handle the event queue processing. By constantly incrementing
1761 * the non-constrained portion of the QP number every time
1762 * a new QP is allocated, we mitigate (somewhat) the chance
1763 * that a stale event could be passed to the client's QP
1764 * handler.
1765 *
1766 * Lastly, we check if "ts_ibtfpriv" is NULL. If it is then it
1767 	 * means that we have either received this event before we
1768 * finished attaching to the IBTF or we've received it while we
1769 * are in the process of detaching.
1770 */
1771 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1772 (state->ts_ibtfpriv != NULL)) {
1773 event.ev_srq_hdl = (ibt_srq_hdl_t)qp->qp_srqhdl->srq_hdlrarg;
1774 type = IBT_ERROR_CATASTROPHIC_SRQ;
1775
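		/* Flag the SRQ as being in the error state */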
1776 mutex_enter(&qp->qp_srqhdl->srq_lock);
1777 qp->qp_srqhdl->srq_state = TAVOR_SRQ_STATE_ERROR;
1778 mutex_exit(&qp->qp_srqhdl->srq_lock);
1779
1780 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1781 }
1782
1783 return (DDI_SUCCESS);
1784 }
1785
1786
1787 /*
1788 * tavor_srq_last_wqe_reached_handler()
1789 * Context: Only called from interrupt context
1790 */
1791 static int
1792 tavor_srq_last_wqe_reached_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1793 tavor_hw_eqe_t *eqe)
1794 {
1795 tavor_qphdl_t qp;
1796 uint_t qpnum;
1797 ibc_async_event_t event;
1798 ibt_async_code_t type;
1799 uint_t eqe_evttype;
1800
1801 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1802
1803 ASSERT(eqe_evttype == TAVOR_EVT_SRQ_LAST_WQE_REACHED ||
1804 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1805
1806 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1807 tavor_eq_overflow_handler(state, eq, eqe);
1808
1809 return (DDI_FAILURE);
1810 }
1811
1812 /* Get the QP handle from QP number in event descriptor */
1813 qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1814 qp = tavor_qphdl_from_qpnum(state, qpnum);
1815
1816 /*
1817 * If the QP handle is NULL, this is probably an indication
1818 	 * that the QP has been freed already, in which case we
1819 * should not deliver this event.
1820 *
1821 * We also check that the QP number in the handle is the
1822 * same as the QP number in the event queue entry. This
1823 * extra check allows us to handle the case where a QP was
1824 * freed and then allocated again in the time it took to
1825 * handle the event queue processing. By constantly incrementing
1826 * the non-constrained portion of the QP number every time
1827 * a new QP is allocated, we mitigate (somewhat) the chance
1828 * that a stale event could be passed to the client's QP
1829 * handler.
1830 *
1831 * Lastly, we check if "ts_ibtfpriv" is NULL. If it is then it
1832 	 * means that we have either received this event before we
1833 * finished attaching to the IBTF or we've received it while we
1834 * are in the process of detaching.
1835 */
1836 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1837 (state->ts_ibtfpriv != NULL)) {
1838 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1839 type = IBT_EVENT_EMPTY_CHAN;
1840
1841 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1842 }
1843
1844 return (DDI_SUCCESS);
1845 }
1846
1847
1848 /*
1849 * tavor_ecc_detection_handler()
1850 * Context: Only called from interrupt context
1851 */
1852 static int
1853 tavor_ecc_detection_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1854 tavor_hw_eqe_t *eqe)
1855 {
1856 uint_t eqe_evttype;
1857 uint_t data;
1858 int i;
1859
1860 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1861
1862 ASSERT(eqe_evttype == TAVOR_EVT_ECC_DETECTION ||
1863 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1864
1865 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1866 tavor_eq_overflow_handler(state, eq, eqe);
1867
1868 return (DDI_FAILURE);
1869 }
1870
1871 /*
1872 	 * The "ECC Detection Event" indicates that a correctable single-bit
1873 	 * error has occurred with the attached DDR. The EQE provides some
1874 * additional information about the errored EQ. So we print a warning
1875 * message here along with that additional information.
1876 */
1877 TAVOR_WARNING(state, "ECC Correctable Error Event Detected");
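	/* Dump the raw EQE contents, one 32-bit word per line */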
1878 for (i = 0; i < sizeof (tavor_hw_eqe_t) >> 2; i++) {
1879 data = ((uint_t *)eqe)[i];
1880 cmn_err(CE_CONT, "! EQE[%02x]: %08x\n", i, data);
1881 }
1882
1883 return (DDI_SUCCESS);
1884 }
1885
1886
1887 /*
1888 * tavor_eq_overflow_handler()
1889 * Context: Only called from interrupt context
1890 */
1891 void
1892 tavor_eq_overflow_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1893 tavor_hw_eqe_t *eqe)
1894 {
1895 uint_t error_type, data;
1896
1897 ASSERT(TAVOR_EQE_EVTTYPE_GET(eq, eqe) == TAVOR_EVT_EQ_OVERFLOW);
1898
1899 /*
1900 * The "Event Queue Overflow Event" indicates that something has
1901 * probably gone seriously wrong with some hardware (or, perhaps,
1902 * with the software... though it's unlikely in this case). The EQE
1903 * provides some additional information about the errored EQ. So we
1904 * print a warning message here along with that additional information.
1905 */
1906 error_type = TAVOR_EQE_OPERRTYPE_GET(eq, eqe);
1907 data = TAVOR_EQE_OPERRDATA_GET(eq, eqe);
1908
1909 TAVOR_WARNING(state, "Event Queue overflow");
1910 cmn_err(CE_CONT, " Error type: %02x, data: %08x\n", error_type, data);
1911 }
1912
1913
1914 /*
1915 * tavor_no_eqhandler
1916 * Context: Only called from interrupt context
1917 */
1918 /* ARGSUSED */
1919 static int
1920 tavor_no_eqhandler(tavor_state_t *state, tavor_eqhdl_t eq,
1921 tavor_hw_eqe_t *eqe)
1922 {
1923 uint_t data;
1924 int i;
1925
1926 /*
1927 * This "unexpected event" handler (or "catch-all" handler) will
1928 * receive all events for which no other handler has been registered.
1929 * If we end up here, then something has probably gone seriously wrong
1930 * with the Tavor hardware (or, perhaps, with the software... though
1931 * it's unlikely in this case). The EQE provides all the information
1932 * about the event. So we print a warning message here along with
1933 * the contents of the EQE.
1934 */
1935 TAVOR_WARNING(state, "Unexpected Event handler");
1936 cmn_err(CE_CONT, " Event type: %02x, subtype: %02x\n",
1937 TAVOR_EQE_EVTTYPE_GET(eq, eqe), TAVOR_EQE_EVTSUBTYPE_GET(eq, eqe));
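	/* Dump the entire EQE, one 32-bit word per line */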
1938 for (i = 0; i < sizeof (tavor_hw_eqe_t) >> 2; i++) {
1939 data = ((uint_t *)eqe)[i];
1940 cmn_err(CE_CONT, " EQE[%02x]: %08x\n", i, data);
1941 }
1942
1943 return (DDI_SUCCESS);
1944 }
1945