xref: /titanic_41/usr/src/uts/common/io/ib/adapters/tavor/tavor_event.c (revision 9e39c5ba00a55fa05777cc94b148296af305e135)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * tavor_event.c
29  *    Tavor Interrupt and Event Processing Routines
30  *
31  *    Implements all the routines necessary for allocating, freeing, and
32  *    handling all of the various event types that the Tavor hardware can
33  *    generate.
34  *    These routines include the main Tavor interrupt service routine
35  *    (tavor_isr()) as well as all the code necessary to setup and handle
36  *    events from each of the many event queues used by the Tavor device.
37  */
38 
39 #include <sys/types.h>
40 #include <sys/conf.h>
41 #include <sys/ddi.h>
42 #include <sys/sunddi.h>
43 #include <sys/modctl.h>
44 
45 #include <sys/ib/adapters/tavor/tavor.h>
46 
47 static void tavor_eq_poll(tavor_state_t *state, tavor_eqhdl_t eq);
48 static void tavor_eq_catastrophic(tavor_state_t *state);
49 static int tavor_eq_alloc(tavor_state_t *state, uint32_t log_eq_size,
50     uint_t intr, tavor_eqhdl_t *eqhdl);
51 static int tavor_eq_free(tavor_state_t *state, tavor_eqhdl_t *eqhdl);
52 static int tavor_eq_handler_init(tavor_state_t *state, tavor_eqhdl_t eq,
53     uint_t evt_type_mask, int (*eqfunc)(tavor_state_t *state,
54     tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe));
55 static int tavor_eq_handler_fini(tavor_state_t *state, tavor_eqhdl_t eq);
56 static void tavor_eqe_sync(tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe, uint_t flag,
57     uint_t force_sync);
58 static int tavor_port_state_change_handler(tavor_state_t *state,
59     tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
60 static int tavor_comm_estbl_handler(tavor_state_t *state,
61     tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
62 static int tavor_local_wq_cat_err_handler(tavor_state_t *state,
63     tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
64 static int tavor_invreq_local_wq_err_handler(tavor_state_t *state,
65     tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
66 static int tavor_local_acc_vio_wq_err_handler(tavor_state_t *state,
67     tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
68 static int tavor_sendq_drained_handler(tavor_state_t *state,
69     tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
70 static int tavor_path_mig_handler(tavor_state_t *state,
71     tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
72 static int tavor_path_mig_err_handler(tavor_state_t *state,
73     tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
74 static int tavor_srq_catastrophic_handler(tavor_state_t *state,
75     tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
76 static int tavor_srq_last_wqe_reached_handler(tavor_state_t *state,
77     tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
78 static int tavor_ecc_detection_handler(tavor_state_t *state,
79     tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
80 static int tavor_no_eqhandler(tavor_state_t *state, tavor_eqhdl_t eq,
81     tavor_hw_eqe_t *eqe);
82 
83 
84 /*
85  * tavor_eq_init_all
86  *    Context: Only called from attach() path context
87  */
88 int
89 tavor_eq_init_all(tavor_state_t *state)
90 {
91 	uint_t		log_eq_size, intr_num;
92 	uint_t		num_eq, num_eq_init, num_eq_unmap;
93 	int		status, i;
94 	char		*errormsg;
95 
96 	TAVOR_TNF_ENTER(tavor_eq_init_all);
97 
98 	/*
99 	 * For now, all Event Queues default to the same size (pulled from
100 	 * the current configuration profile) and are all assigned to the
101 	 * same interrupt or MSI.  In the future we may support assigning
102 	 * EQs to specific interrupts or MSIs XXX
103 	 */
104 	log_eq_size = state->ts_cfg_profile->cp_log_default_eq_sz;
105 
106 	/*
107 	 * If MSI is to be used, then set intr_num to the MSI number
108 	 * (currently zero because we're using only one) or'd with the
109 	 * MSI enable flag.  Otherwise, for regular (i.e. 'legacy') interrupt,
110 	 * use the 'inta_pin' value returned by QUERY_ADAPTER.
111 	 */
112 	if (state->ts_intr_type_chosen == DDI_INTR_TYPE_MSI) {
113 		intr_num = TAVOR_EQ_MSI_ENABLE_FLAG | 0;
114 	} else {
115 		intr_num = state->ts_adapter.inta_pin;
116 	}
117 
118 	/*
119 	 * Total number of supported EQs is hardcoded.  Tavor hardware
120 	 * supports up to 64 EQs.  We are currently using only 45 of them
121 	 * We will set aside the first 32 for use with Completion Queues (CQ)
122 	 * and reserve a few of the other 32 for each specific class of event
123 	 * (see below for more details).
124 	 */
125 	num_eq = TAVOR_NUM_EQ_USED;
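
	/*
	 * For reference, the per-EQ event class assignments established
	 * below are:
	 *    EQ0-31: CQ completion events
	 *    EQ32:   CQ error events
	 *    EQ33:   port state change events
	 *    EQ34:   communication established events
	 *    EQ35:   command completion events
	 *    EQ36:   local WQ catastrophic error events
	 *    EQ37:   invalid request local WQ error events
	 *    EQ38:   local access violation WQ error events
	 *    EQ39:   send queue drained events
	 *    EQ40:   path migration succeeded events
	 *    EQ41:   path migration failed events
	 *    EQ42:   local catastrophic error events (no EQEs generated)
	 *    EQ43:   SRQ catastrophic error events
	 *    EQ44:   SRQ last WQE reached events
	 *    EQ45:   ECC error detection events
	 *    EQ46:   catch-all for all other event types
	 */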
126 
127 	/*
128 	 * The "num_eq_unmap" variable is used in any possible failure
129 	 * cleanup (below) to indicate which event queues might require
130 	 * event class unmapping.
131 	 */
132 	num_eq_unmap = 0;
133 
134 	/*
135 	 * Allocate and initialize all the Event Queues.  If any of these
136 	 * EQ allocations fail then jump to the end, cleanup what had been
137 	 * successfully initialized, and return an error.
138 	 */
139 	for (i = 0; i < num_eq; i++) {
140 		status = tavor_eq_alloc(state, log_eq_size, intr_num,
141 		    &state->ts_eqhdl[i]);
142 		if (status != DDI_SUCCESS) {
143 			/* Set "status" and "errormsg" and goto failure */
144 			TAVOR_TNF_FAIL(status, "failed EQ alloc");
145 			num_eq_init = i;
146 			goto all_eq_init_fail;
147 		}
148 	}
149 	num_eq_init = num_eq;
150 
151 	/*
152 	 * Setup EQ0-EQ31 for use with Completion Queues.  Note: We can
153 	 * cast the return value to void here because, when we use the
154 	 * TAVOR_EVT_NO_MASK flag, it is not possible for
155 	 * tavor_eq_handler_init() to return an error.
156 	 */
157 	for (i = 0; i < 32; i++) {
158 		(void) tavor_eq_handler_init(state, state->ts_eqhdl[i],
159 		    TAVOR_EVT_NO_MASK, tavor_cq_handler);
160 	}
161 	num_eq_unmap = 32;
162 
163 	/*
164 	 * Setup EQ32 for handling Completion Queue Error Events.
165 	 *
166 	 * These events include things like CQ overflow or CQ access
167 	 * violation errors.  If this setup fails for any reason (which, in
168 	 * general, it really never should), then jump to the end, cleanup
169 	 * everything that has been successfully initialized, and return an
170 	 * error.
171 	 */
172 	status = tavor_eq_handler_init(state, state->ts_eqhdl[32],
173 	    TAVOR_EVT_MSK_CQ_ERRORS, tavor_cq_err_handler);
174 	if (status != DDI_SUCCESS) {
175 		/* Set "status" and "errormsg" and goto failure */
176 		TAVOR_TNF_FAIL(status, "completion queue error event");
177 		goto all_eq_init_fail;
178 	}
179 	num_eq_unmap = 33;
180 
181 	/*
182 	 * Setup EQ33 for handling Port State Change Events
183 	 *
184 	 * These events include things like Port Up and Port Down events.
185 	 * If this setup fails for any reason (which, in general, it really
186 	 * never should), then undo all previous EQ mapping, jump to the end,
187 	 * cleanup everything that has been successfully initialized, and
188 	 * return an error.
189 	 */
190 	status = tavor_eq_handler_init(state, state->ts_eqhdl[33],
191 	    TAVOR_EVT_MSK_PORT_STATE_CHANGE, tavor_port_state_change_handler);
192 	if (status != DDI_SUCCESS) {
193 		/* Set "status" and "errormsg" and goto failure */
194 		TAVOR_TNF_FAIL(status, "port state change event");
195 		goto all_eq_init_fail;
196 	}
197 	num_eq_unmap = 34;
198 
199 	/*
200 	 * Setup EQ34 for handling Communication Established Events
201 	 *
202 	 * These events correspond to the IB affiliated asynchronous events
203 	 * that are used for connection management.  If this setup fails for
204 	 * any reason (which, in general, it really never should), then undo
205 	 * all previous EQ mapping, jump to the end, cleanup everything that
206 	 * has been successfully initialized, and return an error.
207 	 */
208 	status = tavor_eq_handler_init(state, state->ts_eqhdl[34],
209 	    TAVOR_EVT_MSK_COMM_ESTABLISHED, tavor_comm_estbl_handler);
210 	if (status != DDI_SUCCESS) {
211 		/* Set "status" and "errormsg" and goto failure */
212 		TAVOR_TNF_FAIL(status, "communication established event");
213 		goto all_eq_init_fail;
214 	}
215 	num_eq_unmap = 35;
216 
217 	/*
218 	 * Setup EQ35 for handling Command Completion Events
219 	 *
220 	 * These events correspond to the Tavor generated events that are used
221 	 * to indicate Tavor firmware command completion.  These events are
222 	 * only generated when Tavor firmware commands are posted using the
223 	 * asynchronous completion mechanism.  If this setup fails for any
224 	 * reason (which, in general, it really never should), then undo all
225 	 * previous EQ mapping, jump to the end, cleanup everything that has
226 	 * been successfully initialized, and return an error.
227 	 */
228 	status = tavor_eq_handler_init(state, state->ts_eqhdl[35],
229 	    TAVOR_EVT_MSK_COMMAND_INTF_COMP, tavor_cmd_complete_handler);
230 	if (status != DDI_SUCCESS) {
231 		/* Set "status" and "errormsg" and goto failure */
232 		TAVOR_TNF_FAIL(status, "command completion event");
233 		goto all_eq_init_fail;
234 	}
235 	num_eq_unmap = 36;
236 
237 	/*
238 	 * Setup EQ36 for handling Local WQ Catastrophic Error Events
239 	 *
240 	 * These events correspond to the similarly-named IB affiliated
241 	 * asynchronous error type.  If this setup fails for any reason
242 	 * (which, in general, it really never should), then undo all previous
243 	 * EQ mapping, jump to the end, cleanup everything that has been
244 	 * successfully initialized, and return an error.
245 	 */
246 	status = tavor_eq_handler_init(state, state->ts_eqhdl[36],
247 	    TAVOR_EVT_MSK_LOCAL_WQ_CAT_ERROR, tavor_local_wq_cat_err_handler);
248 	if (status != DDI_SUCCESS) {
249 		/* Set "status" and "errormsg" and goto failure */
250 		TAVOR_TNF_FAIL(status, "local WQ catastrophic error event");
251 		goto all_eq_init_fail;
252 	}
253 	num_eq_unmap = 37;
254 
255 	/*
256 	 * Setup EQ37 for handling Invalid Req Local WQ Error Events
257 	 *
258 	 * These events also correspond to the similarly-named IB affiliated
259 	 * asynchronous error type.  If this setup fails for any reason
260 	 * (which, in general, it really never should), then undo all previous
261 	 * EQ mapping, jump to the end, cleanup everything that has been
262 	 * successfully initialized, and return an error.
263 	 */
264 	status = tavor_eq_handler_init(state, state->ts_eqhdl[37],
265 	    TAVOR_EVT_MSK_INV_REQ_LOCAL_WQ_ERROR,
266 	    tavor_invreq_local_wq_err_handler);
267 	if (status != DDI_SUCCESS) {
268 		/* Set "status" and "errormsg" and goto failure */
269 		TAVOR_TNF_FAIL(status, "invalid req local WQ error event");
270 		goto all_eq_init_fail;
271 	}
272 	num_eq_unmap = 38;
273 
274 	/*
275 	 * Setup EQ38 for handling Local Access Violation WQ Error Events
276 	 *
277 	 * These events also correspond to the similarly-named IB affiliated
278 	 * asynchronous error type.  If this setup fails for any reason
279 	 * (which, in general, it really never should), then undo all previous
280 	 * EQ mapping, jump to the end, cleanup everything that has been
281 	 * successfully initialized, and return an error.
282 	 */
283 	status = tavor_eq_handler_init(state, state->ts_eqhdl[38],
284 	    TAVOR_EVT_MSK_LOCAL_ACC_VIO_WQ_ERROR,
285 	    tavor_local_acc_vio_wq_err_handler);
286 	if (status != DDI_SUCCESS) {
287 		/* Set "status" and "errormsg" and goto failure */
288 		TAVOR_TNF_FAIL(status, "local access violation WQ error event");
289 		goto all_eq_init_fail;
290 	}
291 	num_eq_unmap = 39;
292 
293 	/*
294 	 * Setup EQ39 for handling Send Queue Drained Events
295 	 *
296 	 * These events correspond to the IB affiliated asynchronous events
297 	 * that are used to indicate completion of a Send Queue Drained QP
298 	 * state transition.  If this setup fails for any reason (which, in
299 	 * general, it really never should), then undo all previous EQ
300 	 * mapping, jump to the end, cleanup everything that has been
301 	 * successfully initialized, and return an error.
302 	 */
303 	status = tavor_eq_handler_init(state, state->ts_eqhdl[39],
304 	    TAVOR_EVT_MSK_SEND_QUEUE_DRAINED, tavor_sendq_drained_handler);
305 	if (status != DDI_SUCCESS) {
306 		/* Set "status" and "errormsg" and goto failure */
307 		TAVOR_TNF_FAIL(status, "send queue drained event");
308 		goto all_eq_init_fail;
309 	}
310 	num_eq_unmap = 40;
311 
312 	/*
313 	 * Setup EQ40 for handling Path Migration Succeeded Events
314 	 *
315 	 * These events correspond to the IB affiliated asynchronous events
316 	 * that are used to indicate successful completion of a path
317 	 * migration.  If this setup fails for any reason (which, in general,
318 	 * it really never should), then undo all previous EQ mapping, jump
319 	 * to the end, cleanup everything that has been successfully
320 	 * initialized, and return an error.
321 	 */
322 	status = tavor_eq_handler_init(state, state->ts_eqhdl[40],
323 	    TAVOR_EVT_MSK_PATH_MIGRATED, tavor_path_mig_handler);
324 	if (status != DDI_SUCCESS) {
325 		/* Set "status" and "errormsg" and goto failure */
326 		TAVOR_TNF_FAIL(status, "path migration succeeded event");
327 		goto all_eq_init_fail;
328 	}
329 	num_eq_unmap = 41;
330 
331 	/*
332 	 * Setup EQ41 for handling Path Migration Failed Events
333 	 *
334 	 * These events correspond to the IB affiliated asynchronous events
335 	 * that are used to indicate that path migration was not successful.
336 	 * If this setup fails for any reason (which, in general, it really
337 	 * never should), then undo all previous EQ mapping, jump to the end,
338 	 * cleanup everything that has been successfully initialized, and
339 	 * return an error.
340 	 */
341 	status = tavor_eq_handler_init(state, state->ts_eqhdl[41],
342 	    TAVOR_EVT_MSK_PATH_MIGRATE_FAILED, tavor_path_mig_err_handler);
343 	if (status != DDI_SUCCESS) {
344 		/* Set "status" and "errormsg" and goto failure */
345 		TAVOR_TNF_FAIL(status, "path migration failed event");
346 		goto all_eq_init_fail;
347 	}
348 	num_eq_unmap = 42;
349 
350 	/*
351 	 * Setup EQ42 for handling Local Catastrophic Error Events
352 	 *
353 	 * These events correspond to the similarly-named IB unaffiliated
354 	 * asynchronous error type.  If this setup fails for any reason
355 	 * (which, in general, it really never should), then undo all previous
356 	 * EQ mapping, jump to the end, cleanup everything that has been
357 	 * successfully initialized, and return an error.
358 	 *
359 	 * This error is unique, in that an EQE is not generated if this event
360 	 * occurs.  Instead, an interrupt is raised and we must poll the
361 	 * Catastrophic Error buffer in CR-Space.  This mapping is set up simply
362 	 * to enable this error reporting.  We pass in a NULL handler since it
363 	 * will never be called.
364 	 */
365 	status = tavor_eq_handler_init(state, state->ts_eqhdl[42],
366 	    TAVOR_EVT_MSK_LOCAL_CAT_ERROR, NULL);
367 	if (status != DDI_SUCCESS) {
368 		/* Set "status" and "errormsg" and goto failure */
369 		TAVOR_TNF_FAIL(status, "local catastrophic error event");
370 		goto all_eq_init_fail;
371 	}
372 	num_eq_unmap = 43;
373 
374 	/*
375 	 * Setup EQ43 for handling SRQ Catastrophic Error Events
376 	 *
377 	 * These events correspond to the similarly-named IB affiliated
378 	 * asynchronous error type.  If this setup fails for any reason
379 	 * (which, in general, it really never should), then undo all previous
380 	 * EQ mapping, jump to the end, cleanup everything that has been
381 	 * successfully initialized, and return an error.
382 	 */
383 	status = tavor_eq_handler_init(state, state->ts_eqhdl[43],
384 	    TAVOR_EVT_MSK_SRQ_CATASTROPHIC_ERROR,
385 	    tavor_srq_catastrophic_handler);
386 	if (status != DDI_SUCCESS) {
387 		/* Set "status" and "errormsg" and goto failure */
388 		TAVOR_TNF_FAIL(status, "srq catastrophic error event");
389 		goto all_eq_init_fail;
390 	}
391 	num_eq_unmap = 44;
392 
393 	/*
394 	 * Setup EQ44 for handling SRQ Last WQE Reached Events
395 	 *
396 	 * These events correspond to the similarly-named IB affiliated
397 	 * asynchronous event type.  If this setup fails for any reason
398 	 * (which, in general, it really never should), then undo all previous
399 	 * EQ mapping, jump to the end, cleanup everything that has been
400 	 * successfully initialized, and return an error.
401 	 */
402 	status = tavor_eq_handler_init(state, state->ts_eqhdl[44],
403 	    TAVOR_EVT_MSK_SRQ_LAST_WQE_REACHED,
404 	    tavor_srq_last_wqe_reached_handler);
405 	if (status != DDI_SUCCESS) {
406 		/* Set "status" and "errormsg" and goto failure */
407 		TAVOR_TNF_FAIL(status, "srq last wqe reached event");
408 		goto all_eq_init_fail;
409 	}
410 	num_eq_unmap = 45;
411 
412 	/*
413 	 * Setup EQ45 for handling ECC error detection events
414 	 *
415 	 * These events correspond to the similarly-named IB affiliated
416 	 * These events are Tavor-specific and report ECC errors detected by
417 	 * the device.  If this setup fails for any reason
418 	 * EQ mapping, jump to the end, cleanup everything that has been
419 	 * successfully initialized, and return an error.
420 	 */
421 	status = tavor_eq_handler_init(state, state->ts_eqhdl[45],
422 	    TAVOR_EVT_MSK_ECC_DETECTION,
423 	    tavor_ecc_detection_handler);
424 	if (status != DDI_SUCCESS) {
425 		/* Set "status" and "errormsg" and goto failure */
426 		TAVOR_TNF_FAIL(status, "ecc detection event");
427 		goto all_eq_init_fail;
428 	}
429 	num_eq_unmap = 46;
430 
431 	/*
432 	 * Setup EQ46 to catch all other types of events.  Specifically, we
433 	 * do not catch the "Local EEC Catastrophic Error Event" because we
434 	 * should have no EEC (the Tavor driver does not support RD).  We also
435 	 * choose not to handle any of the address translation page fault
436 	 * event types.  Since we are not doing any page fault handling (and
437 	 * since the Tavor firmware does not currently support any such
438 	 * handling), we allow these events to go to the catch-all handler.
439 	 */
440 	status = tavor_eq_handler_init(state, state->ts_eqhdl[46],
441 	    TAVOR_EVT_CATCHALL_MASK, tavor_no_eqhandler);
442 	if (status != DDI_SUCCESS) {
443 		/* Set "status" and "errormsg" and goto failure */
444 		TAVOR_TNF_FAIL(status, "all other events");
445 		TNF_PROBE_0(tavor_eq_init_all_allothershdlr_fail,
446 		    TAVOR_TNF_ERROR, "");
447 		goto all_eq_init_fail;
448 	}
449 
450 	TAVOR_TNF_EXIT(tavor_eq_init_all);
451 	return (DDI_SUCCESS);
452 
453 all_eq_init_fail:
454 	/* Unmap any of the partially mapped EQs from above */
455 	for (i = 0; i < num_eq_unmap; i++) {
456 		(void) tavor_eq_handler_fini(state, state->ts_eqhdl[i]);
457 	}
458 
459 	/* Free up any of the partially allocated EQs from above */
460 	for (i = 0; i < num_eq_init; i++) {
461 		(void) tavor_eq_free(state, &state->ts_eqhdl[i]);
462 	}
463 	TNF_PROBE_1(tavor_eq_init_all_fail, TAVOR_TNF_ERROR, "",
464 	    tnf_string, msg, errormsg);
465 	TAVOR_TNF_EXIT(tavor_eq_init_all);
466 	return (status);
467 }
468 
469 
470 /*
471  * tavor_eq_fini_all
472  *    Context: Only called from attach() and/or detach() path contexts
473  */
474 int
475 tavor_eq_fini_all(tavor_state_t *state)
476 {
477 	uint_t		num_eq;
478 	int		status, i;
479 
480 	TAVOR_TNF_ENTER(tavor_eq_fini_all);
481 
482 	/*
483 	 * Grab the total number of supported EQs again.  This is the same
484 	 * hardcoded value that was used above (during the event queue
485 	 * initialization).
486 	 */
487 	num_eq = TAVOR_NUM_EQ_USED;
488 
489 	/*
490 	 * For each of the event queues that we initialized and mapped
491 	 * earlier, attempt to unmap the events from the EQ.
492 	 */
493 	for (i = 0; i < num_eq; i++) {
494 		status = tavor_eq_handler_fini(state, state->ts_eqhdl[i]);
495 		if (status != DDI_SUCCESS) {
496 			TNF_PROBE_0(tavor_eq_fini_all_eqhdlfini_fail,
497 			    TAVOR_TNF_ERROR, "");
498 			TAVOR_TNF_EXIT(tavor_eq_fini_all);
499 			return (DDI_FAILURE);
500 		}
501 	}
502 
503 	/*
504 	 * Teardown and free up all the Event Queues that were allocated
505 	 * earlier.
506 	 */
507 	for (i = 0; i < num_eq; i++) {
508 		status = tavor_eq_free(state, &state->ts_eqhdl[i]);
509 		if (status != DDI_SUCCESS) {
510 			TNF_PROBE_0(tavor_eq_fini_all_eqfree_fail,
511 			    TAVOR_TNF_ERROR, "");
512 			TAVOR_TNF_EXIT(tavor_eq_fini_all);
513 			return (DDI_FAILURE);
514 		}
515 	}
516 
517 	TAVOR_TNF_EXIT(tavor_eq_fini_all);
518 	return (DDI_SUCCESS);
519 }
520 
521 
522 /*
523  * tavor_eq_arm_all
524  *    Context: Only called from attach() and/or detach() path contexts
525  */
526 void
527 tavor_eq_arm_all(tavor_state_t *state)
528 {
529 	uint_t		num_eq;
530 	int		i;
531 
532 	TAVOR_TNF_ENTER(tavor_eq_arm_all);
533 
534 	/*
535 	 * Grab the total number of supported EQs again.  This is the same
536 	 * hardcoded value that was used above (during the event queue
537 	 * initialization.)
538 	 * initialization).
539 	num_eq = TAVOR_NUM_EQ_USED;
540 
541 	/*
542 	 * For each of the event queues that we initialized and mapped
543 	 * earlier, attempt to arm it for event generation.
544 	 */
545 	for (i = 0; i < num_eq; i++) {
546 		tavor_eq_doorbell(state, TAVOR_EQDB_REARM_EQ, i, 0);
547 	}
548 
549 	TAVOR_TNF_EXIT(tavor_eq_arm_all);
550 }
551 
552 
553 /*
554  * tavor_isr()
555  *    Context: Only called from interrupt context (and during panic)
556  */
557 /* ARGSUSED */
558 uint_t
559 tavor_isr(caddr_t arg1, caddr_t arg2)
560 {
561 	tavor_state_t	*state;
562 	uint64_t	*ecr, *clr_int;
563 	uint64_t	ecrreg, int_mask;
564 	uint_t		status;
565 	int		i;
566 
567 	TAVOR_TNF_ENTER(tavor_isr);
568 
569 	/*
570 	 * Grab the Tavor softstate pointer from the input parameter
571 	 */
572 	state	= (tavor_state_t *)arg1;
573 
574 	/*
575 	 * Find the pointers to the ECR and clr_INT registers
576 	 */
577 	ecr	= state->ts_cmd_regs.ecr;
578 	clr_int = state->ts_cmd_regs.clr_int;
579 
580 	/*
581 	 * Read the ECR register.  Each of the 64 bits in the ECR register
582 	 * corresponds to an event queue.  If a bit is set, then the
583 	 * corresponding event queue has fired.
584 	 */
585 	ecrreg = ddi_get64(state->ts_reg_cmdhdl, ecr);
586 
587 	/*
588 	 * As long as there are bits set (i.e. as long as there are still
589 	 * EQs in the "fired" state), call tavor_eq_poll() to process each
590 	 * fired EQ.  If no ECR bits are set, do not claim the interrupt.
591 	 */
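	/*
	 * For example, an ECR value of 0x5 has bits 0 and 2 set, so the
	 * scan loop below polls ts_eqhdl[0] and ts_eqhdl[2] and claims
	 * the interrupt.
	 */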
592 	status = DDI_INTR_UNCLAIMED;
593 	do {
594 		i = 0;
595 		while (ecrreg != 0x0) {
596 			if (ecrreg & 0x1) {
597 				tavor_eq_poll(state, state->ts_eqhdl[i]);
598 				status = DDI_INTR_CLAIMED;
599 			}
600 			ecrreg = ecrreg >> 1;
601 			i++;
602 		}
603 
604 		/*
605 		 * Clear the interrupt.  Note: Depending on the interrupt
606 		 * type (legacy interrupt or MSI), we need to use a different
607 		 * mask to clear the event.  In the case of MSI, the bit
608 		 * to clear corresponds to the MSI number, and for legacy
609 		 * interrupts the bit corresponds to the value in 'inta_pin'.
610 		 */
611 		if (state->ts_intr_type_chosen == DDI_INTR_TYPE_MSI) {
612 			int_mask = ((uint64_t)1 << 0);
613 		} else {
614 			int_mask = ((uint64_t)1 << state->ts_adapter.inta_pin);
615 		}
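		/*
		 * For example, the single MSI currently used always clears
		 * bit 0, while a legacy interrupt with an 'inta_pin' of 11
		 * would clear bit 11.
		 */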
616 		ddi_put64(state->ts_reg_cmdhdl, clr_int, int_mask);
617 
618 		/* Reread the ECR register */
619 		ecrreg = ddi_get64(state->ts_reg_cmdhdl, ecr);
620 
621 	} while (ecrreg != 0x0);
622 
623 	TAVOR_TNF_EXIT(tavor_isr);
624 	return (status);
625 }
626 
627 
628 /*
629  * tavor_eq_doorbell
630  *    Context: Only called from interrupt context
631  */
632 void
633 tavor_eq_doorbell(tavor_state_t *state, uint32_t eq_cmd, uint32_t eqn,
634     uint32_t eq_param)
635 {
636 	uint64_t	doorbell = 0;
637 
638 	/* Build the doorbell from the parameters */
639 	doorbell = ((uint64_t)eq_cmd << TAVOR_EQDB_CMD_SHIFT) |
640 	    ((uint64_t)eqn << TAVOR_EQDB_EQN_SHIFT) | eq_param;
641 
642 	TNF_PROBE_1_DEBUG(tavor_eq_doorbell, TAVOR_TNF_TRACE, "",
643 	    tnf_ulong, doorbell, doorbell);
644 
645 	/* Write the doorbell to UAR */
646 	TAVOR_UAR_DOORBELL(state, (uint64_t *)&state->ts_uar->eq,
647 	    doorbell);
648 }
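
/*
 * Both doorbell forms used by this driver appear in tavor_eq_poll() below:
 *
 *    tavor_eq_doorbell(state, TAVOR_EQDB_SET_CONSINDX, eq->eq_eqnum,
 *        cons_indx);
 *    tavor_eq_doorbell(state, TAVOR_EQDB_REARM_EQ, eq->eq_eqnum, 0);
 *
 * The first publishes an updated consumer index to the hardware; the
 * second rearms the EQ so that it may generate further events (see also
 * tavor_eq_arm_all() above).
 */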
649 
650 /*
651  * tavor_eq_poll
652  *    Context: Only called from interrupt context (and during panic)
653  */
654 static void
655 tavor_eq_poll(tavor_state_t *state, tavor_eqhdl_t eq)
656 {
657 	uint64_t	*clr_ecr;
658 	tavor_hw_eqe_t	*eqe;
659 	uint64_t	ecr_mask;
660 	uint32_t	cons_indx, wrap_around_mask;
661 	int (*eqfunction)(tavor_state_t *state, tavor_eqhdl_t eq,
662 	    tavor_hw_eqe_t *eqe);
663 
664 	TAVOR_TNF_ENTER(tavor_eq_poll);
665 
666 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*eq))
667 
668 	/* Find the pointer to the clr_ECR register */
669 	clr_ecr = state->ts_cmd_regs.clr_ecr;
670 
671 	/*
672 	 * Check for Local Catastrophic Error.  If we have this kind of error,
673 	 * then we don't need to do anything else here, as this kind of
674 	 * catastrophic error is handled separately.  So we call the
675 	 * catastrophic handler, clear the ECR and then return.
676 	 */
677 	if (eq->eq_evttypemask == TAVOR_EVT_MSK_LOCAL_CAT_ERROR) {
678 		/*
679 		 * Call Catastrophic Error handler
680 		 */
681 		tavor_eq_catastrophic(state);
682 
683 		/*
684 		 * Clear the ECR.  Specifically, clear the bit corresponding
685 		 * to the event queue just processed.
686 		 */
687 		ecr_mask = ((uint64_t)1 << eq->eq_eqnum);
688 		ddi_put64(state->ts_reg_cmdhdl, clr_ecr, ecr_mask);
689 
690 		TAVOR_TNF_EXIT(tavor_eq_poll);
691 		return;
692 	}
693 
694 	/* Get the consumer pointer index */
695 	cons_indx = eq->eq_consindx;
696 
697 	/*
698 	 * Calculate the wrap around mask.  Note: This operation only works
699 	 * because all Tavor event queues have power-of-2 sizes
700 	 */
701 	wrap_around_mask = (eq->eq_bufsz - 1);
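	/*
	 * For example, a 256-entry EQ yields a mask of 0xFF, so
	 * incrementing a consumer index of 255 wraps it back to 0.
	 */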
702 
703 	/* Calculate the pointer to the first EQ entry */
704 	eqe = &eq->eq_buf[cons_indx];
705 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*eqe))
706 
707 	/*
708 	 * Sync the current EQE to read
709 	 *    We need to force a ddi_dma_sync() here (independent of how the
710 	 *    EQ was mapped) because it is possible for us to receive the
711 	 *    interrupt, do a read of the ECR, and have each of these
712 	 *    operations complete successfully even though the hardware's DMA
713 	 *    to the EQ has not yet completed.
714 	 */
715 	tavor_eqe_sync(eq, eqe, DDI_DMA_SYNC_FORCPU, TAVOR_EQ_SYNC_FORCE);
716 
717 	/*
718 	 * Pull the handler function for this EQ from the Tavor Event Queue
719 	 * handle
720 	 */
721 	eqfunction = eq->eq_func;
722 
723 	/*
724 	 * Keep pulling entries from the EQ until we find an entry owned by
725 	 * the hardware.  As long as the EQEs are owned by SW, process
726 	 * each entry by calling its handler function and updating the EQ
727 	 * consumer index.
728 	 */
729 	do {
730 		while (TAVOR_EQE_OWNER_IS_SW(eq, eqe)) {
731 			/*
732 			 * Call the EQ handler function.  But only call if we
733 			 * are not in polled I/O mode (i.e. not processing
734 			 * because of a system panic).  Note: We don't call
735 			 * the EQ handling functions from a system panic
736 			 * because we are primarily concerned only with
737 			 * ensuring that the event queues do not overflow (or,
738 			 * more specifically, the event queue associated with
739 			 * the CQ that is being used in the sync/dump process).
740 			 * Also, we don't want to make any upcalls (to the
741 			 * IBTF) because we can't guarantee when/if those
742 			 * calls would ever return.  And, if we're in panic,
743 			 * then we reached here through a PollCQ() call (from
744 			 * tavor_cq_poll()), and we need to ensure that we
745 			 * successfully return any work completions to the
746 			 * caller.
747 			 */
748 			if (ddi_in_panic() == 0) {
749 				eqfunction(state, eq, eqe);
750 			}
751 
752 			/* Reset entry to hardware ownership */
753 			TAVOR_EQE_OWNER_SET_HW(eq, eqe);
754 
755 			/* Sync the current EQE for device */
756 			tavor_eqe_sync(eq, eqe, DDI_DMA_SYNC_FORDEV,
757 			    TAVOR_EQ_SYNC_NORMAL);
758 
759 			/* Increment the consumer index */
760 			cons_indx = (cons_indx + 1) & wrap_around_mask;
761 
762 			/* Update the pointer to the next EQ entry */
763 			eqe = &eq->eq_buf[cons_indx];
764 
765 			/* Sync the next EQE to read */
766 			tavor_eqe_sync(eq, eqe, DDI_DMA_SYNC_FORCPU,
767 			    TAVOR_EQ_SYNC_NORMAL);
768 		}
769 
770 		/*
771 		 * Clear the ECR.  Specifically, clear the bit corresponding
772 		 * to the event queue just processed.
773 		 */
774 		ecr_mask = ((uint64_t)1 << eq->eq_eqnum);
775 		ddi_put64(state->ts_reg_cmdhdl, clr_ecr, ecr_mask);
776 
777 		/* Write an EQ doorbell to update the consumer index */
778 		eq->eq_consindx = cons_indx;
779 		tavor_eq_doorbell(state, TAVOR_EQDB_SET_CONSINDX, eq->eq_eqnum,
780 		    cons_indx);
781 
782 		/* Write another EQ doorbell to rearm */
783 		tavor_eq_doorbell(state, TAVOR_EQDB_REARM_EQ, eq->eq_eqnum, 0);
784 
785 		/*
786 		 * NOTE: Due to the nature of Mellanox hardware, we do not have
787 		 * to do an explicit PIO read to ensure that the doorbell write
788 		 * has been flushed to the hardware.  There is state encoded in
789 		 * the doorbell information we write which makes this
790 		 * unnecessary.  We can be assured that if an event needs to be
791 		 * generated, the hardware will make sure that it is, solving
792 		 * the possible race condition.
793 		 */
794 
795 		/* Sync the next EQE to read */
796 		tavor_eqe_sync(eq, eqe, DDI_DMA_SYNC_FORCPU,
797 		    TAVOR_EQ_SYNC_NORMAL);
798 
799 	} while (TAVOR_EQE_OWNER_IS_SW(eq, eqe));
800 
801 	TAVOR_TNF_EXIT(tavor_eq_poll);
802 }
803 
804 
805 /*
806  * tavor_eq_catastrophic
807  *    Context: Only called from interrupt context (and during panic)
808  */
809 static void
810 tavor_eq_catastrophic(tavor_state_t *state)
811 {
812 	ibt_async_code_t	type;
813 	ibc_async_event_t	event;
814 	uint32_t		*base_addr;
815 	uint32_t		buf_size;
816 	uint32_t		word;
817 	uint8_t			err_type;
818 	uint32_t		err_buf;
819 	int			i;
820 
821 	TAVOR_TNF_ENTER(tavor_eq_catastrophic);
822 
823 	bzero(&event, sizeof (ibc_async_event_t));
824 
825 	base_addr = (uint32_t *)(uintptr_t)(
826 	    (uintptr_t)state->ts_reg_cmd_baseaddr +
827 	    state->ts_fw.error_buf_addr);
828 	buf_size = state->ts_fw.error_buf_sz;
829 
830 	word = ddi_get32(state->ts_reg_cmdhdl, base_addr);
831 
832 	err_type = (word & 0xFF000000) >> 24;
833 	type	 = IBT_ERROR_LOCAL_CATASTROPHIC;
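	/*
	 * Note that "err_type" is simply the most significant byte of the
	 * first word of the error buffer; e.g. a first word of 0xAB001234
	 * decodes to an err_type of 0xAB.
	 */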
834 
835 	switch (err_type) {
836 	case TAVOR_CATASTROPHIC_INTERNAL_ERROR:
837 		cmn_err(CE_WARN, "Catastrophic Internal Error: 0x%02x",
838 		    err_type);
839 
840 		break;
841 
842 	case TAVOR_CATASTROPHIC_UPLINK_BUS_ERROR:
843 		cmn_err(CE_WARN, "Catastrophic Uplink Bus Error: 0x%02x",
844 		    err_type);
845 
846 		break;
847 
848 	case TAVOR_CATASTROPHIC_DDR_DATA_ERROR:
849 		cmn_err(CE_WARN, "Catastrophic DDR Data Error: 0x%02x",
850 		    err_type);
851 
852 		break;
853 
854 	case TAVOR_CATASTROPHIC_INTERNAL_PARITY_ERROR:
855 		cmn_err(CE_WARN, "Catastrophic Internal Parity Error: 0x%02x",
856 		    err_type);
857 
858 		break;
859 
860 	default:
861 		/* Unknown type of Catastrophic error */
862 		cmn_err(CE_WARN, "Catastrophic Unknown Error: 0x%02x",
863 		    err_type);
864 
865 		break;
866 	}
867 
868 	/*
869 	 * Read in the catastrophic error buffer from the hardware, one
870 	 * 32-bit word at a time, printing its contents to the system log
871 	 */
872 	for (i = 0; i < buf_size; i++) {
873 		base_addr = (uint32_t *)((uintptr_t)(state->ts_reg_cmd_baseaddr
874 		    + state->ts_fw.error_buf_addr + (i * 4)));
875 		err_buf = ddi_get32(state->ts_reg_cmdhdl, base_addr);
876 		cmn_err(CE_WARN, "catastrophic_error[%02x]: %08X", i, err_buf);
877 	}
878 
879 	/*
880 	 * We also call the IBTF here to inform it of the catastrophic error.
881 	 * Note: Since no event information (i.e. QP handles, CQ handles,
882 	 * etc.) is necessary, we pass a pointer to an empty (zeroed)
883 	 * ibc_async_event_t struct.
884 	 *
885 	 * But we also check if "ts_ibtfpriv" is NULL.  If it is then it
886 	 * means that we have either received this event before we
887 	 * finished attaching to the IBTF or we've received it while we
888 	 * are in the process of detaching.
889 	 */
890 	if (state->ts_ibtfpriv != NULL) {
891 		TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
892 	}
893 
894 	TAVOR_TNF_EXIT(tavor_eq_catastrophic);
895 }
896 
897 
898 /*
899  * tavor_eq_alloc()
900  *    Context: Only called from attach() path context
901  */
902 static int
903 tavor_eq_alloc(tavor_state_t *state, uint32_t log_eq_size, uint_t intr,
904     tavor_eqhdl_t *eqhdl)
905 {
906 	tavor_rsrc_t		*eqc, *rsrc;
907 	tavor_hw_eqc_t		eqc_entry;
908 	tavor_eqhdl_t		eq;
909 	ibt_mr_attr_t		mr_attr;
910 	tavor_mr_options_t	op;
911 	tavor_pdhdl_t		pd;
912 	tavor_mrhdl_t		mr;
913 	tavor_hw_eqe_t		*buf;
914 	uint64_t		addr;
915 	uint32_t		lkey;
916 	uint_t			dma_xfer_mode;
917 	int			status, i;
918 	char			*errormsg;
919 
920 	TAVOR_TNF_ENTER(tavor_eq_alloc);
921 
922 	/* Use the internal protection domain (PD) for setting up EQs */
923 	pd = state->ts_pdhdl_internal;
924 
925 	/* Increment the reference count on the protection domain (PD) */
926 	tavor_pd_refcnt_inc(pd);
927 
928 	/*
929 	 * Allocate an EQ context entry.  This will be filled in with all
930 	 * the necessary parameters to define the Event Queue.  And then
931 	 * ownership will be passed to the hardware in the final step
932 	 * below.  If we fail here, we must undo the protection domain
933 	 * reference count.
934 	 */
935 	status = tavor_rsrc_alloc(state, TAVOR_EQC, 1, TAVOR_SLEEP, &eqc);
936 	if (status != DDI_SUCCESS) {
937 		/* Set "status" and "errormsg" and goto failure */
938 		TAVOR_TNF_FAIL(DDI_FAILURE, "failed EQ context");
939 		goto eqalloc_fail1;
940 	}
941 
942 	/*
943 	 * Allocate the software structure for tracking the event queue (i.e.
944 	 * the Tavor Event Queue handle).  If we fail here, we must undo the
945 	 * protection domain reference count and the previous resource
946 	 * allocation.
947 	 */
948 	status = tavor_rsrc_alloc(state, TAVOR_EQHDL, 1, TAVOR_SLEEP, &rsrc);
949 	if (status != DDI_SUCCESS) {
950 		/* Set "status" and "errormsg" and goto failure */
951 		TAVOR_TNF_FAIL(DDI_FAILURE, "failed EQ handler");
952 		goto eqalloc_fail2;
953 	}
954 	eq = (tavor_eqhdl_t)rsrc->tr_addr;
955 
956 	/*
957 	 * Allocate the memory for Event Queue.  Note: Although we use the
958 	 * common queue allocation routine, we always specify
959 	 * TAVOR_QUEUE_LOCATION_NORMAL (i.e. EQ located in system memory)
960 	 * because it would be inefficient to have EQs located in DDR memory.
961 	 * This is primarily because EQs are read from (by software) more
962 	 * than they are written to.  Also note that, unlike Tavor QP work
963 	 * queues, event queues do not have the same strict alignment
964 	 * requirements.  It is sufficient for the EQ memory to be both
965 	 * aligned to and bound to addresses which are a multiple of EQE size.
966 	 */
967 	eq->eq_eqinfo.qa_size = (1 << log_eq_size) * sizeof (tavor_hw_eqe_t);
968 	eq->eq_eqinfo.qa_alloc_align = sizeof (tavor_hw_eqe_t);
969 	eq->eq_eqinfo.qa_bind_align  = sizeof (tavor_hw_eqe_t);
970 	eq->eq_eqinfo.qa_location = TAVOR_QUEUE_LOCATION_NORMAL;
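	/*
	 * For example, a log_eq_size of 7 describes a 128-entry EQ, so
	 * qa_size here would be 128 * sizeof (tavor_hw_eqe_t) bytes, with
	 * allocation and binding constrained only to the EQE size.
	 */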
971 	status = tavor_queue_alloc(state, &eq->eq_eqinfo, TAVOR_SLEEP);
972 	if (status != DDI_SUCCESS) {
973 		/* Set "status" and "errormsg" and goto failure */
974 		TAVOR_TNF_FAIL(DDI_FAILURE, "failed event queue");
975 		goto eqalloc_fail3;
976 	}
977 	buf = (tavor_hw_eqe_t *)eq->eq_eqinfo.qa_buf_aligned;
978 
979 	/*
980 	 * Initialize each of the Event Queue Entries (EQE) by setting their
981 	 * ownership to hardware ("owner" bit set to HW).  This is in
982 	 * preparation for the final transfer of ownership (below) of the
983 	 * EQ context itself.
984 	 */
985 	for (i = 0; i < (1 << log_eq_size); i++) {
986 		TAVOR_EQE_OWNER_SET_HW(eq, &buf[i]);
987 	}
988 
989 	/*
990 	 * Register the memory for the EQ.  The memory for the EQ must
991 	 * be registered in the Tavor TPT tables.  This gives us the LKey
992 	 * to specify in the EQ context below.
993 	 *
994 	 * Because we are in the attach path we use NOSLEEP here so that we
995 	 * SPIN in the HCR since the event queues are not set up yet, and we
996 	 * cannot NOSPIN at this point in time.
997 	 */
998 	mr_attr.mr_vaddr = (uint64_t)(uintptr_t)buf;
999 	mr_attr.mr_len	 = eq->eq_eqinfo.qa_size;
1000 	mr_attr.mr_as	 = NULL;
1001 	mr_attr.mr_flags = IBT_MR_NOSLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
1002 	dma_xfer_mode	 = state->ts_cfg_profile->cp_streaming_consistent;
1003 	if (dma_xfer_mode == DDI_DMA_STREAMING) {
1004 		mr_attr.mr_flags |= IBT_MR_NONCOHERENT;
1005 	}
1006 	op.mro_bind_type   = state->ts_cfg_profile->cp_iommu_bypass;
1007 	op.mro_bind_dmahdl = eq->eq_eqinfo.qa_dmahdl;
1008 	op.mro_bind_override_addr = 0;
1009 	status = tavor_mr_register(state, pd, &mr_attr, &mr, &op);
1010 	if (status != DDI_SUCCESS) {
1011 		/* Set "status" and "errormsg" and goto failure */
1012 		TAVOR_TNF_FAIL(DDI_FAILURE, "failed register mr");
1013 		goto eqalloc_fail4;
1014 	}
1015 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr))
1016 	addr = mr->mr_bindinfo.bi_addr;
1017 	lkey = mr->mr_lkey;
1018 
1019 	/* Determine if later ddi_dma_sync will be necessary */
1020 	eq->eq_sync = TAVOR_EQ_IS_SYNC_REQ(state, eq->eq_eqinfo);
1021 
1022 	/* Sync entire EQ for use by the hardware (if necessary) */
1023 	if (eq->eq_sync) {
1024 		(void) ddi_dma_sync(mr->mr_bindinfo.bi_dmahdl, 0,
1025 		    eq->eq_eqinfo.qa_size, DDI_DMA_SYNC_FORDEV);
1026 	}
1027 
1028 	/*
1029 	 * Fill in the EQC entry.  This is the final step before passing
1030 	 * ownership of the EQC entry to the Tavor hardware.  We use all of
1031 	 * the information collected/calculated above to fill in the
1032 	 * requisite portions of the EQC.  Note:  We create all EQs in the
1033 	 * "fired" state.  We will arm them later (after our interrupt
1034 	 * routine has been registered).
1035 	 */
1036 	bzero(&eqc_entry, sizeof (tavor_hw_eqc_t));
1037 	eqc_entry.owner		= TAVOR_HW_OWNER;
1038 	eqc_entry.xlat		= TAVOR_VA2PA_XLAT_ENABLED;
1039 	eqc_entry.state		= TAVOR_EQ_FIRED;
1040 	eqc_entry.start_addr_h	= (addr >> 32);
1041 	eqc_entry.start_addr_l	= (addr & 0xFFFFFFFF);
1042 	eqc_entry.log_eq_sz	= log_eq_size;
1043 	eqc_entry.usr_page	= 0;
1044 	eqc_entry.pd		= pd->pd_pdnum;
1045 	eqc_entry.intr		= intr;
1046 	eqc_entry.lkey		= lkey;
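	/*
	 * Note that "start_addr_h" and "start_addr_l" are simply the upper
	 * and lower 32 bits of the registered EQ memory address; e.g. an
	 * addr of 0x0000000123456780 yields 0x00000001 and 0x23456780,
	 * respectively.
	 */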
1047 
1048 	/*
1049 	 * Write the EQC entry to hardware.  Lastly, we pass ownership of
1050 	 * the entry to the hardware (using the Tavor SW2HW_EQ firmware
1051 	 * command).  Note: in general, this operation shouldn't fail.  But
1052 	 * if it does, we have to undo everything we've done above before
1053 	 * returning error.
1054 	 */
1055 	status = tavor_cmn_ownership_cmd_post(state, SW2HW_EQ, &eqc_entry,
1056 	    sizeof (tavor_hw_eqc_t), eqc->tr_indx, TAVOR_CMD_NOSLEEP_SPIN);
1057 	if (status != TAVOR_CMD_SUCCESS) {
1058 		cmn_err(CE_CONT, "Tavor: SW2HW_EQ command failed: %08x\n",
1059 		    status);
1060 		TNF_PROBE_1(tavor_eq_alloc_sw2hw_eq_cmd_fail,
1061 		    TAVOR_TNF_ERROR, "", tnf_uint, status, status);
1062 		/* Set "status" and "errormsg" and goto failure */
1063 		TAVOR_TNF_FAIL(ibc_get_ci_failure(0), "tavor SW2HW_EQ command");
1064 		goto eqalloc_fail5;
1065 	}
1066 
1067 	/*
1068 	 * Fill in the rest of the Tavor Event Queue handle.  Having
1069 	 * successfully transferred ownership of the EQC, we can update the
1070 	 * following fields for use in further operations on the EQ.
1071 	 */
1072 	eq->eq_eqcrsrcp	 = eqc;
1073 	eq->eq_rsrcp	 = rsrc;
1074 	eq->eq_consindx	 = 0;
1075 	eq->eq_eqnum	 = eqc->tr_indx;
1076 	eq->eq_buf	 = buf;
1077 	eq->eq_bufsz	 = (1 << log_eq_size);
1078 	eq->eq_mrhdl	 = mr;
1079 	*eqhdl		 = eq;
1080 
1081 	TAVOR_TNF_EXIT(tavor_eq_alloc);
1082 	return (DDI_SUCCESS);
1083 
1084 /*
1085  * The following is cleanup for all possible failure cases in this routine
1086  */
1087 eqalloc_fail5:
1088 	if (tavor_mr_deregister(state, &mr, TAVOR_MR_DEREG_ALL,
1089 	    TAVOR_NOSLEEP) != DDI_SUCCESS) {
1090 		TAVOR_WARNING(state, "failed to deregister EQ memory");
1091 	}
1092 eqalloc_fail4:
1093 	tavor_queue_free(state, &eq->eq_eqinfo);
1094 eqalloc_fail3:
1095 	tavor_rsrc_free(state, &rsrc);
1096 eqalloc_fail2:
1097 	tavor_rsrc_free(state, &eqc);
1098 eqalloc_fail1:
1099 	tavor_pd_refcnt_dec(pd);
1100 eqalloc_fail:
1101 	TNF_PROBE_1(tavor_eq_alloc_fail, TAVOR_TNF_ERROR, "",
1102 	    tnf_string, msg, errormsg);
1103 	TAVOR_TNF_EXIT(tavor_eq_alloc);
1104 	return (status);
1105 }
1106 
1107 
1108 /*
1109  * tavor_eq_free()
1110  *    Context: Only called from attach() and/or detach() path contexts
1111  */
1112 static int
1113 tavor_eq_free(tavor_state_t *state, tavor_eqhdl_t *eqhdl)
1114 {
1115 	tavor_rsrc_t		*eqc, *rsrc;
1116 	tavor_hw_eqc_t		eqc_entry;
1117 	tavor_pdhdl_t		pd;
1118 	tavor_mrhdl_t		mr;
1119 	tavor_eqhdl_t		eq;
1120 	uint32_t		eqnum;
1121 	int			status;
1122 
1123 	TAVOR_TNF_ENTER(tavor_eq_free);
1124 
1125 	/*
1126 	 * Pull all the necessary information from the Tavor Event Queue
1127 	 * handle.  This is necessary here because the resource for the
1128 	 * EQ handle is going to be freed up as part of this operation.
1129 	 */
1130 	eq	= *eqhdl;
1131 	eqc	= eq->eq_eqcrsrcp;
1132 	rsrc	= eq->eq_rsrcp;
1133 	pd	= state->ts_pdhdl_internal;
1134 	mr	= eq->eq_mrhdl;
1135 	eqnum	= eq->eq_eqnum;
1136 
1137 	/*
1138 	 * Reclaim EQC entry from hardware (using the Tavor HW2SW_EQ
1139 	 * firmware command).  If the ownership transfer fails for any reason,
1140 	 * then it is an indication that something (either in HW or SW) has
1141 	 * gone seriously wrong.
1142 	 */
1143 	status = tavor_cmn_ownership_cmd_post(state, HW2SW_EQ, &eqc_entry,
1144 	    sizeof (tavor_hw_eqc_t), eqnum, TAVOR_CMD_NOSLEEP_SPIN);
1145 	if (status != TAVOR_CMD_SUCCESS) {
1146 		TAVOR_WARNING(state, "failed to reclaim EQC ownership");
1147 		cmn_err(CE_CONT, "Tavor: HW2SW_EQ command failed: %08x\n",
1148 		    status);
1149 		TNF_PROBE_1(tavor_eq_free_hw2sw_eq_cmd_fail,
1150 		    TAVOR_TNF_ERROR, "", tnf_uint, status, status);
1151 		TAVOR_TNF_EXIT(tavor_eq_free);
1152 		return (DDI_FAILURE);
1153 	}
1154 
1155 	/*
1156 	 * Deregister the memory for the Event Queue.  If this fails
1157 	 * for any reason, then it is an indication that something (either
1158 	 * in HW or SW) has gone seriously wrong.  So we print a warning
1159 	 * message and continue.
1160 	 */
1161 	status = tavor_mr_deregister(state, &mr, TAVOR_MR_DEREG_ALL,
1162 	    TAVOR_NOSLEEP);
1163 	if (status != DDI_SUCCESS) {
1164 		TAVOR_WARNING(state, "failed to deregister EQ memory");
1165 		TNF_PROBE_0(tavor_eq_free_dereg_mr_fail, TAVOR_TNF_ERROR, "");
1166 		TAVOR_TNF_EXIT(tavor_eq_free);
1167 	}
1168 
1169 	/* Free the memory for the EQ */
1170 	tavor_queue_free(state, &eq->eq_eqinfo);
1171 
1172 	/* Free the Tavor Event Queue handle */
1173 	tavor_rsrc_free(state, &rsrc);
1174 
1175 	/* Free up the EQC entry resource */
1176 	tavor_rsrc_free(state, &eqc);
1177 
1178 	/* Decrement the reference count on the protection domain (PD) */
1179 	tavor_pd_refcnt_dec(pd);
1180 
1181 	/* Set the eqhdl pointer to NULL and return success */
1182 	*eqhdl = NULL;
1183 
1184 	TAVOR_TNF_EXIT(tavor_eq_free);
1185 	return (DDI_SUCCESS);
1186 }
1187 
1188 
1189 /*
1190  * tavor_eq_handler_init
1191  *    Context: Only called from attach() path context
1192  */
1193 static int
1194 tavor_eq_handler_init(tavor_state_t *state, tavor_eqhdl_t eq,
1195     uint_t evt_type_mask, int (*eq_func)(tavor_state_t *state,
1196     tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe))
1197 {
1198 	int		status;
1199 
1200 	TAVOR_TNF_ENTER(tavor_eq_handler_init);
1201 
1202 	/*
1203 	 * Save away the EQ handler function and the event type mask.  These
1204 	 * will be used later during interrupt and event queue processing.
1205 	 */
1206 	eq->eq_func	   = eq_func;
1207 	eq->eq_evttypemask = evt_type_mask;
1208 
1209 	/*
1210 	 * Map the EQ to a specific class of event (or events) depending
1211 	 * on the mask value passed in.  The TAVOR_EVT_NO_MASK means not
1212 	 * to attempt associating the EQ with any specific class of event.
1213 	 * This is particularly useful when initializing the event queues
1214 	 * used for CQ events.   The mapping is done using the Tavor MAP_EQ
1215 	 * firmware command.  Note: This command should not, in general, fail.
1216 	 * If it does, then something (probably HW related) has gone seriously
1217 	 * wrong.
1218 	 */
1219 	if (evt_type_mask != TAVOR_EVT_NO_MASK) {
1220 		status = tavor_map_eq_cmd_post(state,
1221 		    TAVOR_CMD_MAP_EQ_EVT_MAP, eq->eq_eqnum, evt_type_mask,
1222 		    TAVOR_CMD_NOSLEEP_SPIN);
1223 		if (status != TAVOR_CMD_SUCCESS) {
1224 			cmn_err(CE_CONT, "Tavor: MAP_EQ command failed: "
1225 			    "%08x\n", status);
1226 			TNF_PROBE_1(tavor_eq_handler_init_map_eq_cmd_fail,
1227 			    TAVOR_TNF_ERROR, "", tnf_uint, status, status);
1228 			TAVOR_TNF_EXIT(tavor_eq_handler_init);
1229 			return (DDI_FAILURE);
1230 		}
1231 	}
1232 
1233 	TAVOR_TNF_EXIT(tavor_eq_handler_init);
1234 	return (DDI_SUCCESS);
1235 }
1236 
1237 
1238 /*
1239  * tavor_eq_handler_fini
1240  *    Context: Only called from attach() and/or detach() path contexts
1241  */
1242 static int
1243 tavor_eq_handler_fini(tavor_state_t *state, tavor_eqhdl_t eq)
1244 {
1245 	int			status;
1246 
1247 	TAVOR_TNF_ENTER(tavor_eq_handler_fini);
1248 
1249 	/*
1250 	 * Unmap the EQ from the event class to which it had been previously
1251 	 * mapped.  The unmapping is done using the Tavor MAP_EQ command (in
1252 	 * much the same way that the initial mapping was done).  The
1253 	 * difference, however, is the TAVOR_CMD_MAP_EQ_EVT_UNMAP flag passed
1254 	 * to the MAP_EQ firmware command.  The TAVOR_EVT_NO_MASK (which may have
1255 	 * been passed in at init time) still means that no association has
1256 	 * been made between the EQ and any specific class of event (and,
1257 	 * hence, no unmapping is necessary).  Note: This command should not,
1258 	 * in general, fail.  If it does, then something (probably HW related)
1259 	 * has gone seriously wrong.
1260 	 */
1261 	if (eq->eq_evttypemask != TAVOR_EVT_NO_MASK) {
1262 		status = tavor_map_eq_cmd_post(state,
1263 		    TAVOR_CMD_MAP_EQ_EVT_UNMAP, eq->eq_eqnum,
1264 		    eq->eq_evttypemask, TAVOR_CMD_NOSLEEP_SPIN);
1265 		if (status != TAVOR_CMD_SUCCESS) {
1266 			cmn_err(CE_CONT, "Tavor: MAP_EQ command failed: "
1267 			    "%08x\n", status);
1268 			TNF_PROBE_1(tavor_eq_handler_fini_map_eq_cmd_fail,
1269 			    TAVOR_TNF_ERROR, "", tnf_uint, status, status);
1270 			TAVOR_TNF_EXIT(tavor_eq_handler_fini);
1271 			return (DDI_FAILURE);
1272 		}
1273 	}
1274 
1275 	TAVOR_TNF_EXIT(tavor_eq_handler_fini);
1276 	return (DDI_SUCCESS);
1277 }
1278 
1279 
1280 /*
1281  * tavor_eqe_sync()
1282  *    Context: Can be called from interrupt or base context.
1283  *
1284  *    Typically, this routine does nothing unless the EQ memory is
1285  *    mapped as DDI_DMA_STREAMING.  However, there is a condition where
1286  *    ddi_dma_sync() is necessary even if the memory was mapped in
1287  *    consistent mode.  The "force_sync" parameter is used here to force
1288  *    the call to ddi_dma_sync() independent of how the EQ memory was
1289  *    mapped.
1290  */
1291 static void
1292 tavor_eqe_sync(tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe, uint_t flag,
1293     uint_t force_sync)
1294 {
1295 	ddi_dma_handle_t	dmahdl;
1296 	off_t			offset;
1297 	int			status;
1298 
1299 	TAVOR_TNF_ENTER(tavor_eqe_sync);
1300 
1301 	/* Determine if EQ needs to be synced or not */
1302 	if ((eq->eq_sync == 0) && (force_sync == TAVOR_EQ_SYNC_NORMAL)) {
1303 		TAVOR_TNF_EXIT(tavor_eqe_sync);
1304 		return;
1305 	}
1306 
1307 	/* Get the DMA handle from EQ context */
1308 	dmahdl = eq->eq_mrhdl->mr_bindinfo.bi_dmahdl;
1309 
1310 	/* Calculate offset of next EQE */
1311 	offset = (off_t)((uintptr_t)eqe - (uintptr_t)&eq->eq_buf[0]);
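	/*
	 * This offset is the EQE's index multiplied by
	 * sizeof (tavor_hw_eqe_t), so only the single entry of interest is
	 * synced rather than the entire queue.
	 */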
1312 	status = ddi_dma_sync(dmahdl, offset, sizeof (tavor_hw_eqe_t), flag);
1313 	if (status != DDI_SUCCESS) {
1314 		TNF_PROBE_0(tavor_eqe_sync_getnextentry_fail,
1315 		    TAVOR_TNF_ERROR, "");
1316 		TAVOR_TNF_EXIT(tavor_eqe_sync);
1317 		return;
1318 	}
1319 
1320 	TAVOR_TNF_EXIT(tavor_eqe_sync);
1321 }
1322 
1323 
1324 /*
1325  * tavor_port_state_change_handler()
1326  *    Context: Only called from interrupt context
1327  */
1328 static int
1329 tavor_port_state_change_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1330     tavor_hw_eqe_t *eqe)
1331 {
1332 	ibc_async_event_t	event;
1333 	ibt_async_code_t	type;
1334 	uint_t			port, subtype;
1335 	uint_t			eqe_evttype;
1336 	char			link_msg[24];
1337 
1338 	TAVOR_TNF_ENTER(tavor_port_state_change_handler);
1339 
1340 	eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1341 
1342 	ASSERT(eqe_evttype == TAVOR_EVT_PORT_STATE_CHANGE ||
1343 	    eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1344 
1345 	if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1346 		TNF_PROBE_0(tavor_port_state_change_eq_overflow_condition,
1347 		    TAVOR_TNF_ERROR, "");
1348 		tavor_eq_overflow_handler(state, eq, eqe);
1349 
1350 		TAVOR_TNF_EXIT(tavor_port_state_change_handler);
1351 		return (DDI_FAILURE);
1352 	}
1353 
1354 	/*
1355 	 * Depending on the type of Port State Change event, pass the
1356 	 * appropriate asynch event to the IBTF.
1357 	 */
1358 	port = TAVOR_EQE_PORTNUM_GET(eq, eqe);
1359 
1360 	/* Check for valid port number in event */
1361 	if ((port == 0) || (port > state->ts_cfg_profile->cp_num_ports)) {
1362 		TAVOR_WARNING(state, "Unexpected port number in port state "
1363 		    "change event");
1364 		cmn_err(CE_CONT, "  Port number: %02x\n", port);
1365 		TAVOR_TNF_EXIT(tavor_port_state_change_handler);
1366 		return (DDI_FAILURE);
1367 	}
1368 
1369 	subtype = TAVOR_EQE_EVTSUBTYPE_GET(eq, eqe);
1370 	if (subtype == TAVOR_PORT_LINK_ACTIVE) {
1371 		event.ev_port 	= port;
1372 		type		= IBT_EVENT_PORT_UP;
1373 
1374 		(void) snprintf(link_msg, 23, "port %d up", port);
1375 		ddi_dev_report_fault(state->ts_dip, DDI_SERVICE_RESTORED,
1376 		    DDI_EXTERNAL_FAULT, link_msg);
1377 	} else if (subtype == TAVOR_PORT_LINK_DOWN) {
1378 		event.ev_port	= port;
1379 		type		= IBT_ERROR_PORT_DOWN;
1380 
1381 		(void) snprintf(link_msg, 23, "port %d down", port);
1382 		ddi_dev_report_fault(state->ts_dip, DDI_SERVICE_LOST,
1383 		    DDI_EXTERNAL_FAULT, link_msg);
1384 	} else {
1385 		TAVOR_WARNING(state, "Unexpected subtype in port state change "
1386 		    "event");
1387 		cmn_err(CE_CONT, "  Event type: %02x, subtype: %02x\n",
1388 		    TAVOR_EQE_EVTTYPE_GET(eq, eqe), subtype);
1389 		TAVOR_TNF_EXIT(tavor_port_state_change_handler);
1390 		return (DDI_FAILURE);
1391 	}
1392 
1393 	/*
1394 	 * Deliver the event to the IBTF.  Note: If "ts_ibtfpriv" is NULL,
1395 	 * then we have either received this event before we finished
1396 	 * attaching to the IBTF or we've received it while we are in the
1397 	 * process of detaching.
1398 	 */
1399 	if (state->ts_ibtfpriv != NULL) {
1400 		TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1401 	}
1402 
1403 	TAVOR_TNF_EXIT(tavor_port_state_change_handler);
1404 	return (DDI_SUCCESS);
1405 }
1406 
1407 
1408 /*
1409  * tavor_comm_estbl_handler()
1410  *    Context: Only called from interrupt context
1411  */
1412 static int
1413 tavor_comm_estbl_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1414     tavor_hw_eqe_t *eqe)
1415 {
1416 	tavor_qphdl_t		qp;
1417 	uint_t			qpnum;
1418 	ibc_async_event_t	event;
1419 	ibt_async_code_t	type;
1420 	uint_t			eqe_evttype;
1421 
1422 	TAVOR_TNF_ENTER(tavor_comm_estbl_handler);
1423 
1424 	eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1425 
1426 	ASSERT(eqe_evttype == TAVOR_EVT_COMM_ESTABLISHED ||
1427 	    eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1428 
1429 	if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1430 		TNF_PROBE_0(tavor_comm_estbl_eq_overflow_condition,
1431 		    TAVOR_TNF_ERROR, "");
1432 		tavor_eq_overflow_handler(state, eq, eqe);
1433 
1434 		TAVOR_TNF_EXIT(tavor_comm_estbl_handler);
1435 		return (DDI_FAILURE);
1436 	}
1437 
1438 	/* Get the QP handle from QP number in event descriptor */
1439 	qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1440 	qp = tavor_qphdl_from_qpnum(state, qpnum);
1441 
1442 	/*
1443 	 * If the QP handle is NULL, this is probably an indication
1444 	 * that the QP has been freed already, in which case we
1445 	 * should not deliver this event.
1446 	 *
1447 	 * We also check that the QP number in the handle is the
1448 	 * same as the QP number in the event queue entry.  This
1449 	 * extra check allows us to handle the case where a QP was
1450 	 * freed and then allocated again in the time it took to
1451 	 * handle the event queue processing.  By constantly incrementing
1452 	 * the non-constrained portion of the QP number every time
1453 	 * a new QP is allocated, we mitigate (somewhat) the chance
1454 	 * that a stale event could be passed to the client's QP
1455 	 * handler.
1456 	 *
1457 	 * Lastly, we check if "ts_ibtfpriv" is NULL.  If it is then it
1458 	 * means that we have either received this event before we
1459 	 * finished attaching to the IBTF or we've received it while we
1460 	 * are in the process of detaching.
1461 	 */
1462 	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1463 	    (state->ts_ibtfpriv != NULL)) {
1464 		event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1465 		type		= IBT_EVENT_COM_EST_QP;
1466 
1467 		TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1468 	} else {
1469 		TNF_PROBE_2(tavor_comm_estbl_handler_dropped_event,
1470 		    TAVOR_TNF_ERROR, "", tnf_uint, ev_qpnum, qpnum,
1471 		    tnf_uint, hdl_qpnum, qpnum);
1472 	}
1473 
1474 	TAVOR_TNF_EXIT(tavor_comm_estbl_handler);
1475 	return (DDI_SUCCESS);
1476 }
1477 
1478 
1479 /*
1480  * tavor_local_wq_cat_err_handler()
1481  *    Context: Only called from interrupt context
1482  */
1483 static int
1484 tavor_local_wq_cat_err_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1485     tavor_hw_eqe_t *eqe)
1486 {
1487 	tavor_qphdl_t		qp;
1488 	uint_t			qpnum;
1489 	ibc_async_event_t	event;
1490 	ibt_async_code_t	type;
1491 	uint_t			eqe_evttype;
1492 
1493 	TAVOR_TNF_ENTER(tavor_local_wq_cat_err_handler);
1494 
1495 	eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1496 
1497 	ASSERT(eqe_evttype == TAVOR_EVT_LOCAL_WQ_CAT_ERROR ||
1498 	    eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1499 
1500 	if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1501 		TNF_PROBE_0(tavor_local_wq_cat_err_eq_overflow_condition,
1502 		    TAVOR_TNF_ERROR, "");
1503 		tavor_eq_overflow_handler(state, eq, eqe);
1504 
1505 		TAVOR_TNF_EXIT(tavor_local_wq_cat_err_handler);
1506 		return (DDI_FAILURE);
1507 	}
1508 
1509 	/* Get the QP handle from QP number in event descriptor */
1510 	qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1511 	qp = tavor_qphdl_from_qpnum(state, qpnum);
1512 
1513 	/*
1514 	 * If the QP handle is NULL, this is probably an indication
1515 	 * that the QP has been freed already, in which case we
1516 	 * should not deliver this event.
1517 	 *
1518 	 * We also check that the QP number in the handle is the
1519 	 * same as the QP number in the event queue entry.  This
1520 	 * extra check allows us to handle the case where a QP was
1521 	 * freed and then allocated again in the time it took to
1522 	 * handle the event queue processing.  By constantly incrementing
1523 	 * the non-constrained portion of the QP number every time
1524 	 * a new QP is allocated, we mitigate (somewhat) the chance
1525 	 * that a stale event could be passed to the client's QP
1526 	 * handler.
1527 	 *
1528 	 * Lastly, we check if "ts_ibtfpriv" is NULL.  If it is, then it
1529 	 * means that we have either received this event before we
1530 	 * finished attaching to the IBTF or we've received it while we
1531 	 * are in the process of detaching.
1532 	 */
1533 	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1534 	    (state->ts_ibtfpriv != NULL)) {
1535 		event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1536 		type		= IBT_ERROR_CATASTROPHIC_QP;
1537 
1538 		TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1539 	} else {
1540 		TNF_PROBE_2(tavor_local_wq_cat_err_handler_dropped_event,
1541 		    TAVOR_TNF_ERROR, "", tnf_uint, ev_qpnum, qpnum,
1542 		    tnf_uint, hdl_qpnum, qpnum);
1543 	}
1544 
1545 	TAVOR_TNF_EXIT(tavor_local_wq_cat_err_handler);
1546 	return (DDI_SUCCESS);
1547 }
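
/*
 * Illustrative sketch (not part of the driver): each handler hands its
 * event to the IBTF through the TAVOR_DO_IBTF_ASYNC_CALLB() macro.  A
 * plausible expansion is sketched below; it is an assumption based on
 * the ibc_async_handler() entry point that IBTF-compliant HCA drivers
 * call, not a quote of the real macro.
 */
#if 0	/* example only -- never compiled */
#define	EXAMPLE_DO_IBTF_ASYNC_CALLB(state, type, event)			\
	ibc_async_handler((state)->ts_ibtfpriv, (type), (event))
#endif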
1548 
1549 
1550 /*
1551  * tavor_invreq_local_wq_err_handler()
1552  *    Context: Only called from interrupt context
1553  */
1554 static int
1555 tavor_invreq_local_wq_err_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1556     tavor_hw_eqe_t *eqe)
1557 {
1558 	tavor_qphdl_t		qp;
1559 	uint_t			qpnum;
1560 	ibc_async_event_t	event;
1561 	ibt_async_code_t	type;
1562 	uint_t			eqe_evttype;
1563 
1564 	TAVOR_TNF_ENTER(tavor_invreq_local_wq_err_handler);
1565 
1566 	eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1567 
1568 	ASSERT(eqe_evttype == TAVOR_EVT_INV_REQ_LOCAL_WQ_ERROR ||
1569 	    eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1570 
1571 	if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1572 		TNF_PROBE_0(tavor_invreq_local_wq_err_eq_overflow_condition,
1573 		    TAVOR_TNF_ERROR, "");
1574 		tavor_eq_overflow_handler(state, eq, eqe);
1575 
1576 		TAVOR_TNF_EXIT(tavor_invreq_local_wq_err_handler);
1577 		return (DDI_FAILURE);
1578 	}
1579 
1580 	/* Get the QP handle from QP number in event descriptor */
1581 	qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1582 	qp = tavor_qphdl_from_qpnum(state, qpnum);
1583 
1584 	/*
1585 	 * If the QP handle is NULL, this is probably an indication
1586 	 * that the QP has been freed already, in which case we
1587 	 * should not deliver this event.
1588 	 *
1589 	 * We also check that the QP number in the handle is the
1590 	 * same as the QP number in the event queue entry.  This
1591 	 * extra check allows us to handle the case where a QP was
1592 	 * freed and then allocated again in the time it took to
1593 	 * handle the event queue processing.  By constantly incrementing
1594 	 * the non-constrained portion of the QP number every time
1595 	 * a new QP is allocated, we mitigate (somewhat) the chance
1596 	 * that a stale event could be passed to the client's QP
1597 	 * handler.
1598 	 *
1599 	 * Lastly, we check if "ts_ibtfpriv" is NULL.  If it is, then it
1600 	 * means that we have either received this event before we
1601 	 * finished attaching to the IBTF or we've received it while we
1602 	 * are in the process of detaching.
1603 	 */
1604 	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1605 	    (state->ts_ibtfpriv != NULL)) {
1606 		event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1607 		type		= IBT_ERROR_INVALID_REQUEST_QP;
1608 
1609 		TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1610 	} else {
1611 		TNF_PROBE_2(tavor_invreq_local_wq_err_handler_dropped_event,
1612 		    TAVOR_TNF_ERROR, "", tnf_uint, ev_qpnum, qpnum,
1613 		    tnf_uint, hdl_qpnum, qpnum);
1614 	}
1615 
1616 	TAVOR_TNF_EXIT(tavor_invreq_local_wq_err_handler);
1617 	return (DDI_SUCCESS);
1618 }
1619 
1620 
1621 /*
1622  * tavor_local_acc_vio_wq_err_handler()
1623  *    Context: Only called from interrupt context
1624  */
1625 static int
1626 tavor_local_acc_vio_wq_err_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1627     tavor_hw_eqe_t *eqe)
1628 {
1629 	tavor_qphdl_t		qp;
1630 	uint_t			qpnum;
1631 	ibc_async_event_t	event;
1632 	ibt_async_code_t	type;
1633 	uint_t			eqe_evttype;
1634 
1635 	TAVOR_TNF_ENTER(tavor_local_acc_vio_wq_err_handler);
1636 
1637 	eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1638 
1639 	ASSERT(eqe_evttype == TAVOR_EVT_LOCAL_ACC_VIO_WQ_ERROR ||
1640 	    eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1641 
1642 	if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1643 		TNF_PROBE_0(tavor_local_acc_vio_wq_err_eq_overflow_condition,
1644 		    TAVOR_TNF_ERROR, "");
1645 		tavor_eq_overflow_handler(state, eq, eqe);
1646 
1647 		TAVOR_TNF_EXIT(tavor_local_acc_vio_wq_err_handler);
1648 		return (DDI_FAILURE);
1649 	}
1650 
1651 	/* Get the QP handle from QP number in event descriptor */
1652 	qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1653 	qp = tavor_qphdl_from_qpnum(state, qpnum);
1654 
1655 	/*
1656 	 * If the QP handle is NULL, this is probably an indication
1657 	 * that the QP has been freed already, in which case we
1658 	 * should not deliver this event.
1659 	 *
1660 	 * We also check that the QP number in the handle is the
1661 	 * same as the QP number in the event queue entry.  This
1662 	 * extra check allows us to handle the case where a QP was
1663 	 * freed and then allocated again in the time it took to
1664 	 * handle the event queue processing.  By constantly incrementing
1665 	 * the non-constrained portion of the QP number every time
1666 	 * a new QP is allocated, we mitigate (somewhat) the chance
1667 	 * that a stale event could be passed to the client's QP
1668 	 * handler.
1669 	 *
1670 	 * Lastly, we check if "ts_ibtfpriv" is NULL.  If it is, then it
1671 	 * means that we have either received this event before we
1672 	 * finished attaching to the IBTF or we've received it while we
1673 	 * are in the process of detaching.
1674 	 */
1675 	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1676 	    (state->ts_ibtfpriv != NULL)) {
1677 		event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1678 		type		= IBT_ERROR_ACCESS_VIOLATION_QP;
1679 
1680 		TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1681 	} else {
1682 		TNF_PROBE_2(tavor_local_acc_vio_wq_err_handler_dropped_event,
1683 		    TAVOR_TNF_ERROR, "", tnf_uint, ev_qpnum, qpnum,
1684 		    tnf_uint, hdl_qpnum, qpnum);
1685 	}
1686 
1687 	TAVOR_TNF_EXIT(tavor_local_acc_vio_wq_err_handler);
1688 	return (DDI_SUCCESS);
1689 }
1690 
1691 
1692 /*
1693  * tavor_sendq_drained_handler()
1694  *    Context: Only called from interrupt context
1695  */
1696 static int
1697 tavor_sendq_drained_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1698     tavor_hw_eqe_t *eqe)
1699 {
1700 	tavor_qphdl_t		qp;
1701 	uint_t			qpnum;
1702 	ibc_async_event_t	event;
1703 	uint_t			forward_sqd_event;
1704 	ibt_async_code_t	type;
1705 	uint_t			eqe_evttype;
1706 
1707 	TAVOR_TNF_ENTER(tavor_sendq_drained_handler);
1708 
1709 	eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1710 
1711 	ASSERT(eqe_evttype == TAVOR_EVT_SEND_QUEUE_DRAINED ||
1712 	    eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1713 
1714 	if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1715 		TNF_PROBE_0(tavor_sendq_drained_eq_overflow_condition,
1716 		    TAVOR_TNF_ERROR, "");
1717 		tavor_eq_overflow_handler(state, eq, eqe);
1718 
1719 		TAVOR_TNF_EXIT(tavor_sendq_drained_handler);
1720 		return (DDI_FAILURE);
1721 	}
1722 
1723 	/* Get the QP handle from QP number in event descriptor */
1724 	qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1725 	qp = tavor_qphdl_from_qpnum(state, qpnum);
1726 
1727 	/*
1728 	 * If the QP handle is NULL, this is probably an indication
1729 	 * that the QP has been freed already, in which case we
1730 	 * should not deliver this event.
1731 	 *
1732 	 * We also check that the QP number in the handle is the
1733 	 * same as the QP number in the event queue entry.  This
1734 	 * extra check allows us to handle the case where a QP was
1735 	 * freed and then allocated again in the time it took to
1736 	 * handle the event queue processing.  By constantly incrementing
1737 	 * the non-constrained portion of the QP number every time
1738 	 * a new QP is allocated, we mitigate (somewhat) the chance
1739 	 * that a stale event could be passed to the client's QP
1740 	 * handler.
1741 	 *
1742 	 * And then we check if "ts_ibtfpriv" is NULL.  If it is, then it
1743 	 * means that we have either received this event before we
1744 	 * finished attaching to the IBTF or we've received it while we
1745 	 * are in the process of detaching.
1746 	 */
1747 	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1748 	    (state->ts_ibtfpriv != NULL)) {
1749 		event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1750 		type		= IBT_EVENT_SQD;
1751 
1752 		/*
1753 		 * Grab the QP lock and update the QP state to reflect that
1754 		 * the Send Queue Drained event has arrived.  Also determine
1755 		 * whether the event is intended to be forwarded on to the
1756 		 * consumer or not.  This information is used below in
1757 		 * determining whether or not to call the IBTF.
1758 		 */
1759 		mutex_enter(&qp->qp_lock);
1760 		forward_sqd_event = qp->qp_forward_sqd_event;
1761 		qp->qp_forward_sqd_event  = 0;
1762 		qp->qp_sqd_still_draining = 0;
1763 		mutex_exit(&qp->qp_lock);
1764 
1765 		if (forward_sqd_event != 0) {
1766 			TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1767 		}
1768 	} else {
1769 		TNF_PROBE_2(tavor_sendq_drained_handler_dropped_event,
1770 		    TAVOR_TNF_ERROR, "", tnf_uint, ev_qpnum, qpnum,
1771 		    tnf_uint, hdl_qpnum, qpnum);
1772 	}
1773 
1774 	TAVOR_TNF_EXIT(tavor_sendq_drained_handler);
1775 	return (DDI_SUCCESS);
1776 }
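
/*
 * Illustrative sketch (not part of the driver): the
 * "qp_forward_sqd_event" flag consumed by the handler above would be
 * set on the control path when a consumer moves the QP toward SQD and
 * asks to be told when the drain completes.  The setter below is a
 * hypothetical example of that producer side; only the lock discipline
 * (all flag accesses under qp_lock) is taken from the handler above.
 */
#if 0	/* example only -- never compiled */
static void
example_request_sqd_event(tavor_qphdl_t qp)
{
	mutex_enter(&qp->qp_lock);
	qp->qp_forward_sqd_event  = 1;	/* forward IBT_EVENT_SQD later */
	qp->qp_sqd_still_draining = 1;	/* drain now in progress */
	mutex_exit(&qp->qp_lock);
}
#endif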
1777 
1778 
1779 /*
1780  * tavor_path_mig_handler()
1781  *    Context: Only called from interrupt context
1782  */
1783 static int
1784 tavor_path_mig_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1785     tavor_hw_eqe_t *eqe)
1786 {
1787 	tavor_qphdl_t		qp;
1788 	uint_t			qpnum;
1789 	ibc_async_event_t	event;
1790 	ibt_async_code_t	type;
1791 	uint_t			eqe_evttype;
1792 
1793 	TAVOR_TNF_ENTER(tavor_path_mig_handler);
1794 
1795 	eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1796 
1797 	ASSERT(eqe_evttype == TAVOR_EVT_PATH_MIGRATED ||
1798 	    eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1799 
1800 	if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1801 		TNF_PROBE_0(tavor_path_mig_eq_overflow_condition,
1802 		    TAVOR_TNF_ERROR, "");
1803 		tavor_eq_overflow_handler(state, eq, eqe);
1804 
1805 		TAVOR_TNF_EXIT(tavor_path_mig_handler);
1806 		return (DDI_FAILURE);
1807 	}
1808 
1809 	/* Get the QP handle from QP number in event descriptor */
1810 	qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1811 	qp = tavor_qphdl_from_qpnum(state, qpnum);
1812 
1813 	/*
1814 	 * If the QP handle is NULL, this is probably an indication
1815 	 * that the QP has been freed already, in which case we
1816 	 * should not deliver this event.
1817 	 *
1818 	 * We also check that the QP number in the handle is the
1819 	 * same as the QP number in the event queue entry.  This
1820 	 * extra check allows us to handle the case where a QP was
1821 	 * freed and then allocated again in the time it took to
1822 	 * handle the event queue processing.  By constantly incrementing
1823 	 * the non-constrained portion of the QP number every time
1824 	 * a new QP is allocated, we mitigate (somewhat) the chance
1825 	 * that a stale event could be passed to the client's QP
1826 	 * handler.
1827 	 *
1828 	 * Lastly, we check if "ts_ibtfpriv" is NULL.  If it is, then it
1829 	 * means that we have either received this event before we
1830 	 * finished attaching to the IBTF or we've received it while we
1831 	 * are in the process of detaching.
1832 	 */
1833 	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1834 	    (state->ts_ibtfpriv != NULL)) {
1835 		event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1836 		type		= IBT_EVENT_PATH_MIGRATED_QP;
1837 
1838 		TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1839 	} else {
1840 		TNF_PROBE_2(tavor_path_mig_handler_dropped_event,
1841 		    TAVOR_TNF_ERROR, "", tnf_uint, ev_qpnum, qpnum,
1842 		    tnf_uint, hdl_qpnum, qpnum);
1843 	}
1844 
1845 	TAVOR_TNF_EXIT(tavor_path_mig_handler);
1846 	return (DDI_SUCCESS);
1847 }
1848 
1849 
1850 /*
1851  * tavor_path_mig_err_handler()
1852  *    Context: Only called from interrupt context
1853  */
1854 static int
1855 tavor_path_mig_err_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1856     tavor_hw_eqe_t *eqe)
1857 {
1858 	tavor_qphdl_t		qp;
1859 	uint_t			qpnum;
1860 	ibc_async_event_t	event;
1861 	ibt_async_code_t	type;
1862 	uint_t			eqe_evttype;
1863 
1864 	TAVOR_TNF_ENTER(tavor_path_mig_err_handler);
1865 
1866 	eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1867 
1868 	ASSERT(eqe_evttype == TAVOR_EVT_PATH_MIGRATE_FAILED ||
1869 	    eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1870 
1871 	if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1872 		TNF_PROBE_0(tavor_path_mig_err_eq_overflow_condition,
1873 		    TAVOR_TNF_ERROR, "");
1874 		tavor_eq_overflow_handler(state, eq, eqe);
1875 
1876 		TAVOR_TNF_EXIT(tavor_path_mig_err_handler);
1877 		return (DDI_FAILURE);
1878 	}
1879 
1880 	/* Get the QP handle from QP number in event descriptor */
1881 	qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1882 	qp = tavor_qphdl_from_qpnum(state, qpnum);
1883 
1884 	/*
1885 	 * If the QP handle is NULL, this is probably an indication
1886 	 * that the QP has been freed already, in which case we
1887 	 * should not deliver this event.
1888 	 *
1889 	 * We also check that the QP number in the handle is the
1890 	 * same as the QP number in the event queue entry.  This
1891 	 * extra check allows us to handle the case where a QP was
1892 	 * freed and then allocated again in the time it took to
1893 	 * handle the event queue processing.  By constantly incrementing
1894 	 * the non-constrained portion of the QP number every time
1895 	 * a new QP is allocated, we mitigate (somewhat) the chance
1896 	 * that a stale event could be passed to the client's QP
1897 	 * handler.
1898 	 *
1899 	 * Lastly, we check if "ts_ibtfpriv" is NULL.  If it is, then it
1900 	 * means that we have either received this event before we
1901 	 * finished attaching to the IBTF or we've received it while we
1902 	 * are in the process of detaching.
1903 	 */
1904 	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1905 	    (state->ts_ibtfpriv != NULL)) {
1906 		event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1907 		type		= IBT_ERROR_PATH_MIGRATE_REQ_QP;
1908 
1909 		TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1910 	} else {
1911 		TNF_PROBE_2(tavor_path_mig_err_handler_dropped_event,
1912 		    TAVOR_TNF_ERROR, "", tnf_uint, ev_qpnum, qpnum,
1913 		    tnf_uint, hdl_qpnum, qpnum);
1914 	}
1915 
1916 	TAVOR_TNF_EXIT(tavor_path_mig_err_handler);
1917 	return (DDI_SUCCESS);
1918 }
1919 
1920 
1921 /*
1922  * tavor_srq_catastrophic_handler()
1923  *    Context: Only called from interrupt context
1924  */
1925 static int
1926 tavor_srq_catastrophic_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1927     tavor_hw_eqe_t *eqe)
1928 {
1929 	tavor_qphdl_t		qp;
1930 	uint_t			qpnum;
1931 	ibc_async_event_t	event;
1932 	ibt_async_code_t	type;
1933 	uint_t			eqe_evttype;
1934 
1935 	TAVOR_TNF_ENTER(tavor_srq_catastrophic_handler);
1936 
1937 	eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1938 
1939 	ASSERT(eqe_evttype == TAVOR_EVT_SRQ_CATASTROPHIC_ERROR ||
1940 	    eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1941 
1942 	if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1943 		TNF_PROBE_0(tavor_srq_catastrophic_overflow_condition,
1944 		    TAVOR_TNF_ERROR, "");
1945 		tavor_eq_overflow_handler(state, eq, eqe);
1946 
1947 		TAVOR_TNF_EXIT(tavor_srq_catastrophic_handler);
1948 		return (DDI_FAILURE);
1949 	}
1950 
1951 	/* Get the QP handle from QP number in event descriptor */
1952 	qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1953 	qp = tavor_qphdl_from_qpnum(state, qpnum);
1954 
1955 	/*
1956 	 * If the QP handle is NULL, this is probably an indication
1957 	 * that the QP has been freed already, in which case we
1958 	 * should not deliver this event.
1959 	 *
1960 	 * We also check that the QP number in the handle is the
1961 	 * same as the QP number in the event queue entry.  This
1962 	 * extra check allows us to handle the case where a QP was
1963 	 * freed and then allocated again in the time it took to
1964 	 * handle the event queue processing.  By constantly incrementing
1965 	 * the non-constrained portion of the QP number every time
1966 	 * a new QP is allocated, we mitigate (somewhat) the chance
1967 	 * that a stale event could be passed to the client's QP
1968 	 * handler.
1969 	 *
1970 	 * Lastly, we check if "ts_ibtfpriv" is NULL.  If it is, then it
1971 	 * means that we have either received this event before we
1972 	 * finished attaching to the IBTF or we've received it while we
1973 	 * are in the process of detaching.
1974 	 */
1975 	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1976 	    (state->ts_ibtfpriv != NULL)) {
1977 		event.ev_srq_hdl = (ibt_srq_hdl_t)qp->qp_srqhdl->srq_hdlrarg;
1978 		type		= IBT_ERROR_CATASTROPHIC_SRQ;
1979 
1980 		mutex_enter(&qp->qp_srqhdl->srq_lock);
1981 		qp->qp_srqhdl->srq_state = TAVOR_SRQ_STATE_ERROR;
1982 		mutex_exit(&qp->qp_srqhdl->srq_lock);
1983 
1984 		TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1985 	} else {
1986 		TNF_PROBE_2(tavor_srq_catastrophic_handler_dropped_event,
1987 		    TAVOR_TNF_ERROR, "", tnf_uint, ev_qpnum, qpnum,
1988 		    tnf_uint, hdl_qpnum, qpnum);
1989 	}
1990 
1991 	TAVOR_TNF_EXIT(tavor_srq_catastrophic_handler);
1992 	return (DDI_SUCCESS);
1993 }
1994 
1995 
1996 /*
1997  * tavor_srq_last_wqe_reached_handler()
1998  *    Context: Only called from interrupt context
1999  */
2000 static int
2001 tavor_srq_last_wqe_reached_handler(tavor_state_t *state, tavor_eqhdl_t eq,
2002     tavor_hw_eqe_t *eqe)
2003 {
2004 	tavor_qphdl_t		qp;
2005 	uint_t			qpnum;
2006 	ibc_async_event_t	event;
2007 	ibt_async_code_t	type;
2008 	uint_t			eqe_evttype;
2009 
2010 	TAVOR_TNF_ENTER(tavor_srq_last_wqe_reached_handler);
2011 
2012 	eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
2013 
2014 	ASSERT(eqe_evttype == TAVOR_EVT_SRQ_LAST_WQE_REACHED ||
2015 	    eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
2016 
2017 	if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
2018 		TNF_PROBE_0(tavor_srq_last_wqe_reached_over_condition,
2019 		    TAVOR_TNF_ERROR, "");
2020 		tavor_eq_overflow_handler(state, eq, eqe);
2021 
2022 		TAVOR_TNF_EXIT(tavor_srq_last_wqe_reached_handler);
2023 		return (DDI_FAILURE);
2024 	}
2025 
2026 	/* Get the QP handle from QP number in event descriptor */
2027 	qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
2028 	qp = tavor_qphdl_from_qpnum(state, qpnum);
2029 
2030 	/*
2031 	 * If the QP handle is NULL, this is probably an indication
2032 	 * that the QP has been freed already, in which case we
2033 	 * should not deliver this event.
2034 	 *
2035 	 * We also check that the QP number in the handle is the
2036 	 * same as the QP number in the event queue entry.  This
2037 	 * extra check allows us to handle the case where a QP was
2038 	 * freed and then allocated again in the time it took to
2039 	 * handle the event queue processing.  By constantly incrementing
2040 	 * the non-constrained portion of the QP number every time
2041 	 * a new QP is allocated, we mitigate (somewhat) the chance
2042 	 * that a stale event could be passed to the client's QP
2043 	 * handler.
2044 	 *
2045 	 * Lastly, we check if "ts_ibtfpriv" is NULL.  If it is, then it
2046 	 * means that we have either received this event before we
2047 	 * finished attaching to the IBTF or we've received it while we
2048 	 * are in the process of detaching.
2049 	 */
2050 	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
2051 	    (state->ts_ibtfpriv != NULL)) {
2052 		event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
2053 		type		= IBT_EVENT_EMPTY_CHAN;
2054 
2055 		TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
2056 	} else {
2057 		TNF_PROBE_2(tavor_srq_last_wqe_reached_dropped_event,
2058 		    TAVOR_TNF_ERROR, "", tnf_uint, ev_qpnum, qpnum,
2059 		    tnf_uint, hdl_qpnum, qpnum);
2060 	}
2061 
2062 	TAVOR_TNF_EXIT(tavor_srq_last_wqe_reached_handler);
2063 	return (DDI_SUCCESS);
2064 }
2065 
2066 
2067 /*
2068  * tavor_ecc_detection_handler()
2069  *    Context: Only called from interrupt context
2070  */
2071 static int
2072 tavor_ecc_detection_handler(tavor_state_t *state, tavor_eqhdl_t eq,
2073     tavor_hw_eqe_t *eqe)
2074 {
2075 	uint_t			eqe_evttype;
2076 	uint_t			data;
2077 	int			i;
2078 
2079 	TAVOR_TNF_ENTER(tavor_ecc_detection_handler);
2080 
2081 	eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
2082 
2083 	ASSERT(eqe_evttype == TAVOR_EVT_ECC_DETECTION ||
2084 	    eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
2085 
2086 	if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
2087 		TNF_PROBE_0(tavor_ecc_detection_eq_overflow_condition,
2088 		    TAVOR_TNF_ERROR, "");
2089 		tavor_eq_overflow_handler(state, eq, eqe);
2090 
2091 		TAVOR_TNF_EXIT(tavor_ecc_detection_handler);
2092 		return (DDI_FAILURE);
2093 	}
2094 
2095 	/*
2096 	 * The "ECC Detection Event" indicates that a correctable single-bit
2097 	 * error has occurred in the attached DDR memory.  The EQE provides
2098 	 * some additional information about the error.  So we print a
2099 	 * warning message here along with that additional information.
2100 	 */
2101 	TAVOR_WARNING(state, "ECC Correctable Error Event Detected");
2102 	for (i = 0; i < sizeof (tavor_hw_eqe_t) >> 2; i++) {
2103 		data = ((uint_t *)eqe)[i];
2104 		cmn_err(CE_CONT, "!  EQE[%02x]: %08x\n", i, data);
2105 	}
2106 
2107 	TAVOR_TNF_EXIT(tavor_ecc_detection_handler);
2108 	return (DDI_SUCCESS);
2109 }
2110 
2111 
2112 /*
2113  * tavor_eq_overflow_handler()
2114  *    Context: Only called from interrupt context
2115  */
2116 void
2117 tavor_eq_overflow_handler(tavor_state_t *state, tavor_eqhdl_t eq,
2118     tavor_hw_eqe_t *eqe)
2119 {
2120 	uint_t		error_type, data;
2121 
2122 	TAVOR_TNF_ENTER(tavor_eq_overflow_handler);
2123 
2124 	ASSERT(TAVOR_EQE_EVTTYPE_GET(eq, eqe) == TAVOR_EVT_EQ_OVERFLOW);
2125 
2126 	/*
2127 	 * The "Event Queue Overflow Event" indicates that something has
2128 	 * probably gone seriously wrong with some hardware (or, perhaps,
2129 	 * with the software... though it's unlikely in this case).  The EQE
2130 	 * provides some additional information about the errored EQ.  So we
2131 	 * print a warning message here along with that additional information.
2132 	 */
2133 	error_type = TAVOR_EQE_OPERRTYPE_GET(eq, eqe);
2134 	data	   = TAVOR_EQE_OPERRDATA_GET(eq, eqe);
2135 
2136 	TAVOR_WARNING(state, "Event Queue overflow");
2137 	cmn_err(CE_CONT, "  Error type: %02x, data: %08x\n", error_type, data);
2138 
2139 	TAVOR_TNF_EXIT(tavor_eq_overflow_handler);
2140 }
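
/*
 * Illustrative sketch (not part of the driver): the TAVOR_EQE_*_GET
 * macros used throughout these handlers are, in essence, shift-and-mask
 * accessors over the 32-bit words of the EQE (the real macros also take
 * the EQ handle, presumably so the read can go through the proper DDI
 * access handle).  The accessor below shows the general shape only; the
 * word offset and mask are hypothetical and do not describe the real
 * EQE layout.
 */
#if 0	/* example only -- never compiled */
#define	EXAMPLE_EQE_QPNUM_WORD	1		/* hypothetical offset */
#define	EXAMPLE_EQE_QPNUM_MASK	0x00ffffff	/* hypothetical mask */

static uint32_t
example_eqe_qpnum_get(tavor_hw_eqe_t *eqe)
{
	/* Fetch the word holding the field, then mask the field out */
	uint32_t word = ((uint32_t *)eqe)[EXAMPLE_EQE_QPNUM_WORD];

	return (word & EXAMPLE_EQE_QPNUM_MASK);
}
#endif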
2141 
2142 
2143 /*
2144  * tavor_no_eqhandler
2145  *    Context: Only called from interrupt context
2146  */
2147 /* ARGSUSED */
2148 static int
2149 tavor_no_eqhandler(tavor_state_t *state, tavor_eqhdl_t eq,
2150     tavor_hw_eqe_t *eqe)
2151 {
2152 	uint_t		data;
2153 	int		i;
2154 
2155 	TAVOR_TNF_ENTER(tavor_no_eqhandler);
2156 
2157 	/*
2158 	 * This "unexpected event" handler (or "catch-all" handler) will
2159 	 * receive all events for which no other handler has been registered.
2160 	 * If we end up here, then something has probably gone seriously wrong
2161 	 * with the Tavor hardware (or, perhaps, with the software... though
2162 	 * it's unlikely in this case).  The EQE provides all the information
2163 	 * about the event.  So we print a warning message here along with
2164 	 * the contents of the EQE.
2165 	 */
2166 	TAVOR_WARNING(state, "Unexpected Event handler");
2167 	cmn_err(CE_CONT, "  Event type: %02x, subtype: %02x\n",
2168 	    TAVOR_EQE_EVTTYPE_GET(eq, eqe), TAVOR_EQE_EVTSUBTYPE_GET(eq, eqe));
2169 	for (i = 0; i < sizeof (tavor_hw_eqe_t) >> 2; i++) {
2170 		data = ((uint_t *)eqe)[i];
2171 		cmn_err(CE_CONT, "  EQE[%02x]: %08x\n", i, data);
2172 	}
2173 
2174 	TAVOR_TNF_EXIT(tavor_no_eqhandler);
2175 	return (DDI_SUCCESS);
2176 }
2177