/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */


/*
 *
 * MODULE: dapl_ia_util.c
 *
 * PURPOSE: Manage IA Info structure
 *
 * $Id: dapl_ia_util.c,v 1.29 2003/07/25 19:24:11 sjs2 Exp $
 */

#include "dapl.h"
#include "dapl_hca_util.h"
#include "dapl_ia_util.h"
#include "dapl_evd_util.h"
#include "dapl_adapter_util.h"

/* Internal prototype */
void dapli_ia_release_hca(
	DAPL_HCA *hca_ptr);


/*
 * dapl_ia_alloc
 *
 * alloc and initialize an IA INFO struct
 *
 * Input:
 *	none
 *
 * Output:
 *	ia_ptr
 *
 * Returns:
 *	none
 *
 */
DAPL_IA *
dapl_ia_alloc(DAT_PROVIDER * provider, DAPL_HCA * hca_ptr)
{
	DAPL_IA * ia_ptr;

	/* Allocate IA */
	ia_ptr = (DAPL_IA *) dapl_os_alloc(sizeof (DAPL_IA));
	if (ia_ptr == NULL) {
		return (NULL);
	}

	/* zero the structure */
	(void) dapl_os_memzero(ia_ptr, sizeof (DAPL_IA));

	/*
	 * initialize the header
	 */
	ia_ptr->header.provider = provider;
	ia_ptr->header.magic = DAPL_MAGIC_IA;
	ia_ptr->header.handle_type = DAT_HANDLE_TYPE_IA;
	ia_ptr->header.owner_ia = ia_ptr;
	ia_ptr->header.user_context.as_64 = 0;
	ia_ptr->header.user_context.as_ptr = NULL;
	dapl_llist_init_entry(&ia_ptr->header.ia_list_entry);
	dapl_os_lock_init(&ia_ptr->header.lock);

	/*
	 * initialize the body
	 */
	ia_ptr->hca_ptr = hca_ptr;
	ia_ptr->async_error_evd = NULL;
	ia_ptr->cleanup_async_error_evd = DAT_FALSE;
	dapl_llist_init_entry(&ia_ptr->hca_ia_list_entry);
	dapl_llist_init_head(&ia_ptr->ep_list_head);
	dapl_llist_init_head(&ia_ptr->lmr_list_head);
	dapl_llist_init_head(&ia_ptr->rmr_list_head);
	dapl_llist_init_head(&ia_ptr->pz_list_head);
	dapl_llist_init_head(&ia_ptr->evd_list_head);
	dapl_llist_init_head(&ia_ptr->cno_list_head);
	dapl_llist_init_head(&ia_ptr->rsp_list_head);
	dapl_llist_init_head(&ia_ptr->psp_list_head);

	/*
	 * initialize the flags
	 */
	ia_ptr->dapl_flags = 0;

	dapl_hca_link_ia(hca_ptr, ia_ptr);

	return (ia_ptr);
}
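
/*
 * Usage sketch: a rough illustration of how an IA open path might use
 * dapl_ia_alloc.  The hca_ptr lookup and the error code chosen here are
 * simplified assumptions, not an excerpt of the actual dat_ia_open code.
 *
 *	DAPL_IA		*ia_ptr;
 *
 *	ia_ptr = dapl_ia_alloc(provider, hca_ptr);
 *	if (ia_ptr == NULL)
 *		return (DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,
 *		    DAT_RESOURCE_MEMORY));
 *
 *	dapl_ia_alloc links the new IA onto hca_ptr's IA list itself, so the
 *	caller does not call dapl_hca_link_ia() separately.
 */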


/*
 * dapl_ia_abrupt_close
 *
 * Performs an abrupt close of the IA
 *
 * Input:
 *	ia_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	status
 *
 */

DAT_RETURN
dapl_ia_abrupt_close(
	IN DAPL_IA *ia_ptr)
{
	DAT_RETURN dat_status;
	DAPL_EP *ep_ptr, *next_ep_ptr;
	DAPL_LMR *lmr_ptr, *next_lmr_ptr;
	DAPL_RMR *rmr_ptr, *next_rmr_ptr;
	DAPL_PZ *pz_ptr, *next_pz_ptr;
	DAPL_EVD *evd_ptr, *next_evd_ptr;
	DAPL_CNO *cno_ptr, *next_cno_ptr;
	DAPL_SP *sp_ptr, *next_sp_ptr;	/* for PSP and RSP queues */
	DAPL_HCA *hca_ptr;

	dat_status = DAT_SUCCESS;

	/*
	 * clear all the data structures associated with the IA.
	 * this must be done in order (rmr,rsp) before (ep lmr psp) before
	 * (pz evd)
	 *
	 * Note that in all the following we can leave the loop either
	 * when we run out of entries, or when we get back to the head
	 * if we end up skipping an entry.
	 */

	rmr_ptr = (dapl_llist_is_empty(&ia_ptr->rmr_list_head)
	    ? NULL : dapl_llist_peek_head(&ia_ptr->rmr_list_head));
	while (rmr_ptr != NULL) {
		next_rmr_ptr = dapl_llist_next_entry(&ia_ptr->rmr_list_head,
		    &rmr_ptr->header.ia_list_entry);
		dat_status = dapl_rmr_free(rmr_ptr);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_WARN,
			    "ia_close(ABRUPT): rmr_free(%p) returns %x\n",
			    rmr_ptr,
			    dat_status);
		}
		rmr_ptr = next_rmr_ptr;
	}

	sp_ptr = (dapl_llist_is_empty(&ia_ptr->rsp_list_head)
	    ? NULL : dapl_llist_peek_head(&ia_ptr->rsp_list_head));
	while (sp_ptr != NULL) {
		next_sp_ptr = dapl_llist_next_entry(&ia_ptr->rsp_list_head,
		    &sp_ptr->header.ia_list_entry);
		dat_status = dapl_rsp_free(sp_ptr);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_WARN,
			    "ia_close(ABRUPT): rsp_free(%p) returns %x\n",
			    sp_ptr,
			    dat_status);
		}
		sp_ptr = next_sp_ptr;
	}

	ep_ptr = (dapl_llist_is_empty(&ia_ptr->ep_list_head)
	    ? NULL : dapl_llist_peek_head(&ia_ptr->ep_list_head));
	while (ep_ptr != NULL) {
		next_ep_ptr = dapl_llist_next_entry(&ia_ptr->ep_list_head,
		    &ep_ptr->header.ia_list_entry);
		dat_status = dapl_ep_disconnect(ep_ptr, DAT_CLOSE_ABRUPT_FLAG);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_WARN,
			    "ia_close(ABRUPT): ep_disconnect(%p) returns %x\n",
			    ep_ptr,
			    dat_status);
		}
		dat_status = dapl_ep_free(ep_ptr);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_WARN,
			    "ia_close(ABRUPT): ep_free(%p) returns %x\n",
			    ep_ptr,
			    dat_status);
		}
		ep_ptr = next_ep_ptr;
	}

	lmr_ptr = (dapl_llist_is_empty(&ia_ptr->lmr_list_head)
	    ? NULL : dapl_llist_peek_head(&ia_ptr->lmr_list_head));
	while (lmr_ptr != NULL) {
		next_lmr_ptr = dapl_llist_next_entry(&ia_ptr->lmr_list_head,
		    &lmr_ptr->header.ia_list_entry);
		dat_status = dapl_lmr_free(lmr_ptr);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_WARN,
			    "ia_close(ABRUPT): lmr_free(%p) returns %x\n",
			    lmr_ptr,
			    dat_status);
		}
		lmr_ptr = next_lmr_ptr;
	}

	sp_ptr = (dapl_llist_is_empty(&ia_ptr->psp_list_head)
	    ? NULL : dapl_llist_peek_head(&ia_ptr->psp_list_head));
	while (sp_ptr != NULL) {
		next_sp_ptr = dapl_llist_next_entry(&ia_ptr->psp_list_head,
		    &sp_ptr->header.ia_list_entry);
		dat_status = dapl_psp_free(sp_ptr);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_WARN,
			    "ia_close(ABRUPT): psp_free(%p) returns %x\n",
			    sp_ptr,
			    dat_status);
		}
		sp_ptr = next_sp_ptr;
	}

	pz_ptr = (dapl_llist_is_empty(&ia_ptr->pz_list_head)
	    ? NULL : dapl_llist_peek_head(&ia_ptr->pz_list_head));
	while (pz_ptr != NULL) {
		next_pz_ptr = dapl_llist_next_entry(&ia_ptr->pz_list_head,
		    &pz_ptr->header.ia_list_entry);
		dat_status = dapl_pz_free(pz_ptr);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_WARN,
			    "ia_close(ABRUPT): pz_free(%p) returns %x\n",
			    pz_ptr,
			    dat_status);
		}
		pz_ptr = next_pz_ptr;
	}

	/*
	 * EVDs are tricky; we want to release all except for the async
	 * EVD. That EVD needs to stick around until after we close the
	 * HCA, to accept any async events that occur. So we cycle through
	 * the list with dapl_llist_next_entry instead of dapl_llist_is_empty.
	 */
	evd_ptr = (dapl_llist_is_empty(&ia_ptr->evd_list_head)
	    ? NULL : dapl_llist_peek_head(&ia_ptr->evd_list_head));
	while (evd_ptr != NULL) {
		next_evd_ptr = dapl_llist_next_entry(&ia_ptr->evd_list_head,
		    &evd_ptr->header.ia_list_entry);
		if (evd_ptr == ia_ptr->async_error_evd) {
			/*
			 * Don't delete the EVD, but break any CNO
			 * connections.
			 */
			(void) dapl_evd_disable(evd_ptr);
			(void) dapl_evd_modify_cno(evd_ptr,
			    DAT_HANDLE_NULL);
		} else {
			/* it isn't the async EVD; delete it. */
			dat_status = dapl_evd_free(evd_ptr);
			if (dat_status != DAT_SUCCESS) {
				dapl_dbg_log(DAPL_DBG_TYPE_WARN,
				    "ia_close(ABRUPT): evd_free(%p) "
				    "returns %x\n",
				    evd_ptr,
				    dat_status);
			}
		}
		evd_ptr = next_evd_ptr;
	}

	cno_ptr = (dapl_llist_is_empty(&ia_ptr->cno_list_head)
	    ? NULL : dapl_llist_peek_head(&ia_ptr->cno_list_head));
	while (cno_ptr != NULL) {
		next_cno_ptr = dapl_llist_next_entry(&ia_ptr->cno_list_head,
		    &cno_ptr->header.ia_list_entry);
		dat_status = dapl_cno_free(cno_ptr);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_WARN,
			    "ia_close(ABRUPT): cno_free(%p) returns %x\n",
			    cno_ptr,
			    dat_status);
		}
		cno_ptr = next_cno_ptr;
	}

	hca_ptr = ia_ptr->hca_ptr;

	/*
	 * Free the async EVD, shutting down callbacks from the HCA.
	 */
	if (ia_ptr->async_error_evd &&
	    (DAT_TRUE == ia_ptr->cleanup_async_error_evd)) {
		dat_status = dapls_ia_teardown_callbacks(ia_ptr);

		hca_ptr->async_evd = NULL; /* It was our async EVD; nuke it. */

		dapl_os_atomic_dec(& ia_ptr->async_error_evd->evd_ref_count);
		dat_status = dapl_evd_free(ia_ptr->async_error_evd);

		if (DAT_SUCCESS != dat_status) {
			dapl_dbg_log(DAPL_DBG_TYPE_WARN,
			    "ia_close(ABRUPT): evd_free(%p) returns %x\n",
			    ia_ptr->async_error_evd,
			    dat_status);
		}

		ia_ptr->async_error_evd = NULL;
	}

	/*
	 * Release our reference on the hca_handle. If we are the last
	 * one, close it
	 */
	dapli_ia_release_hca(hca_ptr);

	dapls_ia_free(ia_ptr);

	return (DAT_SUCCESS);	/* Abrupt close can't fail. */
}


/*
 * dapl_ia_graceful_close
 *
 * Performs a graceful close of the IA
 *
 * Input:
 *	ia_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	status
 *
 */

DAT_RETURN
dapl_ia_graceful_close(
	IN DAPL_IA *ia_ptr)
{
	DAT_RETURN dat_status;
	DAT_RETURN cur_dat_status;
	DAPL_EVD *evd_ptr;
	DAPL_LLIST_ENTRY *entry;
	DAPL_HCA *hca_ptr;

	dat_status = DAT_SUCCESS;

	if (!dapl_llist_is_empty(&ia_ptr->rmr_list_head) ||
	    !dapl_llist_is_empty(&ia_ptr->rsp_list_head) ||
	    !dapl_llist_is_empty(&ia_ptr->ep_list_head) ||
	    !dapl_llist_is_empty(&ia_ptr->lmr_list_head) ||
	    !dapl_llist_is_empty(&ia_ptr->psp_list_head) ||
	    !dapl_llist_is_empty(&ia_ptr->pz_list_head)) {
		dat_status = DAT_ERROR(DAT_INVALID_STATE,
		    DAT_INVALID_STATE_IA_IN_USE);
		goto bail;
	}

	/* if the async evd does not need to be cleaned up */
	/* (i.e. it was not created by dapl_ia_open) */
	/* then the evd list should be empty */
	if (DAT_FALSE == ia_ptr->cleanup_async_error_evd) {
		if (!dapl_llist_is_empty(&ia_ptr->evd_list_head)) {
			dat_status = DAT_ERROR(DAT_INVALID_STATE,
			    DAT_INVALID_STATE_IA_IN_USE);
			goto bail;
		}
	} else {
		/* else the async evd should be the only evd in */
		/* the list. */
		evd_ptr = (DAPL_EVD *)
		    dapl_llist_peek_head(&ia_ptr->evd_list_head);

		if (!(evd_ptr->evd_flags & DAT_EVD_ASYNC_FLAG)) {
			dat_status = DAT_ERROR(DAT_INVALID_STATE,
			    DAT_INVALID_STATE_IA_IN_USE);
			goto bail;
		}

		entry = ia_ptr->evd_list_head;

		/* if the async evd is not the only element in the list */
		if (entry->blink != entry->flink) {
			dat_status = DAT_ERROR(DAT_INVALID_STATE,
			    DAT_INVALID_STATE_IA_IN_USE);
			goto bail;
		}

		/*
		 * If the async evd has a non-unary ref count (i.e. it's in
		 * use by someone besides us), the IA is still in use.
		 */
		if (evd_ptr->evd_ref_count != 1) {
			dat_status = DAT_ERROR(DAT_INVALID_STATE,
			    DAT_INVALID_STATE_IA_IN_USE);
			goto bail;
		}
	}

	/*
	 * We've validated the call; now we can start the teardown.
	 * Because we're in the IA close routine, we're safe from races with
	 * DAPL consumers on this IA (operate/destroy races are disallowed in
	 * DAPL).
	 */
	hca_ptr = ia_ptr->hca_ptr;

	/* Tear down the async EVD if needed, first shutting down callbacks. */
	if (ia_ptr->async_error_evd &&
	    (DAT_TRUE == ia_ptr->cleanup_async_error_evd)) {
		cur_dat_status = dapls_ia_teardown_callbacks(ia_ptr);
		if (DAT_SUCCESS != cur_dat_status) {
			dat_status = cur_dat_status;
		}
		hca_ptr->async_evd = NULL;
		dapl_os_atomic_dec(& ia_ptr->async_error_evd->evd_ref_count);
		cur_dat_status = dapl_evd_free(ia_ptr->async_error_evd);
		if (DAT_SUCCESS != cur_dat_status) {
			dat_status = cur_dat_status;
		}

		ia_ptr->async_error_evd = NULL;
	}

	dapli_ia_release_hca(hca_ptr);

	dapls_ia_free(ia_ptr);

bail:
	return (dat_status);
}
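
/*
 * Usage sketch: the two close routines above back dat_ia_close() at the
 * DAT API level.  The fragment below is an illustration, not code from
 * this library: a graceful close fails with DAT_INVALID_STATE_IA_IN_USE
 * while child objects remain, whereas an abrupt close tears everything
 * down and always returns DAT_SUCCESS from this layer.
 *
 *	rc = dat_ia_close(ia_handle, DAT_CLOSE_GRACEFUL_FLAG);
 *	if (rc != DAT_SUCCESS)
 *		(void) dat_ia_close(ia_handle, DAT_CLOSE_ABRUPT_FLAG);
 */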

/*
 * Release a reference on the HCA handle. If it is 0, close the
 * handle. Manipulate under lock to prevent races with threads trying to
 * open the HCA.
 */
void
dapli_ia_release_hca(
	DAPL_HCA *hca_ptr)
{
	dapl_os_lock(&hca_ptr->lock);
	dapl_os_atomic_dec(& hca_ptr->handle_ref_count);
	if (hca_ptr->handle_ref_count == 0) {
		DAT_RETURN dat_status;

		/*
		 * Get rid of the cqd associated with the hca.
		 * Print out instead of status return as this routine
		 * shouldn't fail.
		 */
		dat_status = dapls_ib_cqd_destroy(hca_ptr);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_ERR,
			    "ERR: Cannot free CQD: err %x\n", dat_status);
		}

		(void) dapls_ib_close_hca(hca_ptr->ib_hca_handle);
		hca_ptr->ib_hca_handle = IB_INVALID_HANDLE;
	}
	dapl_os_unlock(&hca_ptr->lock);
}


/*
 * dapls_ia_free
 *
 * free an IA INFO struct
 *
 * Input:
 *	ia_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapls_ia_free(DAPL_IA *ia_ptr)
{
	dapl_os_assert(ia_ptr->header.magic == DAPL_MAGIC_IA);

	dapl_os_assert(ia_ptr->async_error_evd == NULL);
	dapl_os_assert(dapl_llist_is_empty(&ia_ptr->lmr_list_head));
	dapl_os_assert(dapl_llist_is_empty(&ia_ptr->rmr_list_head));
	dapl_os_assert(dapl_llist_is_empty(&ia_ptr->ep_list_head));
	dapl_os_assert(dapl_llist_is_empty(&ia_ptr->evd_list_head));
	dapl_os_assert(dapl_llist_is_empty(&ia_ptr->cno_list_head));
	dapl_os_assert(dapl_llist_is_empty(&ia_ptr->psp_list_head));
	dapl_os_assert(dapl_llist_is_empty(&ia_ptr->rsp_list_head));

	/*
	 * deinitialize the header
	 */
	dapl_hca_unlink_ia(ia_ptr->hca_ptr, ia_ptr);
	/* reset magic to prevent reuse */
	ia_ptr->header.magic = DAPL_MAGIC_INVALID;
	dapl_os_lock_destroy(&ia_ptr->header.lock);

	dapl_os_free(ia_ptr, sizeof (DAPL_IA));
}

/*
 * dapl_ia_link_ep
 *
 * Add an ep to the IA structure
 *
 * Input:
 *	ia_ptr
 *	ep_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_link_ep(
	IN DAPL_IA *ia_ptr,
	IN DAPL_EP *ep_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	dapl_llist_add_head(&ia_ptr->ep_list_head,
	    &ep_ptr->header.ia_list_entry,
	    ep_ptr);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_unlink_ep
 *
 * Remove an ep from the ia info structure
 *
 * Input:
 *	ia_ptr
 *	ep_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_unlink_ep(
	IN DAPL_IA *ia_ptr,
	IN DAPL_EP *ep_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	(void) dapl_llist_remove_entry(&ia_ptr->ep_list_head,
	    &ep_ptr->header.ia_list_entry);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_link_lmr
 *
 * Add an lmr to the IA structure
 *
 * Input:
 *	ia_ptr
 *	lmr_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_link_lmr(
	IN DAPL_IA *ia_ptr,
	IN DAPL_LMR *lmr_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	dapl_llist_add_head(&ia_ptr->lmr_list_head,
	    &lmr_ptr->header.ia_list_entry,
	    lmr_ptr);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_unlink_lmr
 *
 * Remove an lmr from the ia info structure
 *
 * Input:
 *	ia_ptr
 *	lmr_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_unlink_lmr(
	IN DAPL_IA *ia_ptr,
	IN DAPL_LMR *lmr_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	(void) dapl_llist_remove_entry(&ia_ptr->lmr_list_head,
	    &lmr_ptr->header.ia_list_entry);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_link_rmr
 *
 * Add an rmr to the IA structure
 *
 * Input:
 *	ia_ptr
 *	rmr_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_link_rmr(
	IN DAPL_IA *ia_ptr,
	IN DAPL_RMR *rmr_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	dapl_llist_add_head(&ia_ptr->rmr_list_head,
	    &rmr_ptr->header.ia_list_entry,
	    rmr_ptr);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_unlink_rmr
 *
 * Remove an rmr from the ia info structure
 *
 * Input:
 *	ia_ptr
 *	rmr_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_unlink_rmr(
	IN DAPL_IA *ia_ptr,
	IN DAPL_RMR *rmr_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	(void) dapl_llist_remove_entry(&ia_ptr->rmr_list_head,
	    &rmr_ptr->header.ia_list_entry);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_link_pz
 *
 * Add a pz to the IA structure
 *
 * Input:
 *	ia_ptr
 *	pz_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_link_pz(
	IN DAPL_IA *ia_ptr,
	IN DAPL_PZ *pz_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	dapl_llist_add_head(&ia_ptr->pz_list_head,
	    &pz_ptr->header.ia_list_entry,
	    pz_ptr);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_unlink_pz
 *
 * Remove a pz from the ia info structure
 *
 * Input:
 *	ia_ptr
 *	pz_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_unlink_pz(
	IN DAPL_IA *ia_ptr,
	IN DAPL_PZ *pz_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	(void) dapl_llist_remove_entry(&ia_ptr->pz_list_head,
	    &pz_ptr->header.ia_list_entry);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_link_evd
 *
 * Add an evd to the IA structure
 *
 * Input:
 *	ia_ptr
 *	evd_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_link_evd(
	IN DAPL_IA *ia_ptr,
	IN DAPL_EVD *evd_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	dapl_llist_add_head(&ia_ptr->evd_list_head,
	    &evd_ptr->header.ia_list_entry,
	    evd_ptr);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_unlink_evd
 *
 * Remove an evd from the ia info structure
 *
 * Input:
 *	ia_ptr
 *	evd_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_unlink_evd(
	IN DAPL_IA *ia_ptr,
	IN DAPL_EVD *evd_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	(void) dapl_llist_remove_entry(&ia_ptr->evd_list_head,
	    &evd_ptr->header.ia_list_entry);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_link_cno
 *
 * Add a cno to the IA structure
 *
 * Input:
 *	ia_ptr
 *	cno_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_link_cno(
	IN DAPL_IA *ia_ptr,
	IN DAPL_CNO *cno_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	dapl_llist_add_head(&ia_ptr->cno_list_head,
	    &cno_ptr->header.ia_list_entry,
	    cno_ptr);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_unlink_cno
 *
 * Remove a cno from the ia info structure
 *
 * Input:
 *	ia_ptr
 *	cno_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_unlink_cno(
	IN DAPL_IA *ia_ptr,
	IN DAPL_CNO *cno_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	(void) dapl_llist_remove_entry(&ia_ptr->cno_list_head,
	    &cno_ptr->header.ia_list_entry);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_link_psp
 *
 * Add a psp to the IA structure
 *
 * Input:
 *	ia_ptr
 *	sp_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_link_psp(
	IN DAPL_IA *ia_ptr,
	IN DAPL_SP *sp_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	dapl_llist_add_head(&ia_ptr->psp_list_head,
	    &sp_ptr->header.ia_list_entry,
	    sp_ptr);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapls_ia_unlink_sp
 *
 * Remove an sp from the appropriate ia rsp or psp queue
 *
 * Input:
 *	ia_ptr
 *	sp_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapls_ia_unlink_sp(
	IN DAPL_IA *ia_ptr,
	IN DAPL_SP *sp_ptr)
{
	DAPL_LLIST_HEAD *list_head;

	if (sp_ptr->header.handle_type == DAT_HANDLE_TYPE_PSP) {
		list_head = &ia_ptr->psp_list_head;
	} else {
		dapl_os_assert(sp_ptr->header.handle_type ==
		    DAT_HANDLE_TYPE_RSP);
		list_head = &ia_ptr->rsp_list_head;
	}

	dapl_os_lock(&ia_ptr->header.lock);
	(void) dapl_llist_remove_entry(list_head,
	    &sp_ptr->header.ia_list_entry);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapls_ia_sp_search
 *
 * Find an RSP or PSP on the IA list with a matching conn_qual value
 *
 * Input:
 *	ia_ptr
 *	conn_qual
 *	is_psp
 *
 * Output:
 *	none
 *
 * Returns:
 *	pointer to the matching SP, or NULL if no match is found
 *
 */
DAPL_SP *
dapls_ia_sp_search(
	IN DAPL_IA *ia_ptr,
	IN DAT_CONN_QUAL conn_qual,
	IN DAT_BOOLEAN is_psp)
{
	DAPL_SP *sp_ptr;
	DAPL_LLIST_HEAD *list_head;

	if (is_psp) {
		list_head = &ia_ptr->psp_list_head;
	} else {
		list_head = &ia_ptr->rsp_list_head;
	}

	dapl_os_lock(&ia_ptr->header.lock);

	sp_ptr = (dapl_llist_is_empty(list_head) ? NULL :
	    dapl_llist_peek_head(list_head));

	while (sp_ptr != NULL) {
		if (sp_ptr->conn_qual == conn_qual) {
			break;
		}
		sp_ptr = dapl_llist_next_entry(list_head,
		    &sp_ptr->header.ia_list_entry);
	}

	dapl_os_unlock(&ia_ptr->header.lock);

	return (sp_ptr);
}
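
/*
 * Usage sketch (illustrative only; everything other than the call to
 * dapls_ia_sp_search is an assumption about the caller): a connection
 * manager's listen path can use this routine to match an incoming
 * request's connection qualifier against the PSPs registered on the IA.
 *
 *	DAPL_SP	*sp_ptr;
 *
 *	sp_ptr = dapls_ia_sp_search(ia_ptr, conn_qual, DAT_TRUE);
 *	if (sp_ptr == NULL)
 *		... reject the request: no PSP is listening on conn_qual ...
 */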


/*
 * dapl_ia_link_rsp
 *
 * Add an rsp to the IA structure
 *
 * Input:
 *	ia_ptr
 *	sp_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_link_rsp(
	IN DAPL_IA *ia_ptr,
	IN DAPL_SP *sp_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	dapl_llist_add_head(&ia_ptr->rsp_list_head,
	    &sp_ptr->header.ia_list_entry,
	    sp_ptr);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_link_srq
 *
 * Add an srq to the IA structure
 *
 * Input:
 *	ia_ptr
 *	srq_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_link_srq(
	IN DAPL_IA *ia_ptr,
	IN DAPL_SRQ *srq_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	dapl_llist_add_head(&ia_ptr->srq_list_head,
	    &srq_ptr->header.ia_list_entry,
	    srq_ptr);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_unlink_srq
 *
 * Remove an srq from the ia info structure
 *
 * Input:
 *	ia_ptr
 *	srq_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_unlink_srq(
	IN DAPL_IA *ia_ptr,
	IN DAPL_SRQ *srq_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	(void) dapl_llist_remove_entry(&ia_ptr->srq_list_head,
	    &srq_ptr->header.ia_list_entry);
	dapl_os_unlock(&ia_ptr->header.lock);
}

DAT_RETURN
dapls_ia_setup_callbacks(
	IN DAPL_IA *ia_ptr,
	IN DAPL_EVD *async_evd_ptr)
{
	DAT_RETURN dat_status = DAT_SUCCESS;

	/* unaffiliated handler */
	dat_status =
	    dapls_ib_setup_async_callback(
	    ia_ptr,
	    DAPL_ASYNC_UNAFILIATED,
	    NULL,
	    (ib_async_handler_t)dapl_evd_un_async_error_callback,
	    async_evd_ptr);

	if (dat_status != DAT_SUCCESS) {
		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
		    "ib_set_un_async_error_eh failed %d\n",
		    dat_status);
		goto bail;
	}

	/* affiliated cq handler */
	dat_status = dapls_ib_setup_async_callback(
	    ia_ptr,
	    DAPL_ASYNC_CQ_ERROR,
	    NULL,
	    (ib_async_handler_t)dapl_evd_cq_async_error_callback,
	    async_evd_ptr);

	if (dat_status != DAT_SUCCESS) {
		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
		    "ib_set_cq_async_error_eh failed %d\n",
		    dat_status);
		goto bail;
	}

	/* affiliated qp handler */
	dat_status = dapls_ib_setup_async_callback(
	    ia_ptr,
	    DAPL_ASYNC_QP_ERROR,
	    NULL,
	    (ib_async_handler_t)dapl_evd_qp_async_error_callback,
	    ia_ptr);
	if (dat_status != DAT_SUCCESS) {
		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
		    "ib_set_qp_async_error_eh failed %d\n",
		    dat_status);
		goto bail;
	}

bail:
	return (dat_status);
}
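
/*
 * Note: dapls_ia_setup_callbacks and dapls_ia_teardown_callbacks (below)
 * are meant to be used as a pair around the lifetime of the async error
 * EVD.  The flow sketched here is a simplified assumption about the
 * callers, not a verbatim excerpt:
 *
 *	At IA open, once the async EVD exists:
 *		dat_status = dapls_ia_setup_callbacks(ia_ptr, async_evd_ptr);
 *
 *	At IA close, before the async EVD is freed (see the close routines
 *	above):
 *		(void) dapls_ia_teardown_callbacks(ia_ptr);
 */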

DAT_RETURN
dapls_ia_teardown_callbacks(
	IN DAPL_IA *ia_ptr)
{
	DAT_RETURN dat_status = DAT_SUCCESS;

	/* unaffiliated handler */
	dat_status =
	    dapls_ib_setup_async_callback(
	    ia_ptr,
	    DAPL_ASYNC_UNAFILIATED,
	    NULL,
	    (ib_async_handler_t)0,
	    NULL);

	if (dat_status != DAT_SUCCESS) {
		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
		    "ib_set_un_async_error_eh failed %d\n",
		    dat_status);
		goto bail;
	}

	/* affiliated cq handler */
	dat_status = dapls_ib_setup_async_callback(
	    ia_ptr,
	    DAPL_ASYNC_CQ_ERROR,
	    NULL,
	    (ib_async_handler_t)0,
	    NULL);

	if (dat_status != DAT_SUCCESS) {
		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
		    "ib_set_cq_async_error_eh failed %d\n",
		    dat_status);
		goto bail;
	}

	/* affiliated qp handler */
	dat_status = dapls_ib_setup_async_callback(
	    ia_ptr,
	    DAPL_ASYNC_QP_ERROR,
	    NULL,
	    (ib_async_handler_t)0,
	    NULL);
	if (dat_status != DAT_SUCCESS) {
		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
		    "ib_set_qp_async_error_eh failed %d\n",
		    dat_status);
		goto bail;
	}

bail:
	return (dat_status);
}