/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */


/*
 *
 * MODULE: dapl_ia_util.c
 *
 * PURPOSE: Manage IA Info structure
 *
 * $Id: dapl_ia_util.c,v 1.29 2003/07/25 19:24:11 sjs2 Exp $
 */

#include "dapl.h"
#include "dapl_hca_util.h"
#include "dapl_ia_util.h"
#include "dapl_evd_util.h"
#include "dapl_adapter_util.h"

/* Internal prototype */
void dapli_ia_release_hca(
	DAPL_HCA *hca_ptr);


/*
 * dapl_ia_alloc
 *
 * alloc and initialize an IA INFO struct
 *
 * Input:
 *	none
 *
 * Output:
 *	ia_ptr
 *
 * Returns:
 *	none
 *
 */
DAPL_IA *
dapl_ia_alloc(DAT_PROVIDER * provider, DAPL_HCA * hca_ptr)
{
	DAPL_IA *ia_ptr;

	/* Allocate IA */
	ia_ptr = (DAPL_IA *) dapl_os_alloc(sizeof (DAPL_IA));
	if (ia_ptr == NULL) {
		return (NULL);
	}

	/* zero the structure */
	(void) dapl_os_memzero(ia_ptr, sizeof (DAPL_IA));

	/*
	 * initialize the header
	 */
	ia_ptr->header.provider = provider;
	ia_ptr->header.magic = DAPL_MAGIC_IA;
	ia_ptr->header.handle_type = DAT_HANDLE_TYPE_IA;
	ia_ptr->header.owner_ia = ia_ptr;
	ia_ptr->header.user_context.as_64 = 0;
	ia_ptr->header.user_context.as_ptr = NULL;
	dapl_llist_init_entry(&ia_ptr->header.ia_list_entry);
	dapl_os_lock_init(&ia_ptr->header.lock);

	/*
	 * initialize the body
	 */
	ia_ptr->hca_ptr = hca_ptr;
	ia_ptr->async_error_evd = NULL;
	ia_ptr->cleanup_async_error_evd = DAT_FALSE;
	dapl_llist_init_entry(&ia_ptr->hca_ia_list_entry);
	dapl_llist_init_head(&ia_ptr->ep_list_head);
	dapl_llist_init_head(&ia_ptr->lmr_list_head);
	dapl_llist_init_head(&ia_ptr->rmr_list_head);
	dapl_llist_init_head(&ia_ptr->pz_list_head);
	dapl_llist_init_head(&ia_ptr->evd_list_head);
	dapl_llist_init_head(&ia_ptr->cno_list_head);
	dapl_llist_init_head(&ia_ptr->rsp_list_head);
	dapl_llist_init_head(&ia_ptr->psp_list_head);

	/*
	 * initialize the flags
	 */
	ia_ptr->dapl_flags = 0;

	dapl_hca_link_ia(hca_ptr, ia_ptr);

	return (ia_ptr);
}
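/*
 * Usage sketch (illustrative only, not taken from this module): a
 * provider's dat_ia_open path would typically locate/open the HCA,
 * allocate the IA with dapl_ia_alloc(), and bail out if the allocation
 * fails.  The error handling shown here is hypothetical.
 *
 *	DAPL_IA *ia_ptr;
 *
 *	ia_ptr = dapl_ia_alloc(provider, hca_ptr);
 *	if (ia_ptr == NULL) {
 *		// caller releases its HCA reference before returning
 *		return (DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,
 *		    DAT_RESOURCE_MEMORY));
 *	}
 *	*ia_handle = (DAT_IA_HANDLE)ia_ptr;
 */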


/*
 * dapl_ia_abrupt_close
 *
 * Performs an abrupt close of the IA
 *
 * Input:
 *	ia_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	status
 *
 */

DAT_RETURN
dapl_ia_abrupt_close(IN DAPL_IA *ia_ptr)
{
	DAT_RETURN dat_status;
	DAPL_EP *ep_ptr, *next_ep_ptr;
	DAPL_LMR *lmr_ptr, *next_lmr_ptr;
	DAPL_RMR *rmr_ptr, *next_rmr_ptr;
	DAPL_PZ *pz_ptr, *next_pz_ptr;
	DAPL_EVD *evd_ptr, *next_evd_ptr;
	DAPL_CNO *cno_ptr, *next_cno_ptr;
	DAPL_SP *sp_ptr, *next_sp_ptr;	/* for PSP and RSP queues */
	DAPL_HCA *hca_ptr;

	dat_status = DAT_SUCCESS;

	/*
	 * Clear all the data structures associated with the IA.
	 * This must be done in order: (rmr, rsp) before (ep, lmr, psp)
	 * before (pz, evd).
	 *
	 * Note that in all the following we can leave the loop either
	 * when we run out of entries, or when we get back to the head
	 * if we end up skipping an entry.
	 */
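	/*
	 * Each loop below follows the same safe-iteration pattern: fetch
	 * the next entry before freeing the current one, since freeing
	 * unlinks the entry from the IA list.  A generic sketch of the
	 * pattern (obj/<obj> are placeholders for the object type each
	 * loop handles, not real identifiers):
	 *
	 *	obj_ptr = (dapl_llist_is_empty(list_head)
	 *	    ? NULL : dapl_llist_peek_head(list_head));
	 *	while (obj_ptr != NULL) {
	 *		next_ptr = dapl_llist_next_entry(list_head,
	 *		    &obj_ptr->header.ia_list_entry);
	 *		(void) dapl_<obj>_free(obj_ptr);  // log on failure
	 *		obj_ptr = next_ptr;
	 *	}
	 */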

	rmr_ptr = (dapl_llist_is_empty(&ia_ptr->rmr_list_head)
	    ? NULL : dapl_llist_peek_head(&ia_ptr->rmr_list_head));
	while (rmr_ptr != NULL) {
		next_rmr_ptr = dapl_llist_next_entry(&ia_ptr->rmr_list_head,
		    &rmr_ptr->header.ia_list_entry);
		dat_status = dapl_rmr_free(rmr_ptr);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_WARN,
			    "ia_close(ABRUPT): rmr_free(%p) returns %x\n",
			    rmr_ptr,
			    dat_status);
		}
		rmr_ptr = next_rmr_ptr;
	}

	sp_ptr = (dapl_llist_is_empty(&ia_ptr->rsp_list_head)
	    ? NULL : dapl_llist_peek_head(&ia_ptr->rsp_list_head));
	while (sp_ptr != NULL) {
		next_sp_ptr = dapl_llist_next_entry(&ia_ptr->rsp_list_head,
		    &sp_ptr->header.ia_list_entry);
		dat_status = dapl_rsp_free(sp_ptr);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_WARN,
			    "ia_close(ABRUPT): rsp_free(%p) returns %x\n",
			    sp_ptr,
			    dat_status);
		}
		sp_ptr = next_sp_ptr;
	}

	ep_ptr = (dapl_llist_is_empty(&ia_ptr->ep_list_head)
	    ? NULL : dapl_llist_peek_head(&ia_ptr->ep_list_head));
	while (ep_ptr != NULL) {
		next_ep_ptr = dapl_llist_next_entry(&ia_ptr->ep_list_head,
		    &ep_ptr->header.ia_list_entry);
		dat_status = dapl_ep_disconnect(ep_ptr, DAT_CLOSE_ABRUPT_FLAG);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_WARN,
			    "ia_close(ABRUPT): ep_disconnect(%p) returns %x\n",
			    ep_ptr,
			    dat_status);
		}
		dat_status = dapl_ep_free(ep_ptr);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_WARN,
			    "ia_close(ABRUPT): ep_free(%p) returns %x\n",
			    ep_ptr,
			    dat_status);
		}
		ep_ptr = next_ep_ptr;
	}

	lmr_ptr = (dapl_llist_is_empty(&ia_ptr->lmr_list_head)
	    ? NULL : dapl_llist_peek_head(&ia_ptr->lmr_list_head));
	while (lmr_ptr != NULL) {
		next_lmr_ptr = dapl_llist_next_entry(&ia_ptr->lmr_list_head,
		    &lmr_ptr->header.ia_list_entry);
		dat_status = dapl_lmr_free(lmr_ptr);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_WARN,
			    "ia_close(ABRUPT): lmr_free(%p) returns %x\n",
			    lmr_ptr,
			    dat_status);
		}
		lmr_ptr = next_lmr_ptr;
	}

	sp_ptr = (dapl_llist_is_empty(&ia_ptr->psp_list_head)
	    ? NULL : dapl_llist_peek_head(&ia_ptr->psp_list_head));
	while (sp_ptr != NULL) {
		next_sp_ptr = dapl_llist_next_entry(&ia_ptr->psp_list_head,
		    &sp_ptr->header.ia_list_entry);
		dat_status = dapl_psp_free(sp_ptr);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_WARN,
			    "ia_close(ABRUPT): psp_free(%p) returns %x\n",
			    sp_ptr,
			    dat_status);
		}
		sp_ptr = next_sp_ptr;
	}

	pz_ptr = (dapl_llist_is_empty(&ia_ptr->pz_list_head)
	    ? NULL : dapl_llist_peek_head(&ia_ptr->pz_list_head));
	while (pz_ptr != NULL) {
		next_pz_ptr = dapl_llist_next_entry(&ia_ptr->pz_list_head,
		    &pz_ptr->header.ia_list_entry);
		dat_status = dapl_pz_free(pz_ptr);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_WARN,
			    "ia_close(ABRUPT): pz_free(%p) returns %x\n",
			    pz_ptr,
			    dat_status);
		}
		pz_ptr = next_pz_ptr;
	}

	/*
	 * EVDs are tricky; we want to release all except for the async
	 * EVD. That EVD needs to stick around until after we close the
	 * HCA, to accept any async events that occur. So we cycle through
	 * the list with dapl_llist_next_entry instead of dapl_llist_is_empty.
	 */
	evd_ptr = (dapl_llist_is_empty(&ia_ptr->evd_list_head)
	    ? NULL : dapl_llist_peek_head(&ia_ptr->evd_list_head));
	while (evd_ptr != NULL) {
		next_evd_ptr = dapl_llist_next_entry(&ia_ptr->evd_list_head,
		    &evd_ptr->header.ia_list_entry);
		if (evd_ptr == ia_ptr->async_error_evd) {
			/*
			 * Don't delete the EVD, but break any CNO
			 * connections.
			 */
			(void) dapl_evd_disable(evd_ptr);
			(void) dapl_evd_modify_cno(evd_ptr, DAT_HANDLE_NULL);
		} else {
			/* it isn't the async EVD; delete it. */
			dat_status = dapl_evd_free(evd_ptr);
			if (dat_status != DAT_SUCCESS) {
				dapl_dbg_log(DAPL_DBG_TYPE_WARN,
				    "ia_close(ABRUPT): evd_free(%p) "
				    "returns %x\n",
				    evd_ptr,
				    dat_status);
			}
		}
		evd_ptr = next_evd_ptr;
	}

	cno_ptr = (dapl_llist_is_empty(&ia_ptr->cno_list_head)
	    ? NULL : dapl_llist_peek_head(&ia_ptr->cno_list_head));
	while (cno_ptr != NULL) {
		next_cno_ptr = dapl_llist_next_entry(&ia_ptr->cno_list_head,
		    &cno_ptr->header.ia_list_entry);
		dat_status = dapl_cno_free(cno_ptr);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_WARN,
			    "ia_close(ABRUPT): cno_free(%p) returns %x\n",
			    cno_ptr,
			    dat_status);
		}
		cno_ptr = next_cno_ptr;
	}

	hca_ptr = ia_ptr->hca_ptr;

	/*
	 * Free the async EVD, shutting down callbacks from the HCA.
	 */
	if (ia_ptr->async_error_evd &&
	    (DAT_TRUE == ia_ptr->cleanup_async_error_evd)) {
		dat_status = dapls_ia_teardown_callbacks(ia_ptr);

		hca_ptr->async_evd = NULL; /* It was our async EVD; nuke it. */

		dapl_os_atomic_dec(&ia_ptr->async_error_evd->evd_ref_count);
		dat_status = dapl_evd_free(ia_ptr->async_error_evd);

		if (DAT_SUCCESS != dat_status) {
			dapl_dbg_log(DAPL_DBG_TYPE_WARN,
			    "ia_close(ABRUPT): evd_free(%p) returns %x\n",
			    ia_ptr->async_error_evd,
			    dat_status);
		}

		ia_ptr->async_error_evd = NULL;
	}

	/*
	 * Release our reference on the hca_handle. If we are the last
	 * one, close it.
	 */
	dapli_ia_release_hca(hca_ptr);

	dapls_ia_free(ia_ptr);

	return (DAT_SUCCESS);	/* Abrupt close can't fail. */
}


/*
 * dapl_ia_graceful_close
 *
 * Performs a graceful close of the IA
 *
 * Input:
 *	ia_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	status
 *
 */

DAT_RETURN
dapl_ia_graceful_close(IN DAPL_IA *ia_ptr)
{
	DAT_RETURN dat_status;
	DAT_RETURN cur_dat_status;
	DAPL_EVD *evd_ptr;
	DAPL_LLIST_ENTRY *entry;
	DAPL_HCA *hca_ptr;

	dat_status = DAT_SUCCESS;

	if (!dapl_llist_is_empty(&ia_ptr->rmr_list_head) ||
	    !dapl_llist_is_empty(&ia_ptr->rsp_list_head) ||
	    !dapl_llist_is_empty(&ia_ptr->ep_list_head) ||
	    !dapl_llist_is_empty(&ia_ptr->lmr_list_head) ||
	    !dapl_llist_is_empty(&ia_ptr->psp_list_head) ||
	    !dapl_llist_is_empty(&ia_ptr->pz_list_head)) {
		dat_status = DAT_ERROR(DAT_INVALID_STATE,
		    DAT_INVALID_STATE_IA_IN_USE);
		goto bail;
	}

	/*
	 * If the async evd does not need to be cleaned up
	 * (i.e. it was not created by dapl_ia_open),
	 * then the evd list should be empty.
	 */
	if (DAT_FALSE == ia_ptr->cleanup_async_error_evd) {
		if (!dapl_llist_is_empty(&ia_ptr->evd_list_head)) {
			dat_status = DAT_ERROR(DAT_INVALID_STATE,
			    DAT_INVALID_STATE_IA_IN_USE);
			goto bail;
		}
	} else {
		/*
		 * Otherwise the async evd should be the only evd in
		 * the list.
		 */
		evd_ptr = (DAPL_EVD *)
		    dapl_llist_peek_head(&ia_ptr->evd_list_head);

		if (!(evd_ptr->evd_flags & DAT_EVD_ASYNC_FLAG)) {
			dat_status = DAT_ERROR(DAT_INVALID_STATE,
			    DAT_INVALID_STATE_IA_IN_USE);
			goto bail;
		}

		entry = ia_ptr->evd_list_head;

		/* if the async evd is not the only element in the list */
		if (entry->blink != entry->flink) {
			dat_status = DAT_ERROR(DAT_INVALID_STATE,
			    DAT_INVALID_STATE_IA_IN_USE);
			goto bail;
		}

		/*
		 * If the async evd has a non-unary ref count (i.e. it's
		 * in use by someone besides us), the IA is still in use.
		 */
		if (evd_ptr->evd_ref_count != 1) {
			dat_status = DAT_ERROR(DAT_INVALID_STATE,
			    DAT_INVALID_STATE_IA_IN_USE);
			goto bail;
		}
	}

	/*
	 * We've validated the call; now we can start the teardown.
	 * Because we're in the IA close routine, we're safe from races with
	 * DAPL consumers on this IA (operate/destroy races are disallowed in
	 * DAPL).
	 */
	hca_ptr = ia_ptr->hca_ptr;

	/* Tear down the async EVD if needed, first shutting down callbacks. */
	if (ia_ptr->async_error_evd &&
	    (DAT_TRUE == ia_ptr->cleanup_async_error_evd)) {
		cur_dat_status = dapls_ia_teardown_callbacks(ia_ptr);
		if (DAT_SUCCESS != cur_dat_status) {
			dat_status = cur_dat_status;
		}
		hca_ptr->async_evd = NULL;
		dapl_os_atomic_dec(&ia_ptr->async_error_evd->evd_ref_count);
		cur_dat_status = dapl_evd_free(ia_ptr->async_error_evd);
		if (DAT_SUCCESS != cur_dat_status) {
			dat_status = cur_dat_status;
		}

		ia_ptr->async_error_evd = NULL;
	}

	dapli_ia_release_hca(hca_ptr);

	dapls_ia_free(ia_ptr);

bail:
	return (dat_status);
}
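/*
 * Consumer-side sketch (illustrative only, not part of this module): at the
 * DAT API level a graceful close fails with DAT_INVALID_STATE while child
 * objects are still linked to the IA, and a consumer that just wants
 * everything torn down can fall back to an abrupt close.
 *
 *	if (dat_ia_close(ia_handle, DAT_CLOSE_GRACEFUL_FLAG) != DAT_SUCCESS) {
 *		// resources still linked to the IA; force the teardown
 *		(void) dat_ia_close(ia_handle, DAT_CLOSE_ABRUPT_FLAG);
 *	}
 */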

/*
 * Release a reference on the HCA handle. If it is 0, close the
 * handle. Manipulate under lock to prevent races with threads trying to
 * open the HCA.
 */
void
dapli_ia_release_hca(
	DAPL_HCA *hca_ptr)
{
	dapl_os_lock(&hca_ptr->lock);
	dapl_os_atomic_dec(&hca_ptr->handle_ref_count);
	if (hca_ptr->handle_ref_count == 0) {
		DAT_RETURN dat_status;

		/*
		 * Get rid of the cqd associated with the hca.
		 * Print out instead of status return as this routine
		 * shouldn't fail.
		 */
		dat_status = dapls_ib_cqd_destroy(hca_ptr);
		if (dat_status != DAT_SUCCESS) {
			dapl_dbg_log(DAPL_DBG_TYPE_ERR,
			    "ERR: Cannot free CQD: err %x\n", dat_status);
		}

		(void) dapls_ib_close_hca(hca_ptr->ib_hca_handle);
		hca_ptr->ib_hca_handle = IB_INVALID_HANDLE;
	}
	dapl_os_unlock(&hca_ptr->lock);
}
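/*
 * For context, the reference released above is assumed to be taken by the
 * IA open path under the same hca_ptr->lock, roughly along these lines
 * (a sketch of the expected pairing, not the actual dapl_ia_open code):
 *
 *	dapl_os_lock(&hca_ptr->lock);
 *	if (hca_ptr->handle_ref_count == 0) {
 *		// first opener brings up hca_ptr->ib_hca_handle
 *		// (via the provider's dapls_ib_open_hca())
 *	}
 *	dapl_os_atomic_inc(&hca_ptr->handle_ref_count);
 *	dapl_os_unlock(&hca_ptr->lock);
 */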


/*
 * dapls_ia_free
 *
 * free an IA INFO struct
 *
 * Input:
 *	ia_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapls_ia_free(DAPL_IA *ia_ptr)
{
	dapl_os_assert(ia_ptr->header.magic == DAPL_MAGIC_IA);

	dapl_os_assert(ia_ptr->async_error_evd == NULL);
	dapl_os_assert(dapl_llist_is_empty(&ia_ptr->lmr_list_head));
	dapl_os_assert(dapl_llist_is_empty(&ia_ptr->rmr_list_head));
	dapl_os_assert(dapl_llist_is_empty(&ia_ptr->ep_list_head));
	dapl_os_assert(dapl_llist_is_empty(&ia_ptr->evd_list_head));
	dapl_os_assert(dapl_llist_is_empty(&ia_ptr->cno_list_head));
	dapl_os_assert(dapl_llist_is_empty(&ia_ptr->psp_list_head));
	dapl_os_assert(dapl_llist_is_empty(&ia_ptr->rsp_list_head));

	/*
	 * deinitialize the header
	 */
	dapl_hca_unlink_ia(ia_ptr->hca_ptr, ia_ptr);
	/* reset magic to prevent reuse */
	ia_ptr->header.magic = DAPL_MAGIC_INVALID;
	dapl_os_lock_destroy(&ia_ptr->header.lock);

	dapl_os_free(ia_ptr, sizeof (DAPL_IA));
}

/*
 * dapl_ia_link_ep
 *
 * Add an ep to the IA structure
 *
 * Input:
 *	ia_ptr
 *	ep_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_link_ep(
	IN DAPL_IA *ia_ptr,
	IN DAPL_EP *ep_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	dapl_llist_add_head(&ia_ptr->ep_list_head,
	    &ep_ptr->header.ia_list_entry,
	    ep_ptr);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_unlink_ep
 *
 * Remove an ep from the ia info structure
 *
 * Input:
 *	ia_ptr
 *	ep_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_unlink_ep(
	IN DAPL_IA *ia_ptr,
	IN DAPL_EP *ep_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	(void) dapl_llist_remove_entry(&ia_ptr->ep_list_head,
	    &ep_ptr->header.ia_list_entry);
	dapl_os_unlock(&ia_ptr->header.lock);
}
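/*
 * The link/unlink helpers above and below all follow the same pattern:
 * an object's create path links it onto the owning IA, and its free path
 * unlinks it, always under the IA header lock.  A hedged sketch of a
 * typical caller (abbreviated; not the actual dapl_ep_create/dapl_ep_free
 * code):
 *
 *	// create path
 *	ep_ptr = dapl_ep_alloc(ia_ptr, ep_attr);
 *	...
 *	dapl_ia_link_ep(ia_ptr, ep_ptr);
 *
 *	// destroy path
 *	dapl_ia_unlink_ep(ia_ptr, ep_ptr);
 *	dapl_ep_dealloc(ep_ptr);
 */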

/*
 * dapl_ia_link_lmr
 *
 * Add an lmr to the IA structure
 *
 * Input:
 *	ia_ptr
 *	lmr_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_link_lmr(
	IN DAPL_IA *ia_ptr,
	IN DAPL_LMR *lmr_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	dapl_llist_add_head(&ia_ptr->lmr_list_head,
	    &lmr_ptr->header.ia_list_entry,
	    lmr_ptr);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_unlink_lmr
 *
 * Remove an lmr from the ia info structure
 *
 * Input:
 *	ia_ptr
 *	lmr_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_unlink_lmr(
	IN DAPL_IA *ia_ptr,
	IN DAPL_LMR *lmr_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	(void) dapl_llist_remove_entry(&ia_ptr->lmr_list_head,
	    &lmr_ptr->header.ia_list_entry);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_link_rmr
 *
 * Add an rmr to the IA structure
 *
 * Input:
 *	ia_ptr
 *	rmr_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_link_rmr(
	IN DAPL_IA *ia_ptr,
	IN DAPL_RMR *rmr_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	dapl_llist_add_head(&ia_ptr->rmr_list_head,
	    &rmr_ptr->header.ia_list_entry,
	    rmr_ptr);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_unlink_rmr
 *
 * Remove an rmr from the ia info structure
 *
 * Input:
 *	ia_ptr
 *	rmr_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_unlink_rmr(
	IN DAPL_IA *ia_ptr,
	IN DAPL_RMR *rmr_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	(void) dapl_llist_remove_entry(&ia_ptr->rmr_list_head,
	    &rmr_ptr->header.ia_list_entry);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_link_pz
 *
 * Add a pz to the IA structure
 *
 * Input:
 *	ia_ptr
 *	pz_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_link_pz(
	IN DAPL_IA *ia_ptr,
	IN DAPL_PZ *pz_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	dapl_llist_add_head(&ia_ptr->pz_list_head,
	    &pz_ptr->header.ia_list_entry,
	    pz_ptr);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_unlink_pz
 *
 * Remove a pz from the ia info structure
 *
 * Input:
 *	ia_ptr
 *	pz_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_unlink_pz(
	IN DAPL_IA *ia_ptr,
	IN DAPL_PZ *pz_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	(void) dapl_llist_remove_entry(&ia_ptr->pz_list_head,
	    &pz_ptr->header.ia_list_entry);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_link_evd
 *
 * Add an evd to the IA structure
 *
 * Input:
 *	ia_ptr
 *	evd_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_link_evd(
	IN DAPL_IA *ia_ptr,
	IN DAPL_EVD *evd_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	dapl_llist_add_head(&ia_ptr->evd_list_head,
	    &evd_ptr->header.ia_list_entry,
	    evd_ptr);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_unlink_evd
 *
 * Remove an evd from the ia info structure
 *
 * Input:
 *	ia_ptr
 *	evd_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_unlink_evd(
	IN DAPL_IA *ia_ptr,
	IN DAPL_EVD *evd_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	(void) dapl_llist_remove_entry(&ia_ptr->evd_list_head,
	    &evd_ptr->header.ia_list_entry);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_link_cno
 *
 * Add a cno to the IA structure
 *
 * Input:
 *	ia_ptr
 *	cno_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_link_cno(
	IN DAPL_IA *ia_ptr,
	IN DAPL_CNO *cno_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	dapl_llist_add_head(&ia_ptr->cno_list_head,
	    &cno_ptr->header.ia_list_entry,
	    cno_ptr);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_unlink_cno
 *
 * Remove a cno from the ia info structure
 *
 * Input:
 *	ia_ptr
 *	cno_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_unlink_cno(
	IN DAPL_IA *ia_ptr,
	IN DAPL_CNO *cno_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	(void) dapl_llist_remove_entry(&ia_ptr->cno_list_head,
	    &cno_ptr->header.ia_list_entry);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_link_psp
 *
 * Add a psp to the IA structure
 *
 * Input:
 *	ia_ptr
 *	sp_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_link_psp(
	IN DAPL_IA *ia_ptr,
	IN DAPL_SP *sp_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	dapl_llist_add_head(&ia_ptr->psp_list_head,
	    &sp_ptr->header.ia_list_entry,
	    sp_ptr);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapls_ia_unlink_sp
 *
 * Remove an sp from the appropriate ia rsp or psp queue
 *
 * Input:
 *	ia_ptr
 *	sp_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapls_ia_unlink_sp(
	IN DAPL_IA *ia_ptr,
	IN DAPL_SP *sp_ptr)
{
	DAPL_LLIST_HEAD *list_head;

	if (sp_ptr->header.handle_type == DAT_HANDLE_TYPE_PSP) {
		list_head = &ia_ptr->psp_list_head;
	} else {
		dapl_os_assert(sp_ptr->header.handle_type ==
		    DAT_HANDLE_TYPE_RSP);
		list_head = &ia_ptr->rsp_list_head;
	}

	dapl_os_lock(&ia_ptr->header.lock);
	(void) dapl_llist_remove_entry(list_head,
	    &sp_ptr->header.ia_list_entry);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapls_ia_sp_search
 *
 * Find an RSP or PSP on the IA list with a matching conn_qual value
 *
 * Input:
 *	ia_ptr
 *	conn_qual
 *	is_psp
 *
 * Output:
 *	none
 *
 * Returns:
 *	matching DAPL_SP, or NULL if none is found
 *
 */
DAPL_SP *
dapls_ia_sp_search(
	IN DAPL_IA *ia_ptr,
	IN DAT_CONN_QUAL conn_qual,
	IN DAT_BOOLEAN is_psp)
{
	DAPL_SP *sp_ptr;
	DAPL_LLIST_HEAD *list_head;

	if (is_psp) {
		list_head = &ia_ptr->psp_list_head;
	} else {
		list_head = &ia_ptr->rsp_list_head;
	}

	dapl_os_lock(&ia_ptr->header.lock);

	sp_ptr = (dapl_llist_is_empty(list_head) ? NULL :
	    dapl_llist_peek_head(list_head));

	while (sp_ptr != NULL) {
		if (sp_ptr->conn_qual == conn_qual) {
			break;
		}
		sp_ptr = dapl_llist_next_entry(list_head,
		    &sp_ptr->header.ia_list_entry);
	}

	dapl_os_unlock(&ia_ptr->header.lock);

	return (sp_ptr);
}
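/*
 * Usage sketch (illustrative only, not taken from the connection manager
 * code): a passive-side request handler might use dapls_ia_sp_search() to
 * locate the listening PSP registered for the connection qualifier carried
 * in an incoming connection request.
 *
 *	DAPL_SP *sp_ptr;
 *
 *	sp_ptr = dapls_ia_sp_search(ia_ptr, requested_conn_qual, DAT_TRUE);
 *	if (sp_ptr == NULL) {
 *		// no listener on this qualifier; reject the request
 *	}
 */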


/*
 * dapl_ia_link_rsp
 *
 * Add an rsp to the IA structure
 *
 * Input:
 *	ia_ptr
 *	sp_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_link_rsp(
	IN DAPL_IA *ia_ptr,
	IN DAPL_SP *sp_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	dapl_llist_add_head(&ia_ptr->rsp_list_head,
	    &sp_ptr->header.ia_list_entry,
	    sp_ptr);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_link_srq
 *
 * Add an srq to the IA structure
 *
 * Input:
 *	ia_ptr
 *	srq_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_link_srq(
	IN DAPL_IA *ia_ptr,
	IN DAPL_SRQ *srq_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	dapl_llist_add_head(&ia_ptr->srq_list_head,
	    &srq_ptr->header.ia_list_entry,
	    srq_ptr);
	dapl_os_unlock(&ia_ptr->header.lock);
}

/*
 * dapl_ia_unlink_srq
 *
 * Remove an srq from the ia info structure
 *
 * Input:
 *	ia_ptr
 *	srq_ptr
 *
 * Output:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
dapl_ia_unlink_srq(
	IN DAPL_IA *ia_ptr,
	IN DAPL_SRQ *srq_ptr)
{
	dapl_os_lock(&ia_ptr->header.lock);
	(void) dapl_llist_remove_entry(&ia_ptr->srq_list_head,
	    &srq_ptr->header.ia_list_entry);
	dapl_os_unlock(&ia_ptr->header.lock);
}

DAT_RETURN
dapls_ia_setup_callbacks(
	IN DAPL_IA *ia_ptr,
	IN DAPL_EVD *async_evd_ptr)
{
	DAT_RETURN dat_status = DAT_SUCCESS;

#if 0
	/*
	 * The current implementation of dapls_ib_setup_async_callback()
	 * does nothing and returns DAT_SUCCESS, yet it is declared to
	 * expect function pointers with different signatures.  We leave
	 * this code block disabled until dapls_ib_setup_async_callback()
	 * is implemented.
	 */
	/* unaffiliated handler */
	dat_status =
	    dapls_ib_setup_async_callback(
	    ia_ptr,
	    DAPL_ASYNC_UNAFILIATED,
	    NULL,
	    (ib_async_handler_t)dapl_evd_un_async_error_callback,
	    async_evd_ptr);

	if (dat_status != DAT_SUCCESS) {
		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
		    "ib_set_un_async_error_eh failed %d\n",
		    dat_status);
		goto bail;
	}

	/* affiliated cq handler */
	dat_status = dapls_ib_setup_async_callback(
	    ia_ptr,
	    DAPL_ASYNC_CQ_ERROR,
	    NULL,
	    (ib_async_handler_t)dapl_evd_cq_async_error_callback,
	    async_evd_ptr);

	if (dat_status != DAT_SUCCESS) {
		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
		    "ib_set_cq_async_error_eh failed %d\n",
		    dat_status);
		goto bail;
	}

	/* affiliated qp handler */
	dat_status = dapls_ib_setup_async_callback(
	    ia_ptr,
	    DAPL_ASYNC_QP_ERROR,
	    NULL,
	    (ib_async_handler_t)dapl_evd_qp_async_error_callback,
	    ia_ptr);
	if (dat_status != DAT_SUCCESS) {
		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
		    "ib_set_qp_async_error_eh failed %d\n",
		    dat_status);
		goto bail;
	}
bail:
#endif
	return (dat_status);
}

DAT_RETURN
dapls_ia_teardown_callbacks(
	IN DAPL_IA *ia_ptr)
{
	DAT_RETURN dat_status = DAT_SUCCESS;

	/* unaffiliated handler */
	dat_status =
	    dapls_ib_setup_async_callback(
	    ia_ptr,
	    DAPL_ASYNC_UNAFILIATED,
	    NULL,
	    (ib_async_handler_t)0,
	    NULL);

	if (dat_status != DAT_SUCCESS) {
		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
		    "ib_set_un_async_error_eh failed %d\n",
		    dat_status);
		goto bail;
	}

	/* affiliated cq handler */
	dat_status = dapls_ib_setup_async_callback(
	    ia_ptr,
	    DAPL_ASYNC_CQ_ERROR,
	    NULL,
	    (ib_async_handler_t)0,
	    NULL);

	if (dat_status != DAT_SUCCESS) {
		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
		    "ib_set_cq_async_error_eh failed %d\n",
		    dat_status);
		goto bail;
	}

	/* affiliated qp handler */
	dat_status = dapls_ib_setup_async_callback(
	    ia_ptr,
	    DAPL_ASYNC_QP_ERROR,
	    NULL,
	    (ib_async_handler_t)0,
	    NULL);
	if (dat_status != DAT_SUCCESS) {
		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
		    "ib_set_qp_async_error_eh failed %d\n",
		    dat_status);
		goto bail;
	}

bail:
	return (dat_status);
}