1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * sol_cma is part of the sol_ofs misc module. This file
28 * implements the communication management API defined in
29 * "rdma_cm.h". In-kernel consumers of the "rdma_cm.h" API
30 * should link against the sol_ofs misc module using:
31 *	-N misc/sol_ofs
32 *
33 * The Solaris uCMA (sol_ucma) driver is currently the only
34 * consumer of sol_cma.
35 */
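/*
 * Illustrative sketch only: the overall CMID life cycle as seen by an
 * in-kernel consumer. Names such as my_hdlr, my_ctx, src_sa, dst_sa
 * and conn_param are placeholders; event waiting and all error
 * handling are omitted:
 *
 *	id = rdma_create_id(my_hdlr, my_ctx, RDMA_PS_TCP);
 *	(void) rdma_resolve_addr(id, src_sa, dst_sa, timeout_ms);
 *	(void) rdma_resolve_route(id, timeout_ms);
 *	(void) rdma_connect(id, &conn_param);	/* or rdma_listen() */
 *	(void) rdma_disconnect(id);
 *	rdma_destroy_id(id);
 */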
36
37 /* Standard driver includes */
38 #include <sys/types.h>
39 #include <sys/modctl.h>
40 #include <sys/errno.h>
41 #include <sys/stat.h>
42 #include <sys/ddi.h>
43 #include <sys/sunddi.h>
44 #include <sys/modctl.h>
45
46 #include <sys/ib/clients/of/ofed_kernel.h>
47 #include <sys/ib/clients/of/rdma/ib_addr.h>
48
49 #include <sys/ib/clients/of/sol_ofs/sol_cma.h>
50 #include <sys/ib/clients/of/sol_ofs/sol_kverb_impl.h>
51
52 /* Modload support */
53 static struct modlmisc sol_ofs_modmisc = {
54 &mod_miscops,
55 "Solaris OFS Misc module"
56 };
57
58 struct modlinkage sol_ofs_modlinkage = {
59 MODREV_1,
60 (void *)&sol_ofs_modmisc,
61 NULL
62 };
63
64 static ib_client_t *sol_cma_ib_client;
65 sol_cma_glbl_listen_t sol_cma_glbl_listen;
66 avl_tree_t sol_cma_glbl_listen_tree;
67
68 static void sol_cma_add_dev(struct ib_device *);
69 static void sol_cma_rem_dev(struct ib_device *);
70
71 static llist_head_t sol_cma_dev_list = LLIST_HEAD_INIT(sol_cma_dev_list);
72 kmutex_t sol_cma_dev_mutex;
73 kmutex_t sol_cma_glob_mutex;
74
75 char *sol_rdmacm_dbg_str = "sol_rdmacm";
76 char *sol_ofs_dbg_str = "sol_ofs_mod";
77
78 /*
79 * Local function prototypes.
80 */
81 int sol_cma_req_cmid_cmp(const void *p1, const void *p2);
82 int sol_cma_cmid_cmp(const void *p1, const void *p2);
83 int sol_cma_svc_cmp(const void *, const void *);
84
85 static struct rdma_cm_id *cma_alloc_chan(rdma_cm_event_handler,
86 void *, enum rdma_port_space);
87 static void cma_set_chan_state(sol_cma_chan_t *, cma_chan_state_t);
88 static int cma_cas_chan_state(sol_cma_chan_t *, cma_chan_state_t,
89 cma_chan_state_t);
90 static void cma_free_listen_list(struct rdma_cm_id *);
91 static void cma_destroy_id(struct rdma_cm_id *);
92 static void cma_handle_nomore_events(sol_cma_chan_t *);
93
94 extern void sol_ofs_dprintf_init();
95 extern void sol_ofs_dprintf_fini();
96
97 cma_chan_state_t cma_get_chan_state(sol_cma_chan_t *);
98 extern int ibcma_init_root_chan(sol_cma_chan_t *, sol_cma_glbl_listen_t *);
99 extern int ibcma_fini_root_chan(sol_cma_chan_t *);
100 extern void ibcma_copy_srv_hdl(sol_cma_chan_t *, sol_cma_glbl_listen_t *);
101 extern int ibcma_fini_ep_chan(sol_cma_chan_t *);
102 extern uint64_t ibcma_init_root_sid(sol_cma_chan_t *);
103 extern void rdma_ib_destroy_id(struct rdma_cm_id *);
104 extern int rdma_ib_bind_addr(struct rdma_cm_id *, struct sockaddr *);
105 extern int rdma_ib_resolve_addr(struct rdma_cm_id *, struct sockaddr *,
106 struct sockaddr *, int);
107 extern int rdma_ib_resolve_route(struct rdma_cm_id *, int);
108 extern int rdma_ib_init_qp_attr(struct rdma_cm_id *, struct ib_qp_attr *,
109 int *);
110 extern int rdma_ib_connect(struct rdma_cm_id *, struct rdma_conn_param *);
111 extern int rdma_ib_listen(struct rdma_cm_id *, int);
112 extern int rdma_ib_accept(struct rdma_cm_id *, struct rdma_conn_param *);
113 extern int rdma_ib_reject(struct rdma_cm_id *, const void *, uint8_t);
114 extern int rdma_ib_disconnect(struct rdma_cm_id *);
115 extern int rdma_ib_join_multicast(struct rdma_cm_id *, struct sockaddr *,
116 void *);
117 extern void rdma_ib_leave_multicast(struct rdma_cm_id *, struct sockaddr *);
118
119 int
120 _init(void)
121 {
122 int err;
123
124 sol_ofs_dprintf_init();
125 SOL_OFS_DPRINTF_L5(sol_ofs_dbg_str, "_init()");
126
127 mutex_init(&sol_cma_glob_mutex, NULL, MUTEX_DRIVER, NULL);
128 mutex_init(&sol_cma_dev_mutex, NULL, MUTEX_DRIVER, NULL);
129 avl_create(&sol_cma_glbl_listen_tree,
130 sol_cma_svc_cmp, sizeof (sol_cma_glbl_listen_t),
131 offsetof(sol_cma_glbl_listen_t, cma_listen_node));
132
133 sol_cma_ib_client = kmem_zalloc(sizeof (ib_client_t), KM_NOSLEEP);
134 if (!sol_cma_ib_client) {
135 SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str,
136 "_init() - mem alloc failed");
137 avl_destroy(&sol_cma_glbl_listen_tree);
138 mutex_destroy(&sol_cma_dev_mutex);
139 mutex_destroy(&sol_cma_glob_mutex);
140 sol_ofs_dprintf_fini();
141 return (ENOMEM);
142 }
143
144 sol_cma_ib_client->name = "sol_ofs";
145 sol_cma_ib_client->add = sol_cma_add_dev;
146 sol_cma_ib_client->remove = sol_cma_rem_dev;
147 sol_cma_ib_client->dip = NULL;
148
149 if ((err = ib_register_client(sol_cma_ib_client)) != 0) {
150 SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str,
151 "_init() ib_register_client() failed with err %d",
152 err);
153 kmem_free(sol_cma_ib_client, sizeof (ib_client_t));
154 avl_destroy(&sol_cma_glbl_listen_tree);
155 mutex_destroy(&sol_cma_dev_mutex);
156 mutex_destroy(&sol_cma_glob_mutex);
157 sol_ofs_dprintf_fini();
158 return (err);
159 }
160
161 if ((err = mod_install(&sol_ofs_modlinkage)) != 0) {
162 SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str,
163 "_init() - mod_install() failed");
164 ib_unregister_client(sol_cma_ib_client);
165 kmem_free(sol_cma_ib_client, sizeof (ib_client_t));
166 avl_destroy(&sol_cma_glbl_listen_tree);
167 mutex_destroy(&sol_cma_dev_mutex);
168 mutex_destroy(&sol_cma_glob_mutex);
169 sol_ofs_dprintf_fini();
170 return (err);
171 }
172
173 SOL_OFS_DPRINTF_L5(sol_ofs_dbg_str, "_init() - ret");
174 return (err);
175 }
176
177 int
178 _fini(void)
179 {
180 int err;
181
182 SOL_OFS_DPRINTF_L5(sol_ofs_dbg_str, "_fini()");
183
184 if (avl_numnodes(&sol_cma_glbl_listen_tree)) {
185 SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str, "_fini - "
186 "listen CMIDs still active");
187 return (EBUSY);
188 }
189 if ((err = mod_remove(&sol_ofs_modlinkage)) != 0) {
190 SOL_OFS_DPRINTF_L3(sol_ofs_dbg_str,
191 "_fini: mod_remove failed");
192 return (err);
193 }
194
195 ib_unregister_client(sol_cma_ib_client);
196 kmem_free(sol_cma_ib_client, sizeof (ib_client_t));
197 avl_destroy(&sol_cma_glbl_listen_tree);
198 mutex_destroy(&sol_cma_dev_mutex);
199 mutex_destroy(&sol_cma_glob_mutex);
200 SOL_OFS_DPRINTF_L5(sol_ofs_dbg_str, "_fini() - ret");
201 sol_ofs_dprintf_fini();
202 return (err);
203 }
204
205 int
206 _info(struct modinfo *modinfop)
207 {
208 return (mod_info(&sol_ofs_modlinkage, modinfop));
209 }
210
211 typedef struct cma_device {
212 kmutex_t cma_mutex;
213 /* Ptr in the global sol_cma_dev_list */
214 llist_head_t cma_list;
215 /* List of listeners for this device */
216 genlist_t cma_epchan_list;
217 struct ib_device *cma_device;
218 uint_t cma_ref_count;
219 enum {
220 SOL_CMA_DEV_ADDED,
221 SOL_CMA_DEV_REM_IN_PROGRESS
222 } cma_dev_state;
223 } cma_device_t;
224
225 static void
226 sol_cma_add_dev(struct ib_device *dev)
227 {
228 cma_device_t *new_device;
229
230 new_device = kmem_zalloc(sizeof (cma_device_t), KM_NOSLEEP);
231 if (!new_device) {
232 SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str, "sol_cma_add_dev() "
233 "alloc failed!!");
234 return;
235 }
236 mutex_init(&new_device->cma_mutex, NULL, MUTEX_DRIVER, NULL);
237 llist_head_init(&new_device->cma_list, new_device);
238 init_genlist(&new_device->cma_epchan_list);
239 new_device->cma_device = dev;
240
241 ib_set_client_data(dev, sol_cma_ib_client, new_device);
242
243 mutex_enter(&sol_cma_dev_mutex);
244 llist_add_tail(&new_device->cma_list, &sol_cma_dev_list);
245 mutex_exit(&sol_cma_dev_mutex);
246 }
247
248 static void
249 sol_cma_rem_dev(struct ib_device *dev)
250 {
251 cma_device_t *rem_device;
252 genlist_entry_t *entry;
253
254 SOL_OFS_DPRINTF_L5(sol_ofs_dbg_str, "sol_rem_dev(%p)", dev);
255
256 rem_device = (cma_device_t *)ib_get_client_data(dev, sol_cma_ib_client);
257 if (!rem_device) {
258 SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str, "sol_cma_rem_dev() "
259 "NULL cma_dev!!");
260 return;
261 }
262
263 mutex_enter(&rem_device->cma_mutex);
264 rem_device->cma_dev_state = SOL_CMA_DEV_REM_IN_PROGRESS;
265 if (rem_device->cma_ref_count) {
266 mutex_exit(&rem_device->cma_mutex);
267 SOL_OFS_DPRINTF_L3(sol_ofs_dbg_str, "sol_cma_rem_dev() "
268 "BUSY cma_dev!!");
269 return;
270 }
271 entry = remove_genlist_head(&rem_device->cma_epchan_list);
272 while (entry) {
273 sol_cma_chan_t *ep_chanp;
274
275 ep_chanp = (sol_cma_chan_t *)entry->data;
276 if (ibcma_fini_ep_chan(ep_chanp) == 0) {
277 genlist_entry_t *entry1;
278 sol_cma_chan_t *root_chanp;
279
280 ASSERT(ep_chanp->chan_listenp);
281 entry1 = ep_chanp->chan_listenp->listen_ep_root_entry;
282 root_chanp = (sol_cma_chan_t *)ep_chanp->listen_root;
283 root_chanp->chan_listenp->listen_eps--;
284 delete_genlist(&root_chanp->chan_listenp->listen_list,
285 entry1);
286
287 kmem_free(ep_chanp, sizeof (sol_cma_chan_t));
288 kmem_free(entry, sizeof (genlist_entry_t));
289 }
290
291 entry = remove_genlist_head(&rem_device->cma_epchan_list);
292 }
293 mutex_exit(&rem_device->cma_mutex);
294
295 mutex_enter(&sol_cma_dev_mutex);
296 llist_del(&rem_device->cma_list);
297 mutex_exit(&sol_cma_dev_mutex);
298
299 kmem_free(rem_device, sizeof (cma_device_t));
300 }
301
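/*
 * Look up the cma_device_t for the HCA with the given node GUID and
 * take a reference on it. Returns NULL if no such device is in the
 * list or if the device is being removed.
 */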
302 struct ib_device *
303 sol_cma_acquire_device(ib_guid_t hca_guid)
304 {
305 llist_head_t *entry;
306 cma_device_t *cma_devp;
307
308 mutex_enter(&sol_cma_dev_mutex);
309 list_for_each(entry, &sol_cma_dev_list) {
310 cma_devp = (cma_device_t *)entry->ptr;
311
312 if (cma_devp->cma_device->node_guid != hca_guid)
313 continue;
314
315 mutex_enter(&cma_devp->cma_mutex);
316 if (cma_devp->cma_dev_state == SOL_CMA_DEV_REM_IN_PROGRESS) {
317 SOL_OFS_DPRINTF_L3(sol_ofs_dbg_str,
318 "sol_cma_acquire_dev() - Device getting removed!!");
319 mutex_exit(&cma_devp->cma_mutex);
320 mutex_exit(&sol_cma_dev_mutex);
321 return (NULL);
322 }
323 cma_devp->cma_ref_count++;
324 mutex_exit(&cma_devp->cma_mutex);
325 mutex_exit(&sol_cma_dev_mutex);
326 return (cma_devp->cma_device);
327
328 }
329 mutex_exit(&sol_cma_dev_mutex);
330 return (NULL);
331 }
332
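/*
 * Drop the reference taken by sol_cma_acquire_device(). If a remove
 * is in progress and this was the last reference, free the
 * cma_device_t here.
 */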
333 static void
334 sol_cma_release_device(struct rdma_cm_id *id)
335 {
336 ib_device_t *device = id->device;
337 llist_head_t *entry;
338 cma_device_t *cma_devp;
339
340 mutex_enter(&sol_cma_dev_mutex);
341 list_for_each(entry, &sol_cma_dev_list) {
342 cma_devp = (cma_device_t *)entry->ptr;
343
344 if (cma_devp->cma_device != device)
345 continue;
346
347 mutex_enter(&cma_devp->cma_mutex);
348 cma_devp->cma_ref_count--;
349 if (cma_devp->cma_dev_state == SOL_CMA_DEV_REM_IN_PROGRESS &&
350 cma_devp->cma_ref_count == 0) {
351 SOL_OFS_DPRINTF_L3(sol_ofs_dbg_str,
352 "sol_cma_release_dev() - Device free removed!!");
353 mutex_exit(&cma_devp->cma_mutex);
354 llist_del(&cma_devp->cma_list);
355 kmem_free(cma_devp, sizeof (cma_device_t));
356 mutex_exit(&sol_cma_dev_mutex);
357 return;
358 }
359 mutex_exit(&cma_devp->cma_mutex);
360 }
361 mutex_exit(&sol_cma_dev_mutex);
362 }
363
364 void
365 sol_cma_add_hca_list(sol_cma_chan_t *ep_chanp, ib_guid_t hca_guid)
366 {
367 llist_head_t *entry;
368 cma_device_t *cma_devp;
369
370 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "add_hca_list(%p, %llx)",
371 ep_chanp, hca_guid);
372 mutex_enter(&sol_cma_dev_mutex);
373 list_for_each(entry, &sol_cma_dev_list) {
374 cma_devp = (cma_device_t *)entry->ptr;
375
376 if ((cma_devp->cma_device)->node_guid != hca_guid)
377 continue;
378
379 mutex_enter(&cma_devp->cma_mutex);
380 ep_chanp->chan_listenp->listen_ep_dev_entry =
381 add_genlist(&cma_devp->cma_epchan_list,
382 (uintptr_t)ep_chanp, NULL);
383 ep_chanp->chan_listenp->listen_ep_device = cma_devp->cma_device;
384 mutex_exit(&cma_devp->cma_mutex);
385 mutex_exit(&sol_cma_dev_mutex);
386 return;
387 }
388 mutex_exit(&sol_cma_dev_mutex);
389 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str, "add_hca_list(%p, %llx): "
390 "No matching HCA in list!!", ep_chanp, hca_guid);
391 }
392
393 /*
394 * rdma_cm.h API functions.
395 */
396 struct rdma_cm_id *
397 rdma_create_id(rdma_cm_event_handler evt_hdlr, void *context,
398 enum rdma_port_space ps)
399 {
400 struct rdma_cm_id *rdma_idp;
401
402 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_create_id(%p, %p, %x)",
403 evt_hdlr, context, ps);
404
405 if (ps != RDMA_PS_TCP && ps != RDMA_PS_UDP && ps != RDMA_PS_IPOIB) {
406 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
407 "rdma_create_id: unsupported protocol %x", ps);
408 return (NULL);
409 }
410
411 rdma_idp = cma_alloc_chan(evt_hdlr, context, ps);
412 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
413 "rdma_create_id : ret %p", rdma_idp);
414
415 return (rdma_idp);
416 }
417
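/*
 * Associate the consumer's IB and/or iWARP client handles with this
 * CMID. At least one of the two handles must be non-NULL.
 */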
418 void
419 rdma_map_id2clnthdl(struct rdma_cm_id *rdma_idp, void *ib_client_hdl,
420 void *iw_client_hdl)
421 {
422 sol_cma_chan_t *chanp = (sol_cma_chan_t *)rdma_idp;
423
424 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
425 "rdma_map_id2clnthdl(%p, %p, %p)",
426 rdma_idp, ib_client_hdl, iw_client_hdl);
427 ASSERT(ib_client_hdl != NULL || iw_client_hdl != NULL);
428 chanp->chan_ib_client_hdl = ib_client_hdl;
429 chanp->chan_iw_client_hdl = iw_client_hdl;
430 }
431
432 void
433 rdma_map_id2qphdl(struct rdma_cm_id *rdma_idp, void *qp_hdl)
434 {
435 sol_cma_chan_t *chanp = (sol_cma_chan_t *)rdma_idp;
436
437 ASSERT(rdma_idp);
438 ASSERT(qp_hdl);
439 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_mapid2qphdl(%p, %p)",
440 rdma_idp, qp_hdl);
441 chanp->chan_qp_hdl = qp_hdl;
442 }
443
444
445 void
446 rdma_destroy_id(struct rdma_cm_id *rdma_idp)
447 {
448 sol_cma_chan_t *chanp, *root_chanp;
449 cma_chan_state_t state;
450 int rc, is_root_cmid, do_wait, is_passive;
451
452 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id(%p)", rdma_idp);
453
454 if (!rdma_idp)
455 return;
456
457 is_root_cmid = do_wait = is_passive = 0;
458
459 chanp = (sol_cma_chan_t *)rdma_idp;
460 root_chanp = (sol_cma_chan_t *)chanp->listen_root;
461 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id(%p), %p",
462 rdma_idp, root_chanp);
463
464 mutex_enter(&chanp->chan_mutex);
465 chanp->chan_cmid_destroy_state |= SOL_CMA_CALLER_CMID_DESTROYED;
466
467 /*
468 * Block the destroy of this CMID while an rdma_resolve_addr() /
469 * rdma_listen() / rdma_resolve_route() call is still in progress.
470 */
471 while (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_API_PROGRESS)
472 cv_wait(&chanp->chan_destroy_cv, &chanp->chan_mutex);
473
474 /* Wait while an event is being notified to the consumer */
475 while (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_EVENT_PROGRESS)
476 cv_wait(&chanp->chan_destroy_cv, &chanp->chan_mutex);
477
478 if (rdma_idp->device)
479 sol_cma_release_device(rdma_idp);
480
481 if (chanp->chan_listenp && chanp->chan_listenp->listen_is_root)
482 is_root_cmid = 1;
483 if (root_chanp == NULL && is_root_cmid == 0)
484 is_passive = 1;
485
486 /*
487 * Skip active side handling for passive CMIDs and for listen
488 * CMIDs for which REQ CMIDs have not been created.
489 */
490 if (is_passive || (is_root_cmid && chanp->chan_req_state !=
491 REQ_CMID_QUEUED)) {
492 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: "
493 "Skipping passive %p, %x, %x", chanp->chan_listenp,
494 is_root_cmid, chanp->chan_req_state);
495 goto skip_passive_handling;
496 }
497
498 /*
499 * destroy_id() was called for a listening CMID while some REQ
500 * CMIDs have not yet been notified to the consumer. Reject
501 * such CMIDs and decrement the count.
502 */
503 if (is_root_cmid && chanp->chan_req_cnt) {
504 sol_cma_chan_t *req_cmid_chan, *next_chan;
505
506 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: "
507 "not notified handling");
508 for (req_cmid_chan = (sol_cma_chan_t *)avl_first(
509 &chanp->chan_req_avl_tree); req_cmid_chan &&
510 chanp->chan_req_cnt; req_cmid_chan = next_chan) {
511 next_chan = AVL_NEXT(
512 &chanp->chan_req_avl_tree, req_cmid_chan);
513 if (req_cmid_chan->chan_req_state ==
514 REQ_CMID_NOTIFIED) {
515 avl_remove(&chanp->chan_req_avl_tree,
516 req_cmid_chan);
517 chanp->chan_req_cnt--;
518 chanp->chan_req_total_cnt--;
519 mutex_exit(&chanp->chan_mutex);
520 mutex_enter(&req_cmid_chan->chan_mutex);
521 req_cmid_chan->chan_req_state =
522 REQ_CMID_SERVER_NONE;
523 if (rdma_idp->ps == RDMA_PS_TCP)
524 cma_set_chan_state(req_cmid_chan,
525 SOL_CMA_CHAN_DESTROY_PENDING);
526 mutex_exit(&req_cmid_chan->chan_mutex);
527 (void) rdma_disconnect(
528 (struct rdma_cm_id *)req_cmid_chan);
529 mutex_enter(&chanp->chan_mutex);
530 if (rdma_idp->ps == RDMA_PS_TCP) {
531 mutex_enter(
532 &req_cmid_chan->chan_mutex);
533 req_cmid_chan->listen_root =
534 rdma_idp;
535 mutex_exit(
536 &req_cmid_chan->chan_mutex);
537 } else {
538 mutex_destroy(
539 &req_cmid_chan->chan_mutex);
540 cv_destroy(
541 &req_cmid_chan->chan_destroy_cv);
542 kmem_free(req_cmid_chan,
543 sizeof (sol_cma_chan_t));
544 }
545 }
546 }
547 }
548
549 /*
550 * destroy_id() was called for either a listening CMID whose REQ
551 * CMIDs have all had destroy_id() called on them, or a REQ CMID
552 * while one more REQ CMID is not yet destroyed. Wait until this
553 * CMID is completely destroyed.
554 */
555 if (is_root_cmid && chanp->chan_req_total_cnt == 0) {
556 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: "
557 "root idp waiting");
558 cma_set_chan_state(chanp, SOL_CMA_CHAN_DESTROY_WAIT);
559 cv_wait(&chanp->chan_destroy_cv, &chanp->chan_mutex);
560 }
561 mutex_exit(&chanp->chan_mutex);
562
563 if (root_chanp)
564 mutex_enter(&root_chanp->chan_mutex);
565 mutex_enter(&chanp->chan_mutex);
566 #ifdef DEBUG
567 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: "
568 "root_idp %p, cnt %x, state %x", root_chanp,
569 root_chanp ? root_chanp->chan_req_total_cnt : 0,
570 root_chanp ? cma_get_chan_state(root_chanp) : 0);
571 #endif
572
573 if (root_chanp && root_chanp->chan_req_total_cnt == 1 &&
574 cma_get_chan_state(root_chanp) == SOL_CMA_CHAN_DESTROY_PENDING)
575 do_wait = 1;
576 if (root_chanp)
577 mutex_exit(&root_chanp->chan_mutex);
578
579 skip_passive_handling :
580 state = cma_get_chan_state(chanp);
581 if (is_root_cmid == 0 && state != SOL_CMA_CHAN_DISCONNECT &&
582 SOL_CMAID_CONNECTED(chanp)) {
583 /*
584 * A connected CM ID has not been disconnected.
585 * Call rdma_disconnect() to disconnect it.
586 */
587 mutex_exit(&chanp->chan_mutex);
588 rc = rdma_disconnect(rdma_idp);
589 if (rc) {
590 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
591 "rdma_destroy_id(%p)- disconnect failed!!",
592 rdma_idp);
593 return;
594 }
595 mutex_enter(&chanp->chan_mutex);
596 if (root_chanp && chanp->listen_root == NULL)
597 chanp->listen_root = (struct rdma_cm_id *)root_chanp;
598 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
599 "rdma_destroy_id(chanp %p, connect %x, ps %x)",
600 chanp, chanp->chan_connect_flag, rdma_idp->ps);
601 if (SOL_CMAID_CONNECTED(chanp)) {
602 if (do_wait) {
603 cma_set_chan_state(chanp,
604 SOL_CMA_CHAN_DESTROY_WAIT);
605 cv_wait(&chanp->chan_destroy_cv,
606 &chanp->chan_mutex);
607 mutex_exit(&chanp->chan_mutex);
608 cma_destroy_id(rdma_idp);
609 } else {
610 cma_set_chan_state(chanp,
611 SOL_CMA_CHAN_DESTROY_PENDING);
612 mutex_exit(&chanp->chan_mutex);
613 }
614 } else {
615 /*
616 * No more callbacks are expected for this CMID.
617 * Free this CMID.
618 */
619 mutex_exit(&chanp->chan_mutex);
620 cma_destroy_id(rdma_idp);
621 }
622 } else if (is_root_cmid == 0 && state ==
623 SOL_CMA_CHAN_DISCONNECT && SOL_CMAID_CONNECTED(chanp)) {
624 /*
625 * The CM ID was connected and a disconnect is in progress.
626 * This CM ID will be freed when the DISCONNECT event is
627 * notified for it.
628 */
629 cma_set_chan_state(chanp, SOL_CMA_CHAN_DESTROY_PENDING);
630 mutex_exit(&chanp->chan_mutex);
631 } else if (state != SOL_CMA_CHAN_DESTROY_PENDING) {
632 /* CM ID, not connected, just free it. */
633 mutex_exit(&chanp->chan_mutex);
634 cma_destroy_id(rdma_idp);
635 } else
636 mutex_exit(&chanp->chan_mutex);
637
638 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: ret");
639 }
640
641 /*
642 * State transitions for Address resolution :
643 * Active Side (Client) :
644 * 1. CREATE_ID-->BIND_ADDR-->RESOLVE_ADDR-->RESOLVE_ROUTE
645 *
646 * Passive Side (Server) :
647 * 2. CREATE_ID-->RESOLVE_ADDR-->RESOLVE_ROUTE
648 * IF_ADDR_ANY can be passed as local address in RESOLVE_ADDR
649 */
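/*
 * Note on completion: rdma_resolve_addr() completes via
 * cma_resolve_addr_callback(), which generates either
 * RDMA_CM_EVENT_ADDR_RESOLVED or RDMA_CM_EVENT_ADDR_ERROR, while
 * rdma_resolve_route() in this implementation generates the
 * RDMA_CM_EVENT_ROUTE_RESOLVED event itself (no separate route
 * lookup is performed here).
 */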
650 int
651 rdma_bind_addr(struct rdma_cm_id *idp, struct sockaddr *addr)
652 {
653 sol_cma_chan_t *chanp;
654 struct rdma_addr *addrp;
655 int ret;
656
657 ASSERT(idp);
658 ASSERT(addr);
659 chanp = (sol_cma_chan_t *)idp;
660 addrp = &(idp->route.addr);
661 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_bind_addr(%p, %p)",
662 idp, addr);
663
664 mutex_enter(&chanp->chan_mutex);
665 ret = cma_cas_chan_state(chanp, SOL_CMA_CHAN_IDLE, SOL_CMA_CHAN_BOUND);
666 if (ret) {
667 mutex_exit(&chanp->chan_mutex);
668 return (ret);
669 }
670 /* Copy the local address to rdma_id structure */
671 bcopy((void *)addr, (void *)&(addrp->src_addr),
672 sizeof (struct sockaddr));
673 mutex_exit(&chanp->chan_mutex);
674
675 /*
676 * First try rdma_ib_bind_addr() to bind this address; if that
677 * fails, try rdma_iw_bind_addr() to bind it instead.
678 * For IF_ADDR_ANY, the IB address is given priority over
679 * iWARP.
680 */
681 if (chanp->chan_ib_client_hdl == NULL) {
682 ofs_client_t *ofs_clnt;
683
684 ofs_clnt = (ofs_client_t *)sol_cma_ib_client->clnt_hdl;
685 chanp->chan_ib_client_hdl = ofs_clnt->ibt_hdl;
686 }
687 if (chanp->chan_ib_client_hdl && rdma_ib_bind_addr(idp, addr) == 0) {
688 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
689 "rdma_bind_addr: ret IB @");
690 return (0);
691 #ifdef IWARP_SUPPORT
692 } else if (chanp->chan_iw_client_hdl && rdma_iw_bind_addr(idp, addr)
693 == 0) {
694 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
695 "rdma_bind_addr: ret iWARP @");
696 return (0);
697 #endif /* IWARP_SUPPORT */
698 }
699
700 mutex_enter(&chanp->chan_mutex);
701 cma_set_chan_state(chanp, SOL_CMA_CHAN_IDLE);
702 mutex_exit(&chanp->chan_mutex);
703 SOL_OFS_DPRINTF_L4(sol_rdmacm_dbg_str, "rdma_bind_addr: ret failure!");
704 return (EINVAL);
705 }
706
707 int
708 rdma_resolve_addr(struct rdma_cm_id *idp, struct sockaddr *src_addr,
709 struct sockaddr *dst_addr, int timeout_ms)
710 {
711 sol_cma_chan_t *chanp;
712 struct rdma_addr *addrp;
713 cma_chan_state_t state;
714
715 ASSERT(idp);
716 chanp = (sol_cma_chan_t *)idp;
717 addrp = &(idp->route.addr);
718 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_resolve_addr(%p, %p, "
719 "%p, %x)", idp, src_addr, dst_addr, timeout_ms);
720
721 mutex_enter(&chanp->chan_mutex);
722 state = cma_get_chan_state(chanp);
723 if (state != SOL_CMA_CHAN_IDLE && state != SOL_CMA_CHAN_BOUND) {
724 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
725 "rdma_resolve_addr : invalid chan state %x", state);
726 mutex_exit(&chanp->chan_mutex);
727 return (EINVAL);
728 }
729 if (chanp->chan_cmid_destroy_state &
730 SOL_CMA_CALLER_CMID_DESTROYED) {
731 SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
732 "rdma_resolve_addr : CMID %p, destroy called", chanp);
733 mutex_exit(&chanp->chan_mutex);
734 return (EINVAL);
735 }
736 chanp->chan_cmid_destroy_state |= SOL_CMA_CALLER_API_PROGRESS;
737
738 if (chanp->chan_xport_type == SOL_CMA_XPORT_NONE) {
739 bcopy((void *)src_addr, (void *)&(addrp->src_addr),
740 sizeof (struct sockaddr));
741 }
742 bcopy((void *)dst_addr, (void *)&(addrp->dst_addr),
743 sizeof (struct sockaddr));
744 mutex_exit(&chanp->chan_mutex);
745
746 /*
747 * First try to resolve this as an address on the IB fabric;
748 * if that fails, resolve it as an iWARP address.
749 */
750 if (chanp->chan_ib_client_hdl == NULL) {
751 ofs_client_t *ofs_clnt;
752
753 ofs_clnt = (ofs_client_t *)sol_cma_ib_client->clnt_hdl;
754 chanp->chan_ib_client_hdl = ofs_clnt->ibt_hdl;
755 }
756 if (chanp->chan_ib_client_hdl && rdma_ib_resolve_addr(idp, src_addr,
757 dst_addr, timeout_ms) == 0) {
758 SOL_OFS_DPRINTF_L4(sol_rdmacm_dbg_str,
759 "rdma_resolve_addr: ret IB @");
760 #ifdef IWARP_SUPPORT
761 } else if (chanp->chan_iw_client_hdl && rdma_iw_resolve_addr(idp,
762 src_addr, dst_addr, timeout_ms) == 0) {
763 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
764 "rdma_resolve_addr: ret iWARP @");
765 #endif /* IWARP_SUPPORT */
766 } else {
767 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
768 "rdma_resolve_addr: Invalid @");
769 return (EINVAL);
770 }
771 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_resolve_addr: ret 0");
772 return (0);
773 }
774
775 static void cma_generate_event_sync(struct rdma_cm_id *,
776 enum rdma_cm_event_type, int, struct rdma_conn_param *,
777 struct rdma_ud_param *);
778
779 void
780 cma_resolve_addr_callback(sol_cma_chan_t *chanp, int rc)
781 {
782 enum rdma_cm_event_type event;
783
784 mutex_enter(&chanp->chan_mutex);
785 if (chanp->chan_cmid_destroy_state &
786 SOL_CMA_CALLER_CMID_DESTROYED) {
787 SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
788 "cma_resolve_addr : CMID %p, destroy called", chanp);
789 chanp->chan_cmid_destroy_state &=
790 ~SOL_CMA_CALLER_API_PROGRESS;
791 cv_broadcast(&chanp->chan_destroy_cv);
792 mutex_exit(&chanp->chan_mutex);
793 return;
794 }
795 if (rc == 0) {
796 cma_set_chan_state(chanp, SOL_CMA_CHAN_ADDR_RESLVD);
797 event = RDMA_CM_EVENT_ADDR_RESOLVED;
798 } else
799 event = RDMA_CM_EVENT_ADDR_ERROR;
800
801 /*
802 * Generate the RDMA_CM_EVENT_ADDR_RESOLVED or ADDR_ERROR event.
803 * For ADDR_RESOLVED this will result in
804 * RDMA_USER_CM_CMD_RESOLVE_ROUTE in userland.
805 */
806 chanp->chan_cmid_destroy_state |= SOL_CMA_CALLER_EVENT_PROGRESS;
807 mutex_exit(&chanp->chan_mutex);
808 cma_generate_event_sync((struct rdma_cm_id *)chanp, event, 0,
809 NULL, NULL);
810
811 mutex_enter(&chanp->chan_mutex);
812 chanp->chan_cmid_destroy_state &= ~SOL_CMA_CALLER_API_PROGRESS;
813 if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED)
814 cv_broadcast(&chanp->chan_destroy_cv);
815 mutex_exit(&chanp->chan_mutex);
816 }
817
818 int
819 rdma_resolve_route(struct rdma_cm_id *idp, int timeout_ms)
820 {
821 sol_cma_chan_t *chanp;
822
823 ASSERT(idp);
824 chanp = (sol_cma_chan_t *)idp;
825 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "resolve_route(%p, %x)", idp,
826 timeout_ms);
827
828 mutex_enter(&chanp->chan_mutex);
829 if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_ADDR_RESLVD,
830 SOL_CMA_CHAN_ROUTE_RESLVD) != 0) {
831 mutex_exit(&chanp->chan_mutex);
832 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
833 "resolve_route: Invalid state");
834 return (EINVAL);
835 }
836 if (chanp->chan_cmid_destroy_state &
837 SOL_CMA_CALLER_CMID_DESTROYED) {
838 SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
839 "rdma_resolve_route : CMID %p, destroy called", chanp);
840 mutex_exit(&chanp->chan_mutex);
841 return (EINVAL);
842 }
843 chanp->chan_cmid_destroy_state |= SOL_CMA_CALLER_API_PROGRESS;
844 mutex_exit(&chanp->chan_mutex);
845
846 /*
847 * Generate RDMA_CM_EVENT_ROUTE_RESOLVED event
848 * This will result in RDMA_USER_CM_CMD_RESOLVE_ROUTE in
849 * userland
850 */
851 cma_generate_event(idp, RDMA_CM_EVENT_ROUTE_RESOLVED, 0,
852 NULL, NULL);
853
854 mutex_enter(&chanp->chan_mutex);
855 chanp->chan_cmid_destroy_state &= ~SOL_CMA_CALLER_API_PROGRESS;
856 if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED)
857 cv_broadcast(&chanp->chan_destroy_cv);
858 mutex_exit(&chanp->chan_mutex);
859
860 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "resolve_route: ret 0");
861 return (0);
862 }
863
864 /*
865 * Connect or Listen request should be sent after the route is resolved.
866 *
867 * Active Side (Client) :
868 * 1. (State ROUTE_RESOLVED)-->CONNECT-->ACCEPT/REJECT-->DISCONNECT
869 * -->DESTROY_ID-->close(9E)
870 * 2. Same as (1), DESTROY_ID without DISCONNECT
871 * 3. Same as (1), close(9E) without DESTROY_ID.
872 *
873 * Passive Side (Server) :
874 * 4. (State ROUTE_RESOLVED)-->LISTEN->DISCONNECT
875 * -->DESTROY_ID-->close(9E)
876 * 5. Same as (4), DESTROY_ID without DISCONNECT
877 * 6. Same as (4), close(9E) without DESTROY_ID.
878 */
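/*
 * Illustrative sketch only (placeholder names, no error handling):
 * a passive-side consumer, once its CMID is route-resolved, would
 * typically do:
 *
 *	(void) rdma_listen(id, backlog);
 *	... its event handler then receives RDMA_CM_EVENT_CONNECT_REQUEST
 *	    on a new REQ CMID and calls rdma_accept(req_id, &conn_param)
 *	    or rdma_reject(req_id, priv, priv_len) ...
 *	(void) rdma_disconnect(req_id);
 *	rdma_destroy_id(req_id);
 */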
879 int
880 rdma_connect(struct rdma_cm_id *idp, struct rdma_conn_param *conn_param)
881 {
882 sol_cma_chan_t *chanp;
883 int ret = EINVAL;
884
885 ASSERT(idp);
886 chanp = (sol_cma_chan_t *)idp;
887 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_connect(%p, %p)", idp,
888 conn_param);
889
890 mutex_enter(&chanp->chan_mutex);
891 if (chanp->chan_xport_type == SOL_CMA_XPORT_NONE) {
892 mutex_exit(&chanp->chan_mutex);
893 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
894 "rdma_connect, Invalid Xport");
895 return (EINVAL);
896 }
897 if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_ROUTE_RESLVD,
898 SOL_CMA_CHAN_CONNECT)) {
899 mutex_exit(&chanp->chan_mutex);
900 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
901 "rdma_connect, Invalid state");
902 return (EINVAL);
903 }
904
905 if (chanp->chan_xport_type == SOL_CMA_XPORT_IB) {
906 ret = rdma_ib_connect(idp, conn_param);
907 #ifdef IWARP_SUPPORT
908 } else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP) {
909 ret = rdma_iw_connect(idp, conn_param);
910 #endif /* IWARP_SUPPORT */
911 }
912 mutex_exit(&chanp->chan_mutex);
913
914 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_connect: ret %x", ret);
915 return (ret);
916 }
917
918 static int cma_init_listen_root(sol_cma_chan_t *);
919 static void cma_fini_listen_root(sol_cma_chan_t *);
920
921 int
922 rdma_listen(struct rdma_cm_id *idp, int bklog)
923 {
924 sol_cma_chan_t *chanp;
925 int ret = 0;
926 genlist_entry_t *entry;
927 cma_chan_state_t state;
928
929 ASSERT(idp);
930 chanp = (sol_cma_chan_t *)idp;
931 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_listen(%p, %x)",
932 idp, bklog);
933
934 mutex_enter(&chanp->chan_mutex);
935 state = cma_get_chan_state(chanp);
936 if (state == SOL_CMA_CHAN_IDLE) {
937 mutex_exit(&chanp->chan_mutex);
938 return (EINVAL);
939 }
940 cma_set_chan_state(chanp, SOL_CMA_CHAN_LISTEN);
941
942 if (chanp->chan_cmid_destroy_state &
943 SOL_CMA_CALLER_CMID_DESTROYED) {
944 SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
945 "rdma_listen : CMID %p, destroy called", chanp);
946 mutex_exit(&chanp->chan_mutex);
947 return (EINVAL);
948 }
949 chanp->chan_cmid_destroy_state |= SOL_CMA_CALLER_API_PROGRESS;
950
951 ASSERT(chanp->chan_listenp == NULL);
952
953 chanp->chan_listenp = kmem_zalloc(sizeof (sol_cma_listen_info_t),
954 KM_SLEEP);
955 init_genlist(&(CHAN_LISTEN_LIST(chanp)));
956 (chanp->chan_listenp)->listen_is_root = 1;
957 ret = cma_init_listen_root(chanp);
958 if (ret) {
959 kmem_free(chanp->chan_listenp,
960     sizeof (sol_cma_listen_info_t));
961 chanp->chan_listenp = NULL;
962 mutex_exit(&chanp->chan_mutex);
963 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str, "rdma_listen: "
964     "cma_init_listen_root: failed");
965 return (EINVAL);
966 }
967
968 if (chanp->chan_xport_type == SOL_CMA_XPORT_NONE) {
969 ibcma_append_listen_list(idp);
970 #ifdef IWARP_SUPPORT
971 iwcma_append_listen_list(idp);
972 #endif
973 } else if (chanp->chan_xport_type == SOL_CMA_XPORT_IB) {
974 ibcma_append_listen_list(idp);
975 #ifdef IWARP_SUPPORT
976 } else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP) {
977 iwcma_append_listen_list(idp);
978 #endif /* IWARP_SUPPORT */
979 }
980
981 if (genlist_empty(&(CHAN_LISTEN_LIST(chanp)))) {
982 cma_fini_listen_root(chanp);
983 kmem_free((void *)chanp->chan_listenp,
984 sizeof (sol_cma_listen_info_t));
985 chanp->chan_listenp = NULL;
986 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str, "rdma_listen: "
987 "No listeners");
988 mutex_exit(&chanp->chan_mutex);
989 return (0);
990 }
991
992 if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED) {
993 chanp->chan_cmid_destroy_state &=
994 ~SOL_CMA_CALLER_API_PROGRESS;
995 cv_broadcast(&chanp->chan_destroy_cv);
996 }
997
998 genlist_for_each(entry, &(CHAN_LISTEN_LIST(chanp))) {
999 struct rdma_cm_id *ep_idp;
1000 sol_cma_chan_t *ep_chanp;
1001
1002 ep_idp = (struct rdma_cm_id *)entry->data;
1003 ep_chanp = (sol_cma_chan_t *)ep_idp;
1004 if (ep_chanp->chan_xport_type == SOL_CMA_XPORT_IB)
1005 ret = rdma_ib_listen(ep_idp, bklog);
1006 #ifdef IWARP_SUPPORT
1007 if (ep_chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1008 ret = rdma_iw_listen(ep_idp, bklog);
1009 #endif
1010 if (ret)
1011 break;
1012 }
1013
1014 chanp->chan_cmid_destroy_state &= ~SOL_CMA_CALLER_API_PROGRESS;
1015 if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED)
1016 cv_broadcast(&chanp->chan_destroy_cv);
1017 mutex_exit(&chanp->chan_mutex);
1018
1019 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_listen: ret %x", ret);
1020 return (ret);
1021 }
1022
1023 int
1024 rdma_accept(struct rdma_cm_id *idp, struct rdma_conn_param *conn_param)
1025 {
1026 struct rdma_cm_id *root_idp;
1027 sol_cma_chan_t *root_chanp, *chanp;
1028 int ret = EINVAL;
1029
1030 ASSERT(idp);
1031 chanp = (sol_cma_chan_t *)idp;
1032 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_accept(%p, %p)",
1033 idp, conn_param);
1034
1035 mutex_enter(&chanp->chan_mutex);
1036 if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_LISTEN,
1037 SOL_CMA_CHAN_ACCEPT) && cma_cas_chan_state(chanp,
1038 SOL_CMA_CHAN_CONNECT, SOL_CMA_CHAN_ACCEPT)) {
1039 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1040 "rdma_accept, Invalid state");
1041 mutex_exit(&chanp->chan_mutex);
1042 return (EINVAL);
1043 }
1044 mutex_exit(&chanp->chan_mutex);
1045
1046 root_idp = CHAN_LISTEN_ROOT(chanp);
1047 root_chanp = (sol_cma_chan_t *)root_idp;
1048 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "accept: root_idp %p",
1049 root_idp);
1050
1051 /* For TCP, delete from REQ AVL & insert to ACPT AVL */
1052 if (root_idp && root_idp->ps == RDMA_PS_TCP) {
1053 void *find_ret;
1054 avl_index_t where;
1055
1056 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "accept: root_idp %p"
1057 "REQ AVL remove %p", root_chanp, idp);
1058 mutex_enter(&root_chanp->chan_mutex);
1059 mutex_enter(&chanp->chan_mutex);
1060
1061 /*
1062 * This CMID has been deleted, maybe because of a timeout.
1063 * Return EINVAL.
1064 */
1065 if (chanp->chan_req_state != REQ_CMID_NOTIFIED) {
1066 mutex_exit(&chanp->chan_mutex);
1067 mutex_exit(&root_chanp->chan_mutex);
1068 SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
1069 "accept: root_idp %p chanp %p, not in REQ "
1070 "AVL tree", root_chanp, chanp);
1071 return (EINVAL);
1072 }
1073 ASSERT(cma_get_req_idp(root_idp, chanp->chan_session_id));
1074 avl_remove(&root_chanp->chan_req_avl_tree, idp);
1075
1076
1077 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1078 "Add to ACPT AVL of %p IDP, idp %p, qp_hdl %p",
1079 root_idp, idp, chanp->chan_qp_hdl);
1080 find_ret = avl_find(&root_chanp->chan_acpt_avl_tree,
1081 (void *)chanp->chan_qp_hdl, &where);
1082 if (find_ret) {
1083 chanp->chan_req_state = REQ_CMID_SERVER_NONE;
1084 mutex_exit(&chanp->chan_mutex);
1085 mutex_exit(&root_chanp->chan_mutex);
1086 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1087 "DUPLICATE ENTRY in ACPT AVL : root %p, "
1088 "idp %p, qp_hdl %p",
1089 root_idp, idp, chanp->chan_qp_hdl);
1090 return (EINVAL);
1091 }
1092 avl_insert(&root_chanp->chan_acpt_avl_tree,
1093 (void *)idp, where);
1094 chanp->chan_req_state = REQ_CMID_ACCEPTED;
1095 mutex_exit(&chanp->chan_mutex);
1096 mutex_exit(&root_chanp->chan_mutex);
1097 }
1098
1099 if (root_idp && IS_UDP_CMID(root_idp)) {
1100 cma_chan_state_t chan_state;
1101
1102 /*
1103 * Accepting the connect request, no more events for this
1104 * connection.
1105 */
1106 cma_handle_nomore_events(chanp);
1107 mutex_enter(&chanp->chan_mutex);
1108 chan_state = cma_get_chan_state(chanp);
1109 mutex_exit(&chanp->chan_mutex);
1110 /* If rdma_destroy_id() was called, destroy CMID */
1111 if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING) {
1112 cma_destroy_id((struct rdma_cm_id *)chanp);
1113 return (EINVAL);
1114 }
1115 }
1116
1117 if (chanp->chan_xport_type == SOL_CMA_XPORT_IB)
1118 ret = rdma_ib_accept(idp, conn_param);
1119 #ifdef IWARP_SUPPORT
1120 if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1121 ret = rdma_iw_accept(idp, conn_param);
1122 #endif /* IWARP_SUPPORT */
1123
1124 if (ret && root_idp && idp->ps == RDMA_PS_TCP) {
1125 void *find_ret;
1126 avl_index_t where;
1127
1128 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1129 "Delete from REQ AVL of %p IDP, idp %p",
1130 root_idp, idp);
1131 mutex_enter(&root_chanp->chan_mutex);
1132 mutex_enter(&chanp->chan_mutex);
1133 if (chanp->chan_req_state == REQ_CMID_ACCEPTED) {
1134 ASSERT(cma_get_acpt_idp(root_idp,
1135 chanp->chan_qp_hdl));
1136 avl_remove(&root_chanp->chan_acpt_avl_tree,
1137 idp);
1138 find_ret = avl_find(&root_chanp->chan_req_avl_tree,
1139 (void *)chanp->chan_qp_hdl, &where);
1140 if (find_ret) {
1141 chanp->chan_req_state = REQ_CMID_SERVER_NONE;
1142 mutex_exit(&chanp->chan_mutex);
1143 mutex_exit(&root_chanp->chan_mutex);
1144 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1145 "DUPLICATE ENTRY in REQ AVL : root %p, "
1146 "idp %p, session_id %p",
1147 root_idp, idp, chanp->chan_session_id);
1148 return (EINVAL);
1149 }
1150 avl_insert(&root_chanp->chan_req_avl_tree, idp, where);
1151 chanp->chan_req_state = REQ_CMID_NOTIFIED;
1152 }
1153 mutex_exit(&chanp->chan_mutex);
1154 mutex_exit(&root_chanp->chan_mutex);
1155 }
1156
1157 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_accept: ret %x", ret);
1158 return (ret);
1159 }
1160
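/*
 * rdma_notify() here only validates that the CMID is in the
 * ROUTE_RESOLVED state and moves it to EVENT_NOTIFIED; the event
 * itself is not propagated any further by this implementation.
 */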
1161 int
1162 rdma_notify(struct rdma_cm_id *idp, enum ib_event_type evt)
1163 {
1164 sol_cma_chan_t *chanp;
1165
1166 ASSERT(idp);
1167 chanp = (sol_cma_chan_t *)idp;
1168 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_notify(%p, %x)", idp, evt);
1169
1170 mutex_enter(&chanp->chan_mutex);
1171 if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_ROUTE_RESLVD,
1172 SOL_CMA_CHAN_EVENT_NOTIFIED)) {
1173 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1174 "rdma_notify, Invalid state");
1175 mutex_exit(&chanp->chan_mutex);
1176 return (EINVAL);
1177 }
1178 mutex_exit(&chanp->chan_mutex);
1179
1180 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_notify: ret 0");
1181 return (0);
1182 }
1183
1184 int
1185 rdma_reject(struct rdma_cm_id *idp, const void *priv_data,
1186 uint8_t priv_data_len)
1187 {
1188 struct rdma_cm_id *root_idp;
1189 sol_cma_chan_t *root_chanp, *chanp;
1190 int ret = EINVAL;
1191
1192 ASSERT(idp);
1193 chanp = (sol_cma_chan_t *)idp;
1194 root_idp = CHAN_LISTEN_ROOT(chanp);
1195 root_chanp = (sol_cma_chan_t *)root_idp;
1196 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_reject(%p, %p, %x)", idp,
1197 priv_data, priv_data_len);
1198
1199 mutex_enter(&chanp->chan_mutex);
1200 if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_LISTEN,
1201 SOL_CMA_CHAN_REJECT)) {
1202 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1203 "rdma_reject, Invalid state");
1204 mutex_exit(&chanp->chan_mutex);
1205 return (EINVAL);
1206 }
1207 mutex_exit(&chanp->chan_mutex);
1208
1209 if (root_idp) {
1210 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "reject: root_idp %p"
1211 "REQ AVL remove %p", root_chanp, idp);
1212
1213 /*
1214 * Remove from REQ AVL tree. If this CMID has been deleted,
1215 * it may be because of a timeout. Return EINVAL.
1216 */
1217 mutex_enter(&root_chanp->chan_mutex);
1218 mutex_enter(&chanp->chan_mutex);
1219 if (chanp->chan_req_state != REQ_CMID_NOTIFIED &&
1220 chanp->chan_req_state != REQ_CMID_QUEUED) {
1221 mutex_exit(&chanp->chan_mutex);
1222 mutex_exit(&root_chanp->chan_mutex);
1223 SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
1224 "reject: root_idp %p chanp %p, not in REQ "
1225 "AVL tree", root_chanp, chanp);
1226 return (EINVAL);
1227 }
1228 ASSERT(cma_get_req_idp(root_idp, chanp->chan_session_id));
1229 avl_remove(&root_chanp->chan_req_avl_tree, idp);
1230 chanp->chan_req_state = REQ_CMID_SERVER_NONE;
1231 mutex_exit(&chanp->chan_mutex);
1232 mutex_exit(&root_chanp->chan_mutex);
1233 }
1234
1235 if (chanp->chan_xport_type == SOL_CMA_XPORT_IB)
1236 ret = rdma_ib_reject(idp, priv_data, priv_data_len);
1237 #ifdef IWARP_SUPPORT
1238 if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1239 ret = rdma_iw_reject(idp, priv_data, priv_data_len);
1240 #endif /* IWARP_SUPPORT */
1241
1242
1243 if (!ret && root_idp) {
1244 cma_chan_state_t chan_state;
1245
1246 /*
1247 * Rejecting connect request, no more events for this
1248 * connection.
1249 */
1250 cma_handle_nomore_events(chanp);
1251 mutex_enter(&chanp->chan_mutex);
1252 chan_state = cma_get_chan_state(chanp);
1253 mutex_exit(&chanp->chan_mutex);
1254 /* If rdma_destroy_id() was called, destroy CMID */
1255 if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING)
1256 cma_destroy_id((struct rdma_cm_id *)chanp);
1257 } else if (ret && root_idp) {
1258 avl_index_t where;
1259
1260 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1261 "reject fail: Add to Req AVL of %p IDP, idp %p,"
1262 "session_id %p", root_idp, idp,
1263 chanp->chan_session_id);
1264 mutex_enter(&root_chanp->chan_mutex);
1265 mutex_enter(&chanp->chan_mutex);
1266 if (chanp->chan_req_state == REQ_CMID_SERVER_NONE) {
1267 if (avl_find(&root_chanp->chan_req_avl_tree,
1268 (void *)chanp->chan_session_id, &where)) {
1269 mutex_exit(&chanp->chan_mutex);
1270 mutex_exit(&root_chanp->chan_mutex);
1271 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1272 "DUPLICATE ENTRY in REQ AVL : root %p, "
1273 "idp %p, session_id %p",
1274 root_idp, idp, chanp->chan_session_id);
1275 return (EINVAL);
1276 }
1277 avl_insert(&root_chanp->chan_req_avl_tree,
1278 (void *)idp, where);
1279 chanp->chan_req_state = REQ_CMID_NOTIFIED;
1280 }
1281 mutex_exit(&chanp->chan_mutex);
1282 mutex_exit(&root_chanp->chan_mutex);
1283 }
1284
1285 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_reject: ret %x", ret);
1286 return (ret);
1287 }
1288
1289 int
1290 rdma_disconnect(struct rdma_cm_id *idp)
1291 {
1292 sol_cma_chan_t *chanp;
1293 int ret = EINVAL;
1294 cma_chan_state_t state;
1295
1296 chanp = (sol_cma_chan_t *)idp;
1297 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_disconnect(%p)", idp);
1298
1299 if (!idp)
1300 return (0);
1301
1302 mutex_enter(&chanp->chan_mutex);
1303 if (!(SOL_CMAID_CONNECTED(chanp))) {
1304 SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
1305 "rdma_disconnect(%p) - Not connected!!", idp);
1306 mutex_exit(&chanp->chan_mutex);
1307 return (EINVAL);
1308 }
1309 state = cma_get_chan_state(chanp);
1310 cma_set_chan_state(chanp, SOL_CMA_CHAN_DISCONNECT);
1311 mutex_exit(&chanp->chan_mutex);
1312
1313 if (chanp->chan_xport_type == SOL_CMA_XPORT_IB) {
1314 ret = rdma_ib_disconnect(idp);
1315 #ifdef IWARP_SUPPORT
1316 } else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP) {
1317 ret = rdma_iw_disconnect(idp);
1318 #endif /* IWARP_SUPPORT */
1319 }
1320
1321 if (ret) {
1322 mutex_enter(&chanp->chan_mutex);
1323 cma_set_chan_state(chanp, state);
1324 mutex_exit(&chanp->chan_mutex);
1325 return (ret);
1326 }
1327
1328 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_disconnect: ret %x", ret);
1329 return (ret);
1330 }
1331
1332 int
1333 rdma_init_qp_attr(struct rdma_cm_id *idp, struct ib_qp_attr *qpattr,
1334 int *qp_attr_mask)
1335 {
1336 sol_cma_chan_t *chanp;
1337 int ret = EINVAL;
1338
1339 ASSERT(idp);
1340 chanp = (sol_cma_chan_t *)idp;
1341 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_init_qp_attr(%p, %p, %p)",
1342 idp, qpattr, qp_attr_mask);
1343
1344 if (chanp->chan_xport_type == SOL_CMA_XPORT_IB) {
1345 ret = rdma_ib_init_qp_attr(idp, qpattr, qp_attr_mask);
1346 #ifdef IWARP_SUPPORT
1347 } else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP) {
1348 ret = rdma_iw_init_qp_attr(idp, qpattr, qp_attr_mask);
1349 #endif /* IWARP_SUPPORT */
1350 } else {
1351 ret = EINVAL;
1352 }
1353
1354 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1355 "rdma_init_qp_attr: ret %x", ret);
1356
1357 return (ret);
1358 }
1359
1360 int
1361 rdma_join_multicast(struct rdma_cm_id *idp, struct sockaddr *addr,
1362 void *context)
1363 {
1364 sol_cma_chan_t *chanp;
1365 int ret = ENODEV;
1366 cma_chan_state_t state;
1367
1368 ASSERT(idp);
1369 chanp = (sol_cma_chan_t *)idp;
1370 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1371 "rdma_join_multicast(%p, %p, %p)",
1372 idp, addr, context);
1373
1374 mutex_enter(&chanp->chan_mutex);
1375 state = cma_get_chan_state(chanp);
1376 if (state != SOL_CMA_CHAN_BOUND &&
1377 state != SOL_CMA_CHAN_ROUTE_RESLVD &&
1378 state != SOL_CMA_CHAN_ADDR_RESLVD) {
1379 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1380 "rdma_join_multicast, Invalid state");
1381 mutex_exit(&chanp->chan_mutex);
1382 return (EINVAL);
1383 }
1384
1385 if (chanp->chan_xport_type == SOL_CMA_XPORT_IB)
1386 ret = rdma_ib_join_multicast(idp, addr, context);
1387 #ifdef IWARP_SUPPORT
1388 /* No support for Multicast on iWARP */
1389 else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1390 ret = ENOTSUP;
1391 #endif /* IWARP_SUPPORT */
1392 mutex_exit(&chanp->chan_mutex);
1393
1394 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1395 "rdma_join_multicast: ret %x", ret);
1396 return (ret);
1397 }
1398
1399 void
1400 rdma_leave_multicast(struct rdma_cm_id *idp, struct sockaddr *addr)
1401 {
1402 sol_cma_chan_t *chanp;
1403 cma_chan_state_t state;
1404
1405 ASSERT(idp);
1406 chanp = (sol_cma_chan_t *)idp;
1407 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_leave_multicast(%p, %p)",
1408 idp, addr);
1409
1410 mutex_enter(&chanp->chan_mutex);
1411 state = cma_get_chan_state(chanp);
1412 if (state != SOL_CMA_CHAN_BOUND &&
1413 state != SOL_CMA_CHAN_ROUTE_RESLVD &&
1414 state != SOL_CMA_CHAN_ADDR_RESLVD) {
1415 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1416 "rdma_leave_multicast, Invalid state");
1417 mutex_exit(&chanp->chan_mutex);
1418 return;
1419 }
1420
1421 if (chanp->chan_xport_type == SOL_CMA_XPORT_IB)
1422 rdma_ib_leave_multicast(idp, addr);
1423 #ifdef IWARP_SUPPORT
1424 /* No support for Multicast on iWARP */
1425 else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1426 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1427 "rdma_leave_multicast, iWARP");
1428 #endif /* IWARP_SUPPORT */
1429 mutex_exit(&chanp->chan_mutex);
1430
1431 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_leave_multicast: ret");
1432 }
1433
1434 /*
1435 * Functions to compare two rdma_cm_id pointers, used by AVL
1436 * tree routines.
1437 */
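/*
 * sol_cma_req_cmid_cmp() compares a REQ CMID against a search key
 * that is a chan_session_id; chan_req_avl_tree lookups in this file
 * pass chan_session_id as the key.
 */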
1438 int
1439 sol_cma_req_cmid_cmp(const void *p1, const void *p2)
1440 {
1441 sol_cma_chan_t *chanp;
1442
1443 chanp = (sol_cma_chan_t *)p2;
1444 if (chanp->chan_session_id > p1)
1445 return (+1);
1446 else if (chanp->chan_session_id < p1)
1447 return (-1);
1448 else
1449 return (0);
1450 }
1451
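/*
 * sol_cma_cmid_cmp() compares a CMID against a search key that is a
 * chan_qp_hdl; chan_acpt_avl_tree lookups in this file pass
 * chan_qp_hdl as the key.
 */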
1452 int
1453 sol_cma_cmid_cmp(const void *p1, const void *p2)
1454 {
1455 sol_cma_chan_t *chanp;
1456
1457 chanp = (sol_cma_chan_t *)p2;
1458 if (chanp->chan_qp_hdl > p1)
1459 return (+1);
1460 else if (chanp->chan_qp_hdl < p1)
1461 return (-1);
1462 else
1463 return (0);
1464 }
1465
1466 /*
1467 * Function to compare two sol_cma_glbl_listen_t *, used by
1468 * AVL tree routines.
1469 */
1470 int
1471 sol_cma_svc_cmp(const void *p1, const void *p2)
1472 {
1473 sol_cma_glbl_listen_t *listenp;
1474 uint64_t sid;
1475
1476 sid = *(uint64_t *)p1;
1477 listenp = (sol_cma_glbl_listen_t *)p2;
1478 if (listenp->cma_listen_chan_sid > sid)
1479 return (+1);
1480 else if (listenp->cma_listen_chan_sid < sid)
1481 return (-1);
1482 else
1483 return (0);
1484 }
1485
1486 static int
1487 cma_init_listen_root(sol_cma_chan_t *chanp)
1488 {
1489 sol_cma_glbl_listen_t *cma_listenp;
1490 sol_cma_listen_info_t *chan_listenp;
1491 int rc = 0;
1492 avl_index_t where = 0;
1493 uint64_t listen_sid;
1494
1495 ASSERT(chanp);
1496 ASSERT(chanp->chan_listenp);
1497 chan_listenp = chanp->chan_listenp;
1498
1499 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1500 "cma_init_listen_root(%p)", chanp);
1501
1502 /*
1503 * First search for a matching global listen_info for this SID.
1504 * If one is found with the same client handle, reuse its
1505 * service handle; if a matching SID is found with a different
1506 * client handle, return EINVAL.
1507 */
1508 listen_sid = ibcma_init_root_sid(chanp);
1509 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1510 "cma_init_listen_root: search SID 0x%llx",
1511 listen_sid);
1512
1513 mutex_enter(&sol_cma_glob_mutex);
1514 cma_listenp = avl_find(&sol_cma_glbl_listen_tree,
1515 (void *) &listen_sid, &where);
1516 if (cma_listenp && cma_listenp->cma_listen_clnt_hdl ==
1517 chanp->chan_ib_client_hdl) {
1518 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1519 "cma_init_listen_root: matching listenp %p SID 0x%llx",
1520 cma_listenp, listen_sid);
1521 chan_listenp->listen_entry = add_genlist(
1522 &cma_listenp->cma_listen_chan_list,
1523 (uintptr_t)chanp, NULL);
1524 chan_listenp->chan_glbl_listen_info = cma_listenp;
1525 ibcma_copy_srv_hdl(chanp, cma_listenp);
1526 mutex_exit(&sol_cma_glob_mutex);
1527 return (0);
1528 } else if (cma_listenp) {
1529 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1530 "cma_init_listen_root: listenp %p, SID 0x%llx match, "
1531 "client hdl prev %p, new %p mismatch",
1532 cma_listenp, listen_sid,
1533 cma_listenp->cma_listen_clnt_hdl,
1534 chanp->chan_ib_client_hdl);
1535 mutex_exit(&sol_cma_glob_mutex);
1536 return (EINVAL);
1537 }
1538
1539 cma_listenp = kmem_zalloc(sizeof (sol_cma_glbl_listen_t), KM_SLEEP);
1540 init_genlist(&cma_listenp->cma_listen_chan_list);
1541 chan_listenp->listen_entry = add_genlist(
1542 &cma_listenp->cma_listen_chan_list, (uintptr_t)chanp, NULL);
1543 chan_listenp->chan_glbl_listen_info = cma_listenp;
1544 cma_listenp->cma_listen_clnt_hdl = chanp->chan_ib_client_hdl;
1545 cma_listenp->cma_listen_chan_sid = listen_sid;
1546
1547 rc = ibcma_init_root_chan(chanp, cma_listenp);
1548 if (rc) {
1549 mutex_exit(&sol_cma_glob_mutex);
1550 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1551 "cma_init_listen_root: ibcma_init_root_chan failed!!");
1552 delete_genlist(&cma_listenp->cma_listen_chan_list,
1553 chan_listenp->listen_entry);
1554 kmem_free(cma_listenp, sizeof (sol_cma_glbl_listen_t));
1555 return (rc);
1556 }
1557 avl_insert(&sol_cma_glbl_listen_tree, cma_listenp, where);
1558 mutex_exit(&sol_cma_glob_mutex);
1559 return (0);
1560 }
1561
1562 static void
1563 cma_fini_listen_root(sol_cma_chan_t *chanp)
1564 {
1565 sol_cma_glbl_listen_t *cma_listenp;
1566 sol_cma_listen_info_t *chan_listenp;
1567
1568 ASSERT(chanp);
1569 ASSERT(chanp->chan_listenp);
1570 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "cma_fini_listen_root(%p)",
1571 chanp);
1572 chan_listenp = chanp->chan_listenp;
1573 cma_listenp = chan_listenp->chan_glbl_listen_info;
1574 ASSERT(cma_listenp);
1575 mutex_enter(&sol_cma_glob_mutex);
1576 delete_genlist(&cma_listenp->cma_listen_chan_list,
1577 chan_listenp->listen_entry);
1578 if (genlist_empty(&cma_listenp->cma_listen_chan_list)) {
1579 if (ibcma_fini_root_chan(chanp) == 0) {
1580 avl_remove(&sol_cma_glbl_listen_tree,
1581 cma_listenp);
1582 kmem_free(cma_listenp,
1583 sizeof (sol_cma_glbl_listen_t));
1584 } else
1585 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1586 "cma_fini_listen_root: "
1587 "ibcma_fini_root_chan failed");
1588 }
1589
1590 mutex_exit(&sol_cma_glob_mutex);
1591 }
1592
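/*
 * Argument block for events dispatched through the taskq by
 * cma_generate_event(). The conn/ud parameters (and a copy of any
 * private data) are embedded in the union so that they remain valid
 * until cma_generate_event_thr() runs and frees them.
 */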
1593 typedef struct cma_event_async_arg {
1594 struct rdma_cm_id *idp;
1595 enum rdma_cm_event_type event;
1596 int status;
1597 union {
1598 struct rdma_conn_param conn;
1599 struct rdma_ud_param param;
1600 } un;
1601 struct rdma_conn_param *conn_param;
1602 struct rdma_ud_param *ud_paramp;
1603 } cma_event_async_arg_t;
1604
1605 static void cma_generate_event_sync(struct rdma_cm_id *,
1606 enum rdma_cm_event_type, int, struct rdma_conn_param *,
1607 struct rdma_ud_param *);
1608
1609 void
1610 cma_generate_event_thr(void *arg)
1611 {
1612 cma_event_async_arg_t *event_arg = (cma_event_async_arg_t *)arg;
1613
1614 cma_generate_event_sync(event_arg->idp, event_arg->event,
1615 event_arg->status, event_arg->conn_param,
1616 event_arg->ud_paramp);
1617
1618 if (event_arg->conn_param && event_arg->conn_param->private_data_len)
1619 kmem_free((void *)event_arg->conn_param->private_data,
1620 event_arg->conn_param->private_data_len);
1621 if (event_arg->ud_paramp && event_arg->ud_paramp->private_data_len)
1622 kmem_free((void *)event_arg->ud_paramp->private_data,
1623 event_arg->ud_paramp->private_data_len);
1624 kmem_free(arg, sizeof (cma_event_async_arg_t));
1625 }
1626
1627 void
1628 cma_generate_event(struct rdma_cm_id *idp, enum rdma_cm_event_type event,
1629 int status, struct rdma_conn_param *conn_param,
1630 struct rdma_ud_param *ud_paramp)
1631 {
1632 cma_event_async_arg_t *event_arg;
1633 sol_cma_chan_t *chanp = (sol_cma_chan_t *)idp;
1634
1635 /*
1636 * Set SOL_CMA_CALLER_EVENT_PROGRESS to indicate event
1637 * notification is in progress, so that races between
1638 * rdma_destroy_id() and event notification are handled.
1639 *
1640 * If rdma_destroy_id() has been called for this CMID, call
1641 * cma_generate_event_sync() which skips notification to the
1642 * consumer and handles the event.
1643 */
1644 mutex_enter(&chanp->chan_mutex);
1645 chanp->chan_cmid_destroy_state |= SOL_CMA_CALLER_EVENT_PROGRESS;
1646 if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED) {
1647 mutex_exit(&chanp->chan_mutex);
1648 cma_generate_event_sync(idp, event, status, conn_param,
1649 ud_paramp);
1650 return;
1651 }
1652 mutex_exit(&chanp->chan_mutex);
1653
1654 event_arg = kmem_zalloc(sizeof (cma_event_async_arg_t), KM_SLEEP);
1655 event_arg->idp = idp;
1656 event_arg->event = event;
1657 event_arg->status = status;
1658 event_arg->conn_param = NULL;
1659 event_arg->ud_paramp = NULL;
1660 if (conn_param && conn_param->private_data_len) {
1661 bcopy(conn_param, &(event_arg->un.conn),
1662 sizeof (struct rdma_conn_param));
1663 event_arg->conn_param = &(event_arg->un.conn);
1664 event_arg->conn_param->private_data = kmem_zalloc(
1665 conn_param->private_data_len, KM_SLEEP);
1666 bcopy(conn_param->private_data,
1667 (void *)event_arg->conn_param->private_data,
1668 conn_param->private_data_len);
1669 } else if (conn_param && conn_param->private_data_len == 0) {
1670 bcopy(conn_param, &(event_arg->un.conn),
1671 sizeof (struct rdma_conn_param));
1672 } else if (ud_paramp) {
1673 bcopy(ud_paramp, &(event_arg->un.param),
1674 sizeof (struct rdma_ud_param));
1675 event_arg->ud_paramp = &(event_arg->un.param);
1676 if (ud_paramp->private_data_len) {
1677 event_arg->ud_paramp->private_data = kmem_zalloc(
1678 ud_paramp->private_data_len, KM_SLEEP);
1679 bcopy(ud_paramp->private_data,
1680 (void *)event_arg->ud_paramp->private_data,
1681 ud_paramp->private_data_len);
1682 } else if (ud_paramp->private_data) {
1683 event_arg->ud_paramp->private_data =
1684 ud_paramp->private_data;
1685 }
1686 }
1687
1688 if (taskq_dispatch(system_taskq, cma_generate_event_thr,
1689 (void *)event_arg, TQ_SLEEP) == 0) {
1690 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1691 "generate_event_async: taskq_dispatch() failed!!");
1692 mutex_enter(&chanp->chan_mutex);
1693 chanp->chan_cmid_destroy_state &=
1694 ~SOL_CMA_CALLER_EVENT_PROGRESS;
1695 if (chanp->chan_cmid_destroy_state &
1696 SOL_CMA_CALLER_CMID_DESTROYED)
1697 cv_broadcast(&chanp->chan_destroy_cv);
1698 mutex_exit(&chanp->chan_mutex);
1699 }
1700 }
1701
1702 static void
1703 cma_generate_event_sync(struct rdma_cm_id *idp, enum rdma_cm_event_type event,
1704 int status, struct rdma_conn_param *conn_param,
1705 struct rdma_ud_param *ud_paramp)
1706 {
1707 struct rdma_cm_event cm_event;
1708 sol_cma_chan_t *chanp = (sol_cma_chan_t *)idp;
1709 struct rdma_cm_id *root_idp = NULL;
1710 sol_cma_chan_t *root_chanp;
1711 int ret;
1712 cma_chan_state_t chan_state;
1713
1714 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "generate_event_sync(%p, %x, "
1715 "%x, %p, %p", idp, event, status, conn_param, ud_paramp);
1716
1717 bzero(&cm_event, sizeof (cm_event));
1718 cm_event.event = event;
1719 cm_event.status = status;
1720 if (conn_param)
1721 bcopy((void *)conn_param, (void *)(&(cm_event.param.conn)),
1722 sizeof (struct rdma_conn_param));
1723 else if (ud_paramp)
1724 bcopy((void *)ud_paramp, (void *)(&(cm_event.param.ud)),
1725 sizeof (struct rdma_ud_param));
1726
1727 /*
1728 	 * If the consumer has destroyed the context for this CMID,
1729 	 * do not notify; skip directly to the sol_ofs-specific
1730 	 * handling of the event.
1731 */
1732 mutex_enter(&chanp->chan_mutex);
1733 if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED) {
1734 mutex_exit(&chanp->chan_mutex);
1735 goto ofs_consume_event;
1736 }
1737 mutex_exit(&chanp->chan_mutex);
1738
1739 root_idp = CHAN_LISTEN_ROOT(chanp);
1740 root_chanp = (sol_cma_chan_t *)root_idp;
1741 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "gen_event: root_idp %p",
1742 root_idp);
1743
1744 if (event == RDMA_CM_EVENT_CONNECT_REQUEST) {
1745 /*
1746 * Update chan_req_state for the REQ CMID. Decrement
1747 		 * the count of REQ CMIDs not yet notified to the consumer.
1748 */
1749 ASSERT(root_idp);
1750 mutex_enter(&root_chanp->chan_mutex);
1751 root_chanp->chan_req_cnt--;
1752 #ifdef DEBUG
1753 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1754 "Dec req_cnt of %p IDP, idp %p, req_cnt %x",
1755 root_idp, idp, root_chanp->chan_req_cnt);
1756 #endif
1757 mutex_exit(&root_chanp->chan_mutex);
1758 }
1759
1760 /* Pass the event to the client */
1761 ret = (idp->event_handler) (idp, &cm_event);
1762
1763 if (ret) {
1764 /*
1765 * If the consumer returned failure :
1766 * CONNECT_REQUEST :
1767 		 * 	1. Call rdma_disconnect() to tear down the connection.
1768 		 *	2. Wake up the destroy waiter, if rdma_destroy_id()
1769 		 * 		has been called for this CMID.
1770 		 *	3. Destroy the CMID if rdma_destroy_id() has not
1771 		 *		been called.
1772 		 * 	DISCONNECTED :
1773 		 *	1. Call cma_handle_nomore_events() to clean up.
1774 		 * 	Other Events :
1775 		 *	1. The client is expected to destroy the CMID.
1776 */
1777 if (event == RDMA_CM_EVENT_CONNECT_REQUEST) {
1778 SOL_OFS_DPRINTF_L4(sol_rdmacm_dbg_str,
1779 			    "cma_generate_event_sync: consumer failed %d "
1780 "event", event);
1781 if (rdma_disconnect(idp)) {
1782 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1783 				    "generate_event_sync: rdma_disconnect "
1784 "failed");
1785 }
1786 mutex_enter(&chanp->chan_mutex);
1787 ASSERT(SOL_IS_SERVER_CMID(chanp));
1788 chanp->chan_req_state = REQ_CMID_SERVER_NONE;
1789 chanp->chan_cmid_destroy_state &=
1790 ~SOL_CMA_CALLER_EVENT_PROGRESS;
1791 if (chanp->chan_cmid_destroy_state &
1792 SOL_CMA_CALLER_CMID_DESTROYED) {
1793 cv_broadcast(&chanp->chan_destroy_cv);
1794 mutex_exit(&chanp->chan_mutex);
1795 } else {
1796 mutex_exit(&chanp->chan_mutex);
1797 rdma_destroy_id(idp);
1798 }
1799 } else if (event == RDMA_CM_EVENT_DISCONNECTED) {
1800 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1801 			    "generate_event_sync: consumer failed %d event",
1802 event);
1803 cma_handle_nomore_events(chanp);
1804 mutex_enter(&chanp->chan_mutex);
1805 chan_state = cma_get_chan_state(chanp);
1806 chanp->chan_cmid_destroy_state &=
1807 ~SOL_CMA_CALLER_EVENT_PROGRESS;
1808 if (chanp->chan_cmid_destroy_state &
1809 SOL_CMA_CALLER_CMID_DESTROYED) {
1810 cv_broadcast(&chanp->chan_destroy_cv);
1811 mutex_exit(&chanp->chan_mutex);
1812 } else if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING) {
1813 /* rdma_destroy_id() called: destroy CMID */
1814 mutex_exit(&chanp->chan_mutex);
1815 cma_destroy_id((struct rdma_cm_id *)chanp);
1816 } else
1817 mutex_exit(&chanp->chan_mutex);
1818 } else {
1819 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1820 			    "generate_event_sync: consumer failed %d event",
1821 event);
1822 }
1823
1824 return;
1825 }
1826 ofs_consume_event:
1827 if (event == RDMA_CM_EVENT_DISCONNECTED) {
1828 cma_chan_state_t chan_state;
1829
1830 cma_handle_nomore_events(chanp);
1831 mutex_enter(&chanp->chan_mutex);
1832 chan_state = cma_get_chan_state(chanp);
1833 chanp->chan_cmid_destroy_state &=
1834 ~SOL_CMA_CALLER_EVENT_PROGRESS;
1835 if (chanp->chan_cmid_destroy_state &
1836 SOL_CMA_CALLER_CMID_DESTROYED) {
1837 cv_broadcast(&chanp->chan_destroy_cv);
1838 mutex_exit(&chanp->chan_mutex);
1839 } else if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING) {
1840 /* If rdma_destroy_id() was called, destroy CMID */
1841 mutex_exit(&chanp->chan_mutex);
1842 cma_destroy_id((struct rdma_cm_id *)chanp);
1843 } else
1844 mutex_exit(&chanp->chan_mutex);
1845 return;
1846 } else if (IS_UDP_CMID(idp) && event == RDMA_CM_EVENT_UNREACHABLE) {
1847 /*
1848 		 * If rdma_destroy_id() was called, destroy the CMID.
1849 		 * If not, chan_connect_flag / chan_req_state has already
1850 		 * been set to indicate that it can be deleted.
1851 */
1852 mutex_enter(&chanp->chan_mutex);
1853 chan_state = cma_get_chan_state(chanp);
1854 chanp->chan_cmid_destroy_state &=
1855 ~SOL_CMA_CALLER_EVENT_PROGRESS;
1856 if (chanp->chan_cmid_destroy_state &
1857 SOL_CMA_CALLER_CMID_DESTROYED) {
1858 cv_broadcast(&chanp->chan_destroy_cv);
1859 mutex_exit(&chanp->chan_mutex);
1860 } else if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING) {
1861 mutex_exit(&chanp->chan_mutex);
1862 cma_destroy_id(idp);
1863 } else
1864 mutex_exit(&chanp->chan_mutex);
1865 return;
1866 }
1867
1868 mutex_enter(&chanp->chan_mutex);
1869 chanp->chan_cmid_destroy_state &= ~SOL_CMA_CALLER_EVENT_PROGRESS;
1870 if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED)
1871 cv_broadcast(&chanp->chan_destroy_cv);
1872 mutex_exit(&chanp->chan_mutex);
1873 }
1874
1875 /* Local Static functions */
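/*
 * cma_alloc_chan() allocates a sol_cma_chan_t, initializes its mutex,
 * condition variable and the REQ / ACPT AVL trees, sets the channel
 * state to IDLE and returns the embedded rdma_cm_id for the consumer.
 */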
1876 static struct rdma_cm_id *
1877 cma_alloc_chan(rdma_cm_event_handler evt_hdlr, void *context,
1878 enum rdma_port_space ps)
1879 {
1880 struct rdma_cm_id *rdma_idp;
1881 sol_cma_chan_t *chanp;
1882
1883 chanp = kmem_zalloc(sizeof (sol_cma_chan_t), KM_SLEEP);
1884 mutex_init(&chanp->chan_mutex, NULL, MUTEX_DRIVER, NULL);
1885 cv_init(&chanp->chan_destroy_cv, NULL, CV_DRIVER, NULL);
1886 rdma_idp = &(chanp->chan_rdma_cm);
1887 rdma_idp->context = context;
1888 rdma_idp->ps = ps;
1889 rdma_idp->event_handler = evt_hdlr;
1890 mutex_enter(&chanp->chan_mutex);
1891 cma_set_chan_state(chanp, SOL_CMA_CHAN_IDLE);
1892 avl_create(&chanp->chan_req_avl_tree, sol_cma_req_cmid_cmp,
1893 sizeof (sol_cma_chan_t),
1894 offsetof(sol_cma_chan_t, chan_req_avl_node));
1895 avl_create(&chanp->chan_acpt_avl_tree, sol_cma_cmid_cmp,
1896 sizeof (sol_cma_chan_t),
1897 offsetof(sol_cma_chan_t, chan_acpt_avl_node));
1898 mutex_exit(&chanp->chan_mutex);
1899
1900 return (rdma_idp);
1901 }
1902
1903 /* Change the state of sol_cma_chan_t */
1904 static void
1905 cma_set_chan_state(sol_cma_chan_t *chanp, cma_chan_state_t newstate)
1906 {
1907 ASSERT(MUTEX_HELD(&chanp->chan_mutex));
1908 chanp->chan_state = newstate;
1909 }
1910
1911 cma_chan_state_t
1912 cma_get_chan_state(sol_cma_chan_t *chanp)
1913 {
1914 ASSERT(MUTEX_HELD(&chanp->chan_mutex));
1915 return (chanp->chan_state);
1916 }
1917
1918 /* Check & swap the state of sol_cma_chan_t */
1919 static int
1920 cma_cas_chan_state(sol_cma_chan_t *chanp, cma_chan_state_t prevstate,
1921 cma_chan_state_t newstate)
1922 {
1923 int ret = 0;
1924
1925 ASSERT(MUTEX_HELD(&chanp->chan_mutex));
1926 if (chanp->chan_state != prevstate)
1927 ret = -1;
1928 else
1929 chanp->chan_state = newstate;
1930
1931 return (ret);
1932 }
1933
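/*
 * cma_free_listen_list() drains the endpoint channels hanging off a
 * listening CMID: each endpoint channel is finalized, removed from its
 * device's endpoint list, and its memory is freed.
 */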
1934 static void
1935 cma_free_listen_list(struct rdma_cm_id *idp)
1936 {
1937 genlist_entry_t *entry;
1938 sol_cma_chan_t *chanp = (sol_cma_chan_t *)idp;
1939
1940 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "cma_free_listen_list(%p)", idp);
1941 mutex_enter(&chanp->chan_mutex);
1942 entry = remove_genlist_head(&(CHAN_LISTEN_LIST(chanp)));
1943 mutex_exit(&chanp->chan_mutex);
1944 while (entry) {
1945 sol_cma_chan_t *ep_chanp;
1946
1947 ep_chanp = (sol_cma_chan_t *)entry->data;
1948 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "fini_ep_chan: %p",
1949 ep_chanp);
1950 if (ibcma_fini_ep_chan(ep_chanp) == 0) {
1951 genlist_entry_t *entry1;
1952 struct ib_device *device;
1953 cma_device_t *cma_device;
1954
1955 ASSERT(ep_chanp->chan_listenp);
1956 mutex_enter(&ep_chanp->chan_mutex);
1957 entry1 = ep_chanp->chan_listenp->listen_ep_dev_entry;
1958 device = ep_chanp->chan_listenp->listen_ep_device;
1959 ASSERT(device);
1960 cma_device = device->data;
1961 delete_genlist(&cma_device->cma_epchan_list,
1962 entry1);
1963 sol_cma_release_device(
1964 (struct rdma_cm_id *)ep_chanp);
1965 mutex_exit(&ep_chanp->chan_mutex);
1966 if (ep_chanp->chan_listenp)
1967 kmem_free(ep_chanp->chan_listenp,
1968 sizeof (sol_cma_listen_info_t));
1969
1970 mutex_destroy(&ep_chanp->chan_mutex);
1971 cv_destroy(&ep_chanp->chan_destroy_cv);
1972 kmem_free(ep_chanp, sizeof (sol_cma_chan_t));
1973 kmem_free(entry, sizeof (genlist_entry_t));
1974 }
1975
1976 mutex_enter(&chanp->chan_mutex);
1977 entry = remove_genlist_head(&(CHAN_LISTEN_LIST(chanp)));
1978 mutex_exit(&chanp->chan_mutex);
1979 }
1980 }
1981
1982 /*
1983 * Destroy a listening CMID when :
1984  *	a. All CONNECTION REQUESTs received have been rejected
1985  *	   or closed.
1986  *	b. No CONNECTION REQUEST has been received.
1987  * Do not destroy a listening CMID when :
1988  *	a. A CONNECTION REQUEST has been received and has not
1989  *	   been accepted from the passive / server side.
1990  *	b. A CONNECTION REQUEST has been received and has been
1991  *	   accepted from the passive / server side.
1992  *	Instead, mark the listening CMID as destroy pending.
1993  *
1994  * For CMIDs created for rdma_connect() or created for a
1995  * CONNECT request, destroy the CMID only when the
1996  *	CONNECTION has been closed or rejected.
1997  *
1998  *	Otherwise, mark the CMID as destroy pending.
1999  *
2000  * When a connection is rejected or closed :
2001  *	If the destroy pending flag is set, cma_destroy_id()
2002  *	is called for this CMID.
2003  *
2004  *	If there is a listening CMID associated with it,
2005  *	cma_destroy_id(listen_cmid) is called as well.
2006 */
2007 void
2008 cma_destroy_id(struct rdma_cm_id *idp)
2009 {
2010 sol_cma_chan_t *chanp = (sol_cma_chan_t *)idp;
2011 cma_chan_state_t state;
2012 ulong_t acpt_nodes, req_nodes;
2013
2014 mutex_enter(&chanp->chan_mutex);
2015 acpt_nodes = avl_numnodes(&chanp->chan_acpt_avl_tree);
2016 req_nodes = avl_numnodes(&chanp->chan_req_avl_tree);
2017 state = cma_get_chan_state(chanp);
2018 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "cma_destroy_id(%p)- "
2019 "est CMIDs %ld, req CMID %ld, listen_root %p, state %x, %x",
2020 idp, acpt_nodes, req_nodes, chanp->listen_root,
2021 state, chanp->chan_req_state);
2022
2023 /*
2024 	 * If there are received REQ CMIDs or established CMIDs, just return.
2025 	 * rdma_destroy_id() for these CMIDs can be called by the client later.
2026 */
2027 if (acpt_nodes || req_nodes) {
2028 cma_set_chan_state(chanp, SOL_CMA_CHAN_DESTROY_PENDING);
2029 mutex_exit(&chanp->chan_mutex);
2030 return;
2031 }
2032 cma_set_chan_state(chanp, SOL_CMA_CHAN_DESTROYING);
2033 avl_destroy(&chanp->chan_req_avl_tree);
2034 avl_destroy(&chanp->chan_acpt_avl_tree);
2035
2036 mutex_exit(&chanp->chan_mutex);
2037 if (idp->route.path_rec) {
2038 kmem_free(idp->route.path_rec,
2039 sizeof (struct ib_sa_path_rec) * idp->route.num_paths);
2040 idp->route.path_rec = NULL;
2041 }
2042
2043 switch (chanp->chan_xport_type) {
2044 case SOL_CMA_XPORT_NONE :
2045 break;
2046 case SOL_CMA_XPORT_IB :
2047 rdma_ib_destroy_id(idp);
2048 break;
2049 #ifdef IWARP_SUPPORT
2050 case SOL_CMA_XPORT_IWARP :
2051 rdma_iw_destroy_id(idp);
2052 break;
2053 #endif /* IWARP_SUPPORT */
2054 default :
2055 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
2056 "cma_destroy_id: Unsupported xport type %x",
2057 chanp->chan_xport_type);
2058 break;
2059 }
2060
2061 /*
2062 	 * Flush out & free all listeners associated with this ID.
2063 	 * No locking is required, as this code is executed after
2064 * all REQ CMIDs have been destroyed. listen_list
2065 * will therefore not be modified during this loop.
2066 */
2067 if (chanp->chan_listenp) {
2068 cma_free_listen_list(idp);
2069 cma_fini_listen_root(chanp);
2070 kmem_free((void *)chanp->chan_listenp,
2071 sizeof (sol_cma_listen_info_t));
2072 chanp->chan_listenp = NULL;
2073 }
2074
2075 if (chanp->listen_root) {
2076 struct rdma_cm_id *root_idp;
2077 sol_cma_chan_t *root_chanp;
2078
2079 root_idp = chanp->listen_root;
2080 root_chanp = (sol_cma_chan_t *)root_idp;
2081 mutex_enter(&root_chanp->chan_mutex);
2082 state = cma_get_chan_state(root_chanp);
2083 acpt_nodes = avl_numnodes(&root_chanp->chan_acpt_avl_tree);
2084 req_nodes = avl_numnodes(&root_chanp->chan_req_avl_tree);
2085 mutex_exit(&root_chanp->chan_mutex);
2086 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "cma_destroy_id(%p)-"
2087 " root idp %p, state %x, acpt_nodes %ld, req_nodes %ld",
2088 idp, root_idp, state, acpt_nodes, req_nodes);
2089
2090 if (state == SOL_CMA_CHAN_DESTROY_PENDING &&
2091 req_nodes == 0UL && acpt_nodes == 0UL) {
2092 mutex_enter(&root_chanp->chan_mutex);
2093 root_chanp->chan_req_state = REQ_CMID_SERVER_NONE;
2094 mutex_exit(&root_chanp->chan_mutex);
2095 cma_destroy_id(root_idp);
2096 } else if (state == SOL_CMA_CHAN_DESTROY_WAIT &&
2097 req_nodes == 0UL && acpt_nodes == 0UL) {
2098 mutex_enter(&root_chanp->chan_mutex);
2099 cma_set_chan_state(root_chanp,
2100 SOL_CMA_CHAN_DESTROY_PENDING);
2101 root_chanp->chan_req_state = REQ_CMID_SERVER_NONE;
2102 cv_broadcast(&root_chanp->chan_destroy_cv);
2103 mutex_exit(&root_chanp->chan_mutex);
2104 }
2105 }
2106
2107 mutex_destroy(&chanp->chan_mutex);
2108 cv_destroy(&chanp->chan_destroy_cv);
2109 kmem_free(chanp, sizeof (sol_cma_chan_t));
2110 }
2111
2112 /*
2113  * Server-side TCP disconnect for an established channel.
2114  * If rdma_destroy_id() has been called for the listening
2115 * CMID and there are no more CMIDs with pending
2116 * events corresponding to the listening CMID, free
2117 * the listening CMID.
2118 *
2119 */
2120 static void
2121 cma_handle_nomore_events(sol_cma_chan_t *chanp)
2122 {
2123 struct rdma_cm_id *idp, *root_idp;
2124 sol_cma_chan_t *root_chanp;
2125 cma_chan_state_t state;
2126 ulong_t req_nodes, acpt_nodes;
2127
2128 idp = (struct rdma_cm_id *)chanp;
2129 root_idp = CHAN_LISTEN_ROOT(chanp);
2130 root_chanp = (sol_cma_chan_t *)root_idp;
2131 if (!root_chanp)
2132 return;
2133
2134 mutex_enter(&root_chanp->chan_mutex);
2135 mutex_enter(&chanp->chan_mutex);
2136 CHAN_LISTEN_ROOT(chanp) = NULL;
2137 root_chanp->chan_req_total_cnt--;
2138
2139 /*
2140 	 * Removal of the CMID from the AVL trees should already have been
2141 	 * done by now. The code below is mainly a safety net.
2142 */
2143 if (chanp->chan_req_state == REQ_CMID_ACCEPTED) {
2144 ASSERT(chanp->chan_qp_hdl);
2145 ASSERT(cma_get_acpt_idp(root_idp,
2146 chanp->chan_qp_hdl));
2147 avl_remove(&root_chanp->chan_acpt_avl_tree, idp);
2148 chanp->chan_req_state = REQ_CMID_SERVER_NONE;
2149 }
2150 if (REQ_CMID_IN_REQ_AVL_TREE(chanp)) {
2151 ASSERT(chanp->chan_session_id);
2152 ASSERT(cma_get_req_idp(root_idp,
2153 chanp->chan_session_id));
2154 avl_remove(&root_chanp->chan_req_avl_tree, idp);
2155 chanp->chan_req_state = REQ_CMID_SERVER_NONE;
2156 }
2157
2158 state = cma_get_chan_state(root_chanp);
2159 req_nodes = avl_numnodes(&root_chanp->chan_req_avl_tree);
2160 acpt_nodes = avl_numnodes(&root_chanp->chan_acpt_avl_tree);
2161 mutex_exit(&chanp->chan_mutex);
2162 mutex_exit(&root_chanp->chan_mutex);
2163 if (state == SOL_CMA_CHAN_DESTROY_PENDING && req_nodes == 0UL &&
2164 acpt_nodes == 0UL)
2165 cma_destroy_id(root_idp);
2166 }
2167
2168 extern int ib_modify_qp(struct ib_qp *, struct ib_qp_attr *, int);
2169 extern int rdma_init_qp_attr(struct rdma_cm_id *, struct ib_qp_attr *,
2170 int *);
2171
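/*
 * cma_init_ud_qp() transitions a UD QP through the INIT -> RTR -> RTS
 * states, using the attributes returned by rdma_init_qp_attr(), so the
 * QP is ready for use as soon as rdma_create_qp() returns.
 */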
2172 static int
2173 cma_init_ud_qp(sol_cma_chan_t *chanp, struct ib_qp *qp)
2174 {
2175 struct ib_qp_attr qp_attr;
2176 int qp_attr_mask, ret;
2177
2178 qp_attr.qp_state = IB_QPS_INIT;
2179 ret = rdma_init_qp_attr(&chanp->chan_rdma_cm, &qp_attr, &qp_attr_mask);
2180 if (ret)
2181 return (ret);
2182
2183 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
2184 if (ret)
2185 return (ret);
2186
2187 qp_attr.qp_state = IB_QPS_RTR;
2188 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
2189 if (ret)
2190 return (ret);
2191
2192 qp_attr.qp_state = IB_QPS_RTS;
2193 qp_attr.sq_psn = 0;
2194 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
2195
2196 return (ret);
2197 }
2198
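/*
 * cma_init_conn_qp() moves a connected-mode QP to the INIT state; the
 * remaining RTR / RTS transitions are driven later as part of
 * connection establishment.
 */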
2199 static int
2200 cma_init_conn_qp(sol_cma_chan_t *chanp, struct ib_qp *qp)
2201 {
2202 struct ib_qp_attr qp_attr;
2203 int qp_attr_mask, ret;
2204
2205 qp_attr.qp_state = IB_QPS_INIT;
2206 ret = rdma_init_qp_attr(&chanp->chan_rdma_cm, &qp_attr, &qp_attr_mask);
2207 if (ret)
2208 return (ret);
2209
2210 return (ib_modify_qp(qp, &qp_attr, qp_attr_mask));
2211 }
2212
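/* A port space is UD if it is RDMA_PS_UDP or RDMA_PS_IPOIB. */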
2213 static inline int
2214 cma_is_ud_ps(enum rdma_port_space ps)
2215 {
2216 return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
2217 }
2218
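/*
 * rdma_create_qp() creates a verbs QP on the CMID's device using the
 * supplied PD (which must belong to the same HCA as the CMID), binds
 * the QP to the CMID and performs the initial QP state transitions:
 * INIT -> RTR -> RTS for UD port spaces, INIT only for connected port
 * spaces.
 *
 * A minimal sketch of consumer usage, assuming the consumer has already
 * resolved the address / route on "cm_id" and allocated "pd" and "cq"
 * on the same device (these names are illustrative, not part of this
 * module) :
 *
 *	struct ib_qp_init_attr	attr;
 *	int			ret;
 *
 *	bzero(&attr, sizeof (attr));
 *	attr.send_cq = cq;
 *	attr.recv_cq = cq;
 *	attr.cap.max_send_wr = 16;
 *	attr.cap.max_recv_wr = 16;
 *	attr.cap.max_send_sge = 1;
 *	attr.cap.max_recv_sge = 1;
 *	attr.sq_sig_type = IB_SIGNAL_ALL_WR;
 *	attr.qp_type = IB_QPT_RC;
 *	ret = rdma_create_qp(cm_id, pd, &attr);
 *	if (ret != 0)
 *		return (ret);
 */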
2219 int
2220 rdma_create_qp(struct rdma_cm_id *idp, struct ib_pd *pd,
2221 struct ib_qp_init_attr *qp_init_attr)
2222 {
2223 sol_cma_chan_t *chanp;
2224 struct ib_qp *qp;
2225 int ret;
2226 ofs_client_t *dev_ofs_client;
2227
2228 ASSERT(idp);
2229 chanp = (sol_cma_chan_t *)idp;
2230 if (idp->device->node_guid != pd->device->node_guid)
2231 return (-EINVAL);
2232
2233 dev_ofs_client = (ofs_client_t *)pd->device->clnt_hdl;
2234 rdma_map_id2clnthdl(idp, dev_ofs_client->ibt_hdl, NULL);
2235
2236 qp = ib_create_qp(pd, qp_init_attr);
2237 if ((uintptr_t)qp >= (uintptr_t)-0xFFF) {
2238 return ((intptr_t)qp);
2239 }
2240 rdma_map_id2qphdl(idp, (void *)qp->ibt_qp);
2241
2242 if (cma_is_ud_ps(idp->ps)) {
2243 ret = cma_init_ud_qp(chanp, qp);
2244 } else {
2245 ret = cma_init_conn_qp(chanp, qp);
2246 }
2247
2248 if (ret) {
2249 goto err;
2250 }
2251
2252 idp->qp = qp;
2253 chanp->chan_qp_num = qp->qp_num;
2254 chanp->chan_is_srq = (qp->srq != NULL);
2255 return (0);
2256 err:
2257 (void) ib_destroy_qp(qp);
2258 return (ret);
2259 }
2260
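/*
 * rdma_destroy_qp() destroys the QP attached to the CMID via
 * ib_destroy_qp() and clears the CMID's QP reference. A consumer that
 * created a QP with rdma_create_qp() is expected to destroy it before
 * destroying the CMID itself.
 */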
2261 void
2262 rdma_destroy_qp(struct rdma_cm_id *idp)
2263 {
2264 ASSERT(idp);
2265 (void) ib_destroy_qp(idp->qp);
2266 idp->qp = NULL;
2267 }
2268