1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #pragma ident "%Z%%M% %I% %E% SMI"
28
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <unistd.h>
32 #include <string.h>
33 #include <strings.h>
34 #include <sys/types.h>
35 #include <sys/stat.h>
36 #include <sys/mman.h>
37 #include <sys/uio.h>
38 #include <sys/sysmacros.h>
39 #include <unistd.h>
40 #include <errno.h>
41 #include <assert.h>
42 #include <malloc.h>
43 #include <fcntl.h>
44 #include <dlfcn.h>
45 #include <sched.h>
46
47 #include <rsmapi.h>
48 #include <sys/rsm/rsmndi.h>
49 #include <rsmlib_in.h>
50 #include <sys/rsm/rsm.h>
51
52 /* lint -w2 */
53
54 extern rsm_node_id_t rsm_local_nodeid;
55 extern int loopback_getv(rsm_scat_gath_t *);
56 extern int loopback_putv(rsm_scat_gath_t *);
57
58 static rsm_ndlib_attr_t _rsm_genlib_attr = {
59 B_TRUE, /* mapping needed for put/get */
60 B_FALSE /* mapping needed for putv/getv */
61 };
62
63 static int
64 __rsm_import_connect(
65 rsmapi_controller_handle_t controller, rsm_node_id_t node_id,
66 rsm_memseg_id_t segment_id, rsm_permission_t perm,
67 rsm_memseg_import_handle_t *im_memseg) {
68
69 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
70 "__rsm_import_connect: enter\n"));
71
72 controller = controller;
73 node_id = node_id;
74 segment_id = segment_id;
75 perm = perm;
76 im_memseg = im_memseg;
77
78 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
79 "__rsm_import_connect: exit\n"));
80
81 return (RSM_SUCCESS);
82 }
83
84 static int
85 __rsm_import_disconnect(rsm_memseg_import_handle_t im_memseg) {
86
87 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
88 "__rsm_import_disconnect: enter\n"));
89
90 im_memseg = im_memseg;
91
92 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
93 "__rsm_import_disconnect: exit\n"));
94
95 return (RSM_SUCCESS);
96 }
97
98 /*
99 * XXX: one day we ought to rewrite this stuff based on 64-byte atomic access.
100 * We can have a new ops vector that makes that assumption.
101 */
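/*
 * Illustrative only: if an interconnect guaranteed 64-byte atomic access,
 * the new ops vector hinted at above could move data in 64-byte units
 * rather than element by element.  A minimal sketch under that assumption;
 * the routine name is hypothetical and not part of this library.
 *
 *	static int
 *	__rsm_get64bytes(rsm_memseg_import_handle_t im_memseg, off_t off,
 *	    void *datap, ulong_t rep_cnt)
 *	{
 *		rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
 *		caddr_t src = seg->rsmseg_vaddr + off - seg->rsmseg_mapoffset;
 *
 *		bcopy(src, datap, rep_cnt * 64);
 *		return (RSM_SUCCESS);
 *	}
 */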
102
103 static int
104 __rsm_get8x8(rsm_memseg_import_handle_t im_memseg, off_t off,
105 uint8_t *datap,
106 ulong_t rep_cnt,
107 boolean_t swap)
108 {
109 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
110 uint8_t *data_addr =
111 (uint8_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
112 uint_t i = 0;
113 int e;
114
115 swap = swap;
116
117 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
118 "__rsm_import_get8x8: enter\n"));
119
120 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
121 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
122 (rsm_barrier_handle_t)seg->rsmseg_barrier);
123 if (e != RSM_SUCCESS) {
124 return (e);
125 }
126 }
127
128 for (i = 0; i < rep_cnt; i++) {
129 datap[i] = data_addr[i];
130 }
131
132 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
133 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
134 (rsm_barrier_handle_t)seg->rsmseg_barrier);
135 if (e != RSM_SUCCESS) {
136 return (e);
137 }
138 }
139
140 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
141 "__rsm_import_get8x8: exit\n"));
142
143 return (RSM_SUCCESS);
144 }
145
146 static int
147 __rsm_get16x16(rsm_memseg_import_handle_t im_memseg, off_t off,
148 uint16_t *datap,
149 ulong_t rep_cnt,
150 boolean_t swap)
151 {
152 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
153 uint16_t *data_addr =
154 /* LINTED */
155 (uint16_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
156 uint_t i = 0;
157 int e;
158
159 swap = swap;
160
161 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
162 "__rsm_import_get16x16: enter\n"));
163
164 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
165 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
166 (rsm_barrier_handle_t)seg->rsmseg_barrier);
167 if (e != RSM_SUCCESS) {
168 return (e);
169 }
170 }
171
172 for (i = 0; i < rep_cnt; i++) {
173 datap[i] = data_addr[i];
174 }
175
176 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
177 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
178 (rsm_barrier_handle_t)seg->rsmseg_barrier);
179 if (e != RSM_SUCCESS) {
180 return (e);
181 }
182 }
183
184 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
185 "__rsm_import_get16x16: exit\n"));
186
187 return (RSM_SUCCESS);
188 }
189
190 static int
191 __rsm_get32x32(rsm_memseg_import_handle_t im_memseg, off_t off,
192 uint32_t *datap,
193 ulong_t rep_cnt,
194 boolean_t swap)
195 {
196 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
197 uint32_t *data_addr =
198 /* LINTED */
199 (uint32_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
200 uint_t i = 0;
201 int e;
202
203 swap = swap;
204
205 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
206 "__rsm_import_get32x32: enter\n"));
207
208 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
209 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
210 (rsm_barrier_handle_t)seg->rsmseg_barrier);
211 if (e != RSM_SUCCESS) {
212 return (e);
213 }
214 }
215
216 for (i = 0; i < rep_cnt; i++) {
217 datap[i] = data_addr[i];
218 }
219
220 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
221 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
222 (rsm_barrier_handle_t)seg->rsmseg_barrier);
223 if (e != RSM_SUCCESS) {
224 return (e);
225 }
226 }
227
228 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
229 "__rsm_import_get32x32: exit\n"));
230
231 return (RSM_SUCCESS);
232 }
233
234 static int
235 __rsm_get64x64(rsm_memseg_import_handle_t im_memseg, off_t off,
236 uint64_t *datap,
237 ulong_t rep_cnt,
238 boolean_t swap)
239 {
240 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
241 uint64_t *data_addr =
242 /* LINTED */
243 (uint64_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
244 uint_t i = 0;
245 int e;
246
247 swap = swap;
248
249 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
250 "__rsm_import_get64x64: enter\n"));
251
252 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
253 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
254 (rsm_barrier_handle_t)seg->rsmseg_barrier);
255 if (e != RSM_SUCCESS) {
256 return (e);
257 }
258 }
259
260 for (i = 0; i < rep_cnt; i++) {
261 datap[i] = data_addr[i];
262 }
263
264 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
265 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
266 (rsm_barrier_handle_t)seg->rsmseg_barrier);
267 if (e != RSM_SUCCESS) {
268 return (e);
269 }
270 }
271
272 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
273 "__rsm_import_get64x64: exit\n"));
274
275 return (RSM_SUCCESS);
276 }
277
278 /*
279 * import side memory segment operations (write access functions):
280 */
281
282 /*
283 * XXX: Each one of the following cases ought to be a separate function loaded
284 * into a segment access ops vector. We determine the correct function at
285 * segment connect time. When a new controller is registered, we can decode
286 * its direct_access_size attribute and load the correct function. For
287 * loopback we need to create a special ops vector that bypasses all of
288 * this stuff.
289 *
290 * XXX: We need to create a special interrupt queue for the library to handle
291 * partial writes in the remote process.
292 */
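/*
 * Illustrative only: a minimal sketch of the connect-time selection the
 * note above describes, keyed off the controller's direct-access sizes.
 * The attr_direct_access_sizes field and RSM_DAS* flags are taken from the
 * RSMAPI controller attributes and should be verified; rsmseg_putfn is a
 * hypothetical ops-vector slot, not an existing member.
 *
 *	if (attrp->attr_direct_access_sizes & RSM_DAS64)
 *		seg->rsmseg_putfn = (void *)__rsm_put64x64;
 *	else if (attrp->attr_direct_access_sizes & RSM_DAS32)
 *		seg->rsmseg_putfn = (void *)__rsm_put32x32;
 *	else
 *		seg->rsmseg_putfn = (void *)__rsm_put8x8;
 */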
293 static int
294 __rsm_put8x8(rsm_memseg_import_handle_t im_memseg, off_t off,
295 uint8_t *datap,
296 ulong_t rep_cnt,
297 boolean_t swap)
298 {
299 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
300 uint8_t *data_addr =
301 (uint8_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
302 uint_t i = 0;
303 int e;
304
305 swap = swap;
306
307 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
308 "__rsm_put8x8: enter\n"));
309
310 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
311 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
312 (rsm_barrier_handle_t)seg->rsmseg_barrier);
313 if (e != RSM_SUCCESS) {
314 return (e);
315 }
316 }
317
318 for (i = 0; i < rep_cnt; i++) {
319 data_addr[i] = datap[i];
320 }
321
322 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
323 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
324 (rsm_barrier_handle_t)seg->rsmseg_barrier);
325 if (e != RSM_SUCCESS) {
326 return (e);
327 }
328 }
329
330 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
331 "__rsm_put8x8: exit\n"));
332
333 return (RSM_SUCCESS);
334 }
335
336 static int
337 __rsm_put16x16(rsm_memseg_import_handle_t im_memseg, off_t off,
338 uint16_t *datap,
339 ulong_t rep_cnt,
340 boolean_t swap)
341 {
342 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
343 uint16_t *data_addr =
344 /* LINTED */
345 (uint16_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
346 uint_t i = 0;
347 int e;
348
349 swap = swap;
350
351 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
352 "__rsm_put16x16: enter\n"));
353
354 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
355 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
356 (rsm_barrier_handle_t)seg->rsmseg_barrier);
357 if (e != RSM_SUCCESS) {
358 return (e);
359 }
360 }
361
362 for (i = 0; i < rep_cnt; i++) {
363 data_addr[i] = datap[i];
364 }
365
366 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
367 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
368 (rsm_barrier_handle_t)seg->rsmseg_barrier);
369 if (e != RSM_SUCCESS) {
370 return (e);
371 }
372 }
373
374 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
375 "__rsm_put16x16: exit\n"));
376
377 return (RSM_SUCCESS);
378 }
379
380 static int
381 __rsm_put32x32(rsm_memseg_import_handle_t im_memseg, off_t off,
382 uint32_t *datap,
383 ulong_t rep_cnt,
384 boolean_t swap)
385 {
386 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
387 uint32_t *data_addr =
388 /* LINTED */
389 (uint32_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
390 uint_t i = 0;
391 int e;
392
393 swap = swap;
394
395 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
396 "__rsm_put32x32: enter\n"));
397
398 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
399 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
400 (rsm_barrier_handle_t)seg->rsmseg_barrier);
401 if (e != RSM_SUCCESS) {
402 return (e);
403 }
404 }
405
406 for (i = 0; i < rep_cnt; i++) {
407 data_addr[i] = datap[i];
408 }
409
410 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
411 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
412 (rsm_barrier_handle_t)seg->rsmseg_barrier);
413 if (e != RSM_SUCCESS) {
414 return (e);
415 }
416 }
417
418 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
419 "__rsm_put32x32: exit\n"));
420
421 return (RSM_SUCCESS);
422 }
423
424 static int
425 __rsm_put64x64(rsm_memseg_import_handle_t im_memseg, off_t off,
426 uint64_t *datap,
427 ulong_t rep_cnt,
428 boolean_t swap)
429 {
430 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
431 uint64_t *data_addr =
432 /* LINTED */
433 (uint64_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
434 uint_t i = 0;
435 int e;
436
437 swap = swap;
438
439 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
440 "__rsm_put64x64: enter\n"));
441
442 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
443 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
444 (rsm_barrier_handle_t)seg->rsmseg_barrier);
445 if (e != RSM_SUCCESS) {
446 return (e);
447 }
448 }
449
450 for (i = 0; i < rep_cnt; i++) {
451 data_addr[i] = datap[i];
452 }
453
454 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
455 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
456 (rsm_barrier_handle_t)seg->rsmseg_barrier);
457 if (e != RSM_SUCCESS) {
458 return (e);
459 }
460 }
461
462 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
463 "__rsm_put64x64: exit\n"));
464
465 return (RSM_SUCCESS);
466 }
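/*
 * Illustrative only: how an application might drive the put routines above
 * through the public RSMAPI entry points.  With the segment in implicit
 * barrier mode, each call is bracketed by the open/close barrier pair seen
 * in the routines above.  "buf" and "buflen" are caller-supplied
 * placeholders and error handling is omitted.
 *
 *	uint64_t val = 42;
 *
 *	(void) rsm_memseg_import_set_mode(im_memseg,
 *	    RSM_BARRIER_MODE_IMPLICIT);
 *	(void) rsm_memseg_import_put64(im_memseg, 0, &val, 1);
 *	(void) rsm_memseg_import_put(im_memseg, 8, buf, buflen);
 */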
467
468 static int
469 __rsm_get(rsm_memseg_import_handle_t im_memseg, off_t offset, void *dst_addr,
470 size_t length)
471 {
472 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
473 int e;
474
475 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
476 "__rsm_get: enter\n"));
477
478 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
479 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
480 (rsm_barrier_handle_t)seg->rsmseg_barrier);
481 if (e != RSM_SUCCESS) {
482 return (e);
483 }
484 }
485
486 (void) bcopy(seg->rsmseg_vaddr + offset - seg->rsmseg_mapoffset,
487 dst_addr, length);
488
489 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
490 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
491 (rsm_barrier_handle_t)seg->rsmseg_barrier);
492 if (e != RSM_SUCCESS) {
493 return (e);
494 }
495 }
496
497 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
498 "__rsm_get: exit\n"));
499
500 return (RSM_SUCCESS);
501 }
502
503 static int
504 __rsm_getv(rsm_scat_gath_t *sg_io)
505 {
506 rsm_iovec_t *iovec = sg_io->iovec;
507 rsmka_iovec_t ka_iovec_arr[RSM_MAX_IOVLEN];
508 rsmka_iovec_t *ka_iovec, *ka_iovec_start;
509 rsmka_iovec_t l_iovec_arr[RSM_MAX_IOVLEN];
510 rsmka_iovec_t *l_iovec, *l_iovec_start;
511 rsmseg_handle_t *im_seg_hndl = (rsmseg_handle_t *)sg_io->remote_handle;
512 rsmseg_handle_t *seg_hndl;
513 int iovec_size = sizeof (rsmka_iovec_t) * sg_io->io_request_count;
514 int e, i;
515
516 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
517 "__rsm_getv: enter\n"));
518
519 /*
520 * Use loopback for single node operations.
521 * Replace local handles with virtual addresses.
522 */
523
524 if (im_seg_hndl->rsmseg_nodeid == rsm_local_nodeid) {
525 /*
526 * To use the loopback optimization, map the segment
527 * here implicitly.
528 */
529 if (im_seg_hndl->rsmseg_state == IMPORT_CONNECT) {
530 caddr_t va;
531 va = mmap(NULL, im_seg_hndl->rsmseg_size,
532 PROT_READ|PROT_WRITE,
533 MAP_SHARED|MAP_NORESERVE,
534 im_seg_hndl->rsmseg_fd, 0);
535
536 if (va == MAP_FAILED) {
537 DBPRINTF((RSM_LIBRARY, RSM_ERR,
538 "implicit map failed:%d\n", errno));
539 if (errno == EINVAL)
540 return (RSMERR_BAD_MEM_ALIGNMENT);
541 else if (errno == ENOMEM || errno == ENXIO ||
542 errno == EOVERFLOW)
543 return (RSMERR_BAD_LENGTH);
544 else if (errno == EAGAIN)
545 return (RSMERR_INSUFFICIENT_RESOURCES);
546 else
547 return (errno);
548 }
549
550 im_seg_hndl->rsmseg_vaddr = va;
551 im_seg_hndl->rsmseg_maplen = im_seg_hndl->rsmseg_size;
552 im_seg_hndl->rsmseg_mapoffset = 0;
553 im_seg_hndl->rsmseg_state = IMPORT_MAP;
554 im_seg_hndl->rsmseg_flags |= RSM_IMPLICIT_MAP;
555 }
556
557 if (sg_io->io_request_count > RSM_MAX_IOVLEN)
558 l_iovec_start = l_iovec = malloc(iovec_size);
559 else
560 l_iovec_start = l_iovec = l_iovec_arr;
561
562 bcopy((caddr_t)iovec, (caddr_t)l_iovec, iovec_size);
563 for (i = 0; i < sg_io->io_request_count; i++) {
564 if (l_iovec->io_type == RSM_HANDLE_TYPE) {
565 /* Get the surrogate export segment handle */
566 seg_hndl = (rsmseg_handle_t *)
567 l_iovec->local.handle;
568 l_iovec->local.vaddr = seg_hndl->rsmseg_vaddr;
569 l_iovec->io_type = RSM_VA_TYPE;
570 }
571 l_iovec++;
572 }
573 sg_io->iovec = (rsm_iovec_t *)l_iovec_start;
574 e = loopback_getv(sg_io);
575 sg_io->iovec = iovec;
576 if (sg_io->io_request_count > RSM_MAX_IOVLEN)
577 free(l_iovec_start);
578 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
579 "__rsm_getv: exit\n"));
580 return (e);
581 }
582
583 /* for the Kernel Agent, replace local handles with segment ids */
584 if (sg_io->io_request_count > RSM_MAX_IOVLEN)
585 ka_iovec_start = ka_iovec = malloc(iovec_size);
586 else
587 ka_iovec_start = ka_iovec = ka_iovec_arr;
588
589 bcopy((caddr_t)iovec, (caddr_t)ka_iovec, iovec_size);
590 for (i = 0; i < sg_io->io_request_count; i++) {
591 if (ka_iovec->io_type == RSM_HANDLE_TYPE) {
592 seg_hndl = (rsmseg_handle_t *)ka_iovec->local.handle;
593 ka_iovec->local.segid = seg_hndl->rsmseg_keyid;
594 }
595 ka_iovec++;
596 }
597
598 sg_io->iovec = (rsm_iovec_t *)ka_iovec_start;
599 e = ioctl(im_seg_hndl->rsmseg_fd, RSM_IOCTL_GETV, sg_io);
600 sg_io->iovec = iovec;
601
602 if (sg_io->io_request_count > RSM_MAX_IOVLEN)
603 free(ka_iovec_start);
604
605 if (e < 0) {
606 DBPRINTF((RSM_LIBRARY, RSM_ERR,
607 " RSM_IOCTL_GETV failed\n"));
608 return (errno);
609 }
610
611 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
612 "__rsm_getv: exit\n"));
613
614 return (RSM_SUCCESS);
615 }
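/*
 * Illustrative only: a minimal sketch of a single-element scatter/gather
 * read that ends up in the routine above.  It assumes the public
 * rsm_iovec_t members local_offset, remote_offset and transfer_length;
 * "ctlr", "im_memseg", "buf" and "buflen" are caller-supplied placeholders,
 * and the remaining rsm_scat_gath_t members and error handling are omitted.
 *
 *	rsm_localmemory_handle_t lh;
 *	rsm_iovec_t iov;
 *	rsm_scat_gath_t sg;
 *
 *	(void) rsm_create_localmemory_handle(ctlr, &lh, buf, buflen);
 *	iov.io_type = RSM_HANDLE_TYPE;
 *	iov.local.handle = lh;
 *	iov.local_offset = 0;
 *	iov.remote_offset = 0;
 *	iov.transfer_length = buflen;
 *	sg.remote_handle = im_memseg;
 *	sg.io_request_count = 1;
 *	sg.iovec = &iov;
 *	(void) rsm_memseg_import_getv(&sg);
 *	(void) rsm_free_localmemory_handle(lh);
 */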
616
617
618 static int
619 __rsm_put(rsm_memseg_import_handle_t im_memseg, off_t offset, void *src_addr,
620 size_t length)
621 {
622 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
623 int e;
624
625 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
626 "__rsm_put: enter\n"));
627
628 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
629 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
630 (rsm_barrier_handle_t)seg->rsmseg_barrier);
631 if (e != RSM_SUCCESS) {
632 return (e);
633 }
634 }
635
636 bcopy(src_addr, seg->rsmseg_vaddr + offset - seg->rsmseg_mapoffset,
637 length);
638
639 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
640 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
641 (rsm_barrier_handle_t)seg->rsmseg_barrier);
642 if (e != RSM_SUCCESS) {
643 return (e);
644 }
645 }
646
647 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
648 "__rsm_put: exit\n"));
649
650 return (RSM_SUCCESS);
651 }
652
653 static int
654 __rsm_putv(rsm_scat_gath_t *sg_io)
655 {
656 rsm_iovec_t *iovec = sg_io->iovec;
657 rsmka_iovec_t ka_iovec_arr[RSM_MAX_IOVLEN];
658 rsmka_iovec_t *ka_iovec, *ka_iovec_start;
659 rsmka_iovec_t l_iovec_arr[RSM_MAX_IOVLEN];
660 rsmka_iovec_t *l_iovec, *l_iovec_start;
661 rsmseg_handle_t *im_seg_hndl = (rsmseg_handle_t *)sg_io->remote_handle;
662 rsmseg_handle_t *seg_hndl;
663 int iovec_size = sizeof (rsmka_iovec_t) * sg_io->io_request_count;
664 int e, i;
665
666 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
667 "__rsm_putv: enter\n"));
668
669 /*
670 * Use loopback for single node operations.
671 * Replace local handles with virtual addresses.
672 */
673
674 if (im_seg_hndl->rsmseg_nodeid == rsm_local_nodeid) {
675 /*
676 * To use the loopback optimization, map the segment
677 * here implicitly.
678 */
679 if (im_seg_hndl->rsmseg_state == IMPORT_CONNECT) {
680 caddr_t va;
681 va = mmap(NULL, im_seg_hndl->rsmseg_size,
682 PROT_READ|PROT_WRITE,
683 MAP_SHARED|MAP_NORESERVE,
684 im_seg_hndl->rsmseg_fd, 0);
685
686 if (va == MAP_FAILED) {
687 DBPRINTF((RSM_LIBRARY, RSM_ERR,
688 "implicit map failed:%d\n", errno));
689 if (errno == EINVAL)
690 return (RSMERR_BAD_MEM_ALIGNMENT);
691 else if (errno == ENOMEM || errno == ENXIO ||
692 errno == EOVERFLOW)
693 return (RSMERR_BAD_LENGTH);
694 else if (errno == EAGAIN)
695 return (RSMERR_INSUFFICIENT_RESOURCES);
696 else
697 return (errno);
698 }
699 im_seg_hndl->rsmseg_vaddr = va;
700 im_seg_hndl->rsmseg_maplen = im_seg_hndl->rsmseg_size;
701 im_seg_hndl->rsmseg_mapoffset = 0;
702 im_seg_hndl->rsmseg_state = IMPORT_MAP;
703 im_seg_hndl->rsmseg_flags |= RSM_IMPLICIT_MAP;
704 }
705
706 if (sg_io->io_request_count > RSM_MAX_IOVLEN)
707 l_iovec_start = l_iovec = malloc(iovec_size);
708 else
709 l_iovec_start = l_iovec = l_iovec_arr;
710
711 bcopy((caddr_t)iovec, (caddr_t)l_iovec, iovec_size);
712 for (i = 0; i < sg_io->io_request_count; i++) {
713 if (l_iovec->io_type == RSM_HANDLE_TYPE) {
714 /* Get the surrogate export segment handle */
715 seg_hndl = (rsmseg_handle_t *)
716 l_iovec->local.handle;
717 l_iovec->local.vaddr = seg_hndl->rsmseg_vaddr;
718 l_iovec->io_type = RSM_VA_TYPE;
719 }
720 l_iovec++;
721 }
722 sg_io->iovec = (rsm_iovec_t *)l_iovec_start;
723 e = loopback_putv(sg_io);
724 sg_io->iovec = iovec;
725
726 if (sg_io->io_request_count > RSM_MAX_IOVLEN)
727 free(l_iovec_start);
728
729 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
730 "__rsm_putv: exit\n"));
731
732
733 return (e);
734 }
735
736 /* for the Kernel Agent, replace local handles with segment ids */
737 if (sg_io->io_request_count > RSM_MAX_IOVLEN)
738 ka_iovec_start = ka_iovec = malloc(iovec_size);
739 else
740 ka_iovec_start = ka_iovec = ka_iovec_arr;
741
742 bcopy((caddr_t)iovec, (caddr_t)ka_iovec, iovec_size);
743
744 for (i = 0; i < sg_io->io_request_count; i++) {
745 if (ka_iovec->io_type == RSM_HANDLE_TYPE) {
746 seg_hndl = (rsmseg_handle_t *)ka_iovec->local.handle;
747 ka_iovec->local.segid = seg_hndl->rsmseg_keyid;
748 }
749 ka_iovec++;
750 }
751
752 sg_io->iovec = (rsm_iovec_t *)ka_iovec_start;
753 e = ioctl(im_seg_hndl->rsmseg_fd, RSM_IOCTL_PUTV, sg_io);
754 sg_io->iovec = iovec;
755
756 if (sg_io->io_request_count > RSM_MAX_IOVLEN)
757 free(ka_iovec_start);
758
759 if (e < 0) {
760 DBPRINTF((RSM_LIBRARY, RSM_ERR,
761 " RSM_IOCTL_PUTV failed\n"));
762 return (errno);
763 }
764
765 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
766 "__rsm_putv: exit\n"));
767
768 return (RSM_SUCCESS);
769 }
770
771 /*
772 * import side memory segment operations (barriers):
773 */
774 static int
775 __rsm_memseg_import_init_barrier(rsm_memseg_import_handle_t im_memseg,
776 rsm_barrier_type_t type,
777 rsm_barrier_handle_t barrier)
778 {
779 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
780 rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
781
782 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
783 ""
784 "__rsm_memseg_import_init_barrier: enter\n"));
785
786 type = type;
787
788 if (!seg) {
789 DBPRINTF((RSM_LIBRARY, RSM_ERR,
790 "invalid segment handle\n"));
791 return (RSMERR_BAD_SEG_HNDL);
792 }
793 if (!bar) {
794 DBPRINTF((RSM_LIBRARY, RSM_ERR,
795 "invalid barrier handle\n"));
796 return (RSMERR_BAD_BARRIER_PTR);
797 }
798
799 /* XXX: fix later. We only support span-of-node barriers */
800
801 bar->rsmgenbar_data = (rsm_barrier_t *)malloc(sizeof (rsm_barrier_t));
802 if (bar->rsmgenbar_data == NULL) {
803 DBPRINTF((RSM_LIBRARY, RSM_ERR,
804 "not enough memory\n"));
805 return (RSMERR_INSUFFICIENT_MEM);
806 }
807 bar->rsmgenbar_seg = seg;
808
809 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
810 "__rsm_memseg_import_init_barrier: exit\n"));
811
812 return (RSM_SUCCESS);
813 }
814
815 static int
816 __rsm_memseg_import_open_barrier(rsm_barrier_handle_t barrier)
817 {
818 rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
819 rsmseg_handle_t *seg;
820 rsm_ioctlmsg_t msg;
821
822 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
823 "__rsm_memseg_import_open_barrier: enter\n"));
824
825 if (!bar) {
826 DBPRINTF((RSM_LIBRARY, RSM_ERR,
827 "invalid barrier pointer\n"));
828 return (RSMERR_BAD_BARRIER_PTR);
829 }
830
831 if ((seg = bar->rsmgenbar_seg) == 0) {
832 DBPRINTF((RSM_LIBRARY, RSM_ERR,
833 "uninitialized barrier\n"));
834 return (RSMERR_BARRIER_UNINITIALIZED);
835 }
836
837 /* lint -save -e718 -e746 */
838 msg.bar = *(bar->rsmgenbar_data);
839 if (ioctl(seg->rsmseg_fd,
840 RSM_IOCTL_BAR_OPEN, &msg) < 0) {
841 DBPRINTF((RSM_LIBRARY, RSM_ERR,
842 " RSM_IOCTL_BAR_OPEN failed\n"));
843 /* lint -restore */
844 return (RSMERR_BARRIER_OPEN_FAILED);
845 }
846
847 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
848 "__rsm_memseg_import_open_barrier: exit\n"));
849
850 return (RSM_SUCCESS);
851 }
852
853 static int
854 __rsm_memseg_import_order_barrier(rsm_barrier_handle_t barrier)
855 {
856 rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
857 rsmseg_handle_t *seg;
858 rsm_ioctlmsg_t msg;
859
860 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
861 "__rsm_memseg_import_order_barrier: enter\n"));
862
863 if (!bar) {
864 DBPRINTF((RSM_LIBRARY, RSM_ERR,
865 "invalid barrier\n"));
866 return (RSMERR_BAD_BARRIER_PTR);
867 }
868 if ((seg = bar->rsmgenbar_seg) == 0) {
869 DBPRINTF((RSM_LIBRARY, RSM_ERR,
870 "uninitialized barrier\n"));
871 return (RSMERR_BARRIER_UNINITIALIZED);
872 }
873
874 msg.bar = *(bar->rsmgenbar_data);
875 if (ioctl(seg->rsmseg_fd, RSM_IOCTL_BAR_ORDER, &msg) < 0) {
876 DBPRINTF((RSM_LIBRARY, RSM_ERR,
877 "RSM_IOCTL_BAR_ORDER failed\n"));
878 return (RSMERR_BARRIER_FAILURE);
879 }
880
881 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
882 "__rsm_memseg_import_order_barrier: exit\n"));
883
884 return (RSM_SUCCESS);
885 }
886
887 static int
888 __rsm_memseg_import_close_barrier(rsm_barrier_handle_t barrier)
889 {
890 rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
891 rsmseg_handle_t *seg;
892 rsm_ioctlmsg_t msg;
893
894 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
895 "__rsm_memseg_import_close_barrier: enter\n"));
896
897 if (!bar) {
898 DBPRINTF((RSM_LIBRARY, RSM_ERR,
899 "invalid barrier\n"));
900 return (RSMERR_BAD_BARRIER_PTR);
901 }
902 if ((seg = bar->rsmgenbar_seg) == 0) {
903 DBPRINTF((RSM_LIBRARY, RSM_ERR,
904 "uninitialized barrier\n"));
905 return (RSMERR_BARRIER_UNINITIALIZED);
906 }
907
908 msg.bar = *(bar->rsmgenbar_data);
909 if (ioctl(seg->rsmseg_fd, RSM_IOCTL_BAR_CLOSE, &msg) < 0) {
910 DBPRINTF((RSM_LIBRARY, RSM_ERR,
911 " RSM_IOCTL_BAR_CLOSE failed\n"));
912 return (RSMERR_BARRIER_FAILURE);
913 }
914
915 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
916 "__rsm_memseg_import_close_barrier: exit\n"));
917
918 return (RSM_SUCCESS);
919 }
920
921 static int
922 __rsm_memseg_import_destroy_barrier(rsm_barrier_handle_t barrier)
923 {
924 rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
925
926 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
927 "__rsm_memseg_import_destroy_barrier: enter\n"));
928
929 if (!bar) {
930 DBPRINTF((RSM_LIBRARY, RSM_ERR,
931 "invalid barrier\n"));
932 return (RSMERR_BAD_BARRIER_PTR);
933 }
934
935 free((void *) bar->rsmgenbar_data);
936
937 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
938 "__rsm_memseg_import_destroy_barrier: exit\n"));
939
940 return (RSM_SUCCESS);
941 }
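/*
 * Illustrative only: the explicit-barrier sequence the routines above
 * implement, as a caller would drive it through the public RSMAPI entry
 * points.  RSM_BAR_DEFAULT is assumed to be the default barrier type;
 * "buf" and "buflen" are placeholders and error handling is omitted.
 *
 *	rsmapi_barrier_t bar;
 *
 *	(void) rsm_memseg_import_init_barrier(im_memseg, RSM_BAR_DEFAULT,
 *	    &bar);
 *	(void) rsm_memseg_import_open_barrier(&bar);
 *	(void) rsm_memseg_import_put(im_memseg, 0, buf, buflen);
 *	(void) rsm_memseg_import_close_barrier(&bar);
 *	(void) rsm_memseg_import_destroy_barrier(&bar);
 */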
942
943 /* lint -w1 */
944 static int
945 __rsm_memseg_import_get_mode(rsm_memseg_import_handle_t im_memseg,
946 rsm_barrier_mode_t *mode)
947 {
948 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
949 "__rsm_memseg_import_get_mode: enter\n"));
950
951 im_memseg = im_memseg; mode = mode;
952
953 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
954 "__rsm_memseg_import_get_mode: exit\n"));
955
956 return (RSM_SUCCESS);
957 }
958 static int
959 __rsm_memseg_import_set_mode(rsm_memseg_import_handle_t im_memseg,
960 rsm_barrier_mode_t mode)
961 {
962 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
963 "__rsm_memseg_import_set_mode: enter\n"));
964
965 im_memseg = im_memseg; mode = mode;
966
967 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
968 "__rsm_memseg_import_set_mode: exit\n"));
969
970 return (RSM_SUCCESS);
971 }
972
973 static int
974 __rsm_create_memory_handle(rsmapi_controller_handle_t controller,
975 rsm_localmemory_handle_t *local_hndl_p,
976 caddr_t local_va, size_t len)
977 {
978 rsm_memseg_export_handle_t memseg;
979 rsmapi_access_entry_t acl[1];
980 rsm_memseg_id_t segid = 0;
981 size_t size;
982 int e;
983
984
985 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
986 "__rsm_create_memory_handle: enter\n"));
987
988 /*
989 * create a surrogate segment (local memory will be locked down).
990 */
991 size = roundup(len, PAGESIZE);
992 e = rsm_memseg_export_create(controller, &memseg,
993 (void *)local_va, size,
994 RSM_ALLOW_REBIND);
995 if (e != RSM_SUCCESS) {
996 DBPRINTF((RSM_LIBRARY, RSM_ERR,
997 "export create failed\n"));
998 return (e);
999 }
1000
1001 /*
1002 * Publish the segment to the local node only. If the segment
1003 * length is very large then don't publish to the adapter driver
1004 * because that will consume too much DVMA space - this is indicated
1005 * to the Kernel Agent using null permissions. DVMA binding will
1006 * be done when the RDMA is set up.
1007 */
1008 acl[0].ae_node = rsm_local_nodeid;
1009 if (len > RSM_MAX_HANDLE_DVMA)
1010 acl[0].ae_permission = 0;
1011 else
1012 acl[0].ae_permission = RSM_PERM_RDWR;
1013
1014 e = rsm_memseg_export_publish(memseg, &segid, acl, 1);
1015 if (e != RSM_SUCCESS) {
1016 DBPRINTF((RSM_LIBRARY, RSM_ERR,
1017 "export publish failed\n"));
1018 rsm_memseg_export_destroy(memseg);
1019 return (e);
1020 }
1021
1022 /* Use the surrogate seghandle as the local memory handle */
1023 *local_hndl_p = (rsm_localmemory_handle_t)memseg;
1024
1025 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1026 "__rsm_create_memory_handle: exit\n"));
1027
1028 return (e);
1029 }
1030
1031 static int
1032 __rsm_free_memory_handle(rsm_localmemory_handle_t local_handle)
1033 {
1034 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1035 "__rsm_free_memory_handle: enter\n"));
1036
1037 rsm_memseg_export_destroy((rsm_memseg_export_handle_t)local_handle);
1038
1039 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1040 "__rsm_free_memory_handle: exit\n"));
1041
1042 return (RSM_SUCCESS);
1043 }
1044
1045 static int
1046 __rsm_get_lib_attr(rsm_ndlib_attr_t **libattrp)
1047 {
1048
1049 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1050 "__rsm_get_lib_attr: enter\n"));
1051
1052 *libattrp = &_rsm_genlib_attr;
1053
1054 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1055 "__rsm_get_lib_attr: exit\n"));
1056
1057 return (RSM_SUCCESS);
1058 }
1059
1060 static int
1061 __rsm_closedevice(rsmapi_controller_handle_t cntr_handle)
1062 {
1063
1064 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1065 "__rsm_closedevice: enter\n"));
1066
1067 cntr_handle = cntr_handle;
1068
1069 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1070 "__rsm_closedevice: exit\n"));
1071
1072 return (RSM_SUCCESS);
1073 }
1074
1075 void
1076 __rsmdefault_setops(rsm_segops_t *segops)
1077 {
1078
1079 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1080 "__rsmdefault_setops: enter\n"));
1081
1082 if (segops->rsm_memseg_import_connect == NULL) {
1083 segops->rsm_memseg_import_connect = __rsm_import_connect;
1084 }
1085 if (segops->rsm_memseg_import_disconnect == NULL) {
1086 segops->rsm_memseg_import_disconnect = __rsm_import_disconnect;
1087 }
1088
1089 if (segops->rsm_memseg_import_get8 == NULL) {
1090 segops->rsm_memseg_import_get8 = __rsm_get8x8;
1091 }
1092 if (segops->rsm_memseg_import_get16 == NULL) {
1093 segops->rsm_memseg_import_get16 = __rsm_get16x16;
1094 }
1095 if (segops->rsm_memseg_import_get32 == NULL) {
1096 segops->rsm_memseg_import_get32 = __rsm_get32x32;
1097 }
1098 if (segops->rsm_memseg_import_get64 == NULL) {
1099 segops->rsm_memseg_import_get64 = __rsm_get64x64;
1100 }
1101 if (segops->rsm_memseg_import_get == NULL) {
1102 segops->rsm_memseg_import_get = __rsm_get;
1103 }
1104
1105 if (segops->rsm_memseg_import_put8 == NULL) {
1106 segops->rsm_memseg_import_put8 = __rsm_put8x8;
1107 }
1108 if (segops->rsm_memseg_import_put16 == NULL) {
1109 segops->rsm_memseg_import_put16 = __rsm_put16x16;
1110 }
1111 if (segops->rsm_memseg_import_put32 == NULL) {
1112 segops->rsm_memseg_import_put32 = __rsm_put32x32;
1113 }
1114 if (segops->rsm_memseg_import_put64 == NULL) {
1115 segops->rsm_memseg_import_put64 = __rsm_put64x64;
1116 }
1117 if (segops->rsm_memseg_import_put == NULL) {
1118 segops->rsm_memseg_import_put = __rsm_put;
1119 }
1120
1121 if (segops->rsm_memseg_import_putv == NULL) {
1122 segops->rsm_memseg_import_putv = __rsm_putv;
1123 }
1124
1125 if (segops->rsm_memseg_import_getv == NULL) {
1126 segops->rsm_memseg_import_getv = __rsm_getv;
1127 }
1128
1129 if (segops->rsm_create_localmemory_handle == NULL) {
1130 segops->rsm_create_localmemory_handle =
1131 __rsm_create_memory_handle;
1132 }
1133
1134 if (segops->rsm_free_localmemory_handle == NULL) {
1135 segops->rsm_free_localmemory_handle =
1136 __rsm_free_memory_handle;
1137 }
1138
1139 /* XXX: Need to support barrier functions */
1140 if (segops->rsm_memseg_import_init_barrier == NULL) {
1141 segops->rsm_memseg_import_init_barrier =
1142 __rsm_memseg_import_init_barrier;
1143 }
1144 if (segops->rsm_memseg_import_open_barrier == NULL) {
1145 segops->rsm_memseg_import_open_barrier =
1146 __rsm_memseg_import_open_barrier;
1147 }
1148 if (segops->rsm_memseg_import_order_barrier == NULL) {
1149 segops->rsm_memseg_import_order_barrier =
1150 __rsm_memseg_import_order_barrier;
1151 }
1152 if (segops->rsm_memseg_import_close_barrier == NULL) {
1153 segops->rsm_memseg_import_close_barrier =
1154 __rsm_memseg_import_close_barrier;
1155 }
1156 if (segops->rsm_memseg_import_destroy_barrier == NULL) {
1157 segops->rsm_memseg_import_destroy_barrier =
1158 __rsm_memseg_import_destroy_barrier;
1159 }
1160
1161 if (segops->rsm_memseg_import_get_mode == NULL) {
1162 segops->rsm_memseg_import_get_mode =
1163 __rsm_memseg_import_get_mode;
1164 }
1165 if (segops->rsm_memseg_import_set_mode == NULL) {
1166 segops->rsm_memseg_import_set_mode =
1167 __rsm_memseg_import_set_mode;
1168 }
1169
1170 if (segops->rsm_get_lib_attr == NULL) {
1171 segops->rsm_get_lib_attr =
1172 __rsm_get_lib_attr;
1173 }
1174
1175 if (segops->rsm_closedevice == NULL) {
1176 segops->rsm_closedevice =
1177 __rsm_closedevice;
1178 }
1179
1180
1181 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1182 "__rsmdefault_setops: exit\n"));
1183
1184 }
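/*
 * Illustrative only: a sketch of how a driver-specific NDI library can rely
 * on __rsmdefault_setops().  The library supplies only the entry points it
 * implements natively (a hypothetical putv/getv pair here) and leaves the
 * rest NULL; the generic layer then calls this routine to fill in the
 * defaults above.  How the ops vector is handed to the generic layer is
 * not shown.
 *
 *	static rsm_segops_t my_segops = {
 *		.rsm_memseg_import_putv = my_putv,
 *		.rsm_memseg_import_getv = my_getv,
 *	};
 *
 *	__rsmdefault_setops(&my_segops);
 */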
1185