/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "c_synonyms.h"
#if !defined(__lint)	/* need a *_synonyms.h file */
#define	rsm_memseg_export_create	_rsm_memseg_export_create
#define	rsm_memseg_export_destroy	_rsm_memseg_export_destroy
#define	rsm_memseg_export_publish	_rsm_memseg_export_publish
#endif
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <strings.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <sys/sysmacros.h>
#include <errno.h>
#include <assert.h>
#include <malloc.h>
#include <fcntl.h>
#include <dlfcn.h>
#include <sched.h>

#include <rsmapi.h>
#include <sys/rsm/rsmndi.h>
#include <rsmlib_in.h>
#include <sys/rsm/rsm.h>

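/*
 * This file provides generic, memory-mapped implementations of the RSMAPI
 * import-side access, barrier, and local-memory-handle operations.  They are
 * installed by __rsmdefault_setops() (at the bottom of this file) into a
 * controller's segment ops vector for any entry points that the
 * controller-specific NDI library does not supply itself.
 */
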
/* lint -w2 */

extern rsm_node_id_t rsm_local_nodeid;
extern int loopback_getv(rsm_scat_gath_t *);
extern int loopback_putv(rsm_scat_gath_t *);

static rsm_ndlib_attr_t _rsm_genlib_attr = {
	B_TRUE,		/* mapping needed for put/get */
	B_FALSE		/* mapping needed for putv/getv */
};

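/*
 * Default import connect/disconnect hooks.  The generic library keeps no
 * per-connection state, so these simply return success; the unused-argument
 * self-assignments below only silence lint.
 */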
static int
__rsm_import_connect(
    rsmapi_controller_handle_t controller, rsm_node_id_t node_id,
    rsm_memseg_id_t segment_id, rsm_permission_t perm,
    rsm_memseg_import_handle_t *im_memseg) {

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_connect: enter\n"));

	controller = controller;
	node_id = node_id;
	segment_id = segment_id;
	perm = perm;
	im_memseg = im_memseg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_connect: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_import_disconnect(rsm_memseg_import_handle_t im_memseg) {

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_disconnect: enter\n"));

	im_memseg = im_memseg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_disconnect: exit\n"));

	return (RSM_SUCCESS);
}

/*
 * XXX: one day we ought to rewrite this stuff based on 64-byte atomic access.
 * We can have a new ops vector that makes that assumption.
 */

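/*
 * __rsm_get8x8/16x16/32x32/64x64: read rep_cnt elements of the given width
 * from the mapped segment, starting at byte offset 'off', into 'datap'.
 * When the segment is in implicit barrier mode, each call is bracketed by an
 * open/close of the segment's barrier.  The 'swap' argument is accepted for
 * interface compatibility but is not used by these generic routines.
 */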
static int
__rsm_get8x8(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint8_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint8_t *data_addr =
	    (uint8_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get8x8: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		datap[i] = data_addr[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get8x8: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_get16x16(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint16_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint16_t *data_addr =
	    /* LINTED */
	    (uint16_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get16x16: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		datap[i] = data_addr[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get16x16: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_get32x32(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint32_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint32_t *data_addr =
	    /* LINTED */
	    (uint32_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get32x32: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		datap[i] = data_addr[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get32x32: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_get64x64(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint64_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint64_t *data_addr =
	    /* LINTED */
	    (uint64_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get64x64: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		datap[i] = data_addr[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get64x64: exit\n"));

	return (RSM_SUCCESS);
}

	/*
	 * import side memory segment operations (write access functions):
	 */

/*
 * XXX: Each one of the following cases ought to be a separate function loaded
 * into a segment access ops vector. We determine the correct function at
 * segment connect time. When a new controller is registered, we can decode
 * its direct_access_size attribute and load the correct function. For
 * loopback we need to create a special ops vector that bypasses all of
 * this stuff.
 *
 * XXX: We need to create a special interrupt queue for the library to handle
 * partial writes in the remote process.
 */
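/*
 * __rsm_put8x8/16x16/32x32/64x64: write rep_cnt elements of the given width
 * from 'datap' into the mapped segment at byte offset 'off'.  As with the
 * get routines above, implicit barrier mode brackets each call with an
 * open/close of the segment's barrier, and 'swap' is unused here.
 */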
static int
__rsm_put8x8(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint8_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint8_t *data_addr =
	    (uint8_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put8x8: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		data_addr[i] = datap[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put8x8: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_put16x16(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint16_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint16_t *data_addr =
	    /* LINTED */
	    (uint16_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put16x16: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		data_addr[i] = datap[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put16x16: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_put32x32(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint32_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint32_t *data_addr =
	    /* LINTED */
	    (uint32_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put32x32: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		data_addr[i] = datap[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put32x32: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_put64x64(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint64_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint64_t *data_addr =
	    /* LINTED */
	    (uint64_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put64x64: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		data_addr[i] = datap[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put64x64: exit\n"));

	return (RSM_SUCCESS);
}

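/*
 * __rsm_get: block read.  Copies 'length' bytes from the mapped segment at
 * 'offset' into 'dst_addr' with bcopy(), honoring implicit barrier mode.
 */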
static int
__rsm_get(rsm_memseg_import_handle_t im_memseg, off_t offset, void *dst_addr,
    size_t length)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	int		e;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_get: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	(void) bcopy(seg->rsmseg_vaddr + offset - seg->rsmseg_mapoffset,
	    dst_addr, length);

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_get: exit\n"));

	return (RSM_SUCCESS);
}

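/*
 * __rsm_getv: scatter/gather read.  For a segment imported from the local
 * node the transfer is done in user space through loopback_getv(); the
 * segment is mapped implicitly here if the caller has only connected, and
 * any local-memory handles in the I/O vector are replaced by their virtual
 * addresses.  For a remote node the handles are replaced by segment ids and
 * the request is handed to the Kernel Agent via the RSM_IOCTL_GETV ioctl.
 * Vectors longer than RSM_MAX_IOVLEN are copied into allocated memory;
 * shorter ones use an on-stack array.
 */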
static int
__rsm_getv(rsm_scat_gath_t *sg_io)
{
	rsm_iovec_t	*iovec = sg_io->iovec;
	rsmka_iovec_t	ka_iovec_arr[RSM_MAX_IOVLEN];
	rsmka_iovec_t	*ka_iovec, *ka_iovec_start;
	rsmka_iovec_t	l_iovec_arr[RSM_MAX_IOVLEN];
	rsmka_iovec_t	*l_iovec, *l_iovec_start;
	rsmseg_handle_t *im_seg_hndl = (rsmseg_handle_t *)sg_io->remote_handle;
	rsmseg_handle_t *seg_hndl;
	int iovec_size = sizeof (rsmka_iovec_t) * sg_io->io_request_count;
	int e, i;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_getv: enter\n"));

	/*
	 * Use loopback for single node operations.
	 * replace local handles with virtual addresses
	 */

	if (im_seg_hndl->rsmseg_nodeid == rsm_local_nodeid) {
		/*
		 * To use the loopback optimization map the segment
		 * here implicitly.
		 */
		if (im_seg_hndl->rsmseg_state == IMPORT_CONNECT) {
			caddr_t	va;
			va = mmap(NULL, im_seg_hndl->rsmseg_size,
			    PROT_READ|PROT_WRITE,
			    MAP_SHARED|MAP_NORESERVE,
			    im_seg_hndl->rsmseg_fd, 0);

			if (va == MAP_FAILED) {
				DBPRINTF((RSM_LIBRARY, RSM_ERR,
				    "implicit map failed:%d\n", errno));
				if (errno == EINVAL)
					return (RSMERR_BAD_MEM_ALIGNMENT);
				else if (errno == ENOMEM || errno == ENXIO ||
				    errno == EOVERFLOW)
					return (RSMERR_BAD_LENGTH);
				else if (errno == EAGAIN)
					return (RSMERR_INSUFFICIENT_RESOURCES);
				else
					return (errno);
			}

			im_seg_hndl->rsmseg_vaddr = va;
			im_seg_hndl->rsmseg_maplen = im_seg_hndl->rsmseg_size;
			im_seg_hndl->rsmseg_mapoffset = 0;
			im_seg_hndl->rsmseg_state = IMPORT_MAP;
			im_seg_hndl->rsmseg_flags |= RSM_IMPLICIT_MAP;
		}

		if (sg_io->io_request_count > RSM_MAX_IOVLEN)
			l_iovec_start = l_iovec = malloc(iovec_size);
		else
			l_iovec_start = l_iovec = l_iovec_arr;

		bcopy((caddr_t)iovec, (caddr_t)l_iovec, iovec_size);
		for (i = 0; i < sg_io->io_request_count; i++) {
			if (l_iovec->io_type == RSM_HANDLE_TYPE) {
				/* Get the surrogate export segment handle */
				seg_hndl = (rsmseg_handle_t *)
				    l_iovec->local.handle;
				l_iovec->local.vaddr = seg_hndl->rsmseg_vaddr;
				l_iovec->io_type = RSM_VA_TYPE;
			}
			l_iovec++;
		}
		sg_io->iovec = (rsm_iovec_t *)l_iovec_start;
		e = loopback_getv(sg_io);
		sg_io->iovec = iovec;
		if (sg_io->io_request_count > RSM_MAX_IOVLEN)
			free(l_iovec_start);
		DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
		    "__rsm_getv: exit\n"));
		return (e);
	}

	/* for the Kernel Agent, replace local handles with segment ids */
	if (sg_io->io_request_count > RSM_MAX_IOVLEN)
		ka_iovec_start = ka_iovec = malloc(iovec_size);
	else
		ka_iovec_start = ka_iovec = ka_iovec_arr;

	bcopy((caddr_t)iovec, (caddr_t)ka_iovec, iovec_size);
	for (i = 0; i < sg_io->io_request_count; i++) {
		if (ka_iovec->io_type == RSM_HANDLE_TYPE) {
			seg_hndl = (rsmseg_handle_t *)ka_iovec->local.handle;
			ka_iovec->local.segid = seg_hndl->rsmseg_keyid;
		}
		ka_iovec++;
	}

	sg_io->iovec = (rsm_iovec_t *)ka_iovec_start;
	e = ioctl(im_seg_hndl->rsmseg_fd, RSM_IOCTL_GETV, sg_io);
	sg_io->iovec = iovec;

	if (sg_io->io_request_count > RSM_MAX_IOVLEN)
		free(ka_iovec_start);

	if (e < 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    " RSM_IOCTL_GETV failed\n"));
		return (errno);
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_getv: exit\n"));

	return (RSM_SUCCESS);
}

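/*
 * __rsm_put: block write.  Copies 'length' bytes from 'src_addr' into the
 * mapped segment at 'offset' with bcopy(), honoring implicit barrier mode.
 */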
static int
__rsm_put(rsm_memseg_import_handle_t im_memseg, off_t offset, void *src_addr,
    size_t length)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	int		e;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	bcopy(src_addr, seg->rsmseg_vaddr + offset - seg->rsmseg_mapoffset,
	    length);

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put: exit\n"));

	return (RSM_SUCCESS);
}

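/*
 * __rsm_putv: scatter/gather write.  Mirrors __rsm_getv(): loopback_putv()
 * handles single-node transfers (mapping the segment implicitly if needed),
 * while remote transfers are passed to the Kernel Agent through the
 * RSM_IOCTL_PUTV ioctl after local handles are replaced by segment ids.
 */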
static int
__rsm_putv(rsm_scat_gath_t *sg_io)
{
	rsm_iovec_t	*iovec = sg_io->iovec;
	rsmka_iovec_t	ka_iovec_arr[RSM_MAX_IOVLEN];
	rsmka_iovec_t	*ka_iovec, *ka_iovec_start;
	rsmka_iovec_t	l_iovec_arr[RSM_MAX_IOVLEN];
	rsmka_iovec_t	*l_iovec, *l_iovec_start;
	rsmseg_handle_t *im_seg_hndl = (rsmseg_handle_t *)sg_io->remote_handle;
	rsmseg_handle_t *seg_hndl;
	int iovec_size = sizeof (rsmka_iovec_t) * sg_io->io_request_count;
	int e, i;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_putv: enter\n"));

	/*
	 * Use loopback for single node operations.
	 * replace local handles with virtual addresses
	 */

	if (im_seg_hndl->rsmseg_nodeid == rsm_local_nodeid) {
		/*
		 * To use the loopback optimization map the segment
		 * here implicitly.
		 */
		if (im_seg_hndl->rsmseg_state == IMPORT_CONNECT) {
			caddr_t	va;
			va = mmap(NULL, im_seg_hndl->rsmseg_size,
			    PROT_READ|PROT_WRITE,
			    MAP_SHARED|MAP_NORESERVE,
			    im_seg_hndl->rsmseg_fd, 0);

			if (va == MAP_FAILED) {
				DBPRINTF((RSM_LIBRARY, RSM_ERR,
				    "implicit map failed:%d\n", errno));
				if (errno == EINVAL)
					return (RSMERR_BAD_MEM_ALIGNMENT);
				else if (errno == ENOMEM || errno == ENXIO ||
				    errno == EOVERFLOW)
					return (RSMERR_BAD_LENGTH);
				else if (errno == EAGAIN)
					return (RSMERR_INSUFFICIENT_RESOURCES);
				else
					return (errno);
			}
			im_seg_hndl->rsmseg_vaddr = va;
			im_seg_hndl->rsmseg_maplen = im_seg_hndl->rsmseg_size;
			im_seg_hndl->rsmseg_mapoffset = 0;
			im_seg_hndl->rsmseg_state = IMPORT_MAP;
			im_seg_hndl->rsmseg_flags |= RSM_IMPLICIT_MAP;
		}

		if (sg_io->io_request_count > RSM_MAX_IOVLEN)
			l_iovec_start = l_iovec = malloc(iovec_size);
		else
			l_iovec_start = l_iovec = l_iovec_arr;

		bcopy((caddr_t)iovec, (caddr_t)l_iovec, iovec_size);
		for (i = 0; i < sg_io->io_request_count; i++) {
			if (l_iovec->io_type == RSM_HANDLE_TYPE) {
				/* Get the surrogate export segment handle */
				seg_hndl = (rsmseg_handle_t *)
				    l_iovec->local.handle;
				l_iovec->local.vaddr = seg_hndl->rsmseg_vaddr;
				l_iovec->io_type = RSM_VA_TYPE;
			}
			l_iovec++;
		}
		sg_io->iovec = (rsm_iovec_t *)l_iovec_start;
		e = loopback_putv(sg_io);
		sg_io->iovec = iovec;

		if (sg_io->io_request_count > RSM_MAX_IOVLEN)
			free(l_iovec_start);

		DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
		    "__rsm_putv: exit\n"));

		return (e);
	}

	/* for the Kernel Agent, replace local handles with segment ids */
	if (sg_io->io_request_count > RSM_MAX_IOVLEN)
		ka_iovec_start = ka_iovec = malloc(iovec_size);
	else
		ka_iovec_start = ka_iovec = ka_iovec_arr;

	bcopy((caddr_t)iovec, (caddr_t)ka_iovec, iovec_size);

	for (i = 0; i < sg_io->io_request_count; i++) {
		if (ka_iovec->io_type == RSM_HANDLE_TYPE) {
			seg_hndl = (rsmseg_handle_t *)ka_iovec->local.handle;
			ka_iovec->local.segid = seg_hndl->rsmseg_keyid;
		}
		ka_iovec++;
	}

	sg_io->iovec = (rsm_iovec_t *)ka_iovec_start;
	e = ioctl(im_seg_hndl->rsmseg_fd, RSM_IOCTL_PUTV, sg_io);
	sg_io->iovec = iovec;

	if (sg_io->io_request_count > RSM_MAX_IOVLEN)
		free(ka_iovec_start);

	if (e < 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    " RSM_IOCTL_PUTV failed\n"));
		return (errno);
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_putv: exit\n"));

	return (RSM_SUCCESS);
}

	/*
	 * import side memory segment operations (barriers):
	 */
static int
__rsm_memseg_import_init_barrier(rsm_memseg_import_handle_t im_memseg,
    rsm_barrier_type_t type,
    rsm_barrier_handle_t barrier)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_init_barrier: enter\n"));

	type = type;

	if (!seg) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "invalid segment handle\n"));
		return (RSMERR_BAD_SEG_HNDL);
	}
	if (!bar) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "invalid barrier handle\n"));
		return (RSMERR_BAD_BARRIER_PTR);
	}

	/* XXX: fix later. We only support span-of-node barriers */

	bar->rsmgenbar_data = (rsm_barrier_t *)malloc(sizeof (rsm_barrier_t));
	if (bar->rsmgenbar_data == NULL) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "not enough memory\n"));
		return (RSMERR_INSUFFICIENT_MEM);
	}
	bar->rsmgenbar_seg = seg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_init_barrier: exit\n"));

	return (RSM_SUCCESS);
}

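/*
 * The open, order, and close barrier operations below are thin wrappers
 * around the RSM_IOCTL_BAR_OPEN, RSM_IOCTL_BAR_ORDER, and RSM_IOCTL_BAR_CLOSE
 * ioctls on the segment's file descriptor; the barrier data allocated by
 * __rsm_memseg_import_init_barrier() is passed through in the ioctl message.
 */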
static int
__rsm_memseg_import_open_barrier(rsm_barrier_handle_t barrier)
{
	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
	rsmseg_handle_t *seg;
	rsm_ioctlmsg_t msg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_open_barrier: enter\n"));

	if (!bar) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "invalid barrier pointer\n"));
		return (RSMERR_BAD_BARRIER_PTR);
	}

	if ((seg = bar->rsmgenbar_seg) == 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "uninitialized barrier\n"));
		return (RSMERR_BARRIER_UNINITIALIZED);
	}

/* lint -save -e718 -e746 */
	msg.bar = *(bar->rsmgenbar_data);
	if (ioctl(seg->rsmseg_fd,
	    RSM_IOCTL_BAR_OPEN, &msg) < 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    " RSM_IOCTL_BAR_OPEN failed\n"));
/* lint -restore */
		return (RSMERR_BARRIER_OPEN_FAILED);
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_open_barrier: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_memseg_import_order_barrier(rsm_barrier_handle_t barrier)
{
	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
	rsmseg_handle_t *seg;
	rsm_ioctlmsg_t msg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_order_barrier: enter\n"));

	if (!bar) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "invalid barrier\n"));
		return (RSMERR_BAD_BARRIER_PTR);
	}
	if ((seg = bar->rsmgenbar_seg) == 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "uninitialized barrier\n"));
		return (RSMERR_BARRIER_UNINITIALIZED);
	}

	msg.bar = *(bar->rsmgenbar_data);
	if (ioctl(seg->rsmseg_fd, RSM_IOCTL_BAR_ORDER, &msg) < 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "RSM_IOCTL_BAR_ORDER failed\n"));
		return (RSMERR_BARRIER_FAILURE);
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_order_barrier: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_memseg_import_close_barrier(rsm_barrier_handle_t barrier)
{
	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
	rsmseg_handle_t *seg;
	rsm_ioctlmsg_t msg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_close_barrier: enter\n"));

	if (!bar) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "invalid barrier\n"));
		return (RSMERR_BAD_BARRIER_PTR);
	}
	if ((seg = bar->rsmgenbar_seg) == 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "uninitialized barrier\n"));
		return (RSMERR_BARRIER_UNINITIALIZED);
	}

	msg.bar = *(bar->rsmgenbar_data);
	if (ioctl(seg->rsmseg_fd, RSM_IOCTL_BAR_CLOSE, &msg) < 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    " RSM_IOCTL_BAR_CLOSE failed\n"));
		return (RSMERR_BARRIER_FAILURE);
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_close_barrier: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_memseg_import_destroy_barrier(rsm_barrier_handle_t barrier)
{
	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_destroy_barrier: enter\n"));

	if (!bar) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "invalid barrier\n"));
		return (RSMERR_BAD_BARRIER_PTR);
	}

	free((void *) bar->rsmgenbar_data);

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_destroy_barrier: exit\n"));

	return (RSM_SUCCESS);
}

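/*
 * Illustrative sketch (not part of this library) of how an application
 * drives the explicit-barrier entry points through the public RSMAPI,
 * assuming "im_seg" is an already-connected import segment handle and
 * "buf"/"len" describe local data; names are as recalled from rsmapi(3LIB),
 * consult <rsmapi.h> for the exact declarations:
 *
 *	rsmapi_barrier_t bar;
 *
 *	if (rsm_memseg_import_init_barrier(im_seg, RSM_BAR_DEFAULT,
 *	    &bar) == RSM_SUCCESS) {
 *		(void) rsm_memseg_import_open_barrier(&bar);
 *		(void) rsm_memseg_import_put(im_seg, 0, buf, len);
 *		(void) rsm_memseg_import_close_barrier(&bar);
 *		(void) rsm_memseg_import_destroy_barrier(&bar);
 *	}
 *
 * In RSM_BARRIER_MODE_IMPLICIT, the open/close pair is issued automatically
 * around each put/get by the access routines earlier in this file.
 */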
/* lint -w1 */
static int
__rsm_memseg_import_get_mode(rsm_memseg_import_handle_t im_memseg,
    rsm_barrier_mode_t *mode)
{
	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_get_mode: enter\n"));

	im_memseg = im_memseg; mode = mode;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_get_mode: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_memseg_import_set_mode(rsm_memseg_import_handle_t im_memseg,
    rsm_barrier_mode_t mode)
{
	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_set_mode: enter\n"));

	im_memseg = im_memseg; mode = mode;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_set_mode: exit\n"));

	return (RSM_SUCCESS);
}

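/*
 * __rsm_create_memory_handle: build a local memory handle by creating and
 * publishing a surrogate export segment over the caller's buffer (rounded
 * up to a page multiple, which locks the memory down).  The segment is
 * published to the local node only; for lengths above RSM_MAX_HANDLE_DVMA
 * it is published with null permissions so the adapter driver does not
 * consume DVMA space until the RDMA is actually set up.
 */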
static int
__rsm_create_memory_handle(rsmapi_controller_handle_t controller,
    rsm_localmemory_handle_t *local_hndl_p,
    caddr_t local_va, size_t len)
{
	rsm_memseg_export_handle_t memseg;
	rsmapi_access_entry_t	acl[1];
	rsm_memseg_id_t segid = 0;
	size_t size;
	int e;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_create_memory_handle: enter\n"));

	/*
	 * create a surrogate segment (local memory will be locked down).
	 */
	size = roundup(len, PAGESIZE);
	e = rsm_memseg_export_create(controller, &memseg,
	    (void *)local_va, size,
	    RSM_ALLOW_REBIND);
	if (e != RSM_SUCCESS) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "export create failed\n"));
		return (e);
	}

	/*
	 * Publish the segment to the local node only.  If the segment
	 * length is very large then don't publish to the adapter driver
	 * because that will consume too much DVMA space - this is indicated
	 * to the Kernel Agent using null permissions.  DVMA binding will
	 * be done when the RDMA is set up.
	 */
	acl[0].ae_node = rsm_local_nodeid;
	if (len > RSM_MAX_HANDLE_DVMA)
		acl[0].ae_permission = 0;
	else
		acl[0].ae_permission = RSM_PERM_RDWR;

	e = rsm_memseg_export_publish(memseg, &segid, acl, 1);
	if (e != RSM_SUCCESS) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "export publish failed\n"));
		rsm_memseg_export_destroy(memseg);
		return (e);
	}

	/* Use the surrogate seghandle as the local memory handle */
	*local_hndl_p = (rsm_localmemory_handle_t)memseg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_create_memory_handle: exit\n"));

	return (e);
}

static int
__rsm_free_memory_handle(rsm_localmemory_handle_t local_handle)
{
	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_free_memory_handle: enter\n"));

	rsm_memseg_export_destroy((rsm_memseg_export_handle_t)local_handle);

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_free_memory_handle: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_get_lib_attr(rsm_ndlib_attr_t **libattrp)
{

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_get_lib_attr: enter\n"));

	*libattrp = &_rsm_genlib_attr;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_get_lib_attr: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_closedevice(rsmapi_controller_handle_t cntr_handle)
{

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_closedevice: enter\n"));

	cntr_handle = cntr_handle;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_closedevice: exit\n"));

	return (RSM_SUCCESS);
}

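/*
 * __rsmdefault_setops: called when a controller's segment ops vector is set
 * up; fills in each entry that the controller-specific (NDI) library left
 * NULL with the corresponding generic routine defined in this file.
 */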
void
__rsmdefault_setops(rsm_segops_t *segops)
{

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsmdefault_setops: enter\n"));

	if (segops->rsm_memseg_import_connect == NULL) {
		segops->rsm_memseg_import_connect = __rsm_import_connect;
	}
	if (segops->rsm_memseg_import_disconnect == NULL) {
		segops->rsm_memseg_import_disconnect = __rsm_import_disconnect;
	}

	if (segops->rsm_memseg_import_get8 == NULL) {
		segops->rsm_memseg_import_get8 = __rsm_get8x8;
	}
	if (segops->rsm_memseg_import_get16 == NULL) {
		segops->rsm_memseg_import_get16 = __rsm_get16x16;
	}
	if (segops->rsm_memseg_import_get32 == NULL) {
		segops->rsm_memseg_import_get32 = __rsm_get32x32;
	}
	if (segops->rsm_memseg_import_get64 == NULL) {
		segops->rsm_memseg_import_get64 = __rsm_get64x64;
	}
	if (segops->rsm_memseg_import_get == NULL) {
		segops->rsm_memseg_import_get = __rsm_get;
	}

	if (segops->rsm_memseg_import_put8 == NULL) {
		segops->rsm_memseg_import_put8 = __rsm_put8x8;
	}
	if (segops->rsm_memseg_import_put16 == NULL) {
		segops->rsm_memseg_import_put16 = __rsm_put16x16;
	}
	if (segops->rsm_memseg_import_put32 == NULL) {
		segops->rsm_memseg_import_put32 = __rsm_put32x32;
	}
	if (segops->rsm_memseg_import_put64 == NULL) {
		segops->rsm_memseg_import_put64 = __rsm_put64x64;
	}
	if (segops->rsm_memseg_import_put == NULL) {
		segops->rsm_memseg_import_put = __rsm_put;
	}

	if (segops->rsm_memseg_import_putv == NULL) {
		segops->rsm_memseg_import_putv = __rsm_putv;
	}

	if (segops->rsm_memseg_import_getv == NULL) {
		segops->rsm_memseg_import_getv = __rsm_getv;
	}

	if (segops->rsm_create_localmemory_handle == NULL) {
		segops->rsm_create_localmemory_handle =
		    __rsm_create_memory_handle;
	}

	if (segops->rsm_free_localmemory_handle == NULL) {
		segops->rsm_free_localmemory_handle =
		    __rsm_free_memory_handle;
	}

	/* XXX: Need to support barrier functions */
	if (segops->rsm_memseg_import_init_barrier == NULL) {
		segops->rsm_memseg_import_init_barrier =
		    __rsm_memseg_import_init_barrier;
	}
	if (segops->rsm_memseg_import_open_barrier == NULL) {
		segops->rsm_memseg_import_open_barrier =
		    __rsm_memseg_import_open_barrier;
	}
	if (segops->rsm_memseg_import_order_barrier == NULL) {
		segops->rsm_memseg_import_order_barrier =
		    __rsm_memseg_import_order_barrier;
	}
	if (segops->rsm_memseg_import_close_barrier == NULL) {
		segops->rsm_memseg_import_close_barrier =
		    __rsm_memseg_import_close_barrier;
	}
	if (segops->rsm_memseg_import_destroy_barrier == NULL) {
		segops->rsm_memseg_import_destroy_barrier =
		    __rsm_memseg_import_destroy_barrier;
	}

	if (segops->rsm_memseg_import_get_mode == NULL) {
		segops->rsm_memseg_import_get_mode =
		    __rsm_memseg_import_get_mode;
	}
	if (segops->rsm_memseg_import_set_mode == NULL) {
		segops->rsm_memseg_import_set_mode =
		    __rsm_memseg_import_set_mode;
	}

	if (segops->rsm_get_lib_attr == NULL) {
		segops->rsm_get_lib_attr =
		    __rsm_get_lib_attr;
	}

	if (segops->rsm_closedevice == NULL) {
		segops->rsm_closedevice =
		    __rsm_closedevice;
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsmdefault_setops: exit\n"));
}