xref: /titanic_41/usr/src/lib/librsm/common/rsmgen.c (revision fd9cb95cbb2f626355a60efb9d02c5f0a33c10e6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright (c) 1999-2001 by Sun Microsystems, Inc.
24  * All rights reserved.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include "synonyms.h"
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <unistd.h>
33 #include <string.h>
34 #include <strings.h>
35 #include <sys/types.h>
36 #include <sys/stat.h>
37 #include <sys/mman.h>
38 #include <sys/uio.h>
39 #include <sys/sysmacros.h>
41 #include <errno.h>
42 #include <assert.h>
43 #include <malloc.h>
44 #include <fcntl.h>
45 #include <dlfcn.h>
46 #include <sched.h>
47 
48 #include <rsmapi.h>
49 #include <sys/rsm/rsmndi.h>
50 #include <rsmlib_in.h>
51 #include <sys/rsm/rsm.h>
52 
53 /* lint -w2 */
54 
55 extern rsm_node_id_t rsm_local_nodeid;
56 extern int loopback_getv(rsm_scat_gath_t *);
57 extern int loopback_putv(rsm_scat_gath_t *);
58 
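/*
 * Generic library attributes returned by __rsm_get_lib_attr(): a segment
 * must be mapped before the generic put/get routines below can be used,
 * while putv/getv handle their own mapping (or go through the kernel agent).
 */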
59 static rsm_ndlib_attr_t _rsm_genlib_attr = {
60 	B_TRUE,		/* mapping needed for put/get */
61 	B_FALSE		/* mapping needed for putv/getv */
62 };
63 
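/*
 * Default import connect/disconnect hooks.  The generic library needs no
 * per-connection setup or teardown, so these simply consume their arguments
 * (to keep lint quiet) and return success.
 */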
64 static int
65 __rsm_import_connect(
66     rsmapi_controller_handle_t controller, rsm_node_id_t node_id,
67     rsm_memseg_id_t segment_id, rsm_permission_t perm,
68     rsm_memseg_import_handle_t *im_memseg) {
69 
70 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
71 	    "__rsm_import_connect: enter\n"));
72 
73 	controller = controller;
74 	node_id = node_id;
75 	segment_id = segment_id;
76 	perm = perm;
77 	im_memseg = im_memseg;
78 
79 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
80 	    "__rsm_import_connect: exit\n"));
81 
82 	return (RSM_SUCCESS);
83 }
84 
85 static int
86 __rsm_import_disconnect(rsm_memseg_import_handle_t im_memseg) {
87 
88 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
89 	    "__rsm_import_disconnect: enter\n"));
90 
91 	im_memseg = im_memseg;
92 
93 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
94 	    "__rsm_import_disconnect: exit\n"));
95 
96 	return (RSM_SUCCESS);
97 }
98 
99 /*
100  * XXX: One day we ought to rewrite this stuff based on 64-byte atomic access.
101  * We can have a new ops vector that makes that assumption.
102  */
103 
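/*
 * import side memory segment operations (read access functions):
 *
 * __rsm_get8x8/16x16/32x32/64x64 read rep_cnt elements of the given width
 * from the mapped segment, starting at byte offset off (adjusted by
 * rsmseg_mapoffset when the mapping does not begin at offset 0), into datap.
 * When the segment is in implicit barrier mode each access is bracketed by
 * open/close barrier calls through the segment's ops vector.  The swap
 * argument is accepted for interface compatibility but no byte swapping is
 * performed here.
 */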
104 static int
105 __rsm_get8x8(rsm_memseg_import_handle_t im_memseg, off_t off,
106     uint8_t *datap,
107     ulong_t rep_cnt,
108     boolean_t swap)
109 {
110 	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
111 	uint8_t *data_addr =
112 	    (uint8_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
113 	uint_t i = 0;
114 	int	e;
115 
116 	swap = swap;
117 
118 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
119 	    "__rsm_get8x8: enter\n"));
120 
121 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
122 		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
123 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
124 		if (e != RSM_SUCCESS) {
125 			return (e);
126 		}
127 	}
128 
129 	for (i = 0; i < rep_cnt; i++) {
130 		datap[i] = data_addr[i];
131 	}
132 
133 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
134 		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
135 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
136 		if (e != RSM_SUCCESS) {
137 			return (e);
138 		}
139 	}
140 
141 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
142 	    "__rsm_get8x8: exit\n"));
143 
144 	return (RSM_SUCCESS);
145 }
146 
147 static int
148 __rsm_get16x16(rsm_memseg_import_handle_t im_memseg, off_t off,
149     uint16_t *datap,
150     ulong_t rep_cnt,
151     boolean_t swap)
152 {
153 	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
154 	uint16_t *data_addr =
155 	    /* LINTED */
156 	    (uint16_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
157 	uint_t i = 0;
158 	int	e;
159 
160 	swap = swap;
161 
162 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
163 	    "__rsm_get16x16: enter\n"));
164 
165 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
166 		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
167 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
168 		if (e != RSM_SUCCESS) {
169 			return (e);
170 		}
171 	}
172 
173 	for (i = 0; i < rep_cnt; i++) {
174 		datap[i] = data_addr[i];
175 	}
176 
177 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
178 		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
179 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
180 		if (e != RSM_SUCCESS) {
181 			return (e);
182 		}
183 	}
184 
185 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
186 	    "__rsm_get16x16: exit\n"));
187 
188 	return (RSM_SUCCESS);
189 }
190 
191 static int
192 __rsm_get32x32(rsm_memseg_import_handle_t im_memseg, off_t off,
193     uint32_t *datap,
194     ulong_t rep_cnt,
195     boolean_t swap)
196 {
197 	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
198 	uint32_t *data_addr =
199 	    /* LINTED */
200 	    (uint32_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
201 	uint_t i = 0;
202 	int	e;
203 
204 	swap = swap;
205 
206 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
207 	    "__rsm_get32x32: enter\n"));
208 
209 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
210 		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
211 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
212 		if (e != RSM_SUCCESS) {
213 			return (e);
214 		}
215 	}
216 
217 	for (i = 0; i < rep_cnt; i++) {
218 		datap[i] = data_addr[i];
219 	}
220 
221 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
222 		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
223 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
224 		if (e != RSM_SUCCESS) {
225 			return (e);
226 		}
227 	}
228 
229 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
230 	    "__rsm_get32x32: exit\n"));
231 
232 	return (RSM_SUCCESS);
233 }
234 
235 static int
236 __rsm_get64x64(rsm_memseg_import_handle_t im_memseg, off_t off,
237     uint64_t *datap,
238     ulong_t rep_cnt,
239     boolean_t swap)
240 {
241 	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
242 	uint64_t *data_addr =
243 	    /* LINTED */
244 	    (uint64_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
245 	uint_t i = 0;
246 	int	e;
247 
248 	swap = swap;
249 
250 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
251 	    "__rsm_get64x64: enter\n"));
252 
253 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
254 		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
255 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
256 		if (e != RSM_SUCCESS) {
257 			return (e);
258 		}
259 	}
260 
261 	for (i = 0; i < rep_cnt; i++) {
262 		datap[i] = data_addr[i];
263 	}
264 
265 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
266 		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
267 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
268 		if (e != RSM_SUCCESS) {
269 			return (e);
270 		}
271 	}
272 
273 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
274 	    "__rsm_get64x64: exit\n"));
275 
276 	return (RSM_SUCCESS);
277 }
278 
279 	/*
280 	 * import side memory segment operations (write access functions):
281 	 */
282 
283 /*
284  * XXX: Each one of the following cases ought to be a separate function loaded
285  * into a segment access ops vector. We determine the correct function at
286  * segment connect time. When a new controller is registered, we can decode
287  * its direct_access_size attribute and load the correct function. For
288  * loopback we need to create a special ops vector that bypasses all of
289  * this stuff.
290  *
291  * XXX: We need to create a special interrupt queue for the library to handle
292  * partial writes in the remote process.
293  */
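
/*
 * __rsm_put8x8/16x16/32x32/64x64 mirror the get routines above: they write
 * rep_cnt elements of the given width from datap into the mapped segment at
 * byte offset off, opening and closing an implicit barrier around the access
 * when the segment is in implicit barrier mode.  As with the get routines,
 * swap is ignored.
 */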
294 static int
295 __rsm_put8x8(rsm_memseg_import_handle_t im_memseg, off_t off,
296     uint8_t *datap,
297     ulong_t rep_cnt,
298     boolean_t swap)
299 {
300 	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
301 	uint8_t *data_addr =
302 	    (uint8_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
303 	uint_t i = 0;
304 	int	e;
305 
306 	swap = swap;
307 
308 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
309 	    "__rsm_put8x8: enter\n"));
310 
311 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
312 		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
313 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
314 		if (e != RSM_SUCCESS) {
315 			return (e);
316 		}
317 	}
318 
319 	for (i = 0; i < rep_cnt; i++) {
320 		data_addr[i] = datap[i];
321 	}
322 
323 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
324 		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
325 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
326 		if (e != RSM_SUCCESS) {
327 			return (e);
328 		}
329 	}
330 
331 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
332 	    "__rsm_put8x8: exit\n"));
333 
334 	return (RSM_SUCCESS);
335 }
336 
337 static int
338 __rsm_put16x16(rsm_memseg_import_handle_t im_memseg, off_t off,
339     uint16_t *datap,
340     ulong_t rep_cnt,
341     boolean_t swap)
342 {
343 	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
344 	uint16_t *data_addr =
345 	    /* LINTED */
346 	    (uint16_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
347 	uint_t i = 0;
348 	int	e;
349 
350 	swap = swap;
351 
352 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
353 	    "__rsm_put16x16: enter\n"));
354 
355 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
356 		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
357 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
358 		if (e != RSM_SUCCESS) {
359 			return (e);
360 		}
361 	}
362 
363 	for (i = 0; i < rep_cnt; i++) {
364 		data_addr[i] = datap[i];
365 	}
366 
367 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
368 		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
369 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
370 		if (e != RSM_SUCCESS) {
371 			return (e);
372 		}
373 	}
374 
375 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
376 	    "__rsm_put16x16: exit\n"));
377 
378 	return (RSM_SUCCESS);
379 }
380 
381 static int
382 __rsm_put32x32(rsm_memseg_import_handle_t im_memseg, off_t off,
383     uint32_t *datap,
384     ulong_t rep_cnt,
385     boolean_t swap)
386 {
387 	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
388 	uint32_t *data_addr =
389 	    /* LINTED */
390 	    (uint32_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
391 	uint_t i = 0;
392 	int	e;
393 
394 	swap = swap;
395 
396 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
397 	    "__rsm_put32x32: enter\n"));
398 
399 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
400 		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
401 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
402 		if (e != RSM_SUCCESS) {
403 			return (e);
404 		}
405 	}
406 
407 	for (i = 0; i < rep_cnt; i++) {
408 		data_addr[i] = datap[i];
409 	}
410 
411 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
412 		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
413 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
414 		if (e != RSM_SUCCESS) {
415 			return (e);
416 		}
417 	}
418 
419 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
420 	    "__rsm_put32x32: exit\n"));
421 
422 	return (RSM_SUCCESS);
423 }
424 
425 static int
426 __rsm_put64x64(rsm_memseg_import_handle_t im_memseg, off_t off,
427     uint64_t *datap,
428     ulong_t rep_cnt,
429     boolean_t swap)
430 {
431 	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
432 	uint64_t *data_addr =
433 	    /* LINTED */
434 	    (uint64_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
435 	uint_t i = 0;
436 	int	e;
437 
438 	swap = swap;
439 
440 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
441 	    "__rsm_put64x64: enter\n"));
442 
443 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
444 		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
445 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
446 		if (e != RSM_SUCCESS) {
447 			return (e);
448 		}
449 	}
450 
451 	for (i = 0; i < rep_cnt; i++) {
452 		data_addr[i] = datap[i];
453 	}
454 
455 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
456 		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
457 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
458 		if (e != RSM_SUCCESS) {
459 			return (e);
460 		}
461 	}
462 
463 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
464 	    "__rsm_put64x64: exit\n"));
465 
466 	return (RSM_SUCCESS);
467 }
468 
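/*
 * __rsm_get copies length bytes from the mapped segment at byte offset
 * offset into dst_addr with bcopy(), bracketed by an implicit barrier
 * open/close when the segment is in implicit barrier mode.
 */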
469 static int
470 __rsm_get(rsm_memseg_import_handle_t im_memseg, off_t offset, void *dst_addr,
471     size_t length)
472 {
473 	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
474 	int		e;
475 
476 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
477 	    "__rsm_get: enter\n"));
478 
479 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
480 		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
481 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
482 		if (e != RSM_SUCCESS) {
483 			return (e);
484 		}
485 	}
486 
487 	(void) bcopy(seg->rsmseg_vaddr + offset - seg->rsmseg_mapoffset,
488 	    dst_addr, length);
489 
490 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
491 		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
492 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
493 		if (e != RSM_SUCCESS) {
494 			return (e);
495 		}
496 	}
497 
498 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
499 	    "__rsm_get: exit\n"));
500 
501 	return (RSM_SUCCESS);
502 }
503 
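/*
 * __rsm_getv performs a scatter/gather read described by sg_io.  If the
 * exporting node is the local node the loopback path is used: the imported
 * segment is mapped implicitly if it is only connected, RSM_HANDLE_TYPE
 * entries in the I/O vector have their local (surrogate export segment)
 * handles replaced by virtual addresses, and loopback_getv() does the copy.
 * Otherwise local handles are replaced by their segment ids and the request
 * is passed to the kernel agent with the RSM_IOCTL_GETV ioctl.  A working
 * copy of the I/O vector is used (heap-allocated when it exceeds
 * RSM_MAX_IOVLEN entries) and the caller's iovec pointer is restored before
 * returning.
 */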
504 static int
505 __rsm_getv(rsm_scat_gath_t *sg_io)
506 {
507 	rsm_iovec_t 	*iovec = sg_io->iovec;
508 	rsmka_iovec_t	ka_iovec_arr[RSM_MAX_IOVLEN];
509 	rsmka_iovec_t	*ka_iovec, *ka_iovec_start;
510 	rsmka_iovec_t	l_iovec_arr[RSM_MAX_IOVLEN];
511 	rsmka_iovec_t	*l_iovec, *l_iovec_start;
512 	rsmseg_handle_t *im_seg_hndl = (rsmseg_handle_t *)sg_io->remote_handle;
513 	rsmseg_handle_t *seg_hndl;
514 	int iovec_size = sizeof (rsmka_iovec_t) * sg_io->io_request_count;
515 	int e, i;
516 
517 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
518 	    "__rsm_getv: enter\n"));
519 
520 	/*
521 	 * Use loopback for single node operations;
522 	 * replace local handles with virtual addresses.
523 	 */
524 
525 	if (im_seg_hndl->rsmseg_nodeid == rsm_local_nodeid) {
526 		/*
527 		 * To use the loopback optimization, map the segment
528 		 * here implicitly.
529 		 */
530 		if (im_seg_hndl->rsmseg_state == IMPORT_CONNECT) {
531 			caddr_t	va;
532 			va = mmap(NULL, im_seg_hndl->rsmseg_size,
533 			    PROT_READ|PROT_WRITE,
534 			    MAP_SHARED|MAP_NORESERVE,
535 			    im_seg_hndl->rsmseg_fd, 0);
536 
537 			if (va == MAP_FAILED) {
538 				DBPRINTF((RSM_LIBRARY, RSM_ERR,
539 				    "implicit map failed:%d\n", errno));
540 				if (errno == EINVAL)
541 					return (RSMERR_BAD_MEM_ALIGNMENT);
542 				else if (errno == ENOMEM || errno == ENXIO ||
543 				    errno == EOVERFLOW)
544 					return (RSMERR_BAD_LENGTH);
545 				else if (errno == EAGAIN)
546 					return (RSMERR_INSUFFICIENT_RESOURCES);
547 				else
548 					return (errno);
549 			}
550 
551 			im_seg_hndl->rsmseg_vaddr = va;
552 			im_seg_hndl->rsmseg_maplen = im_seg_hndl->rsmseg_size;
553 			im_seg_hndl->rsmseg_mapoffset = 0;
554 			im_seg_hndl->rsmseg_state = IMPORT_MAP;
555 			im_seg_hndl->rsmseg_flags |= RSM_IMPLICIT_MAP;
556 		}
557 
558 		if (sg_io->io_request_count > RSM_MAX_IOVLEN)
559 			l_iovec_start = l_iovec = malloc(iovec_size);
560 		else
561 			l_iovec_start = l_iovec = l_iovec_arr;
562 
563 		bcopy((caddr_t)iovec, (caddr_t)l_iovec, iovec_size);
564 		for (i = 0; i < sg_io->io_request_count; i++) {
565 			if (l_iovec->io_type == RSM_HANDLE_TYPE) {
566 				/* Get the surrogate export segment handle */
567 				seg_hndl = (rsmseg_handle_t *)
568 				    l_iovec->local.handle;
569 				l_iovec->local.vaddr = seg_hndl->rsmseg_vaddr;
570 				l_iovec->io_type = RSM_VA_TYPE;
571 			}
572 			l_iovec++;
573 		}
574 		sg_io->iovec = (rsm_iovec_t *)l_iovec_start;
575 		e = loopback_getv(sg_io);
576 		sg_io->iovec = iovec;
577 		if (sg_io->io_request_count > RSM_MAX_IOVLEN)
578 			free(l_iovec_start);
579 		DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
580 		    "__rsm_getv: exit\n"));
581 		return (e);
582 	}
583 
584 	/* for the Kernel Agent, replace local handles with segment ids */
585 	if (sg_io->io_request_count > RSM_MAX_IOVLEN)
586 		ka_iovec_start = ka_iovec = malloc(iovec_size);
587 	else
588 		ka_iovec_start = ka_iovec = ka_iovec_arr;
589 
590 	bcopy((caddr_t)iovec, (caddr_t)ka_iovec, iovec_size);
591 	for (i = 0; i < sg_io->io_request_count; i++) {
592 		if (ka_iovec->io_type == RSM_HANDLE_TYPE) {
593 			seg_hndl = (rsmseg_handle_t *)ka_iovec->local.handle;
594 			ka_iovec->local.segid = seg_hndl->rsmseg_keyid;
595 		}
596 		ka_iovec++;
597 	}
598 
599 	sg_io->iovec = (rsm_iovec_t *)ka_iovec_start;
600 	e = ioctl(im_seg_hndl->rsmseg_fd, RSM_IOCTL_GETV, sg_io);
601 	sg_io->iovec = iovec;
602 
603 	if (sg_io->io_request_count > RSM_MAX_IOVLEN)
604 		free(ka_iovec_start);
605 
606 	if (e < 0) {
607 		DBPRINTF((RSM_LIBRARY, RSM_ERR,
608 		    " RSM_IOCTL_GETV failed\n"));
609 		return (errno);
610 	}
611 
612 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
613 	    "__rsm_getv: exit\n"));
614 
615 	return (RSM_SUCCESS);
616 }
617 
618 
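/*
 * __rsm_put is the write-side counterpart of __rsm_get: it copies length
 * bytes from src_addr into the mapped segment at byte offset offset, with
 * the same implicit barrier handling.
 */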
619 static int
620 __rsm_put(rsm_memseg_import_handle_t im_memseg, off_t offset, void *src_addr,
621     size_t length)
622 {
623 	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
624 	int		e;
625 
626 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
627 	    "__rsm_put: enter\n"));
628 
629 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
630 		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
631 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
632 		if (e != RSM_SUCCESS) {
633 			return (e);
634 		}
635 	}
636 
637 	bcopy(src_addr, seg->rsmseg_vaddr + offset - seg->rsmseg_mapoffset,
638 	    length);
639 
640 	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
641 		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
642 		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
643 		if (e != RSM_SUCCESS) {
644 			return (e);
645 		}
646 	}
647 
648 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
649 	    "__rsm_put: exit\n"));
650 
651 	return (RSM_SUCCESS);
652 }
653 
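/*
 * __rsm_putv performs a scatter/gather write and follows the same structure
 * as __rsm_getv: the loopback path (via loopback_putv()) for segments
 * exported by the local node, and the RSM_IOCTL_PUTV ioctl to the kernel
 * agent otherwise.
 */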
654 static int
655 __rsm_putv(rsm_scat_gath_t *sg_io)
656 {
657 	rsm_iovec_t 	*iovec = sg_io->iovec;
658 	rsmka_iovec_t	ka_iovec_arr[RSM_MAX_IOVLEN];
659 	rsmka_iovec_t	*ka_iovec, *ka_iovec_start;
660 	rsmka_iovec_t	l_iovec_arr[RSM_MAX_IOVLEN];
661 	rsmka_iovec_t	*l_iovec, *l_iovec_start;
662 	rsmseg_handle_t *im_seg_hndl = (rsmseg_handle_t *)sg_io->remote_handle;
663 	rsmseg_handle_t *seg_hndl;
664 	int iovec_size = sizeof (rsmka_iovec_t) * sg_io->io_request_count;
665 	int e, i;
666 
667 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
668 	    "__rsm_putv: enter\n"));
669 
670 	/*
671 	 * Use loopback for single node operations;
672 	 * replace local handles with virtual addresses.
673 	 */
674 
675 	if (im_seg_hndl->rsmseg_nodeid == rsm_local_nodeid) {
676 		/*
677 		 * To use the loopback optimization, map the segment
678 		 * here implicitly.
679 		 */
680 		if (im_seg_hndl->rsmseg_state == IMPORT_CONNECT) {
681 			caddr_t	va;
682 			va = mmap(NULL, im_seg_hndl->rsmseg_size,
683 			    PROT_READ|PROT_WRITE,
684 			    MAP_SHARED|MAP_NORESERVE,
685 			    im_seg_hndl->rsmseg_fd, 0);
686 
687 			if (va == MAP_FAILED) {
688 				DBPRINTF((RSM_LIBRARY, RSM_ERR,
689 				    "implicit map failed:%d\n", errno));
690 				if (errno == EINVAL)
691 					return (RSMERR_BAD_MEM_ALIGNMENT);
692 				else if (errno == ENOMEM || errno == ENXIO ||
693 				    errno == EOVERFLOW)
694 					return (RSMERR_BAD_LENGTH);
695 				else if (errno == EAGAIN)
696 					return (RSMERR_INSUFFICIENT_RESOURCES);
697 				else
698 					return (errno);
699 			}
700 			im_seg_hndl->rsmseg_vaddr = va;
701 			im_seg_hndl->rsmseg_maplen = im_seg_hndl->rsmseg_size;
702 			im_seg_hndl->rsmseg_mapoffset = 0;
703 			im_seg_hndl->rsmseg_state = IMPORT_MAP;
704 			im_seg_hndl->rsmseg_flags |= RSM_IMPLICIT_MAP;
705 		}
706 
707 		if (sg_io->io_request_count > RSM_MAX_IOVLEN)
708 			l_iovec_start = l_iovec = malloc(iovec_size);
709 		else
710 			l_iovec_start = l_iovec = l_iovec_arr;
711 
712 		bcopy((caddr_t)iovec, (caddr_t)l_iovec, iovec_size);
713 		for (i = 0; i < sg_io->io_request_count; i++) {
714 			if (l_iovec->io_type == RSM_HANDLE_TYPE) {
715 				/* Get the surrogate export segment handle */
716 				seg_hndl = (rsmseg_handle_t *)
717 				    l_iovec->local.handle;
718 				l_iovec->local.vaddr = seg_hndl->rsmseg_vaddr;
719 				l_iovec->io_type = RSM_VA_TYPE;
720 			}
721 			l_iovec++;
722 		}
723 		sg_io->iovec = (rsm_iovec_t *)l_iovec_start;
724 		e = loopback_putv(sg_io);
725 		sg_io->iovec = iovec;
726 
727 		if (sg_io->io_request_count > RSM_MAX_IOVLEN)
728 			free(l_iovec_start);
729 
730 		DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
731 		    "__rsm_putv: exit\n"));
732 
734 		return (e);
735 	}
736 
737 	/* for the Kernel Agent, replace local handles with segment ids */
738 	if (sg_io->io_request_count > RSM_MAX_IOVLEN)
739 		ka_iovec_start = ka_iovec = malloc(iovec_size);
740 	else
741 		ka_iovec_start = ka_iovec = ka_iovec_arr;
742 
743 	bcopy((caddr_t)iovec, (caddr_t)ka_iovec, iovec_size);
744 
745 	for (i = 0; i < sg_io->io_request_count; i++) {
746 		if (ka_iovec->io_type == RSM_HANDLE_TYPE) {
747 			seg_hndl = (rsmseg_handle_t *)ka_iovec->local.handle;
748 			ka_iovec->local.segid = seg_hndl->rsmseg_keyid;
749 		}
750 		ka_iovec++;
751 	}
752 
753 	sg_io->iovec = (rsm_iovec_t *)ka_iovec_start;
754 	e = ioctl(im_seg_hndl->rsmseg_fd, RSM_IOCTL_PUTV, sg_io);
755 	sg_io->iovec = iovec;
756 
757 	if (sg_io->io_request_count > RSM_MAX_IOVLEN)
758 		free(ka_iovec_start);
759 
760 	if (e < 0) {
761 		DBPRINTF((RSM_LIBRARY, RSM_ERR,
762 		    " RSM_IOCTL_PUTV failed\n"));
763 		return (errno);
764 	}
765 
766 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
767 	    "__rsm_putv: exit\n"));
768 
769 	return (RSM_SUCCESS);
770 }
771 
772 	/*
773 	 * import side memory segment operations (barriers):
774 	 */
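
/*
 * Generic barrier support.  __rsm_memseg_import_init_barrier allocates an
 * rsm_barrier_t and records the owning segment in the barrier handle;
 * __rsm_memseg_import_destroy_barrier frees that data again.  In explicit
 * barrier mode an application brackets its accesses itself, roughly
 * (illustrative sketch of the call order only):
 *
 *	(void) rsm_memseg_import_init_barrier(seg, type, bar);
 *	(void) rsm_memseg_import_open_barrier(bar);
 *	... put/get accesses ...
 *	(void) rsm_memseg_import_close_barrier(bar);
 *
 * In implicit barrier mode the put/get routines above issue the open and
 * close calls themselves.
 */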
775 static int
776 __rsm_memseg_import_init_barrier(rsm_memseg_import_handle_t im_memseg,
777     rsm_barrier_type_t type,
778     rsm_barrier_handle_t barrier)
779 {
780 	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
781 	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
782 
783 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
784 	    "__rsm_memseg_import_init_barrier: enter\n"));
786 
787 	type = type;
788 
789 	if (!seg) {
790 		DBPRINTF((RSM_LIBRARY, RSM_ERR,
791 		    "invalid segment handle\n"));
792 		return (RSMERR_BAD_SEG_HNDL);
793 	}
794 	if (!bar) {
795 		DBPRINTF((RSM_LIBRARY, RSM_ERR,
796 		    "invalid barrier handle\n"));
797 		return (RSMERR_BAD_BARRIER_PTR);
798 	}
799 
800 	/* XXX: fix later. We only support span-of-node barriers */
801 
802 	bar->rsmgenbar_data = (rsm_barrier_t *)malloc(sizeof (rsm_barrier_t));
803 	if (bar->rsmgenbar_data == NULL) {
804 		DBPRINTF((RSM_LIBRARY, RSM_ERR,
805 		    "not enough memory\n"));
806 		return (RSMERR_INSUFFICIENT_MEM);
807 	}
808 	bar->rsmgenbar_seg = seg;
809 
810 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
811 	    "__rsm_memseg_import_init_barrier: exit\n"));
812 
813 	return (RSM_SUCCESS);
814 }
815 
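/*
 * Open, order and close pass the barrier data to the driver using the
 * RSM_IOCTL_BAR_OPEN, RSM_IOCTL_BAR_ORDER and RSM_IOCTL_BAR_CLOSE ioctls
 * on the imported segment's file descriptor.
 */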
816 static int
817 __rsm_memseg_import_open_barrier(rsm_barrier_handle_t barrier)
818 {
819 	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
820 	rsmseg_handle_t *seg;
821 	rsm_ioctlmsg_t msg;
822 
823 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
824 	    "__rsm_memseg_import_open_barrier: enter\n"));
825 
826 	if (!bar) {
827 		DBPRINTF((RSM_LIBRARY, RSM_ERR,
828 		    "invalid barrier pointer\n"));
829 		return (RSMERR_BAD_BARRIER_PTR);
830 	}
831 
832 	if ((seg = bar->rsmgenbar_seg) == 0) {
833 		DBPRINTF((RSM_LIBRARY, RSM_ERR,
834 		    "uninitialized barrier\n"));
835 		return (RSMERR_BARRIER_UNINITIALIZED);
836 	}
837 
838 /* lint -save -e718 -e746 */
839 	msg.bar = *(bar->rsmgenbar_data);
840 	if (ioctl(seg->rsmseg_fd,
841 	    RSM_IOCTL_BAR_OPEN, &msg) < 0) {
842 		DBPRINTF((RSM_LIBRARY, RSM_ERR,
843 		    " RSM_IOCTL_BAR_OPEN failed\n"));
844 /* lint -restore */
845 		return (RSMERR_BARRIER_OPEN_FAILED);
846 	}
847 
848 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
849 	    "__rsm_memseg_import_open_barrier: exit\n"));
850 
851 	return (RSM_SUCCESS);
852 }
853 
854 static int
855 __rsm_memseg_import_order_barrier(rsm_barrier_handle_t barrier)
856 {
857 	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
858 	rsmseg_handle_t *seg;
859 	rsm_ioctlmsg_t msg;
860 
861 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
862 	    "__rsm_memseg_import_order_barrier: enter\n"));
863 
864 	if (!bar) {
865 		DBPRINTF((RSM_LIBRARY, RSM_ERR,
866 		    "invalid barrier\n"));
867 		return (RSMERR_BAD_BARRIER_PTR);
868 	}
869 	if ((seg = bar->rsmgenbar_seg) == 0) {
870 		DBPRINTF((RSM_LIBRARY, RSM_ERR,
871 		    "uninitialized barrier\n"));
872 		return (RSMERR_BARRIER_UNINITIALIZED);
873 	}
874 
875 	msg.bar = *(bar->rsmgenbar_data);
876 	if (ioctl(seg->rsmseg_fd, RSM_IOCTL_BAR_ORDER, &msg) < 0) {
877 		DBPRINTF((RSM_LIBRARY, RSM_ERR,
878 		    "RSM_IOCTL_BAR_ORDER failed\n"));
879 		return (RSMERR_BARRIER_FAILURE);
880 	}
881 
882 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
883 	    "__rsm_memseg_import_order_barrier: exit\n"));
884 
885 	return (RSM_SUCCESS);
886 }
887 
888 static int
889 __rsm_memseg_import_close_barrier(rsm_barrier_handle_t barrier)
890 {
891 	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
892 	rsmseg_handle_t *seg;
893 	rsm_ioctlmsg_t msg;
894 
895 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
896 	    "__rsm_memseg_import_close_barrier: enter\n"));
897 
898 	if (!bar) {
899 		DBPRINTF((RSM_LIBRARY, RSM_ERR,
900 		    "invalid barrier\n"));
901 		return (RSMERR_BAD_BARRIER_PTR);
902 	}
903 	if ((seg = bar->rsmgenbar_seg) == 0) {
904 		DBPRINTF((RSM_LIBRARY, RSM_ERR,
905 		    "uninitialized barrier\n"));
906 		return (RSMERR_BARRIER_UNINITIALIZED);
907 	}
908 
909 	msg.bar = *(bar->rsmgenbar_data);
910 	if (ioctl(seg->rsmseg_fd, RSM_IOCTL_BAR_CLOSE, &msg) < 0) {
911 		DBPRINTF((RSM_LIBRARY, RSM_ERR,
912 		    " RSM_IOCTL_BAR_CLOSE failed\n"));
913 		return (RSMERR_BARRIER_FAILURE);
914 	}
915 
916 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
917 	    "__rsm_memseg_import_close_barrier: exit\n"));
918 
919 	return (RSM_SUCCESS);
920 }
921 
922 static int
923 __rsm_memseg_import_destroy_barrier(rsm_barrier_handle_t barrier)
924 {
925 	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
926 
927 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
928 	    "__rsm_memseg_import_destroy_barrier: enter\n"));
929 
930 	if (!bar) {
931 		DBPRINTF((RSM_LIBRARY, RSM_ERR,
932 		    "invalid barrier\n"));
933 		return (RSMERR_BAD_BARRIER_PTR);
934 	}
935 
936 	free((void *) bar->rsmgenbar_data);
937 
938 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
939 	    "__rsm_memseg_import_destroy_barrier: exit\n"));
940 
941 	return (RSM_SUCCESS);
942 }
943 
944 /* lint -w1 */
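/*
 * The generic library does not track a per-segment barrier mode of its own;
 * get_mode and set_mode are placeholders that simply return success.
 */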
945 static int
946 __rsm_memseg_import_get_mode(rsm_memseg_import_handle_t im_memseg,
947     rsm_barrier_mode_t *mode)
948 {
949 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
950 	    "__rsm_memseg_import_get_mode: enter\n"));
951 
952 	im_memseg = im_memseg; mode = mode;
953 
954 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
955 	    "__rsm_memseg_import_get_mode: exit\n"));
956 
957 	return (RSM_SUCCESS);
958 }
959 static int
960 __rsm_memseg_import_set_mode(rsm_memseg_import_handle_t im_memseg,
961 				rsm_barrier_mode_t mode)
962 {
963 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
964 	    "__rsm_memseg_import_set_mode: enter\n"));
965 
966 	im_memseg = im_memseg; mode = mode;
967 
968 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
969 	    "__rsm_memseg_import_set_mode: exit\n"));
970 
971 	return (RSM_SUCCESS);
972 }
973 
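/*
 * __rsm_create_memory_handle builds a local memory handle for putv/getv by
 * creating a surrogate export segment over the caller's buffer (rounded up
 * to a whole number of pages) and publishing it to the local node only.
 * For buffers larger than RSM_MAX_HANDLE_DVMA the segment is published with
 * null permissions so that the kernel agent defers DVMA binding until the
 * RDMA is actually set up.  The export handle itself is returned as the
 * local memory handle; __rsm_free_memory_handle destroys it.
 */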
974 static int
975 __rsm_create_memory_handle(rsmapi_controller_handle_t controller,
976     rsm_localmemory_handle_t *local_hndl_p,
977     caddr_t local_va, size_t len)
978 {
979 	rsm_memseg_export_handle_t memseg;
980 	rsmapi_access_entry_t	acl[1];
981 	rsm_memseg_id_t segid = 0;
982 	size_t size;
983 	int e;
984 
985 
986 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
987 	    "__rsm_create_memory_handle: enter\n"));
988 
989 	/*
990 	 * create a surrogate segment (local memory will be locked down).
991 	 */
992 	size =  roundup(len, PAGESIZE);
993 	e = rsm_memseg_export_create(controller, &memseg,
994 	    (void *)local_va, size,
995 	    RSM_ALLOW_REBIND);
996 	if (e != RSM_SUCCESS) {
997 		DBPRINTF((RSM_LIBRARY, RSM_ERR,
998 		    "export create failed\n"));
999 		return (e);
1000 	}
1001 
1002 	/*
1003 	 * Publish the segment to the local node only.  If the segment
1004 	 * length is very large then don't publish to the adapter driver
1005 	 * because that will consume too much DVMA space - this is indicated
1006 	 * to the Kernel Agent using null permissions.  DVMA binding will
1007 	 * be done when the RDMA is set up.
1008 	 */
1009 	acl[0].ae_node = rsm_local_nodeid;
1010 	if (len > RSM_MAX_HANDLE_DVMA)
1011 		acl[0].ae_permission = 0;
1012 	else
1013 		acl[0].ae_permission = RSM_PERM_RDWR;
1014 
1015 	e = rsm_memseg_export_publish(memseg, &segid, acl, 1);
1016 	if (e != RSM_SUCCESS) {
1017 		DBPRINTF((RSM_LIBRARY, RSM_ERR,
1018 		    "export publish failed\n"));
1019 		rsm_memseg_export_destroy(memseg);
1020 		return (e);
1021 	}
1022 
1023 	/* Use the surrogate seghandle as the local memory handle */
1024 	*local_hndl_p = (rsm_localmemory_handle_t)memseg;
1025 
1026 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1027 	    "__rsm_create_memory_handle: exit\n"));
1028 
1029 	return (e);
1030 }
1031 
1032 static int
1033 __rsm_free_memory_handle(rsm_localmemory_handle_t local_handle)
1034 {
1035 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1036 	    "__rsm_free_memory_handle: enter\n"));
1037 
1038 	rsm_memseg_export_destroy((rsm_memseg_export_handle_t)local_handle);
1039 
1040 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1041 	    "__rsm_free_memory_handle: exit\n"));
1042 
1043 	return (RSM_SUCCESS);
1044 }
1045 
1046 static int
1047 __rsm_get_lib_attr(rsm_ndlib_attr_t **libattrp)
1048 {
1049 
1050 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1051 	    "__rsm_get_lib_attr: enter\n"));
1052 
1053 	*libattrp = &_rsm_genlib_attr;
1054 
1055 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1056 	    "__rsm_get_lib_attr: exit\n"));
1057 
1058 	return (RSM_SUCCESS);
1059 }
1060 
1061 static int
1062 __rsm_closedevice(rsmapi_controller_handle_t cntr_handle)
1063 {
1064 
1065 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1066 	    "__rsm_closedevice: enter\n"));
1067 
1068 	cntr_handle = cntr_handle;
1069 
1070 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1071 	    "__rsm_closedevice: exit\n"));
1072 
1073 	return (RSM_SUCCESS);
1074 }
1075 
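/*
 * __rsmdefault_setops fills in any entries of a controller's segment ops
 * vector that are still NULL with the generic routines above.  A network
 * driver interface library would typically install its own optimized
 * handlers first and then call this to pick up defaults for the rest,
 * along these lines (illustrative sketch only; the mylib_* names are
 * made up):
 *
 *	void
 *	mylib_setops(rsm_segops_t *segops)
 *	{
 *		segops->rsm_memseg_import_put64 = mylib_put64;
 *		__rsmdefault_setops(segops);
 *	}
 */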
1076 void
1077 __rsmdefault_setops(rsm_segops_t *segops)
1078 {
1079 
1080 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1081 	    "__rsmdefault_setops: enter\n"));
1082 
1083 	if (segops->rsm_memseg_import_connect == NULL) {
1084 		segops->rsm_memseg_import_connect = __rsm_import_connect;
1085 	}
1086 	if (segops->rsm_memseg_import_disconnect == NULL) {
1087 		segops->rsm_memseg_import_disconnect = __rsm_import_disconnect;
1088 	}
1089 
1090 	if (segops->rsm_memseg_import_get8 == NULL) {
1091 		segops->rsm_memseg_import_get8 = __rsm_get8x8;
1092 	}
1093 	if (segops->rsm_memseg_import_get16 == NULL) {
1094 		segops->rsm_memseg_import_get16 = __rsm_get16x16;
1095 	}
1096 	if (segops->rsm_memseg_import_get32 == NULL) {
1097 		segops->rsm_memseg_import_get32 = __rsm_get32x32;
1098 	}
1099 	if (segops->rsm_memseg_import_get64 == NULL) {
1100 		segops->rsm_memseg_import_get64 = __rsm_get64x64;
1101 	}
1102 	if (segops->rsm_memseg_import_get == NULL) {
1103 		segops->rsm_memseg_import_get = __rsm_get;
1104 	}
1105 
1106 	if (segops->rsm_memseg_import_put8 == NULL) {
1107 		segops->rsm_memseg_import_put8 = __rsm_put8x8;
1108 	}
1109 	if (segops->rsm_memseg_import_put16 == NULL) {
1110 		segops->rsm_memseg_import_put16 = __rsm_put16x16;
1111 	}
1112 	if (segops->rsm_memseg_import_put32 == NULL) {
1113 		segops->rsm_memseg_import_put32 = __rsm_put32x32;
1114 	}
1115 	if (segops->rsm_memseg_import_put64 == NULL) {
1116 		segops->rsm_memseg_import_put64 = __rsm_put64x64;
1117 	}
1118 	if (segops->rsm_memseg_import_put == NULL) {
1119 		segops->rsm_memseg_import_put = __rsm_put;
1120 	}
1121 
1122 	if (segops->rsm_memseg_import_putv == NULL) {
1123 		segops->rsm_memseg_import_putv = __rsm_putv;
1124 	}
1125 
1126 	if (segops->rsm_memseg_import_getv == NULL) {
1127 		segops->rsm_memseg_import_getv = __rsm_getv;
1128 	}
1129 
1130 	if (segops->rsm_create_localmemory_handle == NULL) {
1131 		segops->rsm_create_localmemory_handle =
1132 		    __rsm_create_memory_handle;
1133 	}
1134 
1135 	if (segops->rsm_free_localmemory_handle == NULL) {
1136 		segops->rsm_free_localmemory_handle =
1137 		    __rsm_free_memory_handle;
1138 	}
1139 
1140 	/* XXX: Need to support barrier functions */
1141 	if (segops->rsm_memseg_import_init_barrier == NULL) {
1142 		segops->rsm_memseg_import_init_barrier =
1143 		    __rsm_memseg_import_init_barrier;
1144 	}
1145 	if (segops->rsm_memseg_import_open_barrier == NULL) {
1146 		segops->rsm_memseg_import_open_barrier =
1147 		    __rsm_memseg_import_open_barrier;
1148 	}
1149 	if (segops->rsm_memseg_import_order_barrier == NULL) {
1150 		segops->rsm_memseg_import_order_barrier =
1151 		    __rsm_memseg_import_order_barrier;
1152 	}
1153 	if (segops->rsm_memseg_import_close_barrier == NULL) {
1154 		segops->rsm_memseg_import_close_barrier =
1155 		    __rsm_memseg_import_close_barrier;
1156 	}
1157 	if (segops->rsm_memseg_import_destroy_barrier == NULL) {
1158 		segops->rsm_memseg_import_destroy_barrier =
1159 		    __rsm_memseg_import_destroy_barrier;
1160 	}
1161 
1162 	if (segops->rsm_memseg_import_get_mode == NULL) {
1163 		segops->rsm_memseg_import_get_mode =
1164 		    __rsm_memseg_import_get_mode;
1165 	}
1166 	if (segops->rsm_memseg_import_set_mode == NULL) {
1167 		segops->rsm_memseg_import_set_mode =
1168 		    __rsm_memseg_import_set_mode;
1169 	}
1170 
1171 	if (segops->rsm_get_lib_attr == NULL) {
1172 		segops->rsm_get_lib_attr =
1173 		    __rsm_get_lib_attr;
1174 	}
1175 
1176 	if (segops->rsm_closedevice == NULL) {
1177 		segops->rsm_closedevice =
1178 		    __rsm_closedevice;
1179 	}
1180 
1181 
1182 	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1183 	    "__rsmdefault_setops: exit\n"));
1184 
1185 }
1186