/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "c_synonyms.h"
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <strings.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <sys/sysmacros.h>
#include <errno.h>
#include <assert.h>
#include <malloc.h>
#include <fcntl.h>
#include <dlfcn.h>
#include <sched.h>

#include <rsmapi.h>
#include <sys/rsm/rsmndi.h>
#include <rsmlib_in.h>
#include <sys/rsm/rsm.h>

/* lint -w2 */

extern rsm_node_id_t rsm_local_nodeid;
extern int loopback_getv(rsm_scat_gath_t *);
extern int loopback_putv(rsm_scat_gath_t *);

static rsm_ndlib_attr_t _rsm_genlib_attr = {
	B_TRUE,		/* mapping needed for put/get */
	B_FALSE		/* mapping needed for putv/getv */
};
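
/*
 * __rsm_get_lib_attr() below hands the framework a pointer to this static
 * structure; it advertises that the generic put/get routines require the
 * segment to be mapped, while putv/getv do not.
 */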

static int
__rsm_import_connect(
    rsmapi_controller_handle_t controller, rsm_node_id_t node_id,
    rsm_memseg_id_t segment_id, rsm_permission_t perm,
    rsm_memseg_import_handle_t *im_memseg) {

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_connect: enter\n"));

	controller = controller;
	node_id = node_id;
	segment_id = segment_id;
	perm = perm;
	im_memseg = im_memseg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_connect: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_import_disconnect(rsm_memseg_import_handle_t im_memseg) {

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_disconnect: enter\n"));

	im_memseg = im_memseg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_disconnect: exit\n"));

	return (RSM_SUCCESS);
}

/*
 * XXX: one day we ought to rewrite this stuff based on 64-byte atomic access.
 * We can have a new ops vector that makes that assumption.
 */
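
/*
 * Generic element-wise read routines.  Each __rsm_getNxN below follows the
 * same pattern: if the segment is in implicit barrier mode, open the
 * segment's barrier, copy rep_cnt elements of the given width from the
 * mapped segment address (off adjusted by the current map offset) into the
 * caller's buffer, then close the barrier so that any transfer error is
 * reported before returning.  The swap argument is part of the ops-vector
 * signature but is unused here.
 */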

static int
__rsm_get8x8(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint8_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint8_t *data_addr =
		(uint8_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get8x8: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		datap[i] = data_addr[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get8x8: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_get16x16(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint16_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint16_t *data_addr =
	    /* LINTED */
	    (uint16_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get16x16: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		datap[i] = data_addr[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get16x16: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_get32x32(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint32_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint32_t *data_addr =
	    /* LINTED */
	    (uint32_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get32x32: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		datap[i] = data_addr[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get32x32: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_get64x64(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint64_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint64_t *data_addr =
	    /* LINTED */
	    (uint64_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get64x64: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		datap[i] = data_addr[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get64x64: exit\n"));

	return (RSM_SUCCESS);
}

	/*
	 * import side memory segment operations (write access functions):
	 */

/*
 * XXX: Each one of the following cases ought to be a separate function loaded
 * into a segment access ops vector. We determine the correct function at
 * segment connect time. When a new controller is registered, we can decode
 * its direct_access_size attribute and load the correct function. For
 * loopback we need to create a special ops vector that bypasses all of
 * this stuff.
 *
 * XXX: We need to create a special interrupt queue for the library to handle
 * partial writes in the remote process.
 */
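
/*
 * The put routines below mirror the get routines above, with the copy
 * direction reversed and the same implicit-barrier bracket around the
 * store loop.
 *
 * Purely illustrative sketch (not part of this library): if the ops-vector
 * idea described in the XXX comment above were implemented, connect time
 * could select an access routine keyed off the controller's
 * direct_access_size attribute, along the lines of
 *
 *	switch (direct_access_size) {		(hypothetical attribute)
 *	case sizeof (uint64_t):
 *		segops->rsm_memseg_import_put64 = __rsm_put64x64;
 *		break;
 *	}
 *
 * Today the generic routines are simply installed for any slot the NDI
 * library leaves NULL; see __rsmdefault_setops() at the end of this file.
 */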
static int
__rsm_put8x8(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint8_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint8_t *data_addr =
		(uint8_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put8x8: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		data_addr[i] = datap[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put8x8: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_put16x16(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint16_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint16_t *data_addr =
	    /* LINTED */
	    (uint16_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put16x16: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		data_addr[i] = datap[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put16x16: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_put32x32(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint32_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint32_t *data_addr =
	    /* LINTED */
	    (uint32_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put32x32: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		data_addr[i] = datap[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put32x32: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_put64x64(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint64_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint64_t *data_addr =
	    /* LINTED */
	    (uint64_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put64x64: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		data_addr[i] = datap[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put64x64: exit\n"));

	return (RSM_SUCCESS);
}

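/*
 * Bulk transfer routines.  __rsm_get() and __rsm_put() move an arbitrary
 * number of bytes between the caller's buffer and the mapped segment with
 * bcopy(), using the same implicit-barrier bracket as the element-wise
 * routines above.
 */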
static int
__rsm_get(rsm_memseg_import_handle_t im_memseg, off_t offset, void *dst_addr,
    size_t length)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	int		e;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_get: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	(void) bcopy(seg->rsmseg_vaddr + offset - seg->rsmseg_mapoffset,
	    dst_addr, length);

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_get: exit\n"));

	return (RSM_SUCCESS);
}

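/*
 * __rsm_getv() performs a scatter/gather read.  When the exporting node is
 * the local node, the import segment is mapped implicitly (if it is still
 * in IMPORT_CONNECT state) and any local memory handles in the iovec are
 * rewritten to virtual addresses so that loopback_getv() can do plain
 * memory copies.  Otherwise local handles are rewritten to segment ids and
 * the request is handed to the Kernel Agent via RSM_IOCTL_GETV.  In either
 * case the caller's iovec pointer is restored before returning.
 */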
static int
__rsm_getv(rsm_scat_gath_t *sg_io)
{
	rsm_iovec_t	*iovec = sg_io->iovec;
	rsmka_iovec_t	ka_iovec_arr[RSM_MAX_IOVLEN];
	rsmka_iovec_t	*ka_iovec, *ka_iovec_start;
	rsmka_iovec_t	l_iovec_arr[RSM_MAX_IOVLEN];
	rsmka_iovec_t	*l_iovec, *l_iovec_start;
	rsmseg_handle_t *im_seg_hndl = (rsmseg_handle_t *)sg_io->remote_handle;
	rsmseg_handle_t *seg_hndl;
	int iovec_size = sizeof (rsmka_iovec_t) * sg_io->io_request_count;
	int e, i;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_getv: enter\n"));

	/*
	 * Use loopback for single node operations.
	 * Replace local handles with virtual addresses.
	 */

	if (im_seg_hndl->rsmseg_nodeid == rsm_local_nodeid) {
		/*
		 * To use the loopback optimization map the segment
		 * here implicitly.
		 */
		if (im_seg_hndl->rsmseg_state == IMPORT_CONNECT) {
			caddr_t	va;
			va = mmap(NULL, im_seg_hndl->rsmseg_size,
			    PROT_READ|PROT_WRITE,
			    MAP_SHARED|MAP_NORESERVE,
			    im_seg_hndl->rsmseg_fd, 0);

			if (va == MAP_FAILED) {
				DBPRINTF((RSM_LIBRARY, RSM_ERR,
				    "implicit map failed:%d\n", errno));
				if (errno == EINVAL)
					return (RSMERR_BAD_MEM_ALIGNMENT);
				else if (errno == ENOMEM || errno == ENXIO ||
					errno == EOVERFLOW)
						return (RSMERR_BAD_LENGTH);
				else if (errno == EAGAIN)
					return (RSMERR_INSUFFICIENT_RESOURCES);
				else
					return (errno);
			}

			im_seg_hndl->rsmseg_vaddr = va;
			im_seg_hndl->rsmseg_maplen = im_seg_hndl->rsmseg_size;
			im_seg_hndl->rsmseg_mapoffset = 0;
			im_seg_hndl->rsmseg_state = IMPORT_MAP;
			im_seg_hndl->rsmseg_flags |= RSM_IMPLICIT_MAP;
		}

		if (sg_io->io_request_count > RSM_MAX_IOVLEN)
			l_iovec_start = l_iovec = malloc(iovec_size);
		else
			l_iovec_start = l_iovec = l_iovec_arr;

		bcopy((caddr_t)iovec, (caddr_t)l_iovec, iovec_size);
		for (i = 0; i < sg_io->io_request_count; i++) {
			if (l_iovec->io_type == RSM_HANDLE_TYPE) {
				/* Get the surrogate export segment handle */
				seg_hndl = (rsmseg_handle_t *)
				    l_iovec->local.handle;
				l_iovec->local.vaddr = seg_hndl->rsmseg_vaddr;
				l_iovec->io_type = RSM_VA_TYPE;
			}
			l_iovec++;
		}
		sg_io->iovec = (rsm_iovec_t *)l_iovec_start;
		e = loopback_getv(sg_io);
		sg_io->iovec = iovec;
		if (sg_io->io_request_count > RSM_MAX_IOVLEN)
			free(l_iovec_start);
		DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
		    "__rsm_getv: exit\n"));
		return (e);
	}

	/* for the Kernel Agent, replace local handles with segment ids */
	if (sg_io->io_request_count > RSM_MAX_IOVLEN)
		ka_iovec_start = ka_iovec = malloc(iovec_size);
	else
		ka_iovec_start = ka_iovec = ka_iovec_arr;

	bcopy((caddr_t)iovec, (caddr_t)ka_iovec, iovec_size);
	for (i = 0; i < sg_io->io_request_count; i++) {
		if (ka_iovec->io_type == RSM_HANDLE_TYPE) {
			seg_hndl = (rsmseg_handle_t *)ka_iovec->local.handle;
			ka_iovec->local.segid = seg_hndl->rsmseg_keyid;
		}
		ka_iovec++;
	}

	sg_io->iovec = (rsm_iovec_t *)ka_iovec_start;
	e = ioctl(im_seg_hndl->rsmseg_fd, RSM_IOCTL_GETV, sg_io);
	sg_io->iovec = iovec;

	if (sg_io->io_request_count > RSM_MAX_IOVLEN)
		free(ka_iovec_start);

	if (e < 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    " RSM_IOCTL_GETV failed\n"));
		return (errno);
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_getv: exit\n"));

	return (RSM_SUCCESS);
}


static int
__rsm_put(rsm_memseg_import_handle_t im_memseg, off_t offset, void *src_addr,
    size_t length)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	int		e;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	bcopy(src_addr, seg->rsmseg_vaddr + offset - seg->rsmseg_mapoffset,
		length);

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put: exit\n"));

	return (RSM_SUCCESS);
}

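/*
 * __rsm_putv() is the write-side counterpart of __rsm_getv(): the same
 * loopback rewrite is used for single-node operations, and remote requests
 * are handed to the Kernel Agent via RSM_IOCTL_PUTV.
 */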
static int
__rsm_putv(rsm_scat_gath_t *sg_io)
{
	rsm_iovec_t	*iovec = sg_io->iovec;
	rsmka_iovec_t	ka_iovec_arr[RSM_MAX_IOVLEN];
	rsmka_iovec_t	*ka_iovec, *ka_iovec_start;
	rsmka_iovec_t	l_iovec_arr[RSM_MAX_IOVLEN];
	rsmka_iovec_t	*l_iovec, *l_iovec_start;
	rsmseg_handle_t *im_seg_hndl = (rsmseg_handle_t *)sg_io->remote_handle;
	rsmseg_handle_t *seg_hndl;
	int iovec_size = sizeof (rsmka_iovec_t) * sg_io->io_request_count;
	int e, i;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_putv: enter\n"));

	/*
	 * Use loopback for single node operations.
	 * Replace local handles with virtual addresses.
	 */

	if (im_seg_hndl->rsmseg_nodeid == rsm_local_nodeid) {
		/*
		 * To use the loopback optimization map the segment
		 * here implicitly.
		 */
		if (im_seg_hndl->rsmseg_state == IMPORT_CONNECT) {
			caddr_t	va;
			va = mmap(NULL, im_seg_hndl->rsmseg_size,
			    PROT_READ|PROT_WRITE,
			    MAP_SHARED|MAP_NORESERVE,
			    im_seg_hndl->rsmseg_fd, 0);

			if (va == MAP_FAILED) {
				DBPRINTF((RSM_LIBRARY, RSM_ERR,
				    "implicit map failed:%d\n", errno));
				if (errno == EINVAL)
					return (RSMERR_BAD_MEM_ALIGNMENT);
				else if (errno == ENOMEM || errno == ENXIO ||
					errno == EOVERFLOW)
						return (RSMERR_BAD_LENGTH);
				else if (errno == EAGAIN)
					return (RSMERR_INSUFFICIENT_RESOURCES);
				else
					return (errno);
			}
			im_seg_hndl->rsmseg_vaddr = va;
			im_seg_hndl->rsmseg_maplen = im_seg_hndl->rsmseg_size;
			im_seg_hndl->rsmseg_mapoffset = 0;
			im_seg_hndl->rsmseg_state = IMPORT_MAP;
			im_seg_hndl->rsmseg_flags |= RSM_IMPLICIT_MAP;
		}

		if (sg_io->io_request_count > RSM_MAX_IOVLEN)
			l_iovec_start = l_iovec = malloc(iovec_size);
		else
			l_iovec_start = l_iovec = l_iovec_arr;

		bcopy((caddr_t)iovec, (caddr_t)l_iovec, iovec_size);
		for (i = 0; i < sg_io->io_request_count; i++) {
			if (l_iovec->io_type == RSM_HANDLE_TYPE) {
				/* Get the surrogate export segment handle */
				seg_hndl = (rsmseg_handle_t *)
				    l_iovec->local.handle;
				l_iovec->local.vaddr = seg_hndl->rsmseg_vaddr;
				l_iovec->io_type = RSM_VA_TYPE;
			}
			l_iovec++;
		}
		sg_io->iovec = (rsm_iovec_t *)l_iovec_start;
		e = loopback_putv(sg_io);
		sg_io->iovec = iovec;

		if (sg_io->io_request_count > RSM_MAX_IOVLEN)
			free(l_iovec_start);

		DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
		    "__rsm_putv: exit\n"));

		return (e);
	}

	/* for the Kernel Agent, replace local handles with segment ids */
	if (sg_io->io_request_count > RSM_MAX_IOVLEN)
		ka_iovec_start = ka_iovec = malloc(iovec_size);
	else
		ka_iovec_start = ka_iovec = ka_iovec_arr;

	bcopy((caddr_t)iovec, (caddr_t)ka_iovec, iovec_size);

	for (i = 0; i < sg_io->io_request_count; i++) {
		if (ka_iovec->io_type == RSM_HANDLE_TYPE) {
			seg_hndl = (rsmseg_handle_t *)ka_iovec->local.handle;
			ka_iovec->local.segid = seg_hndl->rsmseg_keyid;
		}
		ka_iovec++;
	}

	sg_io->iovec = (rsm_iovec_t *)ka_iovec_start;
	e = ioctl(im_seg_hndl->rsmseg_fd, RSM_IOCTL_PUTV, sg_io);
	sg_io->iovec = iovec;

	if (sg_io->io_request_count > RSM_MAX_IOVLEN)
		free(ka_iovec_start);

	if (e < 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    " RSM_IOCTL_PUTV failed\n"));
		return (errno);
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_putv: exit\n"));

	return (RSM_SUCCESS);
}

	/*
	 * import side memory segment operations (barriers):
	 */
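
/*
 * Generic barrier implementation.  Only span-of-node barriers are supported
 * (see the XXX note in __rsm_memseg_import_init_barrier()).  init allocates
 * an rsm_barrier_t and ties it to the segment; open, order and close pass
 * that barrier data to the driver through the RSM_IOCTL_BAR_OPEN,
 * RSM_IOCTL_BAR_ORDER and RSM_IOCTL_BAR_CLOSE ioctls on the segment's file
 * descriptor; destroy frees the barrier data.
 */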
static int
__rsm_memseg_import_init_barrier(rsm_memseg_import_handle_t im_memseg,
    rsm_barrier_type_t type,
    rsm_barrier_handle_t barrier)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_init_barrier: enter\n"));

	type = type;

	if (!seg) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "invalid segment handle\n"));
		return (RSMERR_BAD_SEG_HNDL);
	}
	if (!bar) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "invalid barrier handle\n"));
		return (RSMERR_BAD_BARRIER_PTR);
	}

	/* XXX: fix later. We only support span-of-node barriers */

	bar->rsmgenbar_data = (rsm_barrier_t *)malloc(sizeof (rsm_barrier_t));
	if (bar->rsmgenbar_data == NULL) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "not enough memory\n"));
		return (RSMERR_INSUFFICIENT_MEM);
	}
	bar->rsmgenbar_seg = seg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_init_barrier: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_memseg_import_open_barrier(rsm_barrier_handle_t barrier)
{
	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
	rsmseg_handle_t *seg;
	rsm_ioctlmsg_t msg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_open_barrier: enter\n"));

	if (!bar) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "invalid barrier pointer\n"));
		return (RSMERR_BAD_BARRIER_PTR);
	}

	if ((seg = bar->rsmgenbar_seg) == 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "uninitialized barrier\n"));
		return (RSMERR_BARRIER_UNINITIALIZED);
	}

/* lint -save -e718 -e746 */
	msg.bar = *(bar->rsmgenbar_data);
	if (ioctl(seg->rsmseg_fd,
	    RSM_IOCTL_BAR_OPEN, &msg) < 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    " RSM_IOCTL_BAR_OPEN failed\n"));
/* lint -restore */
		return (RSMERR_BARRIER_OPEN_FAILED);
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_open_barrier: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_memseg_import_order_barrier(rsm_barrier_handle_t barrier)
{
	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
	rsmseg_handle_t *seg;
	rsm_ioctlmsg_t msg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_order_barrier: enter\n"));

	if (!bar) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "invalid barrier\n"));
		return (RSMERR_BAD_BARRIER_PTR);
	}
	if ((seg = bar->rsmgenbar_seg) == 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "uninitialized barrier\n"));
		return (RSMERR_BARRIER_UNINITIALIZED);
	}

	msg.bar = *(bar->rsmgenbar_data);
	if (ioctl(seg->rsmseg_fd, RSM_IOCTL_BAR_ORDER, &msg) < 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "RSM_IOCTL_BAR_ORDER failed\n"));
		return (RSMERR_BARRIER_FAILURE);
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_order_barrier: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_memseg_import_close_barrier(rsm_barrier_handle_t barrier)
{
	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
	rsmseg_handle_t *seg;
	rsm_ioctlmsg_t msg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_close_barrier: enter\n"));

	if (!bar) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "invalid barrier\n"));
		return (RSMERR_BAD_BARRIER_PTR);
	}
	if ((seg = bar->rsmgenbar_seg) == 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "uninitialized barrier\n"));
		return (RSMERR_BARRIER_UNINITIALIZED);
	}

	msg.bar = *(bar->rsmgenbar_data);
	if (ioctl(seg->rsmseg_fd, RSM_IOCTL_BAR_CLOSE, &msg) < 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    " RSM_IOCTL_BAR_CLOSE failed\n"));
		return (RSMERR_BARRIER_FAILURE);
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_close_barrier: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_memseg_import_destroy_barrier(rsm_barrier_handle_t barrier)
{
	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_destroy_barrier: enter\n"));

	if (!bar) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "invalid barrier\n"));
		return (RSMERR_BAD_BARRIER_PTR);
	}

	free((void *) bar->rsmgenbar_data);

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_destroy_barrier: exit\n"));

	return (RSM_SUCCESS);
}

/* lint -w1 */
static int
__rsm_memseg_import_get_mode(rsm_memseg_import_handle_t im_memseg,
    rsm_barrier_mode_t *mode)
{
	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_get_mode: enter\n"));

	im_memseg = im_memseg; mode = mode;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_get_mode: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_memseg_import_set_mode(rsm_memseg_import_handle_t im_memseg,
    rsm_barrier_mode_t mode)
{
	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_set_mode: enter\n"));

	im_memseg = im_memseg; mode = mode;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_set_mode: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_create_memory_handle(rsmapi_controller_handle_t controller,
    rsm_localmemory_handle_t *local_hndl_p,
    caddr_t local_va, size_t len)
{
	rsm_memseg_export_handle_t memseg;
	rsmapi_access_entry_t	acl[1];
	rsm_memseg_id_t segid = 0;
	size_t size;
	int e;


	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_create_memory_handle: enter\n"));

	/*
	 * create a surrogate segment (local memory will be locked down).
	 */
	size = roundup(len, PAGESIZE);
	e = rsm_memseg_export_create(controller, &memseg,
	    (void *)local_va, size,
	    RSM_ALLOW_REBIND);
	if (e != RSM_SUCCESS) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "export create failed\n"));
		return (e);
	}

	/*
	 * Publish the segment to the local node only.  If the segment
	 * length is very large then don't publish to the adapter driver
	 * because that will consume too much DVMA space - this is indicated
	 * to the Kernel Agent using null permissions.  DVMA binding will
	 * be done when the RDMA is set up.
	 */
	acl[0].ae_node = rsm_local_nodeid;
	if (len > RSM_MAX_HANDLE_DVMA)
		acl[0].ae_permission = 0;
	else
		acl[0].ae_permission = RSM_PERM_RDWR;

	e = rsm_memseg_export_publish(memseg, &segid, acl, 1);
	if (e != RSM_SUCCESS) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "export publish failed\n"));
		rsm_memseg_export_destroy(memseg);
		return (e);
	}

	/* Use the surrogate seghandle as the local memory handle */
	*local_hndl_p = (rsm_localmemory_handle_t)memseg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_create_memory_handle: exit\n"));

	return (e);
}

static int
__rsm_free_memory_handle(rsm_localmemory_handle_t local_handle)
{
	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_free_memory_handle: enter\n"));

	rsm_memseg_export_destroy((rsm_memseg_export_handle_t)local_handle);

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_free_memory_handle: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_get_lib_attr(rsm_ndlib_attr_t **libattrp)
{

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_get_lib_attr: enter\n"));

	*libattrp = &_rsm_genlib_attr;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_get_lib_attr: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_closedevice(rsmapi_controller_handle_t cntr_handle)
{

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_closedevice: enter\n"));

	cntr_handle = cntr_handle;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_closedevice: exit\n"));

	return (RSM_SUCCESS);
}

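/*
 * __rsmdefault_setops() fills in any entry of the given segment-ops vector
 * that is still NULL with the corresponding generic routine defined above,
 * so an NDI library only needs to supply the operations it actually
 * specializes.  A minimal usage sketch (names are illustrative only, not
 * part of this library):
 *
 *	rsm_segops_t ops = { 0 };
 *	ops.rsm_memseg_import_put = my_controller_put;
 *	__rsmdefault_setops(&ops);	everything else gets the defaults
 */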
void
__rsmdefault_setops(rsm_segops_t *segops)
{

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsmdefault_setops: enter\n"));

	if (segops->rsm_memseg_import_connect == NULL) {
		segops->rsm_memseg_import_connect = __rsm_import_connect;
	}
	if (segops->rsm_memseg_import_disconnect == NULL) {
		segops->rsm_memseg_import_disconnect = __rsm_import_disconnect;
	}

	if (segops->rsm_memseg_import_get8 == NULL) {
		segops->rsm_memseg_import_get8 = __rsm_get8x8;
	}
	if (segops->rsm_memseg_import_get16 == NULL) {
		segops->rsm_memseg_import_get16 = __rsm_get16x16;
	}
	if (segops->rsm_memseg_import_get32 == NULL) {
		segops->rsm_memseg_import_get32 = __rsm_get32x32;
	}
	if (segops->rsm_memseg_import_get64 == NULL) {
		segops->rsm_memseg_import_get64 = __rsm_get64x64;
	}
	if (segops->rsm_memseg_import_get == NULL) {
		segops->rsm_memseg_import_get = __rsm_get;
	}

	if (segops->rsm_memseg_import_put8 == NULL) {
		segops->rsm_memseg_import_put8 = __rsm_put8x8;
	}
	if (segops->rsm_memseg_import_put16 == NULL) {
		segops->rsm_memseg_import_put16 = __rsm_put16x16;
	}
	if (segops->rsm_memseg_import_put32 == NULL) {
		segops->rsm_memseg_import_put32 = __rsm_put32x32;
	}
	if (segops->rsm_memseg_import_put64 == NULL) {
		segops->rsm_memseg_import_put64 = __rsm_put64x64;
	}
	if (segops->rsm_memseg_import_put == NULL) {
		segops->rsm_memseg_import_put = __rsm_put;
	}

	if (segops->rsm_memseg_import_putv == NULL) {
		segops->rsm_memseg_import_putv = __rsm_putv;
	}

	if (segops->rsm_memseg_import_getv == NULL) {
		segops->rsm_memseg_import_getv = __rsm_getv;
	}

	if (segops->rsm_create_localmemory_handle == NULL) {
		segops->rsm_create_localmemory_handle =
		    __rsm_create_memory_handle;
	}

	if (segops->rsm_free_localmemory_handle == NULL) {
		segops->rsm_free_localmemory_handle =
		    __rsm_free_memory_handle;
	}

	/* XXX: Need to support barrier functions */
	if (segops->rsm_memseg_import_init_barrier == NULL) {
		segops->rsm_memseg_import_init_barrier =
		    __rsm_memseg_import_init_barrier;
	}
	if (segops->rsm_memseg_import_open_barrier == NULL) {
		segops->rsm_memseg_import_open_barrier =
		    __rsm_memseg_import_open_barrier;
	}
	if (segops->rsm_memseg_import_order_barrier == NULL) {
		segops->rsm_memseg_import_order_barrier =
		    __rsm_memseg_import_order_barrier;
	}
	if (segops->rsm_memseg_import_close_barrier == NULL) {
		segops->rsm_memseg_import_close_barrier =
		    __rsm_memseg_import_close_barrier;
	}
	if (segops->rsm_memseg_import_destroy_barrier == NULL) {
		segops->rsm_memseg_import_destroy_barrier =
		    __rsm_memseg_import_destroy_barrier;
	}

	if (segops->rsm_memseg_import_get_mode == NULL) {
		segops->rsm_memseg_import_get_mode =
		    __rsm_memseg_import_get_mode;
	}
	if (segops->rsm_memseg_import_set_mode == NULL) {
		segops->rsm_memseg_import_set_mode =
		    __rsm_memseg_import_set_mode;
	}

	if (segops->rsm_get_lib_attr == NULL) {
		segops->rsm_get_lib_attr =
		    __rsm_get_lib_attr;
	}

	if (segops->rsm_closedevice == NULL) {
		segops->rsm_closedevice =
		    __rsm_closedevice;
	}


	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsmdefault_setops: exit\n"));

}
1187