1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * etm_xport_api_dd.c FMA ETM-to-Transport API implementation
29 * for sun4v/Ontario
30 *
31 * library for establishing connections and transporting FMA events
32 * between ETMs (event transport modules) in separate fault domain,
33 * ie, between domain and service processor in same chassis, using
34 * a character device driver based transport
35 */
36
37 #pragma ident "%Z%%M% %I% %E% SMI"
38
39 /*
40 * --------------------------------- includes --------------------------------
41 */
42
43 #include <sys/types.h>
44 #include <sys/stat.h>
45 #include <sys/fm/protocol.h>
46 #include <fm/fmd_api.h>
47
48 #include <pthread.h>
49 #include <stdio.h>
50 #include <stropts.h>
51 #include <locale.h>
52 #include <strings.h>
53 #include <stdlib.h>
54 #include <unistd.h>
55 #include <limits.h>
56 #include <alloca.h>
57 #include <errno.h>
58 #include <fcntl.h>
59 #include <time.h>
60 #include <poll.h>
61 #include <sys/ldc.h>
62 #include <sys/vldc.h>
63
64 #include "etm_xport_api.h"
65 #include "etm_etm_proto.h"
66 #include "etm_impl.h"
67
68 /*
69 * ----------------------- private consts and defns --------------------------
70 */
71
72 /* magic numbers (32 bits) for transport address and connection handle */
73
74 #define ETM_XPORT_DD_MAGIC_ADDR (0x45544D41)
75 #define ETM_XPORT_DD_MAGIC_CONN (0x45544D43)
76
77 /* flags to use in opening transport device */
78
79 #define ETM_XPORT_OPEN_FLAGS (O_RDWR | O_NOCTTY)
80
81 /*
82 * transport address and connection handle structures overload fn and fd
83 * fields to include state information:
84 *
85 * fn file name NULL means unused or closed
86 * fd file descriptor -1 means unused or closed
87 */
88
/*
 * Private transport address: validated via magic_num by
 * etm_xport_valid_addr() and stat()'ed against fn before use.
 */
typedef struct _etm_xport_addr {
	uint32_t magic_num;		/* magic number */
	char *fn;			/* fullpath to device node */
} _etm_xport_addr_t;

/*
 * Private connection handle: validated via magic_num/fd by
 * etm_xport_valid_conn(); owns a duplicated address freed at close.
 */
typedef struct _etm_xport_conn {
	uint32_t magic_num;		/* magic number */
	int fd;				/* open dev file descriptor */
	_etm_xport_addr_t *addr;	/* associated transport addr */
} _etm_xport_conn_t;
99
100 /*
101 * filename of device node to reach SP from domain. one of these two
102 * device nodes will be used:
103 * ETM_XPORT_DEV_FN_SP - the Ontario glvc
104 * ETM_XPORT_DEV_VLDC - the more recent LDOMS 1.0 (a.k.a. Ontario+) vldc
105 * When the latter is in use, use_vldc is set to 1.
106 *
107 * filenames of device nodes to reach domains from SP
108 * are NA because SP runs ALOM vs Solaris or Linux
109 * and ETM is for Unix based OSes
110 */
111 #define ETM_XPORT_DEV_FN_SP "/dev/spfma"
112
113 #define ETM_XPORT_DEV_VLDC \
114 "/devices/virtual-devices@100/channel-devices@200" \
115 "/virtual-channel-client@2:spfma"
116
117 /*
118 * -------------------------- global variables -------------------------------
119 */
120
/* set to 1 by etm_xport_get_fn() when the vldc device node exists */
static int use_vldc = 0;

/*
 * Operational statistics; registered with fmd via fmd_stat_create()
 * in etm_xport_init() and bumped inline throughout this file.
 */
static struct stats {

	/* address handle failures */

	fmd_stat_t xport_addr_magicnum_bad;
	fmd_stat_t xport_addr_fn_bad;

	/* connection handle failures */

	fmd_stat_t xport_conn_magicnum_bad;
	fmd_stat_t xport_conn_fd_bad;

	/* internal read/peek failures */

	fmd_stat_t xport_buffread_badargs;
	fmd_stat_t xport_rawpeek_badargs;

	/* xport API failures */

	fmd_stat_t xport_accept_badargs;
	fmd_stat_t xport_get_addr_conn_badargs;
	fmd_stat_t xport_free_addr_badargs;
	fmd_stat_t xport_free_addrv_badargs;
	fmd_stat_t xport_get_any_lcc_badargs;

	/* system and library failures */

	fmd_stat_t xport_os_open_fail;
	fmd_stat_t xport_os_close_fail;
	fmd_stat_t xport_os_read_fail;
	fmd_stat_t xport_os_write_fail;
	fmd_stat_t xport_os_peek_fail;
	fmd_stat_t xport_os_ioctl_fail;

} etm_xport_stats = {

	/* address handle failures */

	{ "xport_addr_magicnum_bad", FMD_TYPE_UINT64,
		"invalid address handle magic number" },
	{ "xport_addr_fn_bad", FMD_TYPE_UINT64,
		"invalid address handle file name" },

	/* connection handle failures */

	{ "xport_conn_magicnum_bad", FMD_TYPE_UINT64,
		"invalid connection handle magic number" },
	{ "xport_conn_fd_bad", FMD_TYPE_UINT64,
		"invalid connection handle file descriptor" },

	/* internal read/peek failures */

	{ "xport_buffread_badargs", FMD_TYPE_UINT64,
		"bad arguments in etm_xport_buffered_read" },
	{ "xport_rawpeek_badargs", FMD_TYPE_UINT64,
		"bad arguments in etm_xport_raw_peek" },

	/* xport API failures */

	{ "xport_accept_badargs", FMD_TYPE_UINT64,
		"bad arguments in etm_xport_accept" },
	{ "xport_get_addr_conn_badargs", FMD_TYPE_UINT64,
		"bad arguments in etm_xport_get_addr_conn" },
	{ "xport_free_addr_badargs", FMD_TYPE_UINT64,
		"bad arguments in etm_xport_free_addr" },
	{ "xport_free_addrv_badargs", FMD_TYPE_UINT64,
		"bad arguments in etm_xport_free_addrv" },
	{ "xport_get_any_lcc_badargs", FMD_TYPE_UINT64,
		"bad arguments in etm_xport_get_any_lcc" },

	/* system and library failures */

	{ "xport_os_open_fail", FMD_TYPE_UINT64,
		"open system call failures" },
	{ "xport_os_close_fail", FMD_TYPE_UINT64,
		"close system call failures" },
	{ "xport_os_read_fail", FMD_TYPE_UINT64,
		"read system call failures" },
	{ "xport_os_write_fail", FMD_TYPE_UINT64,
		"write system call failures" },
	{ "xport_os_peek_fail", FMD_TYPE_UINT64,
		"peek (ioctl) failures" },
	{ "xport_os_ioctl_fail", FMD_TYPE_UINT64,
		"ioctl system call failures" }
};

/*
 * intermediate read buffer to [partially] emulate byte stream semantics;
 * sized/allocated lazily (2 * MTU) in etm_xport_buffered_read() — per the
 * Design_Note there, cleanup is expected in etm_xport_fini()
 */

static uint8_t *etm_xport_irb_area = NULL;	/* buffered read area */
static uint8_t *etm_xport_irb_head = NULL;	/* read head (dequeue) */
static uint8_t *etm_xport_irb_tail = NULL;	/* read tail (enqueue) */
static size_t etm_xport_irb_mtu_sz = 0;		/* MTU size (in bytes) */

/*
 * -------------------------- private variables ------------------------------
 */

/* single shared connection when use_vldc; guarded by etm_xport_vldc_lock */
static _etm_xport_conn_t *
etm_xport_vldc_conn = NULL;	/* single connection handle for VLDC */

static pthread_mutex_t
etm_xport_vldc_lock = PTHREAD_MUTEX_INITIALIZER;
				/* lock for open()/close() VLDC */

static int
etm_xport_debug_lvl = 0;	/* debug level: 0 off, 1 on, 2 more, ... */

static char *
etm_xport_addrs = "";		/* spec str for transport addrs to use */

static int
etm_xport_should_fake_dd = 0;	/* bool for whether to fake device driver */
235
236 /*
237 * -------------------------- private functions ------------------------------
238 */
239
240 /*
241 * etm_fake_ioctl - fake/simulate transport driver's ioctl() behavior
242 * [for unit testing with device driver absent or
243 * for alternative directory entry based transports],
244 * return 0 for success
245 * or -1 and set errno
246 * caveats:
247 * simulation may be incomplete, especially wrt peek()
248 *
249 * Design_Note: To avoid interfering with FMD's signal mask (SIGALRM)
250 * do not use [Solaris] sleep(3C) and instead use
251 * pthread_cond_wait() or nanosleep(), both of which
252 * are POSIX spec-ed to leave signal masks alone.
253 * This is needed for Solaris and Linux (domain and SP).
254 */
255
256 static int
etm_fake_ioctl(int fd,int op,void * buf)257 etm_fake_ioctl(int fd, int op, void *buf)
258 {
259 int rv; /* ret val */
260 etm_xport_opt_op_t *op_ctl_ptr; /* ptr for option ops */
261 etm_xport_msg_peek_t *peek_ctl_ptr; /* ptr for peeking */
262 struct stat stat_buf; /* file stat struct */
263 ssize_t n; /* gen use */
264 struct timespec tms; /* for nanosleep() */
265
266 tms.tv_sec = 0;
267 tms.tv_nsec = 0;
268
269 rv = 0; /* default is success */
270
271 if (op == ETM_XPORT_IOCTL_DATA_PEEK) {
272 peek_ctl_ptr = buf;
273 /* sleep until some data avail, potentially forever */
274 for (;;) {
275 if (fstat(fd, &stat_buf) < 0) {
276 rv = -1;
277 goto func_ret;
278 }
279 if (stat_buf.st_size > 0) {
280 n = MIN(peek_ctl_ptr->pk_buflen,
281 stat_buf.st_size);
282 peek_ctl_ptr->pk_buflen = n;
283 /* return bogus data assuming content unused */
284 (void) memset(peek_ctl_ptr->pk_buf, 0xA5, n);
285 goto func_ret;
286 }
287 tms.tv_sec = ETM_SLEEP_QUIK;
288 tms.tv_nsec = 0;
289 if ((n = nanosleep(&tms, NULL)) < 0) {
290 rv = -1;
291 goto func_ret;
292 }
293 } /* forever awaiting data */
294 } else if (op == ETM_XPORT_IOCTL_OPT_OP) {
295 op_ctl_ptr = buf;
296 /* default near MTU_SZ gets and agree with everything else */
297 if ((op_ctl_ptr->oo_op == ETM_XPORT_OPT_GET) &&
298 (op_ctl_ptr->oo_opt == ETM_XPORT_OPT_MTU_SZ)) {
299 op_ctl_ptr->oo_val = 7 * ETM_XPORT_MTU_SZ_DEF / 8;
300 }
301 goto func_ret;
302 } /* whether ioctl op is handled */
303
304 rv = -1;
305 errno = EINVAL;
306
307 func_ret:
308
309 return (rv);
310
311 } /* etm_fake_ioctl() */
312
313 /*
314 * etm_xport_get_fn - return a cached read-only copy
315 * of the device node name to use
316 * for the given I/O operation
317 */
318
static char *
etm_xport_get_fn(fmd_hdl_t *hdl, int io_op)
{
	/*
	 * Cached device-node names; zero-filled statics, populated once
	 * and returned directly on later calls (callers must treat the
	 * result as read-only).
	 */
	static char fn_wr[PATH_MAX] = {0};	/* fn for write */
	static char fn_rd[PATH_MAX] = {0};	/* fn for read/peek */
	char *rv;		/* ret val */
	char *prop_str;		/* property string */
	char *cp;		/* char ptr */

	rv = NULL;

	/* use cached copies if avail */

	if ((io_op == ETM_IO_OP_WR) && (fn_wr[0] != '\0')) {
		return (fn_wr);
	}
	if (((io_op == ETM_IO_OP_RD) || (io_op == ETM_IO_OP_PK)) &&
	    (fn_rd[0] != '\0')) {
		return (fn_rd);
	}

	/* create cached copies if empty "" property string */

	/* NOTE(review): prop_str is assumed non-NULL here — confirm that */
	/* fmd_prop_get_string() cannot return NULL for this property */
	prop_str = fmd_prop_get_string(hdl, ETM_PROP_NM_XPORT_ADDRS);
	if (etm_xport_debug_lvl >= 2) {
		fmd_hdl_debug(hdl, "info: etm_xport_get_fn prop_str %s\n",
		    prop_str);
	}

	if (strlen(prop_str) == 0) {
		struct stat buf;
		char *fname;

		/* prefer the newer vldc node when present; side effect: */
		/* latches the file-scope use_vldc flag for the module */
		if (stat(ETM_XPORT_DEV_VLDC, &buf) == 0) {
			use_vldc = 1;
			fname = ETM_XPORT_DEV_VLDC;
		} else {
			use_vldc = 0;
			fname = ETM_XPORT_DEV_FN_SP;
		}

		/* last byte stays NUL from static zero-init */
		(void) strncpy(fn_wr, fname, PATH_MAX - 1);
		(void) strncpy(fn_rd, fname, PATH_MAX - 1);
		rv = fn_rd;
		if (io_op == ETM_IO_OP_WR) {
			rv = fn_wr;
		}
		goto func_ret;
	} /* if no/empty property set */

	/* create cached copies if "write[|read]" property string */

	if (io_op == ETM_IO_OP_WR) {
		(void) strncpy(fn_wr, prop_str, PATH_MAX - 1);
		/* truncate at '|' so fn_wr holds only the write name */
		if ((cp = strchr(fn_wr, '|')) != NULL) {
			*cp = '\0';
		}
		rv = fn_wr;
	} else {
		/* read name follows '|' if present, else whole string */
		if ((cp = strchr(prop_str, '|')) != NULL) {
			cp++;
		} else {
			cp = prop_str;
		}
		(void) strncpy(fn_rd, cp, PATH_MAX - 1);
		rv = fn_rd;
	} /* whether io op is write/read/peek */

func_ret:

	if (etm_xport_debug_lvl >= 2) {
		fmd_hdl_debug(hdl, "info: etm_xport_get_fn fn_wr %s fn_rd %s\n",
		    fn_wr, fn_rd);
	}
	fmd_prop_free_string(hdl, prop_str);
	return (rv);

} /* etm_xport_get_fn() */
397
398 /*
399 * etm_xport_valid_addr - validate the given transport address,
400 * return 0 if valid
401 * or -errno value if not
402 */
403
404 static int
etm_xport_valid_addr(etm_xport_addr_t addr)405 etm_xport_valid_addr(etm_xport_addr_t addr)
406 {
407 _etm_xport_addr_t *_addr; /* transport address */
408 struct stat stat_buf; /* buffer for stat() results */
409
410 _addr = addr;
411
412 if (_addr == NULL) {
413 return (-EINVAL);
414 }
415
416 if (_addr->magic_num != ETM_XPORT_DD_MAGIC_ADDR) {
417 etm_xport_stats.xport_addr_magicnum_bad.fmds_value.ui64++;
418 return (-EFAULT);
419 }
420
421 if (stat(_addr->fn, &stat_buf) < 0) {
422 /* errno assumed set by above call */
423 etm_xport_stats.xport_addr_fn_bad.fmds_value.ui64++;
424 return (-errno);
425 }
426
427 return (0);
428
429 } /* etm_xport_valid_addr() */
430
431 /*
432 * etm_xport_valid_conn - validate the given connection handle,
433 * return 0 if valid
434 * or -errno value if not
435 */
436
437 static int
etm_xport_valid_conn(etm_xport_conn_t conn)438 etm_xport_valid_conn(etm_xport_conn_t conn)
439 {
440 _etm_xport_conn_t *_conn; /* connection handle */
441
442 _conn = conn;
443
444 if (_conn == NULL) {
445 return (-EINVAL);
446 }
447
448 if (_conn->magic_num != ETM_XPORT_DD_MAGIC_CONN) {
449 etm_xport_stats.xport_conn_magicnum_bad.fmds_value.ui64++;
450 return (-EFAULT);
451 }
452
453 if (_conn->fd <= -1) {
454 etm_xport_stats.xport_conn_fd_bad.fmds_value.ui64++;
455 return (-EBADF);
456 }
457
458 return (0);
459
460 } /* etm_xport_valid_conn() */
461
462 /*
463 * etm_xport_free_addr - free the given transport address
464 */
465
466 static void
etm_xport_free_addr(fmd_hdl_t * hdl,etm_xport_addr_t addr)467 etm_xport_free_addr(fmd_hdl_t *hdl, etm_xport_addr_t addr)
468 {
469 if (addr == NULL) {
470 etm_xport_stats.xport_free_addr_badargs.fmds_value.ui64++;
471 return;
472 }
473
474 fmd_hdl_free(hdl, addr, sizeof (_etm_xport_addr_t));
475
476 } /* etm_xport_free_addr() */
477
478 /*
479 * etm_xport_dup_addr - duplicate the given transport address,
480 * which is to be freed separately,
481 * return the newly allocated transport address
482 * pending until possible to do so
483 */
484
485 static etm_xport_addr_t
etm_xport_dup_addr(fmd_hdl_t * hdl,etm_xport_addr_t addr)486 etm_xport_dup_addr(fmd_hdl_t *hdl, etm_xport_addr_t addr)
487 {
488 etm_xport_addr_t new_addr; /* new transport address */
489
490 new_addr = fmd_hdl_zalloc(hdl, sizeof (_etm_xport_addr_t), FMD_SLEEP);
491 (void) memcpy(new_addr, addr, sizeof (_etm_xport_addr_t));
492 return (new_addr);
493
494 } /* etm_xport_dup_addr() */
495
496 /*
497 * etm_xport_raw_peek - try to peek N <= MTU bytes from the connection
498 * into the caller's given buffer,
499 * return how many bytes actually peeked
500 * or -errno value
501 * caveats:
502 * peeked data is NOT guaranteed by all platform transports
503 * to remain enqueued if this process/thread crashes;
504 * this casts some doubt on the utility of this func
505 *
506 * transport does NOT support peek sizes > MTU
507 */
508
static ssize_t
etm_xport_raw_peek(fmd_hdl_t *hdl, _etm_xport_conn_t *_conn,
	void *buf, size_t byte_cnt)
{
	ssize_t rv;			/* ret val */
	ssize_t n;			/* gen use */
	etm_xport_msg_peek_t peek_ctl;	/* struct for peeking */

	rv = 0;

	/* sanity check args */

	if ((hdl == NULL) || (_conn == NULL) || (buf == NULL)) {
		etm_xport_stats.xport_rawpeek_badargs.fmds_value.ui64++;
		return (-EINVAL);
	}

	/* transport does NOT support peek sizes > MTU (once MTU known) */

	if ((etm_xport_irb_mtu_sz > 0) && (byte_cnt > etm_xport_irb_mtu_sz)) {
		etm_xport_stats.xport_rawpeek_badargs.fmds_value.ui64++;
		return (-EINVAL);
	}

	/* try to peek requested amt of data */

	peek_ctl.pk_buf = buf;
	peek_ctl.pk_buflen = byte_cnt;
	peek_ctl.pk_flags = 0;
	peek_ctl.pk_rsvd = 0;

	if (etm_xport_should_fake_dd) {
		n = etm_fake_ioctl(_conn->fd, ETM_XPORT_IOCTL_DATA_PEEK,
		    &peek_ctl);
	} else {
		n = ioctl(_conn->fd, ETM_XPORT_IOCTL_DATA_PEEK, &peek_ctl);
	}
	if (n < 0) {
		/* errno assumed set by above call */
		etm_xport_stats.xport_os_peek_fail.fmds_value.ui64++;
		rv = (-errno);
	} else {
		/* driver updates pk_buflen to the amt actually peeked */
		rv = peek_ctl.pk_buflen;
	}

	if (etm_xport_debug_lvl >= 3) {
		/*
		 * Fix: rv is ssize_t but the conversion spec is "%d";
		 * passing a 64-bit value to a "%d" varargs slot is
		 * undefined on LP64. The value is bounded by the MTU
		 * (or a small negative errno), so an explicit cast to
		 * int is safe and correct.
		 */
		fmd_hdl_debug(hdl, "info: [fake] ioctl(_PEEK) ~= %d bytes\n",
		    (int)rv);
	}
	return (rv);

} /* etm_xport_raw_peek() */
559
560 /*
561 * Design_Note:
562 *
563 * The transport device driver did not implement byte stream semantics
564 * per the spec; its behavior is closer to that of a block device.
565 * Consequently, ETM within its Transport API attempts to make the device
566 * look like a byte stream by using an intermediate buffer in user space
567 * and maintaining progress pointers within that buffer which is populated
568 * in near-MTU sized reads. We think it's OK to leave the write side
569 * implementation as it was originally written for byte stream semantics
570 * because we were told subsequent write()s will pend until the earlier
571 * content is read() at the remote end -- essentially each write() must be
572 * paired with a single read() -- the device driver does not buffer any I/O.
573 *
574 * The early driver bugs of returning more data than requested (thus
575 * causing buffer overrun corruptions/crashes) and requiring user buffers
576 * to be stack based vs heap based, have both been corrected.
577 */
578
579 /*
580 * etm_xport_buffered_read - try to read N <= MTU bytes from the connection
581 * or from an privately maintained intermediate buffer,
582 * into the caller's given buffer,
583 * return how many bytes actually read
584 * or -errno value
585 *
586 * caveats:
587 * simple buffer scheme consumes 2x MTU bytes of memory and
588 * may do unnecesssary memory copies for ease of coding
589 */
590
static ssize_t
etm_xport_buffered_read(fmd_hdl_t *hdl, _etm_xport_conn_t *_conn,
	void *buf, size_t byte_cnt)
{
	ssize_t i, n;	/* gen use */

	/* perform one-time initializations */

	/*
	 * Design_Note:
	 *
	 * These initializations are not done in etm_xport_init() because
	 * the connection/device is not yet open and hence the MTU size
	 * is not yet known. However, the corresponding cleanup is done
	 * in etm_xport_fini(). The buffering for byte stream semantics
	 * should be done on a per device vs per connection basis; the
	 * MTU size is assumed to remain constant across all connections.
	 */

	/* lazily discover the MTU, falling back to the compiled default */
	if (etm_xport_irb_mtu_sz == 0) {
		if ((n = etm_xport_get_opt(hdl, _conn,
		    ETM_XPORT_OPT_MTU_SZ)) < 0) {
			etm_xport_irb_mtu_sz = ETM_XPORT_MTU_SZ_DEF;
		} else {
			etm_xport_irb_mtu_sz = n;
		}
	}
	/* 2 * MTU: worst case is a full MTU read landing behind a */
	/* nearly-full MTU of already-buffered bytes */
	if (etm_xport_irb_area == NULL) {
		etm_xport_irb_area = fmd_hdl_zalloc(hdl,
		    2 * etm_xport_irb_mtu_sz, FMD_SLEEP);
		etm_xport_irb_head = etm_xport_irb_area;
		etm_xport_irb_tail = etm_xport_irb_head;
	}

	/* sanity check the byte count after have MTU */

	if (byte_cnt > etm_xport_irb_mtu_sz) {
		etm_xport_stats.xport_buffread_badargs.fmds_value.ui64++;
		return (-EINVAL);
	}

	/* if intermediate buffer can satisfy request do so w/out xport read */

	if (byte_cnt <= (etm_xport_irb_tail - etm_xport_irb_head)) {
		(void) memcpy(buf, etm_xport_irb_head, byte_cnt);
		etm_xport_irb_head += byte_cnt;
		if (etm_xport_debug_lvl >= 2) {
			/* NOTE(review): byte_cnt is size_t printed with %d */
			/* — undefined varargs match on LP64; worth fixing */
			fmd_hdl_debug(hdl, "info: quik buffered read == %d\n",
			    byte_cnt);
		}
		return (byte_cnt);
	}

	/* slide buffer contents to front to make room for [MTU] more bytes */

	n = etm_xport_irb_tail - etm_xport_irb_head;
	(void) memmove(etm_xport_irb_area, etm_xport_irb_head, n);
	etm_xport_irb_head = etm_xport_irb_area;
	etm_xport_irb_tail = etm_xport_irb_head + n;

	/*
	 * peek to see how much data is avail and read all of it;
	 * there is no race condition between peeking and reading
	 * due to unbuffered design of the device driver
	 */
	if (use_vldc) {
		pollfd_t pollfd;

		pollfd.events = POLLIN;
		pollfd.revents = 0;
		pollfd.fd = _conn->fd;

		/* block indefinitely until the channel is readable */
		if ((n = poll(&pollfd, 1, -1)) < 1) {
			if (n == 0)
				return (-EIO);
			else
				return (-errno);
		}

		/*
		 * set i to the maximum size --- read(..., i) below will
		 * pull in n bytes (n <= i) anyway
		 */
		i = etm_xport_irb_mtu_sz;
	} else {
		if ((i = etm_xport_raw_peek(hdl, _conn, etm_xport_irb_tail,
		    etm_xport_irb_mtu_sz)) < 0) {
			return (i);
		}
	}
	/* enqueue the newly read bytes at the tail */
	if ((n = read(_conn->fd, etm_xport_irb_tail, i)) < 0) {
		/* errno assumed set by above call */
		etm_xport_stats.xport_os_read_fail.fmds_value.ui64++;
		return (-errno);
	}
	etm_xport_irb_tail += n;

	/* satisfy request as best we can with what we now have */

	n = MIN(byte_cnt, (etm_xport_irb_tail - etm_xport_irb_head));
	(void) memcpy(buf, etm_xport_irb_head, n);
	etm_xport_irb_head += n;
	if (etm_xport_debug_lvl >= 2) {
		fmd_hdl_debug(hdl, "info: slow buffered read == %d\n", n);
	}
	return (n);

} /* etm_xport_buffered_read() */
699
700 /*
701 * ------------------ connection establishment functions ---------------------
702 */
703
704 /*
705 * etm_xport_init - initialize/setup any transport infrastructure
706 * before any connections are opened,
707 * return 0 or -errno value if initialization failed
708 */
709
int
etm_xport_init(fmd_hdl_t *hdl)
{
	_etm_xport_addr_t **_addrv;	/* address vector */
	int i;				/* vector index */
	ssize_t n;			/* gen use */
	int rv;				/* ret val */
	struct stat stat_buf;		/* file stat struct */
	char *fn;			/* filename of dev node */

	rv = 0;	/* assume good */

	_addrv = NULL;

	if (hdl == NULL) {
		rv = (-EINVAL);
		goto func_ret;
	}

	fmd_hdl_debug(hdl, "info: xport initializing\n");

	/* setup statistics and properties from FMD */

	(void) fmd_stat_create(hdl, FMD_STAT_NOALLOC,
	    sizeof (etm_xport_stats) / sizeof (fmd_stat_t),
	    (fmd_stat_t *)&etm_xport_stats);

	/* etm_xport_addrs string is retained for the module's lifetime */
	etm_xport_debug_lvl = fmd_prop_get_int32(hdl, ETM_PROP_NM_DEBUG_LVL);
	etm_xport_addrs = fmd_prop_get_string(hdl, ETM_PROP_NM_XPORT_ADDRS);
	fmd_hdl_debug(hdl, "info: etm_xport_debug_lvl %d\n",
	    etm_xport_debug_lvl);
	fmd_hdl_debug(hdl, "info: etm_xport_addrs %s\n", etm_xport_addrs);

	/* decide whether to fake [some of] the device driver behavior */

	etm_xport_should_fake_dd = 0;	/* default to false */

	/* side effect: etm_xport_get_fn() also latches use_vldc */
	fn = etm_xport_get_fn(hdl, ETM_IO_OP_RD);
	if (stat(fn, &stat_buf) < 0) {
		/* errno assumed set by above call */
		fmd_hdl_error(hdl, "error: bad device node %s errno %d\n",
		    fn, errno);
		rv = (-errno);
		goto func_ret;
	}
	/* a non-char-device node means unit-test/fake transport mode */
	if (!S_ISCHR(stat_buf.st_mode) && use_vldc == 0) {
		etm_xport_should_fake_dd = 1;	/* not a char driver */
	}
	fmd_hdl_debug(hdl, "info: etm_xport_should_fake_dd %d\n",
	    etm_xport_should_fake_dd);

	/* validate each default dst transport address */

	if ((_addrv = (void *)etm_xport_get_ev_addrv(hdl, NULL)) == NULL) {
		/* errno assumed set by above call */
		rv = (-errno);
		goto func_ret;
	}

	for (i = 0; _addrv[i] != NULL; i++) {
		if ((n = etm_xport_valid_addr(_addrv[i])) < 0) {
			fmd_hdl_error(hdl, "error: bad xport addr %p\n",
			    _addrv[i]);
			rv = n;
			goto func_ret;
		}
	} /* foreach dst addr */

	/* pre-open the single shared VLDC connection; failure is non-fatal */
	if (use_vldc) {
		etm_xport_vldc_conn = etm_xport_open(hdl, _addrv[0]);
		if (etm_xport_vldc_conn == NULL) {
			fmd_hdl_debug(hdl, "info: etm_xport_open() failed\n");
		}
	}

func_ret:

	if (_addrv != NULL) {
		etm_xport_free_addrv(hdl, (void *)_addrv);
	}
	if (rv >= 0) {
		fmd_hdl_debug(hdl, "info: xport initialized ok\n");
	}
	return (rv);

} /* etm_xport_init() */
796
797 /*
798 * etm_xport_open - open a connection with the given endpoint,
799 * return the connection handle,
800 * or NULL and set errno if open failed
801 *
802 * Design_Note: The current transport device driver's open()
803 * call will succeed even if the SP is down;
804 * hence there's currently no need for a retry
805 * mechanism.
806 */
807
etm_xport_conn_t
etm_xport_open(fmd_hdl_t *hdl, etm_xport_addr_t addr)
{
	_etm_xport_addr_t *_addr;	/* address handle */
	_etm_xport_conn_t *_conn;	/* connection handle */
	ssize_t n;			/* gen use */

	/* validate the caller's address; the handle keeps its own copy */

	if ((n = etm_xport_valid_addr(addr)) < 0) {
		errno = (-n);
		return (NULL);
	}

	_addr = etm_xport_dup_addr(hdl, addr);

	/* allocate a connection handle and start populating it */

	_conn = fmd_hdl_zalloc(hdl, sizeof (_etm_xport_conn_t), FMD_SLEEP);

	/* serialize open()/dup() against close() of the shared VLDC conn */

	(void) pthread_mutex_lock(&etm_xport_vldc_lock);

	if (use_vldc == 0 || etm_xport_vldc_conn == NULL) {
		if ((_conn->fd = open(_addr->fn,
		    ETM_XPORT_OPEN_FLAGS, 0)) == -1) {
			/* errno assumed set by above call */
			etm_xport_free_addr(hdl, _addr);
			fmd_hdl_free(hdl, _conn, sizeof (_etm_xport_conn_t));
			etm_xport_stats.xport_os_open_fail.fmds_value.ui64++;
			(void) pthread_mutex_unlock(&etm_xport_vldc_lock);
			return (NULL);
		}
	}

	if (use_vldc && etm_xport_vldc_conn == NULL) {
		vldc_opt_op_t op;

		/* Set the channel to reliable mode */
		op.op_sel = VLDC_OP_SET;
		op.opt_sel = VLDC_OPT_MODE;
		op.opt_val = LDC_MODE_RELIABLE;

		if (ioctl(_conn->fd, VLDC_IOCTL_OPT_OP, &op) != 0) {
			/* errno assumed set by above call */
			(void) close(_conn->fd);
			etm_xport_free_addr(hdl, _addr);
			fmd_hdl_free(hdl, _conn, sizeof (_etm_xport_conn_t));
			etm_xport_stats.xport_os_ioctl_fail.fmds_value.ui64++;
			(void) pthread_mutex_unlock(&etm_xport_vldc_lock);
			return (NULL);
		}

		etm_xport_vldc_conn = _conn;
	} else if (use_vldc && etm_xport_vldc_conn != NULL) {
		/*
		 * Fix: dup(2) can fail (eg, EMFILE/EBADF); the original
		 * code ignored the return value and could hand back a
		 * "valid" connection whose fd was -1. Fail the open
		 * instead, mirroring the open(2) failure path above.
		 */
		if ((_conn->fd = dup(etm_xport_vldc_conn->fd)) == -1) {
			/* errno assumed set by above call */
			etm_xport_free_addr(hdl, _addr);
			fmd_hdl_free(hdl, _conn, sizeof (_etm_xport_conn_t));
			etm_xport_stats.xport_os_open_fail.fmds_value.ui64++;
			(void) pthread_mutex_unlock(&etm_xport_vldc_lock);
			return (NULL);
		}
	}

	(void) pthread_mutex_unlock(&etm_xport_vldc_lock);

	/* return the fully formed connection handle */

	_conn->magic_num = ETM_XPORT_DD_MAGIC_CONN;
	_conn->addr = _addr;

	return (_conn);

} /* etm_xport_open() */
873
874 /*
875 * etm_xport_accept - accept a request to open a connection,
876 * pending until a remote endpoint opens a
877 * a new connection to us [and sends an ETM msg],
878 * per non-NULL addrp optionally indicate the
879 * remote address if known/avail (NULL if not),
880 * return the connection handle,
881 * or NULL and set errno on failure
882 *
883 * caveats:
884 * any returned transport address is valid only for
885 * as long as the associated connection remains open;
886 * callers should not try to free the transport address
887 *
888 * if new connections are rapid relative to how
889 * frequently this function is called, fairness will
890 * be provided among which connections are accepted
891 *
892 * this function may maintain state to recognize [new]
893 * connections and/or to provide fairness
894 */
895
etm_xport_conn_t
etm_xport_accept(fmd_hdl_t *hdl, etm_xport_addr_t *addrp)
{
	_etm_xport_addr_t *_addr;	/* address handle */
	_etm_xport_addr_t **_addrv;	/* vector of addresses */
	_etm_xport_conn_t *_conn;	/* connection handle */
	_etm_xport_conn_t *rv;		/* ret val */
	uint8_t buf[4];			/* buffer for peeking */
	int n;				/* byte cnt */
	struct timespec tms;		/* for nanosleep() */

	rv = NULL;	/* default is failure */

	_conn = NULL;
	_addrv = NULL;

	tms.tv_sec = ETM_SLEEP_QUIK;
	tms.tv_nsec = 0;

	/*
	 * get the default dst transport address and open a connection to it;
	 * there is only 1 default addr
	 */

	if ((_addrv = (void*)etm_xport_get_ev_addrv(hdl, NULL)) == NULL) {
		/* errno assumed set by above call */
		goto func_ret;
	}

	if (_addrv[0] == NULL) {
		errno = ENXIO;	/* missing addr */
		etm_xport_stats.xport_accept_badargs.fmds_value.ui64++;
		goto func_ret;
	}

	if (_addrv[1] != NULL) {
		errno = E2BIG;	/* too many addrs */
		etm_xport_stats.xport_accept_badargs.fmds_value.ui64++;
		goto func_ret;
	}

	/*
	 * swap in the read-side device name; fn points at the static
	 * cache inside etm_xport_get_fn(), so it is never freed here
	 * (etm_xport_open() duplicates the whole address anyway)
	 */
	_addr = _addrv[0];
	_addr->fn = etm_xport_get_fn(hdl, ETM_IO_OP_RD);

	if ((_conn = etm_xport_open(hdl, _addr)) == NULL) {
		/* errno assumed set by above call */
		goto func_ret;
	}

	if (etm_xport_should_fake_dd) {
		(void) nanosleep(&tms, NULL); /* delay [for resp capture] */
		(void) ftruncate(_conn->fd, 0); /* act like socket/queue/pipe */
	}

	/*
	 * peek from the connection to simulate an accept() system call
	 * behavior; this will pend until some ETM message is written
	 * from the other end
	 */

	if (use_vldc) {
		pollfd_t pollfd;

		pollfd.events = POLLIN;
		pollfd.revents = 0;
		pollfd.fd = _conn->fd;

		/* infinite timeout: pend until data or poll error */
		if ((n = poll(&pollfd, 1, -1)) < 1) {
			if (n == 0) {
				errno = EIO;
			}
			goto func_ret;
		}
	} else {
		if ((n = etm_xport_raw_peek(hdl, _conn, buf, 1)) < 0) {
			errno = (-n);
			goto func_ret;
		}
	}

	rv = _conn;	/* success, return the open connection */

func_ret:

	/* cleanup the connection if failed */

	if (rv == NULL) {
		if (_conn != NULL) {
			(void) etm_xport_close(hdl, _conn);
		}
	} else {
		/* returned addr belongs to the conn; caller must not free */
		if (addrp != NULL) {
			*addrp = _conn->addr;
		}
	}

	/* free _addrv and all its transport addresses */

	if (_addrv != NULL) {
		etm_xport_free_addrv(hdl, (void *)_addrv);
	}

	if (etm_xport_debug_lvl >= 2) {
		fmd_hdl_debug(hdl, "info: accept conn %p w/ *addrp %p\n",
		    rv, (addrp != NULL ? *addrp : NULL));
	}

	return (rv);

} /* etm_xport_accept() */
1006
1007 /*
1008 * etm_xport_close - close a connection from either endpoint,
1009 * return the original connection handle,
1010 * or NULL and set errno if close failed
1011 */
1012
etm_xport_conn_t
etm_xport_close(fmd_hdl_t *hdl, etm_xport_conn_t conn)
{
	etm_xport_conn_t rv;		/* ret val */
	_etm_xport_conn_t *_conn;	/* connection handle */
	int nev;			/* -errno val */

	_conn = conn;

	rv = _conn;	/* assume success */

	/* on invalid handle: NULL out _conn so the cleanup below is */
	/* skipped (nothing safe to free), and report -nev via errno */
	if ((nev = etm_xport_valid_conn(_conn)) < 0) {
		_conn = NULL;
		rv = NULL;
		goto func_ret;
	}

	/* close the device node */

	(void) pthread_mutex_lock(&etm_xport_vldc_lock);

	/* close failure still falls through to free the handle below */
	if (close(_conn->fd) < 0) {
		/* errno assumed set by above call */
		etm_xport_stats.xport_os_close_fail.fmds_value.ui64++;
		nev = (-errno);
		rv = NULL;
	}

	/* closing the shared VLDC conn forgets the cached singleton */
	if (use_vldc && (_conn == etm_xport_vldc_conn)) {
		etm_xport_vldc_conn = NULL;
	}

	(void) pthread_mutex_unlock(&etm_xport_vldc_lock);

func_ret:

	/* cleanup the connection */

	if (_conn != NULL) {
		etm_xport_free_addr(hdl, _conn->addr);
		_conn->addr = NULL;
		_conn->magic_num = 0;	/* poison vs stale reuse */
		_conn->fd = -1;
		fmd_hdl_free(hdl, _conn, sizeof (_etm_xport_conn_t));
	}

	if (rv == NULL) {
		errno = (-nev);
	}
	return (rv);

} /* etm_xport_close() */
1065
1066 /*
1067 * etm_xport_get_ev_addrv - indicate which transport addresses
1068 * are implied as destinations by the
1069 * given FMA event, if given no FMA event
1070 * (NULL) indicate default or policy
1071 * driven dst transport addresses,
1072 * return an allocated NULL terminated
1073 * vector of allocated transport addresses,
1074 * or NULL and set errno if none
1075 * caveats:
1076 * callers should never try to individually free an addr
1077 * within the returned vector
1078 */
1079
1080 etm_xport_addr_t *
etm_xport_get_ev_addrv(fmd_hdl_t * hdl,nvlist_t * evp)1081 etm_xport_get_ev_addrv(fmd_hdl_t *hdl, nvlist_t *evp)
1082 {
1083 _etm_xport_addr_t *_addr; /* address handle */
1084 _etm_xport_addr_t **_addrv; /* vector of addresses */
1085
1086 if (evp == NULL) {
1087
1088 /*
1089 * allocate address handles for default/policy destinations
1090 *
1091 * in reality we have just 1 dst transport addr
1092 */
1093
1094 _addr = fmd_hdl_zalloc(hdl, sizeof (_etm_xport_addr_t),
1095 FMD_SLEEP);
1096 } else {
1097
1098 /*
1099 * allocate address handles per FMA event content
1100 *
1101 * in reality we have just 1 dst transport addr
1102 */
1103
1104 _addr = fmd_hdl_zalloc(hdl, sizeof (_etm_xport_addr_t),
1105 FMD_SLEEP);
1106 } /* whether caller passed in a FMA event */
1107
1108 /* allocate vector with 1 non-NULL transport addr */
1109
1110 _addrv = fmd_hdl_zalloc(hdl, 2 * sizeof (_etm_xport_addr_t *),
1111 FMD_SLEEP);
1112
1113 _addr->fn = etm_xport_get_fn(hdl, ETM_IO_OP_WR);
1114 _addr->magic_num = ETM_XPORT_DD_MAGIC_ADDR;
1115 _addrv[0] = _addr;
1116 _addrv[1] = NULL;
1117
1118 return ((void *) _addrv);
1119
1120 } /* etm_xport_get_ev_addrv() */
1121
1122 /*
1123 * etm_xport_free_addrv - free the given vector of transport addresses,
1124 * including each transport address
1125 */
1126
1127 void
etm_xport_free_addrv(fmd_hdl_t * hdl,etm_xport_addr_t * addrv)1128 etm_xport_free_addrv(fmd_hdl_t *hdl, etm_xport_addr_t *addrv)
1129 {
1130 _etm_xport_addr_t **_addrv; /* vector of addrs */
1131 int i; /* vector index */
1132
1133 if (addrv == NULL) {
1134 etm_xport_stats.xport_free_addrv_badargs.fmds_value.ui64++;
1135 return;
1136 }
1137
1138 _addrv = (void*)addrv;
1139
1140 for (i = 0; _addrv[i] != NULL; i++) {
1141 etm_xport_free_addr(hdl, _addrv[i]);
1142 _addrv[i] = NULL;
1143 }
1144 fmd_hdl_free(hdl, _addrv, (i + 1) * sizeof (_etm_xport_addr_t *));
1145
1146 } /* etm_xport_free_addrv() */
1147
1148 /*
1149 * etm_xport_get_addr_conn - indicate which connections in a NULL
1150 * terminated vector of connection
1151 * handles are associated with the
1152 * given transport address,
1153 * return an allocated NULL terminated
1154 * vector of those connection handles,
1155 * or NULL and set errno if none
1156 */
1157
1158 etm_xport_conn_t *
etm_xport_get_addr_conn(fmd_hdl_t * hdl,etm_xport_conn_t * connv,etm_xport_addr_t addr)1159 etm_xport_get_addr_conn(fmd_hdl_t *hdl, etm_xport_conn_t *connv,
1160 etm_xport_addr_t addr)
1161 {
1162 _etm_xport_conn_t **_connv; /* vector of connections */
1163 _etm_xport_conn_t **_mcv; /* matching connections vector */
1164 _etm_xport_addr_t *_addr; /* transport addr to match */
1165 int n; /* matching transport addr cnt */
1166 int i; /* vector index */
1167
1168 if ((connv == NULL) || (addr == NULL)) {
1169 errno = EINVAL;
1170 etm_xport_stats.xport_get_addr_conn_badargs.fmds_value.ui64++;
1171 return (NULL);
1172 }
1173
1174 _connv = (void*)connv;
1175 _addr = (void*)addr;
1176
1177 /* count, allocate space for, and copy, all matching addrs */
1178
1179 n = 0;
1180 for (i = 0; _connv[i] != NULL; i++) {
1181 if ((_connv[i]->addr == _addr) ||
1182 ((_connv[i]->addr != NULL) &&
1183 (_connv[i]->addr->fn == _addr->fn))) {
1184 n++;
1185 }
1186 } /* for counting how many addresses match */
1187
1188 _mcv = fmd_hdl_zalloc(hdl, (n + 1) * sizeof (_etm_xport_conn_t *),
1189 FMD_SLEEP);
1190 n = 0;
1191 for (i = 0; _connv[i] != NULL; i++) {
1192 if ((_connv[i]->addr == _addr) ||
1193 ((_connv[i]->addr != NULL) &&
1194 (_connv[i]->addr->fn == _addr->fn))) {
1195 _mcv[n] = _connv[i];
1196 n++;
1197 }
1198 } /* for copying matching address pointers */
1199 _mcv[n] = NULL;
1200
1201 return ((void *) _mcv);
1202
1203 } /* etm_xport_get_addr_conn() */
1204
1205 /*
1206 * etm_xport_get_any_lcc - indicate which endpoint has undergone
1207 * a life cycle change and what that change
1208 * was (ex: came up), pending until a change
1209 * has occured for some/any endpoint,
1210 * return the appropriate address handle,
1211 * or NULL and set errno if problem
1212 *
1213 * caveats:
1214 * this function maintains or accesses state/history
1215 * regarding life cycle changes of endpoints
1216 *
1217 * if life cycle changes are rapid relative to how
1218 * frequently this function is called, fairness will
1219 * be provided among which endpoints are reported
1220 */
1221
1222 etm_xport_addr_t
etm_xport_get_any_lcc(fmd_hdl_t * hdl,etm_xport_lcc_t * lccp)1223 etm_xport_get_any_lcc(fmd_hdl_t *hdl, etm_xport_lcc_t *lccp)
1224 {
1225 if ((hdl == NULL) || (lccp == NULL)) {
1226 etm_xport_stats.xport_get_any_lcc_badargs.fmds_value.ui64++;
1227 errno = EINVAL;
1228 return (NULL);
1229 }
1230
1231 /*
1232 * function not needed in FMA Phase 1 for sun4v/Ontario
1233 */
1234
1235 errno = ENOTSUP;
1236 return (NULL);
1237
1238 } /* etm_xport_get_any_lcc() */
1239
1240 /*
1241 * etm_xport_fini - finish/teardown any transport infrastructure
1242 * after all connections are closed,
1243 * return 0 or -errno value if teardown failed
1244 */
1245
1246 int
etm_xport_fini(fmd_hdl_t * hdl)1247 etm_xport_fini(fmd_hdl_t *hdl)
1248 {
1249 fmd_hdl_debug(hdl, "info: xport finalizing\n");
1250
1251 if (use_vldc && (etm_xport_vldc_conn != NULL)) {
1252 (void) etm_xport_close(hdl, etm_xport_vldc_conn);
1253 etm_xport_vldc_conn = NULL;
1254 }
1255
1256 /* free any long standing properties from FMD */
1257
1258 fmd_prop_free_string(hdl, etm_xport_addrs);
1259
1260 /* cleanup the intermediate read buffer */
1261
1262 if (etm_xport_irb_tail != etm_xport_irb_head) {
1263 fmd_hdl_debug(hdl, "warning: xport %d bytes stale data\n",
1264 (int)(etm_xport_irb_tail - etm_xport_irb_head));
1265 }
1266 fmd_hdl_free(hdl, etm_xport_irb_area, 2 * etm_xport_irb_mtu_sz);
1267 etm_xport_irb_area = NULL;
1268 etm_xport_irb_head = NULL;
1269 etm_xport_irb_tail = NULL;
1270 etm_xport_irb_mtu_sz = 0;
1271
1272 /* cleanup statistics from FMD */
1273
1274 (void) fmd_stat_destroy(hdl,
1275 sizeof (etm_xport_stats) / sizeof (fmd_stat_t),
1276 (fmd_stat_t *)&etm_xport_stats);
1277
1278 fmd_hdl_debug(hdl, "info: xport finalized ok\n");
1279 return (0);
1280
1281 } /* etm_xport_fini() */
1282
/*
 * ------------------------ input/output functions ---------------------------
 */

1287 /*
1288 * etm_xport_read - try to read N bytes from the connection
1289 * into the given buffer,
1290 * return how many bytes actually read
1291 * or -errno value
1292 */
1293
1294 ssize_t
etm_xport_read(fmd_hdl_t * hdl,etm_xport_conn_t conn,void * buf,size_t byte_cnt)1295 etm_xport_read(fmd_hdl_t *hdl, etm_xport_conn_t conn, void *buf,
1296 size_t byte_cnt)
1297 {
1298 return (etm_xport_buffered_read(hdl, conn, buf, byte_cnt));
1299
1300 } /* etm_xport_read() */
1301
1302 /*
1303 * etm_xport_write - try to write N bytes to the connection
1304 * from the given buffer,
1305 * return how many bytes actually written
1306 * or -errno value
1307 */
1308
1309 ssize_t
etm_xport_write(fmd_hdl_t * hdl,etm_xport_conn_t conn,void * buf,size_t byte_cnt)1310 etm_xport_write(fmd_hdl_t *hdl, etm_xport_conn_t conn, void *buf,
1311 size_t byte_cnt)
1312 {
1313 _etm_xport_conn_t *_conn; /* connection handle */
1314 int n; /* byte cnt */
1315
1316 _conn = conn;
1317
1318 if (hdl == NULL) { /* appease lint */
1319 return (-EINVAL);
1320 }
1321 if ((n = etm_xport_valid_conn(_conn)) < 0) {
1322 return (n);
1323 }
1324
1325 /* write to the connection device's open file descriptor */
1326
1327 if ((n = write(_conn->fd, buf, byte_cnt)) < 0) {
1328 /* errno assumed set by above call */
1329 etm_xport_stats.xport_os_write_fail.fmds_value.ui64++;
1330 n = (-errno);
1331 }
1332
1333 return (n);
1334
1335 } /* etm_xport_write() */
1336
/*
 * ------------------------ miscellaneous functions --------------------------
 */

1341 /*
1342 * etm_xport_get_opt - get a connection's transport option value,
1343 * return the current value
1344 * or -errno value (ex: -ENOTSUP)
1345 */
1346
1347 ssize_t
etm_xport_get_opt(fmd_hdl_t * hdl,etm_xport_conn_t conn,etm_xport_opt_t opt)1348 etm_xport_get_opt(fmd_hdl_t *hdl, etm_xport_conn_t conn, etm_xport_opt_t opt)
1349 {
1350 ssize_t rv; /* ret val */
1351 _etm_xport_conn_t *_conn; /* connection handle */
1352 etm_xport_opt_op_t op_ctl; /* struct for option ops */
1353 ssize_t n; /* gen use */
1354
1355 rv = 0;
1356 _conn = conn;
1357
1358 if (hdl == NULL) { /* appease lint */
1359 return (-EINVAL);
1360 }
1361 if ((n = etm_xport_valid_conn(_conn)) < 0) {
1362 return (n);
1363 }
1364
1365 op_ctl.oo_op = ETM_XPORT_OPT_GET;
1366 op_ctl.oo_opt = opt;
1367
1368 if (etm_xport_should_fake_dd) {
1369 n = etm_fake_ioctl(_conn->fd, ETM_XPORT_IOCTL_OPT_OP, &op_ctl);
1370 } else if (use_vldc) {
1371 if (opt == ETM_XPORT_OPT_MTU_SZ) {
1372 vldc_opt_op_t operation;
1373
1374 operation.op_sel = VLDC_OP_GET;
1375 operation.opt_sel = VLDC_OPT_MTU_SZ;
1376
1377 n = ioctl(_conn->fd, VLDC_IOCTL_OPT_OP, &operation);
1378
1379 op_ctl.oo_val = operation.opt_val;
1380 } else {
1381 return (-EINVAL);
1382 }
1383 } else {
1384 n = ioctl(_conn->fd, ETM_XPORT_IOCTL_OPT_OP, &op_ctl);
1385 }
1386 if (n < 0) {
1387 /* errno assumed set by above call */
1388 rv = (-errno);
1389 etm_xport_stats.xport_os_ioctl_fail.fmds_value.ui64++;
1390 } else {
1391 rv = (int)op_ctl.oo_val;
1392 }
1393
1394 return (rv);
1395
1396 } /* etm_xport_get_opt() */
1397