1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
23 /* All Rights Reserved */
24
25 /*
26 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
27 * Use is subject to license terms.
28 */
29 /*
30 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
31 */
32
33 #include "mt.h"
34 #include <stdlib.h>
35 #include <string.h>
36 #include <strings.h>
37 #include <unistd.h>
38 #include <errno.h>
39 #include <stropts.h>
40 #include <sys/stream.h>
41 #define _SUN_TPI_VERSION 2
42 #include <sys/tihdr.h>
43 #include <sys/timod.h>
44 #include <sys/stat.h>
45 #include <xti.h>
46 #include <fcntl.h>
47 #include <signal.h>
48 #include <assert.h>
49 #include <syslog.h>
50 #include <limits.h>
51 #include <ucred.h>
52 #include "tx.h"
53
54 #define DEFSIZE 2048
55
56 /*
57 * The following used to be in tiuser.h, but was causing too much namespace
58 * pollution.
59 */
60 #define ROUNDUP32(X) ((X + 0x03)&~0x03)
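
/*
 * Illustration (not part of the library): ROUNDUP32() rounds a byte
 * count up to the next multiple of 4 (32 bits), e.g.
 *
 *	assert(ROUNDUP32(0) == 0);
 *	assert(ROUNDUP32(5) == 8);
 *	assert(ROUNDUP32(8) == 8);
 *
 * Note the macro does not parenthesize X, so it should only be given a
 * simple variable or constant.
 */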
61
62 static struct _ti_user *find_tilink(int s);
63 static struct _ti_user *add_tilink(int s);
64 static void _t_free_lookbufs(struct _ti_user *tiptr);
65 static unsigned int _t_setsize(t_scalar_t infosize, boolean_t option);
66 static int _t_cbuf_alloc(struct _ti_user *tiptr, char **retbuf);
67 static int _t_rbuf_alloc(struct _ti_user *tiptr, char **retbuf);
68 static int _t_adjust_state(int fd, int instate);
69 static int _t_alloc_bufs(int fd, struct _ti_user *tiptr,
70 struct T_info_ack *tsap);
71
72 mutex_t _ti_userlock = DEFAULTMUTEX; /* Protects hash_bucket[] */
73
74 /*
75 * _t_checkfd - checks the validity of a file descriptor
76 */
77 struct _ti_user *
78 _t_checkfd(int fd, int force_sync, int api_semantics)
79 {
80 sigset_t mask;
81 struct _ti_user *tiptr;
82 int retval, timodpushed;
83
84 if (fd < 0) {
85 t_errno = TBADF;
86 return (NULL);
87 }
88
89 if (!force_sync) {
90 sig_mutex_lock(&_ti_userlock);
91 tiptr = find_tilink(fd);
92 sig_mutex_unlock(&_ti_userlock);
93 if (tiptr != NULL)
94 return (tiptr);
95 }
96
97 /*
98 * Not found or a forced sync is required.
99 * check if this is a valid TLI/XTI descriptor.
100 */
101 timodpushed = 0;
102 do {
103 retval = ioctl(fd, I_FIND, "timod");
104 } while (retval < 0 && errno == EINTR);
105
106 if (retval < 0 || (retval == 0 && _T_IS_TLI(api_semantics))) {
107 /*
108 * not a stream or a TLI endpoint with no timod
109 * XXX Note: If it is a XTI call, we push "timod" and
110 * try to convert it into a transport endpoint later.
111 * We do not do it for TLI and "retain" the old buggy
112 * behavior because ypbind and a lot of other daemons seem
113 * to use a buggy logic test of the form
114 * "(t_getstate(0) != -1 || t_errno != TBADF)" to see if
115 * they were ever invoked with a request on stdin and drop into
116 * untested code. This test is in code generated by rpcgen,
117 * which is why the test is replicated in many daemons too.
118 * We will need to fix that test too with an "IsaTLIendpoint"
119 * test if we ever fix this for TLI.
120 */
121 t_errno = TBADF;
122 return (NULL);
123 }
124
125 if (retval == 0) {
126 /*
127 * "timod" not already on stream, then push it
128 */
129 do {
130 /*
131 * Assumes (correctly) that I_PUSH is
132 * atomic w.r.t signals (EINTR error)
133 */
134 retval = ioctl(fd, I_PUSH, "timod");
135 } while (retval < 0 && errno == EINTR);
136
137 if (retval < 0) {
138 t_errno = TSYSERR;
139 return (NULL);
140 }
141 timodpushed = 1;
142 }
143 /*
144 * Try to (re)constitute the info at user level from state
145 * in the kernel. This could be information that was lost due
146 * to an exec, or state being instantiated at a new descriptor
147 * due to open(), dup2(), etc.
148 *
149 * _t_create() requires that all signals be blocked.
150 * Note that sig_mutex_lock() only defers signals, it does not
151 * block them, so interruptible syscalls could still get EINTR.
152 */
153 (void) thr_sigsetmask(SIG_SETMASK, &fillset, &mask);
154 sig_mutex_lock(&_ti_userlock);
155 tiptr = _t_create(fd, NULL, api_semantics, NULL);
156 if (tiptr == NULL) {
157 int sv_errno = errno;
158 sig_mutex_unlock(&_ti_userlock);
159 (void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
160 /*
161 * restore to stream before timod pushed. It may
162 * not have been a network transport stream.
163 */
164 if (timodpushed)
165 (void) ioctl(fd, I_POP, 0);
166 errno = sv_errno;
167 return (NULL);
168 }
169 sig_mutex_unlock(&_ti_userlock);
170 (void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
171 return (tiptr);
172 }
173
174 /*
175 * copy data to output buffer making sure the output buffer is 32 bit
176 * aligned, even though the input buffer may not be.
177 */
178 int
179 _t_aligned_copy(
180 struct strbuf *strbufp,
181 int len,
182 int init_offset,
183 char *datap,
184 t_scalar_t *rtn_offset)
185 {
186 *rtn_offset = ROUNDUP32(init_offset);
187 if ((*rtn_offset + len) > strbufp->maxlen) {
188 /*
189 * Aligned copy will overflow buffer
190 */
191 return (-1);
192 }
193 (void) memcpy(strbufp->buf + *rtn_offset, datap, (size_t)len);
194
195 return (0);
196 }
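
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * building a T_BIND_REQ where the address is appended after the fixed
 * TPI header at a 32-bit aligned offset. "ctlbuf", "addr" and "addrlen"
 * are assumed to have been set up by the caller.
 *
 *	struct T_bind_req *req = (struct T_bind_req *)ctlbuf.buf;
 *	t_scalar_t offset;
 *
 *	req->PRIM_type = T_BIND_REQ;
 *	if (_t_aligned_copy(&ctlbuf, addrlen,
 *	    (int)sizeof (struct T_bind_req), addr, &offset) < 0)
 *		return (-1);	(aligned copy would overflow ctlbuf)
 *	req->ADDR_offset = offset;
 *	req->ADDR_length = addrlen;
 */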
197
198
199 /*
200 * append data and control info in look buffer (list in the MT case)
201 *
202 * The only thing that can be in look buffer is a T_DISCON_IND,
203 * T_ORDREL_IND or a T_UDERROR_IND.
204 *
205 * It also enforces priority of T_DISCON_IND over any T_ORDREL_IND
206 * already in the buffer. It assumes no T_ORDREL_IND is appended
207 * when there is already something on the looklist (error case) and
208 * that a T_ORDREL_IND if present will always be the first on the
209 * list.
210 *
211 * This also assumes ti_lock is held via sig_mutex_lock(),
212 * so signals are deferred here.
213 */
214 int
215 _t_register_lookevent(
216 struct _ti_user *tiptr,
217 caddr_t dptr,
218 int dsize,
219 caddr_t cptr,
220 int csize)
221 {
222 struct _ti_lookbufs *tlbs;
223 int cbuf_size, dbuf_size;
224
225 assert(MUTEX_HELD(&tiptr->ti_lock));
226
227 cbuf_size = tiptr->ti_ctlsize;
228 dbuf_size = tiptr->ti_rcvsize;
229
230 if ((csize > cbuf_size) || (dsize > dbuf_size)) {
231 /* can't fit - return error */
232 return (-1); /* error */
233 }
234 /*
235 * Enforce priority of T_DISCON_IND over T_ORDREL_IND
236 * queued earlier.
237 * Note: Since there can be only at most one T_ORDREL_IND
238 * queued (more than one is error case), and we look for it
239 * on each append of T_DISCON_IND, it can only be at the
240 * head of the list if it is there.
241 */
242 if (tiptr->ti_lookcnt > 0) { /* something already on looklist */
243 if (cptr && csize >= (int)sizeof (struct T_discon_ind) &&
244 /* LINTED pointer cast */
245 *(t_scalar_t *)cptr == T_DISCON_IND) {
246 /* appending discon ind */
247 assert(tiptr->ti_servtype != T_CLTS);
248 /* LINTED pointer cast */
249 if (*(t_scalar_t *)tiptr->ti_lookbufs.tl_lookcbuf ==
250 T_ORDREL_IND) { /* T_ORDREL_IND is on list */
251 /*
252 * Blow away T_ORDREL_IND
253 */
254 _t_free_looklist_head(tiptr);
255 }
256 }
257 }
258 tlbs = &tiptr->ti_lookbufs;
259 if (tiptr->ti_lookcnt > 0) {
260 int listcount = 0;
261 /*
262 * Allocate and append a new lookbuf to the
263 * existing list. (Should only happen in MT case)
264 */
265 while (tlbs->tl_next != NULL) {
266 listcount++;
267 tlbs = tlbs->tl_next;
268 }
269 assert(tiptr->ti_lookcnt == listcount);
270
271 /*
272 * signals are deferred, calls to malloc() are safe.
273 */
274 if ((tlbs->tl_next = malloc(sizeof (struct _ti_lookbufs))) ==
275 NULL)
276 return (-1); /* error */
277 tlbs = tlbs->tl_next;
278 /*
279 * Allocate the buffers. The sizes are derived from the
280 * sizes of other related buffers. See _t_alloc_bufs()
281 * for details.
282 */
283 if ((tlbs->tl_lookcbuf = malloc(cbuf_size)) == NULL) {
284 /* giving up - free other memory chunks */
285 free(tlbs);
286 return (-1); /* error */
287 }
288 if ((dsize > 0) &&
289 ((tlbs->tl_lookdbuf = malloc(dbuf_size)) == NULL)) {
290 /* giving up - free other memory chunks */
291 free(tlbs->tl_lookcbuf);
292 free(tlbs);
293 return (-1); /* error */
294 }
295 }
296
297 (void) memcpy(tlbs->tl_lookcbuf, cptr, csize);
298 if (dsize > 0)
299 (void) memcpy(tlbs->tl_lookdbuf, dptr, dsize);
300 tlbs->tl_lookdlen = dsize;
301 tlbs->tl_lookclen = csize;
302 tlbs->tl_next = NULL;
303 tiptr->ti_lookcnt++;
304 return (0); /* ok return */
305 }
306
307 /*
308 * Is there something that needs attention?
309 * Assumes tiptr->ti_lock is held and this thread's signals are blocked
310 * in MT case.
311 */
312 int
313 _t_is_event(int fd, struct _ti_user *tiptr)
314 {
315 int size, retval;
316
317 assert(MUTEX_HELD(&tiptr->ti_lock));
318 if ((retval = ioctl(fd, I_NREAD, &size)) < 0) {
319 t_errno = TSYSERR;
320 return (-1);
321 }
322
323 if ((retval > 0) || (tiptr->ti_lookcnt > 0)) {
324 t_errno = TLOOK;
325 return (-1);
326 }
327 return (0);
328 }
329
330 /*
331 * wait for T_OK_ACK
332 * assumes tiptr->ti_lock held in MT case
333 */
334 int
335 _t_is_ok(int fd, struct _ti_user *tiptr, t_scalar_t type)
336 {
337 struct strbuf ctlbuf;
338 struct strbuf databuf;
339 union T_primitives *pptr;
340 int retval, cntlflag;
341 int size;
342 int didalloc, didralloc;
343 int flags = 0;
344
345 assert(MUTEX_HELD(&tiptr->ti_lock));
346 /*
347 * Acquire ctlbuf for use in sending/receiving control part
348 * of the message.
349 */
350 if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0)
351 return (-1);
352 /*
353 * Acquire databuf for use in sending/receiving data part
354 */
355 if (_t_acquire_databuf(tiptr, &databuf, &didralloc) < 0) {
356 if (didalloc)
357 free(ctlbuf.buf);
358 else
359 tiptr->ti_ctlbuf = ctlbuf.buf;
360 return (-1);
361 }
362
363 /*
364 * Temporarily convert a non-blocking endpoint to a
365 * blocking one and restore the status later.
366 */
367 cntlflag = fcntl(fd, F_GETFL, 0);
368 if (cntlflag & (O_NDELAY | O_NONBLOCK))
369 (void) fcntl(fd, F_SETFL, cntlflag & ~(O_NDELAY | O_NONBLOCK));
370
371 flags = RS_HIPRI;
372
373 while ((retval = getmsg(fd, &ctlbuf, &databuf, &flags)) < 0) {
374 if (errno == EINTR)
375 continue;
376 if (cntlflag & (O_NDELAY | O_NONBLOCK))
377 (void) fcntl(fd, F_SETFL, cntlflag);
378 t_errno = TSYSERR;
379 goto err_out;
380 }
381
382 /* did I get the entire message? */
383 if (retval > 0) {
384 if (cntlflag & (O_NDELAY | O_NONBLOCK))
385 (void) fcntl(fd, F_SETFL, cntlflag);
386 t_errno = TSYSERR;
387 errno = EIO;
388 goto err_out;
389 }
390
391 /*
392 * is ctl part large enough to determine type?
393 */
394 if (ctlbuf.len < (int)sizeof (t_scalar_t)) {
395 if (cntlflag & (O_NDELAY | O_NONBLOCK))
396 (void) fcntl(fd, F_SETFL, cntlflag);
397 t_errno = TSYSERR;
398 errno = EPROTO;
399 goto err_out;
400 }
401
402 if (cntlflag & (O_NDELAY | O_NONBLOCK))
403 (void) fcntl(fd, F_SETFL, cntlflag);
404
405 /* LINTED pointer cast */
406 pptr = (union T_primitives *)ctlbuf.buf;
407
408 switch (pptr->type) {
409 case T_OK_ACK:
410 if ((ctlbuf.len < (int)sizeof (struct T_ok_ack)) ||
411 (pptr->ok_ack.CORRECT_prim != type)) {
412 t_errno = TSYSERR;
413 errno = EPROTO;
414 goto err_out;
415 }
416 if (didalloc)
417 free(ctlbuf.buf);
418 else
419 tiptr->ti_ctlbuf = ctlbuf.buf;
420 if (didralloc)
421 free(databuf.buf);
422 else
423 tiptr->ti_rcvbuf = databuf.buf;
424 return (0);
425
426 case T_ERROR_ACK:
427 if ((ctlbuf.len < (int)sizeof (struct T_error_ack)) ||
428 (pptr->error_ack.ERROR_prim != type)) {
429 t_errno = TSYSERR;
430 errno = EPROTO;
431 goto err_out;
432 }
433 /*
434 * if error is out of state and there is something
435 * on read queue, then indicate to user that
436 * there is something that needs attention
437 */
438 if (pptr->error_ack.TLI_error == TOUTSTATE) {
439 if ((retval = ioctl(fd, I_NREAD, &size)) < 0) {
440 t_errno = TSYSERR;
441 goto err_out;
442 }
443 if (retval > 0)
444 t_errno = TLOOK;
445 else
446 t_errno = TOUTSTATE;
447 } else {
448 t_errno = pptr->error_ack.TLI_error;
449 if (t_errno == TSYSERR)
450 errno = pptr->error_ack.UNIX_error;
451 }
452 goto err_out;
453 default:
454 t_errno = TSYSERR;
455 errno = EPROTO;
456 /* fallthru to err_out: */
457 }
458 err_out:
459 if (didalloc)
460 free(ctlbuf.buf);
461 else
462 tiptr->ti_ctlbuf = ctlbuf.buf;
463 if (didralloc)
464 free(databuf.buf);
465 else
466 tiptr->ti_rcvbuf = databuf.buf;
467 return (-1);
468 }
469
470 /*
471 * timod ioctl
472 */
473 int
474 _t_do_ioctl(int fd, char *buf, int size, int cmd, int *retlenp)
475 {
476 int retval;
477 struct strioctl strioc;
478
479 strioc.ic_cmd = cmd;
480 strioc.ic_timout = -1;
481 strioc.ic_len = size;
482 strioc.ic_dp = buf;
483
484 if ((retval = ioctl(fd, I_STR, &strioc)) < 0) {
485 t_errno = TSYSERR;
486 return (-1);
487 }
488
489 if (retval > 0) {
490 t_errno = retval & 0xff;
491 if (t_errno == TSYSERR)
492 errno = (retval >> 8) & 0xff;
493 return (-1);
494 }
495 if (retlenp)
496 *retlenp = strioc.ic_len;
497 return (0);
498 }
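
/*
 * Illustrative sketch (hypothetical caller): the size argument is the
 * number of bytes handed to timod, and on success *retlenp is updated
 * with the length timod wrote back into the same buffer, as used by
 * _t_create() below for TI_CAPABILITY.
 *
 *	int retlen;
 *
 *	if (_t_do_ioctl(fd, (char *)ioctlbuf,
 *	    (int)sizeof (struct T_capability_ack), TI_CAPABILITY,
 *	    &retlen) < 0)
 *		return (-1);	t_errno (and possibly errno) already set
 */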
499
500 /*
501 * alloc scratch buffers and look buffers
502 */
503 /* ARGSUSED */
504 static int
505 _t_alloc_bufs(int fd, struct _ti_user *tiptr, struct T_info_ack *tsap)
506 {
507 unsigned int size1, size2;
508 t_scalar_t optsize;
509 unsigned int csize, dsize, asize, osize;
510 char *ctlbuf, *rcvbuf;
511 char *lookdbuf, *lookcbuf;
512
513 csize = _t_setsize(tsap->CDATA_size, B_FALSE);
514 dsize = _t_setsize(tsap->DDATA_size, B_FALSE);
515
516 size1 = _T_MAX(csize, dsize);
517
518 if (size1 != 0) {
519 if ((rcvbuf = malloc(size1)) == NULL)
520 return (-1);
521 if ((lookdbuf = malloc(size1)) == NULL) {
522 free(rcvbuf);
523 return (-1);
524 }
525 } else {
526 rcvbuf = NULL;
527 lookdbuf = NULL;
528 }
529
530 asize = _t_setsize(tsap->ADDR_size, B_FALSE);
531 if (tsap->OPT_size >= 0)
532 /* compensate for XTI level options */
533 optsize = tsap->OPT_size + TX_XTI_LEVEL_MAX_OPTBUF;
534 else
535 optsize = tsap->OPT_size;
536 osize = _t_setsize(optsize, B_TRUE);
537
538 /*
539 * We compute the largest buffer size needed for this provider by
540 * adding the components. [ An extra sizeof (t_scalar_t) is added for
541 * each buffer to take care of rounding up for alignment. ]
542 * The goal here is to compute the size of the largest possible buffer that
543 * might be needed to hold a TPI message for the transport provider
544 * on this endpoint.
545 * Note: T_ADDR_ACK contains potentially two address buffers.
546 */
547
548 size2 = (unsigned int)sizeof (union T_primitives) /* TPI struct */
549 + asize + (unsigned int)sizeof (t_scalar_t) +
550 /* first addr buffer plus alignment */
551 asize + (unsigned int)sizeof (t_scalar_t) +
552 /* second addr buffer plus alignment */
553 osize + (unsigned int)sizeof (t_scalar_t);
554 /* option buffer plus alignment */
555
556 if ((ctlbuf = malloc(size2)) == NULL) {
557 if (size1 != 0) {
558 free(rcvbuf);
559 free(lookdbuf);
560 }
561 return (-1);
562 }
563
564 if ((lookcbuf = malloc(size2)) == NULL) {
565 if (size1 != 0) {
566 free(rcvbuf);
567 free(lookdbuf);
568 }
569 free(ctlbuf);
570 return (-1);
571 }
572
573 tiptr->ti_rcvsize = size1;
574 tiptr->ti_rcvbuf = rcvbuf;
575 tiptr->ti_ctlsize = size2;
576 tiptr->ti_ctlbuf = ctlbuf;
577
578 /*
579 * Note: The head of the lookbuffers list (and associated buffers)
580 * is allocated here on initialization.
581 * More are allocated on demand.
582 */
583 tiptr->ti_lookbufs.tl_lookclen = 0;
584 tiptr->ti_lookbufs.tl_lookcbuf = lookcbuf;
585 tiptr->ti_lookbufs.tl_lookdlen = 0;
586 tiptr->ti_lookbufs.tl_lookdbuf = lookdbuf;
587
588 return (0);
589 }
590
591
592 /*
593 * set sizes of buffers
594 */
595 static unsigned int
596 _t_setsize(t_scalar_t infosize, boolean_t option)
597 {
598 static size_t optinfsize;
599
600 switch (infosize) {
601 case T_INFINITE /* -1 */:
602 if (option) {
603 if (optinfsize == 0) {
604 size_t uc = ucred_size();
605 if (uc < DEFSIZE/2)
606 optinfsize = DEFSIZE;
607 else
608 optinfsize = ucred_size() + DEFSIZE/2;
609 }
610 return ((unsigned int)optinfsize);
611 }
612 return (DEFSIZE);
613 case T_INVALID /* -2 */:
614 return (0);
615 default:
616 return ((unsigned int) infosize);
617 }
618 }
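
/*
 * For illustration, with DEFSIZE of 2048:
 *
 *	_t_setsize(T_INFINITE, B_FALSE)	returns 2048
 *	_t_setsize(T_INVALID, B_FALSE)	returns 0
 *	_t_setsize(1024, B_FALSE)	returns 1024
 *
 * For option buffers (option == B_TRUE) the T_INFINITE case is padded
 * out to hold a ucred when ucred_size() is at least DEFSIZE/2.
 */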
619
620 static void
621 _t_reinit_tiptr(struct _ti_user *tiptr)
622 {
623 /*
624 * Note: This routine is designed for a "reinitialization".
625 * The following fields are not modified here and are preserved:
626 * - ti_fd field
627 * - ti_lock
628 * - ti_next
629 * - ti_prev
630 * The above fields have to be separately initialized if this
631 * is used for a fresh initialization.
632 */
633
634 tiptr->ti_flags = 0;
635 tiptr->ti_rcvsize = 0;
636 tiptr->ti_rcvbuf = NULL;
637 tiptr->ti_ctlsize = 0;
638 tiptr->ti_ctlbuf = NULL;
639 tiptr->ti_lookbufs.tl_lookdbuf = NULL;
640 tiptr->ti_lookbufs.tl_lookcbuf = NULL;
641 tiptr->ti_lookbufs.tl_lookdlen = 0;
642 tiptr->ti_lookbufs.tl_lookclen = 0;
643 tiptr->ti_lookbufs.tl_next = NULL;
644 tiptr->ti_maxpsz = 0;
645 tiptr->ti_tsdusize = 0;
646 tiptr->ti_etsdusize = 0;
647 tiptr->ti_cdatasize = 0;
648 tiptr->ti_ddatasize = 0;
649 tiptr->ti_servtype = 0;
650 tiptr->ti_lookcnt = 0;
651 tiptr->ti_state = 0;
652 tiptr->ti_ocnt = 0;
653 tiptr->ti_prov_flag = 0;
654 tiptr->ti_qlen = 0;
655 }
656
657 /*
658 * Link manipulation routines.
659 *
660 * NBUCKETS hash buckets are used to give fast
661 * access. The number is derived from the file descriptor soft
662 * limit (64).
663 */
664
665 #define NBUCKETS 64
666 static struct _ti_user *hash_bucket[NBUCKETS];
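
/*
 * For illustration: a descriptor hashes to bucket "fd % NBUCKETS", so
 * e.g. fd 3 and fd 67 share bucket 3 and are distinguished by walking
 * that bucket's doubly linked list (ti_next/ti_prev) and comparing
 * ti_fd.
 */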
667
668 /*
669 * Allocates a new link and returns a pointer to it.
670 * Assumes that the caller is holding _ti_userlock via sig_mutex_lock(),
671 * so signals are deferred here.
672 */
673 static struct _ti_user *
674 add_tilink(int s)
675 {
676 struct _ti_user *tiptr;
677 struct _ti_user *prevptr;
678 struct _ti_user *curptr;
679 int x;
680 struct stat stbuf;
681
682 assert(MUTEX_HELD(&_ti_userlock));
683
684 if (s < 0 || fstat(s, &stbuf) != 0)
685 return (NULL);
686
687 x = s % NBUCKETS;
688 if (hash_bucket[x] != NULL) {
689 /*
690 * Walk along the bucket looking for
691 * duplicate entry or the end.
692 */
693 for (curptr = hash_bucket[x]; curptr != NULL;
694 curptr = curptr->ti_next) {
695 if (curptr->ti_fd == s) {
696 /*
697 * This can happen when the user has close(2)'ed
698 * a descriptor and then had it allocated again
699 * via t_open().
700 *
701 * We will re-use the existing _ti_user struct
702 * in this case rather than allocating a new one.
703 * If there are buffers
704 * associated with the existing _ti_user
705 * struct, they may not be the correct size,
706 * so we cannot use them. We free them
707 * here and re-allocate new ones
708 * later on.
709 */
710 if (curptr->ti_rcvbuf != NULL)
711 free(curptr->ti_rcvbuf);
712 free(curptr->ti_ctlbuf);
713 _t_free_lookbufs(curptr);
714 _t_reinit_tiptr(curptr);
715 curptr->ti_rdev = stbuf.st_rdev;
716 curptr->ti_ino = stbuf.st_ino;
717 return (curptr);
718 }
719 prevptr = curptr;
720 }
721 /*
722 * Allocate and link in a new one.
723 */
724 if ((tiptr = malloc(sizeof (*tiptr))) == NULL)
725 return (NULL);
726 /*
727 * First initialize fields common with reinitialization and
728 * then other fields too
729 */
730 _t_reinit_tiptr(tiptr);
731 prevptr->ti_next = tiptr;
732 tiptr->ti_prev = prevptr;
733 } else {
734 /*
735 * First entry.
736 */
737 if ((tiptr = malloc(sizeof (*tiptr))) == NULL)
738 return (NULL);
739 _t_reinit_tiptr(tiptr);
740 hash_bucket[x] = tiptr;
741 tiptr->ti_prev = NULL;
742 }
743 tiptr->ti_next = NULL;
744 tiptr->ti_fd = s;
745 tiptr->ti_rdev = stbuf.st_rdev;
746 tiptr->ti_ino = stbuf.st_ino;
747 (void) mutex_init(&tiptr->ti_lock, USYNC_THREAD, NULL);
748 return (tiptr);
749 }
750
751 /*
752 * Find a link by descriptor
753 * Assumes that the caller is holding _ti_userlock.
754 */
755 static struct _ti_user *
756 find_tilink(int s)
757 {
758 struct _ti_user *curptr;
759 int x;
760 struct stat stbuf;
761
762 assert(MUTEX_HELD(&_ti_userlock));
763
764 if (s < 0 || fstat(s, &stbuf) != 0)
765 return (NULL);
766
767 x = s % NBUCKETS;
768 /*
769 * Walk along the bucket looking for the descriptor.
770 */
771 for (curptr = hash_bucket[x]; curptr; curptr = curptr->ti_next) {
772 if (curptr->ti_fd == s) {
773 if (curptr->ti_rdev == stbuf.st_rdev &&
774 curptr->ti_ino == stbuf.st_ino)
775 return (curptr);
776 (void) _t_delete_tilink(s);
777 }
778 }
779 return (NULL);
780 }
781
782 /*
783 * Assumes that the caller is holding _ti_userlock.
784 * Also assumes that all signals are blocked.
785 */
786 int
787 _t_delete_tilink(int s)
788 {
789 struct _ti_user *curptr;
790 int x;
791
792 /*
793 * Find the link.
794 */
795 assert(MUTEX_HELD(&_ti_userlock));
796 if (s < 0)
797 return (-1);
798 x = s % NBUCKETS;
799 /*
800 * Walk along the bucket looking for
801 * the descriptor.
802 */
803 for (curptr = hash_bucket[x]; curptr; curptr = curptr->ti_next) {
804 if (curptr->ti_fd == s) {
805 struct _ti_user *nextptr;
806 struct _ti_user *prevptr;
807
808 nextptr = curptr->ti_next;
809 prevptr = curptr->ti_prev;
810 if (prevptr)
811 prevptr->ti_next = nextptr;
812 else
813 hash_bucket[x] = nextptr;
814 if (nextptr)
815 nextptr->ti_prev = prevptr;
816
817 /*
818 * free resources associated with curptr
819 */
820 if (curptr->ti_rcvbuf != NULL)
821 free(curptr->ti_rcvbuf);
822 free(curptr->ti_ctlbuf);
823 _t_free_lookbufs(curptr);
824 (void) mutex_destroy(&curptr->ti_lock);
825 free(curptr);
826 return (0);
827 }
828 }
829 return (-1);
830 }
831
832 /*
833 * Allocate a TLI state structure and synch it with the kernel
834 * *tiptr is returned
835 * Assumes that the caller is holding the _ti_userlock and has blocked signals.
836 *
837 * This function may fail the first time it is called with a given transport
838 * if it doesn't support the T_CAPABILITY_REQ TPI message.
839 */
840 struct _ti_user *
841 _t_create(int fd, struct t_info *info, int api_semantics, int *t_capreq_failed)
842 {
843 /*
844 * Aligned data buffer for ioctl.
845 */
846 union {
847 struct ti_sync_req ti_req;
848 struct ti_sync_ack ti_ack;
849 union T_primitives t_prim;
850 char pad[128];
851 } ioctl_data;
852 void *ioctlbuf = &ioctl_data; /* TI_SYNC/GETINFO with room to grow */
853 /* preferred location first local variable */
854 /* see note below */
855 /*
856 * Note: We use "ioctlbuf" allocated on stack above with
857 * room to grow since (struct ti_sync_ack) can grow in size
858 * on future kernels. (We do not use the malloc'd "ti_ctlbuf" as that
859 * is part of the instance structure, which may not exist yet.)
860 * Its preferred declaration location is as the first local variable in this
861 * procedure as bugs causing overruns will be detectable on
862 * platforms where procedure calling conventions place return
863 * address on stack (such as x86) instead of causing silent
864 * memory corruption.
865 */
866 struct ti_sync_req *tsrp = (struct ti_sync_req *)ioctlbuf;
867 struct ti_sync_ack *tsap = (struct ti_sync_ack *)ioctlbuf;
868 struct T_capability_req *tcrp = (struct T_capability_req *)ioctlbuf;
869 struct T_capability_ack *tcap = (struct T_capability_ack *)ioctlbuf;
870 struct T_info_ack *tiap = &tcap->INFO_ack;
871 struct _ti_user *ntiptr;
872 int expected_acksize;
873 int retlen, rstate, sv_errno, rval;
874
875 assert(MUTEX_HELD(&_ti_userlock));
876
877 /*
878 * Use ioctl required for sync'ing state with kernel.
879 * We use two ioctls. TI_CAPABILITY is used to get TPI information and
880 * TI_SYNC is used to synchronise state with timod. Statically linked
881 * TLI applications will no longer work on older releases where there
882 * are no TI_SYNC and TI_CAPABILITY.
883 */
884
885 /*
886 * Request info about transport.
887 * Assumes that TC1_INFO should always be implemented.
888 * For TI_CAPABILITY size argument to ioctl specifies maximum buffer
889 * size.
890 */
891 tcrp->PRIM_type = T_CAPABILITY_REQ;
892 tcrp->CAP_bits1 = TC1_INFO | TC1_ACCEPTOR_ID;
893 rval = _t_do_ioctl(fd, (char *)ioctlbuf,
894 (int)sizeof (struct T_capability_ack), TI_CAPABILITY, &retlen);
895 expected_acksize = (int)sizeof (struct T_capability_ack);
896
897 if (rval < 0) {
898 /*
899 * TI_CAPABILITY may fail when transport provider doesn't
900 * support T_CAPABILITY_REQ message type. In this case file
901 * descriptor may be unusable (when transport provider sent
902 * M_ERROR in response to T_CAPABILITY_REQ). This should only
903 * happen once during system lifetime for given transport
904 * provider since timod will emulate TI_CAPABILITY after it
905 * detected the failure.
906 */
907 if (t_capreq_failed != NULL)
908 *t_capreq_failed = 1;
909 return (NULL);
910 }
911
912 if (retlen != expected_acksize) {
913 t_errno = TSYSERR;
914 errno = EIO;
915 return (NULL);
916 }
917
918 if ((tcap->CAP_bits1 & TC1_INFO) == 0) {
919 t_errno = TSYSERR;
920 errno = EPROTO;
921 return (NULL);
922 }
923 if (info != NULL) {
924 if (tiap->PRIM_type != T_INFO_ACK) {
925 t_errno = TSYSERR;
926 errno = EPROTO;
927 return (NULL);
928 }
929 info->addr = tiap->ADDR_size;
930 info->options = tiap->OPT_size;
931 info->tsdu = tiap->TSDU_size;
932 info->etsdu = tiap->ETSDU_size;
933 info->connect = tiap->CDATA_size;
934 info->discon = tiap->DDATA_size;
935 info->servtype = tiap->SERV_type;
936 if (_T_IS_XTI(api_semantics)) {
937 /*
938 * XTI ONLY - TLI "struct t_info" does not
939 * have "flags"
940 */
941 info->flags = 0;
942 if (tiap->PROVIDER_flag & (SENDZERO|OLD_SENDZERO))
943 info->flags |= T_SENDZERO;
944 /*
945 * Some day there MAY be a NEW bit in T_info_ack
946 * PROVIDER_flag namespace exposed by TPI header
947 * <sys/tihdr.h> which will functionally correspond to
948 * role played by T_ORDRELDATA in info->flags namespace
949 * When that bit exists, we can add a test to see if
950 * it is set and set T_ORDRELDATA.
951 * Note: Currently only mOSI ("minimal OSI") provider
952 * is specified to use T_ORDRELDATA so probability of
953 * needing it is minimal.
954 */
955 }
956 }
957
958 /*
959 * If this is the first time or there is no instance (after fork/exec,
960 * dup, etc.), then create and initialize the data structure
961 * and allocate buffers.
962 */
963 ntiptr = add_tilink(fd);
964 if (ntiptr == NULL) {
965 t_errno = TSYSERR;
966 errno = ENOMEM;
967 return (NULL);
968 }
969
970 /*
971 * Allocate buffers for the new descriptor
972 */
973 if (_t_alloc_bufs(fd, ntiptr, tiap) < 0) {
974 sv_errno = errno;
975 (void) _t_delete_tilink(fd);
976 t_errno = TSYSERR;
977 errno = sv_errno;
978 return (NULL);
979 }
980
981 /* Fill instance structure */
982
983 ntiptr->ti_lookcnt = 0;
984 ntiptr->ti_flags = USED;
985 ntiptr->ti_state = T_UNINIT;
986 ntiptr->ti_ocnt = 0;
987
988 assert(tiap->TIDU_size > 0);
989 ntiptr->ti_maxpsz = tiap->TIDU_size;
990 assert(tiap->TSDU_size >= -2);
991 ntiptr->ti_tsdusize = tiap->TSDU_size;
992 assert(tiap->ETSDU_size >= -2);
993 ntiptr->ti_etsdusize = tiap->ETSDU_size;
994 assert(tiap->CDATA_size >= -2);
995 ntiptr->ti_cdatasize = tiap->CDATA_size;
996 assert(tiap->DDATA_size >= -2);
997 ntiptr->ti_ddatasize = tiap->DDATA_size;
998 ntiptr->ti_servtype = tiap->SERV_type;
999 ntiptr->ti_prov_flag = tiap->PROVIDER_flag;
1000
1001 if ((tcap->CAP_bits1 & TC1_ACCEPTOR_ID) != 0) {
1002 ntiptr->acceptor_id = tcap->ACCEPTOR_id;
1003 ntiptr->ti_flags |= V_ACCEPTOR_ID;
1004 }
1005 else
1006 ntiptr->ti_flags &= ~V_ACCEPTOR_ID;
1007
1008 /*
1009 * Restore state from kernel (caveat some heuristics)
1010 */
1011 switch (tiap->CURRENT_state) {
1012
1013 case TS_UNBND:
1014 ntiptr->ti_state = T_UNBND;
1015 break;
1016
1017 case TS_IDLE:
1018 if ((rstate = _t_adjust_state(fd, T_IDLE)) < 0) {
1019 sv_errno = errno;
1020 (void) _t_delete_tilink(fd);
1021 errno = sv_errno;
1022 return (NULL);
1023 }
1024 ntiptr->ti_state = rstate;
1025 break;
1026
1027 case TS_WRES_CIND:
1028 ntiptr->ti_state = T_INCON;
1029 break;
1030
1031 case TS_WCON_CREQ:
1032 ntiptr->ti_state = T_OUTCON;
1033 break;
1034
1035 case TS_DATA_XFER:
1036 if ((rstate = _t_adjust_state(fd, T_DATAXFER)) < 0) {
1037 sv_errno = errno;
1038 (void) _t_delete_tilink(fd);
1039 errno = sv_errno;
1040 return (NULL);
1041 }
1042 ntiptr->ti_state = rstate;
1043 break;
1044
1045 case TS_WIND_ORDREL:
1046 ntiptr->ti_state = T_OUTREL;
1047 break;
1048
1049 case TS_WREQ_ORDREL:
1050 if ((rstate = _t_adjust_state(fd, T_INREL)) < 0) {
1051 sv_errno = errno;
1052 (void) _t_delete_tilink(fd);
1053 errno = sv_errno;
1054 return (NULL);
1055 }
1056 ntiptr->ti_state = rstate;
1057 break;
1058 default:
1059 t_errno = TSTATECHNG;
1060 (void) _t_delete_tilink(fd);
1061 return (NULL);
1062 }
1063
1064 /*
1065 * Sync information with timod.
1066 */
1067 tsrp->tsr_flags = TSRF_QLEN_REQ;
1068
1069 rval = _t_do_ioctl(fd, ioctlbuf,
1070 (int)sizeof (struct ti_sync_req), TI_SYNC, &retlen);
1071 expected_acksize = (int)sizeof (struct ti_sync_ack);
1072
1073 if (rval < 0) {
1074 sv_errno = errno;
1075 (void) _t_delete_tilink(fd);
1076 t_errno = TSYSERR;
1077 errno = sv_errno;
1078 return (NULL);
1079 }
1080
1081 /*
1082 * This is a "less than" check as "struct ti_sync_ack" returned by
1083 * TI_SYNC can grow in size in future kernels. If/when a statically
1084 * linked application is run on a future kernel, it should not fail.
1085 */
1086 if (retlen < expected_acksize) {
1087 sv_errno = errno;
1088 (void) _t_delete_tilink(fd);
1089 t_errno = TSYSERR;
1090 errno = sv_errno;
1091 return (NULL);
1092 }
1093
1094 if (_T_IS_TLI(api_semantics))
1095 tsap->tsa_qlen = 0; /* not needed for TLI */
1096
1097 ntiptr->ti_qlen = tsap->tsa_qlen;
1098
1099 return (ntiptr);
1100 }
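
/*
 * Illustrative calling sequence (mirrors _t_checkfd() above): _t_create()
 * must be called with all signals blocked and _ti_userlock held.
 *
 *	sigset_t mask;
 *	struct _ti_user *tiptr;
 *
 *	(void) thr_sigsetmask(SIG_SETMASK, &fillset, &mask);
 *	sig_mutex_lock(&_ti_userlock);
 *	tiptr = _t_create(fd, NULL, api_semantics, NULL);
 *	sig_mutex_unlock(&_ti_userlock);
 *	(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
 */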
1101
1102
1103 static int
1104 _t_adjust_state(int fd, int instate)
1105 {
1106 char ctlbuf[sizeof (t_scalar_t)];
1107 char databuf[sizeof (int)]; /* size unimportant - anything > 0 */
1108 struct strpeek arg;
1109 int outstate, retval;
1110
1111 /*
1112 * Peek at message on stream head (if any)
1113 * and see if it is data
1114 */
1115 arg.ctlbuf.buf = ctlbuf;
1116 arg.ctlbuf.maxlen = (int)sizeof (ctlbuf);
1117 arg.ctlbuf.len = 0;
1118
1119 arg.databuf.buf = databuf;
1120 arg.databuf.maxlen = (int)sizeof (databuf);
1121 arg.databuf.len = 0;
1122
1123 arg.flags = 0;
1124
1125 if ((retval = ioctl(fd, I_PEEK, &arg)) < 0) {
1126 t_errno = TSYSERR;
1127 return (-1);
1128 }
1129 outstate = instate;
1130 /*
1131 * If peek shows something at stream head, then
1132 * Adjust "outstate" based on some heuristics.
1133 */
1134 if (retval > 0) {
1135 switch (instate) {
1136 case T_IDLE:
1137 /*
1138 * The following heuristic is to handle data
1139 * ahead of T_DISCON_IND indications that might
1140 * be at the stream head waiting to be
1141 * read (T_DATA_IND or M_DATA)
1142 */
1143 if (((arg.ctlbuf.len == 4) &&
1144 /* LINTED pointer cast */
1145 ((*(int32_t *)arg.ctlbuf.buf) == T_DATA_IND)) ||
1146 ((arg.ctlbuf.len == 0) && arg.databuf.len)) {
1147 outstate = T_DATAXFER;
1148 }
1149 break;
1150 case T_DATAXFER:
1151 /*
1152 * The following heuristic is to handle
1153 * the case where the connection is established
1154 * and in data transfer state at the provider
1155 * but the T_CONN_CON has not yet been read
1156 * from the stream head.
1157 */
1158 if ((arg.ctlbuf.len == 4) &&
1159 /* LINTED pointer cast */
1160 ((*(int32_t *)arg.ctlbuf.buf) == T_CONN_CON))
1161 outstate = T_OUTCON;
1162 break;
1163 case T_INREL:
1164 /*
1165 * The following heuristic is to handle data
1166 * ahead of T_ORDREL_IND indications that might
1167 * be at the stream head waiting to be
1168 * read (T_DATA_IND or M_DATA)
1169 */
1170 if (((arg.ctlbuf.len == 4) &&
1171 /* LINTED pointer cast */
1172 ((*(int32_t *)arg.ctlbuf.buf) == T_DATA_IND)) ||
1173 ((arg.ctlbuf.len == 0) && arg.databuf.len)) {
1174 outstate = T_DATAXFER;
1175 }
1176 break;
1177 default:
1178 break;
1179 }
1180 }
1181 return (outstate);
1182 }
1183
1184 /*
1185 * Assumes caller has blocked signals at least in this thread (for safe
1186 * malloc/free operations)
1187 */
1188 static int
1189 _t_cbuf_alloc(struct _ti_user *tiptr, char **retbuf)
1190 {
1191 unsigned size2;
1192
1193 assert(MUTEX_HELD(&tiptr->ti_lock));
1194 size2 = tiptr->ti_ctlsize; /* same size as default ctlbuf */
1195
1196 if ((*retbuf = malloc(size2)) == NULL) {
1197 return (-1);
1198 }
1199 return (size2);
1200 }
1201
1202
1203 /*
1204 * Assumes caller has blocked signals at least in this thread (for safe
1205 * malloc/free operations)
1206 */
1207 int
1208 _t_rbuf_alloc(struct _ti_user *tiptr, char **retbuf)
1209 {
1210 unsigned size1;
1211
1212 assert(MUTEX_HELD(&tiptr->ti_lock));
1213 size1 = tiptr->ti_rcvsize; /* same size as default rcvbuf */
1214
1215 if ((*retbuf = malloc(size1)) == NULL) {
1216 return (-1);
1217 }
1218 return (size1);
1219 }
1220
1221 /*
1222 * Free lookbuffer structures and associated resources
1223 * Assumes ti_lock held for MT case.
1224 */
1225 static void
1226 _t_free_lookbufs(struct _ti_user *tiptr)
1227 {
1228 struct _ti_lookbufs *tlbs, *prev_tlbs, *head_tlbs;
1229
1230 /*
1231 * Assertion:
1232 * The structure lock or the global list manipulation lock
1233 * should be held. The assumption is that nothing
1234 * else can access the descriptor while the global list manipulation
1235 * lock is held, so it is OK to manipulate fields without the
1236 * structure lock.
1237 */
1238 assert(MUTEX_HELD(&tiptr->ti_lock) || MUTEX_HELD(&_ti_userlock));
1239
1240 /*
1241 * Free only the buffers in the first lookbuf
1242 */
1243 head_tlbs = &tiptr->ti_lookbufs;
1244 if (head_tlbs->tl_lookdbuf != NULL) {
1245 free(head_tlbs->tl_lookdbuf);
1246 head_tlbs->tl_lookdbuf = NULL;
1247 }
1248 free(head_tlbs->tl_lookcbuf);
1249 head_tlbs->tl_lookcbuf = NULL;
1250 /*
1251 * Free the node and the buffers in the rest of the
1252 * list
1253 */
1254
1255 tlbs = head_tlbs->tl_next;
1256 head_tlbs->tl_next = NULL;
1257
1258 while (tlbs != NULL) {
1259 if (tlbs->tl_lookdbuf != NULL)
1260 free(tlbs->tl_lookdbuf);
1261 free(tlbs->tl_lookcbuf);
1262 prev_tlbs = tlbs;
1263 tlbs = tlbs->tl_next;
1264 free(prev_tlbs);
1265 }
1266 }
1267
1268 /*
1269 * Free lookbuffer event list head.
1270 * Consume current lookbuffer event
1271 * Assumes ti_lock held for MT case.
1272 * Note: The head of this list is part of the instance
1273 * structure so the code is a little unorthodox.
1274 */
1275 void
1276 _t_free_looklist_head(struct _ti_user *tiptr)
1277 {
1278 struct _ti_lookbufs *tlbs, *next_tlbs;
1279
1280 tlbs = &tiptr->ti_lookbufs;
1281
1282 if (tlbs->tl_next) {
1283 /*
1284 * Free the control and data buffers
1285 */
1286 if (tlbs->tl_lookdbuf != NULL)
1287 free(tlbs->tl_lookdbuf);
1288 free(tlbs->tl_lookcbuf);
1289 /*
1290 * Replace with next lookbuf event contents
1291 */
1292 next_tlbs = tlbs->tl_next;
1293 tlbs->tl_next = next_tlbs->tl_next;
1294 tlbs->tl_lookcbuf = next_tlbs->tl_lookcbuf;
1295 tlbs->tl_lookclen = next_tlbs->tl_lookclen;
1296 tlbs->tl_lookdbuf = next_tlbs->tl_lookdbuf;
1297 tlbs->tl_lookdlen = next_tlbs->tl_lookdlen;
1298 free(next_tlbs);
1299 /*
1300 * Decrement the count - it should never reach zero
1301 * in this path.
1302 */
1303 tiptr->ti_lookcnt--;
1304 assert(tiptr->ti_lookcnt > 0);
1305 } else {
1306 /*
1307 * No more look buffer events - just clear the count
1308 * and leave the buffers alone
1309 */
1310 assert(tiptr->ti_lookcnt == 1);
1311 tiptr->ti_lookcnt = 0;
1312 }
1313 }
1314
1315 /*
1316 * Discard lookbuffer events.
1317 * Assumes ti_lock held for MT case.
1318 */
1319 void
1320 _t_flush_lookevents(struct _ti_user *tiptr)
1321 {
1322 struct _ti_lookbufs *tlbs, *prev_tlbs;
1323
1324 /*
1325 * Leave the first nodes buffers alone (i.e. allocated)
1326 * but reset the flag.
1327 */
1328 assert(MUTEX_HELD(&tiptr->ti_lock));
1329 tiptr->ti_lookcnt = 0;
1330 /*
1331 * Blow away the rest of the list
1332 */
1333 tlbs = tiptr->ti_lookbufs.tl_next;
1334 tiptr->ti_lookbufs.tl_next = NULL;
1335 while (tlbs != NULL) {
1336 if (tlbs->tl_lookdbuf != NULL)
1337 free(tlbs->tl_lookdbuf);
1338 free(tlbs->tl_lookcbuf);
1339 prev_tlbs = tlbs;
1340 tlbs = tlbs->tl_next;
1341 free(prev_tlbs);
1342 }
1343 }
1344
1345
1346 /*
1347 * This routine checks if the control buffer in the instance structure
1348 * is available (non-null). If it is, the buffer is acquired and marked busy
1349 * (null). If it is busy (possible in MT programs), it allocates a new
1350 * buffer and sets a flag indicating new memory was allocated and the caller
1351 * has to free it.
1352 */
1353 int
1354 _t_acquire_ctlbuf(
1355 struct _ti_user *tiptr,
1356 struct strbuf *ctlbufp,
1357 int *didallocp)
1358 {
1359 *didallocp = 0;
1360
1361 ctlbufp->len = 0;
1362 if (tiptr->ti_ctlbuf) {
1363 ctlbufp->buf = tiptr->ti_ctlbuf;
1364 tiptr->ti_ctlbuf = NULL;
1365 ctlbufp->maxlen = tiptr->ti_ctlsize;
1366 } else {
1367 /*
1368 * tiptr->ti_ctlbuf is in use
1369 * allocate new buffer and free after use.
1370 */
1371 if ((ctlbufp->maxlen = _t_cbuf_alloc(tiptr,
1372 &ctlbufp->buf)) < 0) {
1373 t_errno = TSYSERR;
1374 return (-1);
1375 }
1376 *didallocp = 1;
1377 }
1378 return (0);
1379 }
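
/*
 * Illustrative acquire/release pattern (mirrors _t_is_ok() above): if a
 * fresh buffer was allocated it must be freed, otherwise the instance
 * buffer is handed back by restoring the pointer.
 *
 *	struct strbuf ctlbuf;
 *	int didalloc;
 *
 *	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0)
 *		return (-1);
 *	... use ctlbuf with getmsg()/putmsg() ...
 *	if (didalloc)
 *		free(ctlbuf.buf);
 *	else
 *		tiptr->ti_ctlbuf = ctlbuf.buf;
 */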
1380
1381 /*
1382 * This routine checks if the receive buffer in the instance structure
1383 * is available (non-null). If it is, the buffer is acquired and marked busy
1384 * (null). If it is busy (possible in MT programs), it allocates a new
1385 * buffer and sets a flag indicating new memory was allocated and the caller
1386 * has to free it.
1387 * Note: The receive buffer pointer can also be null if the transport
1388 * provider does not support connect/disconnect data (e.g. TCP) - not
1389 * just when it is "busy". In that case, ti_rcvsize will be 0 and that is
1390 * used to instantiate the databuf which points to a null buffer of
1391 * length 0 which is the right thing to do for that case.
1392 */
1393 int
1394 _t_acquire_databuf(
1395 struct _ti_user *tiptr,
1396 struct strbuf *databufp,
1397 int *didallocp)
1398 {
1399 *didallocp = 0;
1400
1401 databufp->len = 0;
1402 if (tiptr->ti_rcvbuf) {
1403 assert(tiptr->ti_rcvsize != 0);
1404 databufp->buf = tiptr->ti_rcvbuf;
1405 tiptr->ti_rcvbuf = NULL;
1406 databufp->maxlen = tiptr->ti_rcvsize;
1407 } else if (tiptr->ti_rcvsize == 0) {
1408 databufp->buf = NULL;
1409 databufp->maxlen = 0;
1410 } else {
1411 /*
1412 * tiptr->ti_rcvbuf is in use
1413 * allocate new buffer and free after use.
1414 */
1415 if ((databufp->maxlen = _t_rbuf_alloc(tiptr,
1416 &databufp->buf)) < 0) {
1417 t_errno = TSYSERR;
1418 return (-1);
1419 }
1420 *didallocp = 1;
1421 }
1422 return (0);
1423 }
1424
1425 /*
1426 * This routine requests timod to look for any expedited data
1427 * queued in the "receive buffers" in the kernel. Used for XTI
1428 * t_look() semantics for transports that send expedited data
1429 * inline (e.g. TCP).
1430 * Returns -1 for failure
1431 * Returns 0 for success
1432 * On a successful return, the location pointed by "expedited_queuedp"
1433 * contains
1434 * 0 if no expedited data is found queued in "receive buffers"
1435 * 1 if expedited data is found queued in "receive buffers"
1436 */
1437
1438 int
1439 _t_expinline_queued(int fd, int *expedited_queuedp)
1440 {
1441 union {
1442 struct ti_sync_req ti_req;
1443 struct ti_sync_ack ti_ack;
1444 char pad[128];
1445 } ioctl_data;
1446 void *ioctlbuf = &ioctl_data; /* for TI_SYNC with room to grow */
1447 /* preferred location first local variable */
1448 /* see note in _t_create above */
1449 struct ti_sync_req *tsrp = (struct ti_sync_req *)ioctlbuf;
1450 struct ti_sync_ack *tsap = (struct ti_sync_ack *)ioctlbuf;
1451 int rval, retlen;
1452
1453 *expedited_queuedp = 0;
1454 /* request info on rq expinds */
1455 tsrp->tsr_flags = TSRF_IS_EXP_IN_RCVBUF;
1456 do {
1457 rval = _t_do_ioctl(fd, ioctlbuf,
1458 (int)sizeof (struct T_info_req), TI_SYNC, &retlen);
1459 } while (rval < 0 && errno == EINTR);
1460
1461 if (rval < 0)
1462 return (-1);
1463
1464 /*
1465 * This is a "less than" check as "struct ti_sync_ack" returned by
1466 * TI_SYNC can grow in size in future kernels. If/when a statically
1467 * linked application is run on a future kernel, it should not fail.
1468 */
1469 if (retlen < (int)sizeof (struct ti_sync_ack)) {
1470 t_errno = TSYSERR;
1471 errno = EIO;
1472 return (-1);
1473 }
1474 if (tsap->tsa_flags & TSAF_EXP_QUEUED)
1475 *expedited_queuedp = 1;
1476 return (0);
1477 }
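
/*
 * Illustrative sketch (hypothetical caller, e.g. an XTI t_look()
 * implementation):
 *
 *	int exp_queued;
 *
 *	if (_t_expinline_queued(fd, &exp_queued) < 0)
 *		return (-1);
 *	if (exp_queued)
 *		... report T_EXDATA to the caller ...
 */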
1478
1479 /*
1480 * Support functions for use by functions that do scatter/gather
1481 * like t_sndv(), t_rcvv() etc..follow below.
1482 */
1483
1484 /*
1485 * _t_bytecount_upto_intmax() :
1486 * Sum of the lengths of the individual buffers in
1487 * the t_iovec array. If the sum exceeds INT_MAX
1488 * it is truncated to INT_MAX.
1489 */
1490 unsigned int
1491 _t_bytecount_upto_intmax(const struct t_iovec *tiov, unsigned int tiovcount)
1492 {
1493 size_t nbytes;
1494 int i;
1495
1496 nbytes = 0;
1497 for (i = 0; i < tiovcount && nbytes < INT_MAX; i++) {
1498 if (tiov[i].iov_len >= INT_MAX) {
1499 nbytes = INT_MAX;
1500 break;
1501 }
1502 nbytes += tiov[i].iov_len;
1503 }
1504
1505 if (nbytes > INT_MAX)
1506 nbytes = INT_MAX;
1507
1508 return ((unsigned int)nbytes);
1509 }
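
/*
 * For illustration: with two t_iovec entries of 10 and 20 bytes the
 * function returns 30; if any single iov_len or the running sum reaches
 * INT_MAX the result is clamped to INT_MAX.
 */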
1510
1511 /*
1512 * Gather the data in the t_iovec buffers, into a single linear buffer
1513 * starting at dataptr. Caller must have allocated sufficient space
1514 * starting at dataptr. The total amount of data that is gathered is
1515 * limited to INT_MAX. Any remaining data in the t_iovec buffers is
1516 * not copied.
1517 */
1518 void
1519 _t_gather(char *dataptr, const struct t_iovec *tiov, unsigned int tiovcount)
1520 {
1521 char *curptr;
1522 unsigned int cur_count;
1523 unsigned int nbytes_remaining;
1524 int i;
1525
1526 curptr = dataptr;
1527 cur_count = 0;
1528
1529 nbytes_remaining = _t_bytecount_upto_intmax(tiov, tiovcount);
1530 for (i = 0; i < tiovcount && nbytes_remaining != 0; i++) {
1531 if (tiov[i].iov_len <= nbytes_remaining)
1532 cur_count = (int)tiov[i].iov_len;
1533 else
1534 cur_count = nbytes_remaining;
1535 (void) memcpy(curptr, tiov[i].iov_base, cur_count);
1536 curptr += cur_count;
1537 nbytes_remaining -= cur_count;
1538 }
1539 }
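
/*
 * Illustrative sketch (hypothetical caller, e.g. a t_sndv()-style
 * routine): gather the user's t_iovec array into one linear buffer
 * before handing it to the transport.
 *
 *	unsigned int nbytes;
 *	char *databuf;
 *
 *	nbytes = _t_bytecount_upto_intmax(tiov, tiovcount);
 *	if ((databuf = malloc(nbytes)) == NULL)
 *		return (-1);
 *	_t_gather(databuf, tiov, tiovcount);
 *	... send nbytes from databuf, then free(databuf) ...
 */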
1540
1541 /*
1542 * Scatter the data from the single linear buffer at pdatabuf->buf into
1543 * the t_iovec buffers.
1544 */
1545 void
1546 _t_scatter(struct strbuf *pdatabuf, struct t_iovec *tiov, int tiovcount)
1547 {
1548 char *curptr;
1549 unsigned int nbytes_remaining;
1550 unsigned int curlen;
1551 int i;
1552
1553 /*
1554 * There cannot be any uncopied data leftover in pdatabuf
1555 * at the conclusion of this function. (asserted below)
1556 */
1557 assert(pdatabuf->len <= _t_bytecount_upto_intmax(tiov, tiovcount));
1558 curptr = pdatabuf->buf;
1559 nbytes_remaining = pdatabuf->len;
1560 for (i = 0; i < tiovcount && nbytes_remaining != 0; i++) {
1561 if (tiov[i].iov_len < nbytes_remaining)
1562 curlen = (unsigned int)tiov[i].iov_len;
1563 else
1564 curlen = nbytes_remaining;
1565 (void) memcpy(tiov[i].iov_base, curptr, curlen);
1566 curptr += curlen;
1567 nbytes_remaining -= curlen;
1568 }
1569 }
1570
1571 /*
1572 * Adjust the iovec array for subsequent use. Examine each element in the
1573 * iovec array, and zero out the iov_len if the buffer was sent fully;
1574 * otherwise the buffer was only partially sent, so adjust both iov_len and
1575 * iov_base.
1576 *
1577 */
1578 void
1579 _t_adjust_iov(int bytes_sent, struct iovec *iov, int *iovcountp)
1580 {
1581
1582 int i;
1583
1584 for (i = 0; i < *iovcountp && bytes_sent; i++) {
1585 if (iov[i].iov_len == 0)
1586 continue;
1587 if (bytes_sent < iov[i].iov_len)
1588 break;
1589 else {
1590 bytes_sent -= iov[i].iov_len;
1591 iov[i].iov_len = 0;
1592 }
1593 }
1594 iov[i].iov_len -= bytes_sent;
1595 iov[i].iov_base += bytes_sent;
1596 }
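
/*
 * For illustration: with iov[] lengths { 100, 200 } and bytes_sent of
 * 150, the first element is zeroed and the second is left with 150
 * bytes remaining, its iov_base advanced by 50, ready for the next
 * write attempt.
 */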
1597
1598 /*
1599 * Copy the t_iovec array to the iovec array while taking care to see
1600 * that the sum of the buffer lengths in the result is not more than
1601 * INT_MAX. This function requires that T_IOV_MAX is no larger than
1602 * IOV_MAX. Otherwise the resulting array is not a suitable input to
1603 * writev(). If the sum of the lengths in t_iovec is zero, so is the
1604 * resulting iovec.
1605 */
1606 void
1607 _t_copy_tiov_to_iov(const struct t_iovec *tiov, int tiovcount,
1608 struct iovec *iov, int *iovcountp)
1609 {
1610 int i;
1611 unsigned int nbytes_remaining;
1612
1613 nbytes_remaining = _t_bytecount_upto_intmax(tiov, tiovcount);
1614 i = 0;
1615 do {
1616 iov[i].iov_base = tiov[i].iov_base;
1617 if (tiov[i].iov_len > nbytes_remaining)
1618 iov[i].iov_len = nbytes_remaining;
1619 else
1620 iov[i].iov_len = tiov[i].iov_len;
1621 nbytes_remaining -= iov[i].iov_len;
1622 i++;
1623 } while (nbytes_remaining != 0 && i < tiovcount);
1624
1625 *iovcountp = i;
1626 }
1627
1628 /*
1629 * Routine called after connection establishment on transports where
1630 * connection establishment changes certain transport attributes such as
1631 * TIDU_size
1632 */
1633 int
1634 _t_do_postconn_sync(int fd, struct _ti_user *tiptr)
1635 {
1636 union {
1637 struct T_capability_req tc_req;
1638 struct T_capability_ack tc_ack;
1639 } ioctl_data;
1640
1641 void *ioctlbuf = &ioctl_data;
1642 int expected_acksize;
1643 int retlen, rval;
1644 struct T_capability_req *tc_reqp = (struct T_capability_req *)ioctlbuf;
1645 struct T_capability_ack *tc_ackp = (struct T_capability_ack *)ioctlbuf;
1646 struct T_info_ack *tiap;
1647
1648 /*
1649 * This T_CAPABILITY_REQ should not fail, even if it is unsupported
1650 * by the transport provider. timod will emulate it in that case.
1651 */
1652 tc_reqp->PRIM_type = T_CAPABILITY_REQ;
1653 tc_reqp->CAP_bits1 = TC1_INFO;
1654 rval = _t_do_ioctl(fd, (char *)ioctlbuf,
1655 (int)sizeof (struct T_capability_ack), TI_CAPABILITY, &retlen);
1656 expected_acksize = (int)sizeof (struct T_capability_ack);
1657
1658 if (rval < 0)
1659 return (-1);
1660
1661 /*
1662 * T_capability TPI messages are extensible and can grow in future.
1663 * However timod will take care of returning no more information
1664 * than what was requested, and truncating the "extended"
1665 * information towards the end of the T_capability_ack, if necessary.
1666 */
1667 if (retlen != expected_acksize) {
1668 t_errno = TSYSERR;
1669 errno = EIO;
1670 return (-1);
1671 }
1672
1673 /*
1674 * The T_info_ack part of the T_capability_ack is guaranteed to be
1675 * present only if the corresponding TC1_INFO bit is set
1676 */
1677 if ((tc_ackp->CAP_bits1 & TC1_INFO) == 0) {
1678 t_errno = TSYSERR;
1679 errno = EPROTO;
1680 return (-1);
1681 }
1682
1683 tiap = &tc_ackp->INFO_ack;
1684 if (tiap->PRIM_type != T_INFO_ACK) {
1685 t_errno = TSYSERR;
1686 errno = EPROTO;
1687 return (-1);
1688 }
1689
1690 /*
1691 * Note: Sync with the latest information returned in "struct T_info_ack",
1692 * but we deliberately do not sync the state here, as user level state
1693 * construction here is not required, only an update of attributes which
1694 * may have changed because of negotiations during connection
1695 * establishment.
1696 */
1697 assert(tiap->TIDU_size > 0);
1698 tiptr->ti_maxpsz = tiap->TIDU_size;
1699 assert(tiap->TSDU_size >= T_INVALID);
1700 tiptr->ti_tsdusize = tiap->TSDU_size;
1701 assert(tiap->ETSDU_size >= T_INVALID);
1702 tiptr->ti_etsdusize = tiap->ETSDU_size;
1703 assert(tiap->CDATA_size >= T_INVALID);
1704 tiptr->ti_cdatasize = tiap->CDATA_size;
1705 assert(tiap->DDATA_size >= T_INVALID);
1706 tiptr->ti_ddatasize = tiap->DDATA_size;
1707 tiptr->ti_prov_flag = tiap->PROVIDER_flag;
1708
1709 return (0);
1710 }
1711