/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 */

#include "mt.h"
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <errno.h>
#include <stropts.h>
#include <sys/stream.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/timod.h>
#include <sys/stat.h>
#include <xti.h>
#include <fcntl.h>
#include <signal.h>
#include <assert.h>
#include <syslog.h>
#include <limits.h>
#include <ucred.h>
#include "tx.h"

#define	DEFSIZE	2048

/*
 * The following used to be in tiuser.h, but was causing too much namespace
 * pollution.
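 * ROUNDUP32() below rounds a byte count up to a multiple of 4;
 * e.g. ROUNDUP32(5) == 8 while ROUNDUP32(8) == 8.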
 */
#define	ROUNDUP32(X) ((X + 0x03)&~0x03)

static struct _ti_user *find_tilink(int s);
static struct _ti_user *add_tilink(int s);
static void _t_free_lookbufs(struct _ti_user *tiptr);
static unsigned int _t_setsize(t_scalar_t infosize, boolean_t option);
static int _t_cbuf_alloc(struct _ti_user *tiptr, char **retbuf);
static int _t_rbuf_alloc(struct _ti_user *tiptr, char **retbuf);
static int _t_adjust_state(int fd, int instate);
static int _t_alloc_bufs(int fd, struct _ti_user *tiptr,
    struct T_info_ack *tsap);

mutex_t	_ti_userlock = DEFAULTMUTEX;	/* Protects hash_bucket[] */

/*
 * Checkfd - checks validity of file descriptor
 */
struct _ti_user *
_t_checkfd(int fd, int force_sync, int api_semantics)
{
	sigset_t mask;
	struct _ti_user *tiptr;
	int retval, timodpushed;

	if (fd < 0) {
		t_errno = TBADF;
		return (NULL);
	}

	if (!force_sync) {
		sig_mutex_lock(&_ti_userlock);
		tiptr = find_tilink(fd);
		sig_mutex_unlock(&_ti_userlock);
		if (tiptr != NULL)
			return (tiptr);
	}

	/*
	 * Not found or a forced sync is required.
	 * Check if this is a valid TLI/XTI descriptor.
	 */
	timodpushed = 0;
	do {
		retval = ioctl(fd, I_FIND, "timod");
	} while (retval < 0 && errno == EINTR);

	if (retval < 0 || (retval == 0 && _T_IS_TLI(api_semantics))) {
		/*
		 * not a stream, or a TLI endpoint with no timod
		 * XXX Note: If it is an XTI call, we push "timod" and
		 * try to convert it into a transport endpoint later.
		 * We do not do it for TLI and "retain" the old buggy
		 * behavior because ypbind and a lot of other daemons seem
		 * to use a buggy logic test of the form
		 * "(t_getstate(0) != -1 || t_errno != TBADF)" to see if
		 * they were ever invoked with a request on stdin and drop into
		 * untested code. This test is in code generated by rpcgen,
		 * which is why the test is replicated in many daemons too.
		 * We will need to fix that test too with an "IsaTLIendpoint"
		 * test if we ever fix this for TLI.
		 */
		t_errno = TBADF;
		return (NULL);
	}

	if (retval == 0) {
		/*
		 * "timod" is not already on the stream, so push it
		 */
		do {
			/*
			 * Assumes (correctly) that I_PUSH is
			 * atomic w.r.t signals (EINTR error)
			 */
			retval = ioctl(fd, I_PUSH, "timod");
		} while (retval < 0 && errno == EINTR);

		if (retval < 0) {
			t_errno = TSYSERR;
			return (NULL);
		}
		timodpushed = 1;
	}
	/*
	 * Try to (re)constitute the info at user level from state
	 * in the kernel. This could be information that was lost due
	 * to an exec or being instantiated at a new descriptor due
	 * to open(), dup2() etc.
	 *
	 * _t_create() requires that all signals be blocked.
	 * Note that sig_mutex_lock() only defers signals, it does not
	 * block them, so interruptible syscalls could still get EINTR.
	 */
	(void) thr_sigsetmask(SIG_SETMASK, &fillset, &mask);
	sig_mutex_lock(&_ti_userlock);
	tiptr = _t_create(fd, NULL, api_semantics, NULL);
	if (tiptr == NULL) {
		int sv_errno = errno;
		sig_mutex_unlock(&_ti_userlock);
		(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
		/*
		 * restore the stream to its state before timod was pushed.
		 * It may not have been a network transport stream.
		 */
		if (timodpushed)
			(void) ioctl(fd, I_POP, 0);
		errno = sv_errno;
		return (NULL);
	}
	sig_mutex_unlock(&_ti_userlock);
	(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	return (tiptr);
}

/*
 * copy data to output buffer making sure the output buffer is 32 bit
 * aligned, even though the input buffer may not be.
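 * For example, an init_offset of 6 yields a rtn_offset of ROUNDUP32(6),
 * i.e. 8, and the data is copied at that offset.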
 */
int
_t_aligned_copy(
	struct strbuf *strbufp,
	int len,
	int init_offset,
	char *datap,
	t_scalar_t *rtn_offset)
{
	*rtn_offset = ROUNDUP32(init_offset);
	if ((*rtn_offset + len) > strbufp->maxlen) {
		/*
		 * Aligned copy will overflow buffer
		 */
		return (-1);
	}
	(void) memcpy(strbufp->buf + *rtn_offset, datap, (size_t)len);

	return (0);
}


/*
 * append data and control info in look buffer (list in the MT case)
 *
 * The only thing that can be in look buffer is a T_DISCON_IND,
 * T_ORDREL_IND or a T_UDERROR_IND.
 *
 * It also enforces priority of T_DISCON_INDs over any T_ORDREL_IND
 * already in the buffer. It assumes no T_ORDREL_IND is appended
 * when there is already something on the looklist (error case) and
 * that a T_ORDREL_IND, if present, will always be the first on the
 * list.
 *
 * This also assumes ti_lock is held via sig_mutex_lock(),
 * so signals are deferred here.
 */
int
_t_register_lookevent(
	struct _ti_user *tiptr,
	caddr_t dptr,
	int dsize,
	caddr_t cptr,
	int csize)
{
	struct _ti_lookbufs *tlbs;
	int cbuf_size, dbuf_size;

	assert(MUTEX_HELD(&tiptr->ti_lock));

	cbuf_size = tiptr->ti_ctlsize;
	dbuf_size = tiptr->ti_rcvsize;

	if ((csize > cbuf_size) || dsize > dbuf_size) {
		/* can't fit - return error */
		return (-1);	/* error */
	}
	/*
	 * Enforce priority of T_DISCON_IND over T_ORDREL_IND
	 * queued earlier.
	 * Note: Since there can be at most one T_ORDREL_IND
	 * queued (more than one is an error case), and we look for it
	 * on each append of T_DISCON_IND, it can only be at the
	 * head of the list if it is there.
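	 * In other words, a T_DISCON_IND being appended while a T_ORDREL_IND
	 * sits at the head simply replaces it (via _t_free_looklist_head()
	 * below).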
	 */
	if (tiptr->ti_lookcnt > 0) { /* something already on looklist */
		if (cptr && csize >= (int)sizeof (struct T_discon_ind) &&
		    /* LINTED pointer cast */
		    *(t_scalar_t *)cptr == T_DISCON_IND) {
			/* appending discon ind */
			assert(tiptr->ti_servtype != T_CLTS);
			/* LINTED pointer cast */
			if (*(t_scalar_t *)tiptr->ti_lookbufs.tl_lookcbuf ==
			    T_ORDREL_IND) { /* T_ORDREL_IND is on list */
				/*
				 * Blow away T_ORDREL_IND
				 */
				_t_free_looklist_head(tiptr);
			}
		}
	}
	tlbs = &tiptr->ti_lookbufs;
	if (tiptr->ti_lookcnt > 0) {
		int listcount = 0;
		/*
		 * Allocate and append a new lookbuf to the
		 * existing list. (Should only happen in MT case)
		 */
		while (tlbs->tl_next != NULL) {
			listcount++;
			tlbs = tlbs->tl_next;
		}
		assert(tiptr->ti_lookcnt == listcount);

		/*
		 * signals are deferred, calls to malloc() are safe.
		 */
		if ((tlbs->tl_next = malloc(sizeof (struct _ti_lookbufs))) ==
		    NULL)
			return (-1); /* error */
		tlbs = tlbs->tl_next;
		/*
		 * Allocate the buffers. The sizes are derived from the
		 * sizes of other related buffers. See _t_alloc_bufs()
		 * for details.
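		 * (tl_lookcbuf is sized like the control buffer and
		 * tl_lookdbuf like the receive buffer, matching the fit
		 * check at the top of this routine.)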
		 */
		if ((tlbs->tl_lookcbuf = malloc(cbuf_size)) == NULL) {
			/* giving up - free other memory chunks */
			free(tlbs);
			return (-1); /* error */
		}
		if ((dsize > 0) &&
		    ((tlbs->tl_lookdbuf = malloc(dbuf_size)) == NULL)) {
			/* giving up - free other memory chunks */
			free(tlbs->tl_lookcbuf);
			free(tlbs);
			return (-1); /* error */
		}
	}

	(void) memcpy(tlbs->tl_lookcbuf, cptr, csize);
	if (dsize > 0)
		(void) memcpy(tlbs->tl_lookdbuf, dptr, dsize);
	tlbs->tl_lookdlen = dsize;
	tlbs->tl_lookclen = csize;
	tlbs->tl_next = NULL;
	tiptr->ti_lookcnt++;
	return (0); /* ok return */
}

/*
 * Is there something that needs attention?
 * Assumes tiptr->ti_lock is held and this thread's signals are blocked
 * in the MT case.
 */
int
_t_is_event(int fd, struct _ti_user *tiptr)
{
	int size, retval;

	assert(MUTEX_HELD(&tiptr->ti_lock));
	if ((retval = ioctl(fd, I_NREAD, &size)) < 0) {
		t_errno = TSYSERR;
		return (-1);
	}

	if ((retval > 0) || (tiptr->ti_lookcnt > 0)) {
		t_errno = TLOOK;
		return (-1);
	}
	return (0);
}

/*
 * wait for T_OK_ACK
 * assumes tiptr->ti_lock held in MT case
 */
int
_t_is_ok(int fd, struct _ti_user *tiptr, t_scalar_t type)
{
	struct strbuf ctlbuf;
	struct strbuf databuf;
	union T_primitives *pptr;
	int retval, cntlflag;
	int size;
	int didalloc, didralloc;
	int flags = 0;

	assert(MUTEX_HELD(&tiptr->ti_lock));
	/*
	 * Acquire ctlbuf for use in sending/receiving control part
	 * of the message.
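	 * (didalloc records whether a buffer was malloc'd here or the
	 * cached ti_ctlbuf was borrowed, so the cleanup paths below know
	 * whether to free() it or hand it back.)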
	 */
	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0)
		return (-1);
	/*
	 * Acquire databuf for use in sending/receiving data part
	 */
	if (_t_acquire_databuf(tiptr, &databuf, &didralloc) < 0) {
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		return (-1);
	}

	/*
	 * Temporarily convert a non blocking endpoint to a
	 * blocking one and restore status later
	 */
	cntlflag = fcntl(fd, F_GETFL, 0);
	if (cntlflag & (O_NDELAY | O_NONBLOCK))
		(void) fcntl(fd, F_SETFL, cntlflag & ~(O_NDELAY | O_NONBLOCK));

	flags = RS_HIPRI;

	while ((retval = getmsg(fd, &ctlbuf, &databuf, &flags)) < 0) {
		if (errno == EINTR)
			continue;
		if (cntlflag & (O_NDELAY | O_NONBLOCK))
			(void) fcntl(fd, F_SETFL, cntlflag);
		t_errno = TSYSERR;
		goto err_out;
	}

	/* did I get entire message */
	if (retval > 0) {
		if (cntlflag & (O_NDELAY | O_NONBLOCK))
			(void) fcntl(fd, F_SETFL, cntlflag);
		t_errno = TSYSERR;
		errno = EIO;
		goto err_out;
	}

	/*
	 * is ctl part large enough to determine type?
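	 * (At least sizeof (t_scalar_t) bytes are needed to read the
	 * PRIM_type at the start of the TPI message.)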
	 */
	if (ctlbuf.len < (int)sizeof (t_scalar_t)) {
		if (cntlflag & (O_NDELAY | O_NONBLOCK))
			(void) fcntl(fd, F_SETFL, cntlflag);
		t_errno = TSYSERR;
		errno = EPROTO;
		goto err_out;
	}

	if (cntlflag & (O_NDELAY | O_NONBLOCK))
		(void) fcntl(fd, F_SETFL, cntlflag);

	/* LINTED pointer cast */
	pptr = (union T_primitives *)ctlbuf.buf;

	switch (pptr->type) {
	case T_OK_ACK:
		if ((ctlbuf.len < (int)sizeof (struct T_ok_ack)) ||
		    (pptr->ok_ack.CORRECT_prim != type)) {
			t_errno = TSYSERR;
			errno = EPROTO;
			goto err_out;
		}
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		if (didralloc)
			free(databuf.buf);
		else
			tiptr->ti_rcvbuf = databuf.buf;
		return (0);

	case T_ERROR_ACK:
		if ((ctlbuf.len < (int)sizeof (struct T_error_ack)) ||
		    (pptr->error_ack.ERROR_prim != type)) {
			t_errno = TSYSERR;
			errno = EPROTO;
			goto err_out;
		}
		/*
		 * if error is out of state and there is something
		 * on read queue, then indicate to user that
		 * there is something that needs attention
		 */
		if (pptr->error_ack.TLI_error == TOUTSTATE) {
			if ((retval = ioctl(fd, I_NREAD, &size)) < 0) {
				t_errno = TSYSERR;
				goto err_out;
			}
			if (retval > 0)
				t_errno = TLOOK;
			else
				t_errno = TOUTSTATE;
		} else {
			t_errno = pptr->error_ack.TLI_error;
			if (t_errno == TSYSERR)
				errno = pptr->error_ack.UNIX_error;
		}
		goto err_out;
	default:
		t_errno = TSYSERR;
		errno = EPROTO;
		/* fallthru to err_out: */
	}
err_out:
	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	if (didralloc)
		free(databuf.buf);
	else
		tiptr->ti_rcvbuf = databuf.buf;
	return (-1);
}

/*
 * timod ioctl
 */
int
_t_do_ioctl(int fd, char *buf, int size, int cmd, int *retlenp)
{
	int retval;
	struct strioctl strioc;

	strioc.ic_cmd = cmd;
	strioc.ic_timout = -1;
	strioc.ic_len = size;
	strioc.ic_dp = buf;

	if ((retval = ioctl(fd, I_STR, &strioc)) < 0) {
		t_errno = TSYSERR;
		return (-1);
	}

	if (retval > 0) {
		t_errno = retval & 0xff;
		if (t_errno == TSYSERR)
			errno = (retval >> 8) & 0xff;
		return (-1);
	}
	if (retlenp)
		*retlenp = strioc.ic_len;
	return (0);
}

/*
 * alloc scratch buffers and look buffers
 */
/* ARGSUSED */
static int
_t_alloc_bufs(int fd, struct _ti_user *tiptr, struct T_info_ack *tsap)
{
	unsigned int size1, size2;
	t_scalar_t optsize;
	unsigned int csize, dsize, asize, osize;
	char *ctlbuf, *rcvbuf;
	char *lookdbuf, *lookcbuf;

	csize = _t_setsize(tsap->CDATA_size, B_FALSE);
	dsize = _t_setsize(tsap->DDATA_size, B_FALSE);

	size1 = _T_MAX(csize, dsize);

	if (size1 != 0) {
		if ((rcvbuf = malloc(size1)) == NULL)
			return (-1);
		if ((lookdbuf = malloc(size1)) == NULL) {
			free(rcvbuf);
			return (-1);
		}
	} else {
		rcvbuf = NULL;
		lookdbuf = NULL;
	}

	asize = _t_setsize(tsap->ADDR_size, B_FALSE);
	if (tsap->OPT_size >= 0)
		/* compensate for XTI level options */
		optsize = tsap->OPT_size + TX_XTI_LEVEL_MAX_OPTBUF;
	else
		optsize = tsap->OPT_size;
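	/*
	 * The B_TRUE argument below tells _t_setsize() to leave room for a
	 * ucred in the option buffer when the provider reports T_INFINITE
	 * for its option size.
	 */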
	osize = _t_setsize(optsize, B_TRUE);

	/*
	 * We compute the largest buffer size needed for this provider by
	 * adding the components. [An extra sizeof (t_scalar_t) is added to
	 * take care of rounding off for alignment for each buffer.]
	 * The goal here is to compute the size of the largest possible
	 * buffer that might be needed to hold a TPI message for the
	 * transport provider on this endpoint.
	 * Note: T_ADDR_ACK contains potentially two address buffers.
	 */

	size2 = (unsigned int)sizeof (union T_primitives) /* TPI struct */
	    + asize + (unsigned int)sizeof (t_scalar_t) +
	    /* first addr buffer plus alignment */
	    asize + (unsigned int)sizeof (t_scalar_t) +
	    /* second addr buffer plus alignment */
	    osize + (unsigned int)sizeof (t_scalar_t);
	    /* option buffer plus alignment */

	if ((ctlbuf = malloc(size2)) == NULL) {
		if (size1 != 0) {
			free(rcvbuf);
			free(lookdbuf);
		}
		return (-1);
	}

	if ((lookcbuf = malloc(size2)) == NULL) {
		if (size1 != 0) {
			free(rcvbuf);
			free(lookdbuf);
		}
		free(ctlbuf);
		return (-1);
	}

	tiptr->ti_rcvsize = size1;
	tiptr->ti_rcvbuf = rcvbuf;
	tiptr->ti_ctlsize = size2;
	tiptr->ti_ctlbuf = ctlbuf;

	/*
	 * Note: The head of the lookbuffers list (and associated buffers)
	 * is allocated here on initialization.
	 * More are allocated on demand.
	 */
	tiptr->ti_lookbufs.tl_lookclen = 0;
	tiptr->ti_lookbufs.tl_lookcbuf = lookcbuf;
	tiptr->ti_lookbufs.tl_lookdlen = 0;
	tiptr->ti_lookbufs.tl_lookdbuf = lookdbuf;

	return (0);
}


/*
 * set sizes of buffers
 */
static unsigned int
_t_setsize(t_scalar_t infosize, boolean_t option)
{
	static size_t optinfsize;
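	/* cached option size for the T_INFINITE case, computed once */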

	switch (infosize) {
	case T_INFINITE /* -1 */:
		if (option) {
			if (optinfsize == 0) {
				size_t uc = ucred_size();
				if (uc < DEFSIZE/2)
					optinfsize = DEFSIZE;
				else
					optinfsize = ucred_size() + DEFSIZE/2;
			}
			return ((unsigned int)optinfsize);
		}
		return (DEFSIZE);
	case T_INVALID /* -2 */:
		return (0);
	default:
		return ((unsigned int) infosize);
	}
}

static void
_t_reinit_tiptr(struct _ti_user *tiptr)
{
	/*
	 * Note: This routine is designed for "reinitialization".
	 * The following fields are not modified here and are preserved:
	 *	- ti_fd field
	 *	- ti_lock
	 *	- ti_next
	 *	- ti_prev
	 * The above fields have to be separately initialized if this
	 * is used for a fresh initialization.
	 */

	tiptr->ti_flags = 0;
	tiptr->ti_rcvsize = 0;
	tiptr->ti_rcvbuf = NULL;
	tiptr->ti_ctlsize = 0;
	tiptr->ti_ctlbuf = NULL;
	tiptr->ti_lookbufs.tl_lookdbuf = NULL;
	tiptr->ti_lookbufs.tl_lookcbuf = NULL;
	tiptr->ti_lookbufs.tl_lookdlen = 0;
	tiptr->ti_lookbufs.tl_lookclen = 0;
	tiptr->ti_lookbufs.tl_next = NULL;
	tiptr->ti_maxpsz = 0;
	tiptr->ti_tsdusize = 0;
	tiptr->ti_etsdusize = 0;
	tiptr->ti_cdatasize = 0;
	tiptr->ti_ddatasize = 0;
	tiptr->ti_servtype = 0;
	tiptr->ti_lookcnt = 0;
	tiptr->ti_state = 0;
	tiptr->ti_ocnt = 0;
	tiptr->ti_prov_flag = 0;
	tiptr->ti_qlen = 0;
}

/*
 * Link manipulation routines.
 *
 * NBUCKETS hash buckets are used to give fast
 * access. The number is derived from the file descriptor
 * softlimit number (64).
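 * A descriptor simply hashes to bucket (fd % NBUCKETS); colliding
 * descriptors are chained through ti_next/ti_prev and told apart by
 * their ti_fd fields.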
 */

#define	NBUCKETS	64
static struct _ti_user *hash_bucket[NBUCKETS];

/*
 * Allocates a new link and returns a pointer to it.
 * Assumes that the caller is holding _ti_userlock via sig_mutex_lock(),
 * so signals are deferred here.
 */
static struct _ti_user *
add_tilink(int s)
{
	struct _ti_user	*tiptr;
	struct _ti_user	*prevptr;
	struct _ti_user	*curptr;
	int	x;
	struct stat stbuf;

	assert(MUTEX_HELD(&_ti_userlock));

	if (s < 0 || fstat(s, &stbuf) != 0)
		return (NULL);

	x = s % NBUCKETS;
	if (hash_bucket[x] != NULL) {
		/*
		 * Walk along the bucket looking for
		 * a duplicate entry or the end.
		 */
		for (curptr = hash_bucket[x]; curptr != NULL;
		    curptr = curptr->ti_next) {
			if (curptr->ti_fd == s) {
				/*
				 * This can happen when the user has close(2)'ed
				 * a descriptor and then had it allocated again
				 * via t_open().
				 *
				 * We will re-use the existing _ti_user struct
				 * in this case rather than allocating a new
				 * one. If there are buffers associated with
				 * the existing _ti_user struct, they may not
				 * be the correct size, so we cannot use them.
				 * We free them here and re-allocate new ones
				 * later on.
				 */
				if (curptr->ti_rcvbuf != NULL)
					free(curptr->ti_rcvbuf);
				free(curptr->ti_ctlbuf);
				_t_free_lookbufs(curptr);
				_t_reinit_tiptr(curptr);
				curptr->ti_rdev = stbuf.st_rdev;
				curptr->ti_ino = stbuf.st_ino;
				return (curptr);
			}
			prevptr = curptr;
		}
		/*
		 * Allocate and link in a new one.
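		 * (prevptr was left pointing at the tail of the bucket
		 * chain by the loop above.)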
		 */
		if ((tiptr = malloc(sizeof (*tiptr))) == NULL)
			return (NULL);
		/*
		 * First initialize fields common with reinitialization and
		 * then other fields too
		 */
		_t_reinit_tiptr(tiptr);
		prevptr->ti_next = tiptr;
		tiptr->ti_prev = prevptr;
	} else {
		/*
		 * First entry.
		 */
		if ((tiptr = malloc(sizeof (*tiptr))) == NULL)
			return (NULL);
		_t_reinit_tiptr(tiptr);
		hash_bucket[x] = tiptr;
		tiptr->ti_prev = NULL;
	}
	tiptr->ti_next = NULL;
	tiptr->ti_fd = s;
	tiptr->ti_rdev = stbuf.st_rdev;
	tiptr->ti_ino = stbuf.st_ino;
	(void) mutex_init(&tiptr->ti_lock, USYNC_THREAD, NULL);
	return (tiptr);
}

/*
 * Find a link by descriptor
 * Assumes that the caller is holding _ti_userlock.
 */
static struct _ti_user *
find_tilink(int s)
{
	struct _ti_user	*curptr;
	int	x;
	struct stat stbuf;

	assert(MUTEX_HELD(&_ti_userlock));

	if (s < 0 || fstat(s, &stbuf) != 0)
		return (NULL);

	x = s % NBUCKETS;
	/*
	 * Walk along the bucket looking for the descriptor.
	 */
	for (curptr = hash_bucket[x]; curptr; curptr = curptr->ti_next) {
		if (curptr->ti_fd == s) {
			if (curptr->ti_rdev == stbuf.st_rdev &&
			    curptr->ti_ino == stbuf.st_ino)
				return (curptr);
			(void) _t_delete_tilink(s);
		}
	}
	return (NULL);
}

/*
 * Assumes that the caller is holding _ti_userlock.
 * Also assumes that all signals are blocked.
 */
int
_t_delete_tilink(int s)
{
	struct _ti_user	*curptr;
	int	x;

	/*
	 * Find the link.
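	 * If found, unlink it from its hash bucket and free its buffers.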
	 */
	assert(MUTEX_HELD(&_ti_userlock));
	if (s < 0)
		return (-1);
	x = s % NBUCKETS;
	/*
	 * Walk along the bucket looking for
	 * the descriptor.
	 */
	for (curptr = hash_bucket[x]; curptr; curptr = curptr->ti_next) {
		if (curptr->ti_fd == s) {
			struct _ti_user *nextptr;
			struct _ti_user *prevptr;

			nextptr = curptr->ti_next;
			prevptr = curptr->ti_prev;
			if (prevptr)
				prevptr->ti_next = nextptr;
			else
				hash_bucket[x] = nextptr;
			if (nextptr)
				nextptr->ti_prev = prevptr;

			/*
			 * free resources associated with the curptr
			 */
			if (curptr->ti_rcvbuf != NULL)
				free(curptr->ti_rcvbuf);
			free(curptr->ti_ctlbuf);
			_t_free_lookbufs(curptr);
			(void) mutex_destroy(&curptr->ti_lock);
			free(curptr);
			return (0);
		}
	}
	return (-1);
}

/*
 * Allocate a TLI state structure and synch it with the kernel
 * *tiptr is returned
 * Assumes that the caller is holding the _ti_userlock and has blocked signals.
 *
 * This function may fail the first time it is called with a given transport
 * if it doesn't support the T_CAPABILITY_REQ TPI message.
 */
struct _ti_user *
_t_create(int fd, struct t_info *info, int api_semantics, int *t_capreq_failed)
{
	/*
	 * Aligned data buffer for ioctl.
	 */
	union {
		struct ti_sync_req ti_req;
		struct ti_sync_ack ti_ack;
		union T_primitives t_prim;
		char pad[128];
	} ioctl_data;
	void *ioctlbuf = &ioctl_data; /* TI_SYNC/GETINFO with room to grow */
			    /* preferred location: first local variable */
			    /* see note below */
	/*
	 * Note: We use "ioctlbuf" allocated on stack above with
	 * room to grow since (struct ti_sync_ack) can grow in size
	 * on future kernels. (We do not use malloc'd "ti_ctlbuf" as that
	 * is part of the instance structure, which may not exist yet.)
	 * Its preferred declaration location is as the first local variable
	 * in this procedure, as bugs causing overruns will be detectable on
	 * platforms where procedure calling conventions place the return
	 * address on the stack (such as x86) instead of causing silent
	 * memory corruption.
	 */
	struct ti_sync_req *tsrp = (struct ti_sync_req *)ioctlbuf;
	struct ti_sync_ack *tsap = (struct ti_sync_ack *)ioctlbuf;
	struct T_capability_req *tcrp = (struct T_capability_req *)ioctlbuf;
	struct T_capability_ack *tcap = (struct T_capability_ack *)ioctlbuf;
	struct T_info_ack *tiap = &tcap->INFO_ack;
	struct _ti_user *ntiptr;
	int expected_acksize;
	int retlen, rstate, sv_errno, rval;

	assert(MUTEX_HELD(&_ti_userlock));

	/*
	 * Use ioctl required for sync'ing state with kernel.
	 * We use two ioctls. TI_CAPABILITY is used to get TPI information and
	 * TI_SYNC is used to synchronise state with timod. Statically linked
	 * TLI applications will no longer work on older releases where there
	 * are no TI_SYNC and TI_CAPABILITY.
	 */

	/*
	 * Request info about transport.
	 * Assumes that TC1_INFO should always be implemented.
	 * For TI_CAPABILITY the size argument to ioctl specifies the maximum
	 * buffer size.
	 */
	tcrp->PRIM_type = T_CAPABILITY_REQ;
	tcrp->CAP_bits1 = TC1_INFO | TC1_ACCEPTOR_ID;
	rval = _t_do_ioctl(fd, (char *)ioctlbuf,
	    (int)sizeof (struct T_capability_ack), TI_CAPABILITY, &retlen);
	expected_acksize = (int)sizeof (struct T_capability_ack);

	if (rval < 0) {
		/*
		 * TI_CAPABILITY may fail when the transport provider doesn't
		 * support the T_CAPABILITY_REQ message type. In this case the
		 * file descriptor may be unusable (when the transport provider
		 * sent M_ERROR in response to T_CAPABILITY_REQ). This should
		 * only happen once during system lifetime for a given
		 * transport provider, since timod will emulate TI_CAPABILITY
		 * after it has detected the failure.
		 */
		if (t_capreq_failed != NULL)
			*t_capreq_failed = 1;
		return (NULL);
	}

	if (retlen != expected_acksize) {
		t_errno = TSYSERR;
		errno = EIO;
		return (NULL);
	}

	if ((tcap->CAP_bits1 & TC1_INFO) == 0) {
		t_errno = TSYSERR;
		errno = EPROTO;
		return (NULL);
	}
	if (info != NULL) {
		if (tiap->PRIM_type != T_INFO_ACK) {
			t_errno = TSYSERR;
			errno = EPROTO;
			return (NULL);
		}
		info->addr = tiap->ADDR_size;
		info->options = tiap->OPT_size;
		info->tsdu = tiap->TSDU_size;
		info->etsdu = tiap->ETSDU_size;
		info->connect = tiap->CDATA_size;
		info->discon = tiap->DDATA_size;
		info->servtype = tiap->SERV_type;
		if (_T_IS_XTI(api_semantics)) {
			/*
			 * XTI ONLY - TLI "struct t_info" does not
			 * have "flags"
			 */
			info->flags = 0;
			if (tiap->PROVIDER_flag & (SENDZERO|OLD_SENDZERO))
				info->flags |= T_SENDZERO;
			/*
			 * Some day there MAY be a NEW bit in the T_info_ack
			 * PROVIDER_flag namespace exposed by the TPI header
			 * <sys/tihdr.h> which will functionally correspond to
			 * the role played by T_ORDRELDATA in the info->flags
			 * namespace. When that bit exists, we can add a test
			 * to see if it is set and set T_ORDRELDATA.
			 * Note: Currently only the mOSI ("minimal OSI")
			 * provider is specified to use T_ORDRELDATA, so the
			 * probability of needing it is minimal.
			 */
		}
	}

	/*
	 * If first time or no instance (after fork/exec, dup etc.),
	 * then create and initialize the data structure
	 * and allocate buffers.
	 */
	ntiptr = add_tilink(fd);
	if (ntiptr == NULL) {
		t_errno = TSYSERR;
		errno = ENOMEM;
		return (NULL);
	}

	/*
	 * Allocate buffers for the new descriptor
	 */
	if (_t_alloc_bufs(fd, ntiptr, tiap) < 0) {
		sv_errno = errno;
		(void) _t_delete_tilink(fd);
		t_errno = TSYSERR;
		errno = sv_errno;
		return (NULL);
	}

	/* Fill instance structure */

	ntiptr->ti_lookcnt = 0;
	ntiptr->ti_flags = USED;
	ntiptr->ti_state = T_UNINIT;
	ntiptr->ti_ocnt = 0;

	assert(tiap->TIDU_size > 0);
	ntiptr->ti_maxpsz = tiap->TIDU_size;
	assert(tiap->TSDU_size >= -2);
	ntiptr->ti_tsdusize = tiap->TSDU_size;
	assert(tiap->ETSDU_size >= -2);
	ntiptr->ti_etsdusize = tiap->ETSDU_size;
	assert(tiap->CDATA_size >= -2);
	ntiptr->ti_cdatasize = tiap->CDATA_size;
	assert(tiap->DDATA_size >= -2);
	ntiptr->ti_ddatasize = tiap->DDATA_size;
	ntiptr->ti_servtype = tiap->SERV_type;
	ntiptr->ti_prov_flag = tiap->PROVIDER_flag;

	if ((tcap->CAP_bits1 & TC1_ACCEPTOR_ID) != 0) {
		ntiptr->acceptor_id = tcap->ACCEPTOR_id;
		ntiptr->ti_flags |= V_ACCEPTOR_ID;
	}
	else
		ntiptr->ti_flags &= ~V_ACCEPTOR_ID;

	/*
	 * Restore state from kernel (caveat some heuristics)
	 */
	switch (tiap->CURRENT_state) {

	case TS_UNBND:
		ntiptr->ti_state = T_UNBND;
		break;

	case TS_IDLE:
		if ((rstate = _t_adjust_state(fd, T_IDLE)) < 0) {
			sv_errno = errno;
			(void) _t_delete_tilink(fd);
			errno = sv_errno;
			return (NULL);
		}
		ntiptr->ti_state = rstate;
		break;

	case TS_WRES_CIND:
		ntiptr->ti_state = T_INCON;
		break;

	case TS_WCON_CREQ:
		ntiptr->ti_state = T_OUTCON;
		break;

	case TS_DATA_XFER:
		if ((rstate = _t_adjust_state(fd, T_DATAXFER)) < 0) {
			sv_errno = errno;
			(void) _t_delete_tilink(fd);
			errno = sv_errno;
			return (NULL);
		}
		ntiptr->ti_state = rstate;
		break;

	case TS_WIND_ORDREL:
		ntiptr->ti_state = T_OUTREL;
		break;

	case TS_WREQ_ORDREL:
		if ((rstate = _t_adjust_state(fd, T_INREL)) < 0) {
			sv_errno = errno;
			(void) _t_delete_tilink(fd);
			errno = sv_errno;
			return (NULL);
		}
		ntiptr->ti_state = rstate;
		break;
	default:
		t_errno = TSTATECHNG;
		(void) _t_delete_tilink(fd);
		return (NULL);
	}

	/*
	 * Sync information with timod.
	 */
	tsrp->tsr_flags = TSRF_QLEN_REQ;

	rval = _t_do_ioctl(fd, ioctlbuf,
	    (int)sizeof (struct ti_sync_req), TI_SYNC, &retlen);
	expected_acksize = (int)sizeof (struct ti_sync_ack);

	if (rval < 0) {
		sv_errno = errno;
		(void) _t_delete_tilink(fd);
		t_errno = TSYSERR;
		errno = sv_errno;
		return (NULL);
	}

	/*
	 * This is a "less than" check as "struct ti_sync_ack" returned by
	 * TI_SYNC can grow in size in future kernels. If/when a statically
	 * linked application is run on a future kernel, it should not fail.
10857c478bd9Sstevel@tonic-gate */ 10867c478bd9Sstevel@tonic-gate if (retlen < expected_acksize) { 10877c478bd9Sstevel@tonic-gate sv_errno = errno; 10887c478bd9Sstevel@tonic-gate (void) _t_delete_tilink(fd); 10897c478bd9Sstevel@tonic-gate t_errno = TSYSERR; 10907c478bd9Sstevel@tonic-gate errno = sv_errno; 10917c478bd9Sstevel@tonic-gate return (NULL); 10927c478bd9Sstevel@tonic-gate } 10937c478bd9Sstevel@tonic-gate 10947c478bd9Sstevel@tonic-gate if (_T_IS_TLI(api_semantics)) 10957c478bd9Sstevel@tonic-gate tsap->tsa_qlen = 0; /* not needed for TLI */ 10967c478bd9Sstevel@tonic-gate 10977c478bd9Sstevel@tonic-gate ntiptr->ti_qlen = tsap->tsa_qlen; 1098*d0fcb88aSMarcel Telka 10997c478bd9Sstevel@tonic-gate return (ntiptr); 11007c478bd9Sstevel@tonic-gate } 11017c478bd9Sstevel@tonic-gate 11027c478bd9Sstevel@tonic-gate 11037c478bd9Sstevel@tonic-gate static int 11047c478bd9Sstevel@tonic-gate _t_adjust_state(int fd, int instate) 11057c478bd9Sstevel@tonic-gate { 11067c478bd9Sstevel@tonic-gate char ctlbuf[sizeof (t_scalar_t)]; 11077c478bd9Sstevel@tonic-gate char databuf[sizeof (int)]; /* size unimportant - anything > 0 */ 11087c478bd9Sstevel@tonic-gate struct strpeek arg; 11097c478bd9Sstevel@tonic-gate int outstate, retval; 11107c478bd9Sstevel@tonic-gate 11117c478bd9Sstevel@tonic-gate /* 11127c478bd9Sstevel@tonic-gate * Peek at message on stream head (if any) 11137c478bd9Sstevel@tonic-gate * and see if it is data 11147c478bd9Sstevel@tonic-gate */ 11157c478bd9Sstevel@tonic-gate arg.ctlbuf.buf = ctlbuf; 11167c478bd9Sstevel@tonic-gate arg.ctlbuf.maxlen = (int)sizeof (ctlbuf); 11177c478bd9Sstevel@tonic-gate arg.ctlbuf.len = 0; 11187c478bd9Sstevel@tonic-gate 11197c478bd9Sstevel@tonic-gate arg.databuf.buf = databuf; 11207c478bd9Sstevel@tonic-gate arg.databuf.maxlen = (int)sizeof (databuf); 11217c478bd9Sstevel@tonic-gate arg.databuf.len = 0; 11227c478bd9Sstevel@tonic-gate 11237c478bd9Sstevel@tonic-gate arg.flags = 0; 11247c478bd9Sstevel@tonic-gate 1125e8031f0aSraf if ((retval = ioctl(fd, I_PEEK, &arg)) < 0) { 11267c478bd9Sstevel@tonic-gate t_errno = TSYSERR; 11277c478bd9Sstevel@tonic-gate return (-1); 11287c478bd9Sstevel@tonic-gate } 11297c478bd9Sstevel@tonic-gate outstate = instate; 11307c478bd9Sstevel@tonic-gate /* 11317c478bd9Sstevel@tonic-gate * If peek shows something at stream head, then 11327c478bd9Sstevel@tonic-gate * Adjust "outstate" based on some heuristics. 
11337c478bd9Sstevel@tonic-gate */ 11347c478bd9Sstevel@tonic-gate if (retval > 0) { 11357c478bd9Sstevel@tonic-gate switch (instate) { 11367c478bd9Sstevel@tonic-gate case T_IDLE: 11377c478bd9Sstevel@tonic-gate /* 11387c478bd9Sstevel@tonic-gate * The following heuristic is to handle data 11397c478bd9Sstevel@tonic-gate * ahead of T_DISCON_IND indications that might 11407c478bd9Sstevel@tonic-gate * be at the stream head waiting to be 11417c478bd9Sstevel@tonic-gate * read (T_DATA_IND or M_DATA) 11427c478bd9Sstevel@tonic-gate */ 11437c478bd9Sstevel@tonic-gate if (((arg.ctlbuf.len == 4) && 114461961e0fSrobinson /* LINTED pointer cast */ 11457c478bd9Sstevel@tonic-gate ((*(int32_t *)arg.ctlbuf.buf) == T_DATA_IND)) || 11467c478bd9Sstevel@tonic-gate ((arg.ctlbuf.len == 0) && arg.databuf.len)) { 11477c478bd9Sstevel@tonic-gate outstate = T_DATAXFER; 11487c478bd9Sstevel@tonic-gate } 11497c478bd9Sstevel@tonic-gate break; 11507c478bd9Sstevel@tonic-gate case T_DATAXFER: 11517c478bd9Sstevel@tonic-gate /* 11527c478bd9Sstevel@tonic-gate * The following heuristic is to handle 11537c478bd9Sstevel@tonic-gate * the case where the connection is established 11547c478bd9Sstevel@tonic-gate * and in data transfer state at the provider 11557c478bd9Sstevel@tonic-gate * but the T_CONN_CON has not yet been read 11567c478bd9Sstevel@tonic-gate * from the stream head. 11577c478bd9Sstevel@tonic-gate */ 11587c478bd9Sstevel@tonic-gate if ((arg.ctlbuf.len == 4) && 115961961e0fSrobinson /* LINTED pointer cast */ 11607c478bd9Sstevel@tonic-gate ((*(int32_t *)arg.ctlbuf.buf) == T_CONN_CON)) 11617c478bd9Sstevel@tonic-gate outstate = T_OUTCON; 11627c478bd9Sstevel@tonic-gate break; 11637c478bd9Sstevel@tonic-gate case T_INREL: 11647c478bd9Sstevel@tonic-gate /* 11657c478bd9Sstevel@tonic-gate * The following heuristic is to handle data 11667c478bd9Sstevel@tonic-gate * ahead of T_ORDREL_IND indications that might 11677c478bd9Sstevel@tonic-gate * be at the stream head waiting to be 11687c478bd9Sstevel@tonic-gate * read (T_DATA_IND or M_DATA) 11697c478bd9Sstevel@tonic-gate */ 11707c478bd9Sstevel@tonic-gate if (((arg.ctlbuf.len == 4) && 117161961e0fSrobinson /* LINTED pointer cast */ 11727c478bd9Sstevel@tonic-gate ((*(int32_t *)arg.ctlbuf.buf) == T_DATA_IND)) || 11737c478bd9Sstevel@tonic-gate ((arg.ctlbuf.len == 0) && arg.databuf.len)) { 11747c478bd9Sstevel@tonic-gate outstate = T_DATAXFER; 11757c478bd9Sstevel@tonic-gate } 11767c478bd9Sstevel@tonic-gate break; 11777c478bd9Sstevel@tonic-gate default: 11787c478bd9Sstevel@tonic-gate break; 11797c478bd9Sstevel@tonic-gate } 11807c478bd9Sstevel@tonic-gate } 11817c478bd9Sstevel@tonic-gate return (outstate); 11827c478bd9Sstevel@tonic-gate } 11837c478bd9Sstevel@tonic-gate 11847c478bd9Sstevel@tonic-gate /* 11857c478bd9Sstevel@tonic-gate * Assumes caller has blocked signals at least in this thread (for safe 11867c478bd9Sstevel@tonic-gate * malloc/free operations) 11877c478bd9Sstevel@tonic-gate */ 11887c478bd9Sstevel@tonic-gate static int 11897c478bd9Sstevel@tonic-gate _t_cbuf_alloc(struct _ti_user *tiptr, char **retbuf) 11907c478bd9Sstevel@tonic-gate { 11917c478bd9Sstevel@tonic-gate unsigned size2; 11927c478bd9Sstevel@tonic-gate 11937c478bd9Sstevel@tonic-gate assert(MUTEX_HELD(&tiptr->ti_lock)); 11947c478bd9Sstevel@tonic-gate size2 = tiptr->ti_ctlsize; /* same size as default ctlbuf */ 11957c478bd9Sstevel@tonic-gate 11967c478bd9Sstevel@tonic-gate if ((*retbuf = malloc(size2)) == NULL) { 11977c478bd9Sstevel@tonic-gate return (-1); 11987c478bd9Sstevel@tonic-gate } 11997c478bd9Sstevel@tonic-gate return 
(size2); 12007c478bd9Sstevel@tonic-gate } 12017c478bd9Sstevel@tonic-gate 12027c478bd9Sstevel@tonic-gate 12037c478bd9Sstevel@tonic-gate /* 12047c478bd9Sstevel@tonic-gate * Assumes caller has blocked signals at least in this thread (for safe 12057c478bd9Sstevel@tonic-gate * malloc/free operations) 12067c478bd9Sstevel@tonic-gate */ 12077c478bd9Sstevel@tonic-gate int 12087c478bd9Sstevel@tonic-gate _t_rbuf_alloc(struct _ti_user *tiptr, char **retbuf) 12097c478bd9Sstevel@tonic-gate { 12107c478bd9Sstevel@tonic-gate unsigned size1; 12117c478bd9Sstevel@tonic-gate 12127c478bd9Sstevel@tonic-gate assert(MUTEX_HELD(&tiptr->ti_lock)); 12137c478bd9Sstevel@tonic-gate size1 = tiptr->ti_rcvsize; /* same size as default rcvbuf */ 12147c478bd9Sstevel@tonic-gate 12157c478bd9Sstevel@tonic-gate if ((*retbuf = malloc(size1)) == NULL) { 12167c478bd9Sstevel@tonic-gate return (-1); 12177c478bd9Sstevel@tonic-gate } 12187c478bd9Sstevel@tonic-gate return (size1); 12197c478bd9Sstevel@tonic-gate } 12207c478bd9Sstevel@tonic-gate 12217c478bd9Sstevel@tonic-gate /* 12227c478bd9Sstevel@tonic-gate * Free lookbuffer structures and associated resources 12237c478bd9Sstevel@tonic-gate * Assumes ti_lock held for MT case. 12247c478bd9Sstevel@tonic-gate */ 12257c478bd9Sstevel@tonic-gate static void 12267c478bd9Sstevel@tonic-gate _t_free_lookbufs(struct _ti_user *tiptr) 12277c478bd9Sstevel@tonic-gate { 12287c478bd9Sstevel@tonic-gate struct _ti_lookbufs *tlbs, *prev_tlbs, *head_tlbs; 12297c478bd9Sstevel@tonic-gate 12307c478bd9Sstevel@tonic-gate /* 12317c478bd9Sstevel@tonic-gate * Assertion: 12327c478bd9Sstevel@tonic-gate * The structure lock should be held or the global list 12337c478bd9Sstevel@tonic-gate * manipulation lock. The assumption is that nothing 12347c478bd9Sstevel@tonic-gate * else can access the descriptor since global list manipulation 12357c478bd9Sstevel@tonic-gate * lock is held so it is OK to manipulate fields without the 12367c478bd9Sstevel@tonic-gate * structure lock 12377c478bd9Sstevel@tonic-gate */ 12387c478bd9Sstevel@tonic-gate assert(MUTEX_HELD(&tiptr->ti_lock) || MUTEX_HELD(&_ti_userlock)); 12397c478bd9Sstevel@tonic-gate 12407c478bd9Sstevel@tonic-gate /* 12417c478bd9Sstevel@tonic-gate * Free only the buffers in the first lookbuf 12427c478bd9Sstevel@tonic-gate */ 12437c478bd9Sstevel@tonic-gate head_tlbs = &tiptr->ti_lookbufs; 12447c478bd9Sstevel@tonic-gate if (head_tlbs->tl_lookdbuf != NULL) { 12457c478bd9Sstevel@tonic-gate free(head_tlbs->tl_lookdbuf); 12467c478bd9Sstevel@tonic-gate head_tlbs->tl_lookdbuf = NULL; 12477c478bd9Sstevel@tonic-gate } 12487c478bd9Sstevel@tonic-gate free(head_tlbs->tl_lookcbuf); 12497c478bd9Sstevel@tonic-gate head_tlbs->tl_lookcbuf = NULL; 12507c478bd9Sstevel@tonic-gate /* 12517c478bd9Sstevel@tonic-gate * Free the node and the buffers in the rest of the 12527c478bd9Sstevel@tonic-gate * list 12537c478bd9Sstevel@tonic-gate */ 12547c478bd9Sstevel@tonic-gate 12557c478bd9Sstevel@tonic-gate tlbs = head_tlbs->tl_next; 12567c478bd9Sstevel@tonic-gate head_tlbs->tl_next = NULL; 12577c478bd9Sstevel@tonic-gate 12587c478bd9Sstevel@tonic-gate while (tlbs != NULL) { 12597c478bd9Sstevel@tonic-gate if (tlbs->tl_lookdbuf != NULL) 12607c478bd9Sstevel@tonic-gate free(tlbs->tl_lookdbuf); 12617c478bd9Sstevel@tonic-gate free(tlbs->tl_lookcbuf); 12627c478bd9Sstevel@tonic-gate prev_tlbs = tlbs; 12637c478bd9Sstevel@tonic-gate tlbs = tlbs->tl_next; 126461961e0fSrobinson free(prev_tlbs); 12657c478bd9Sstevel@tonic-gate } 12667c478bd9Sstevel@tonic-gate } 12677c478bd9Sstevel@tonic-gate 12687c478bd9Sstevel@tonic-gate 
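/*
 * Editor's illustrative sketch (not part of the original library): the
 * look-buffer event queue handled by _t_free_lookbufs() above and
 * _t_free_looklist_head() below is a singly linked list whose head node
 * is embedded in the instance structure (tiptr->ti_lookbufs) while any
 * additional nodes are heap-allocated, and ti_lookcnt counts the queued
 * events.  A consistency check under ti_lock might look like the sketch
 * below; the guard macro and helper name are hypothetical, and the
 * ti_lookcnt invariant is inferred from the assertions in
 * _t_free_looklist_head().
 */
#ifdef TX_ILLUSTRATION_ONLY
static int
_t_count_lookevents(struct _ti_user *tiptr)
{
	struct _ti_lookbufs *tlbs;
	int count = 0;

	assert(MUTEX_HELD(&tiptr->ti_lock));
	/*
	 * The embedded head node represents an event only when
	 * ti_lookcnt is non-zero; every other node is one event.
	 */
	if (tiptr->ti_lookcnt > 0) {
		for (tlbs = &tiptr->ti_lookbufs; tlbs != NULL;
		    tlbs = tlbs->tl_next)
			count++;
	}
	assert(count == tiptr->ti_lookcnt);
	return (count);
}
#endif	/* TX_ILLUSTRATION_ONLY */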
/* 12697c478bd9Sstevel@tonic-gate * Free lookbuffer event list head. 12707c478bd9Sstevel@tonic-gate * Consume current lookbuffer event 12717c478bd9Sstevel@tonic-gate * Assumes ti_lock held for MT case. 12727c478bd9Sstevel@tonic-gate * Note: The head of this list is part of the instance 12737c478bd9Sstevel@tonic-gate * structure so the code is a little unorthodox. 12747c478bd9Sstevel@tonic-gate */ 12757c478bd9Sstevel@tonic-gate void 12767c478bd9Sstevel@tonic-gate _t_free_looklist_head(struct _ti_user *tiptr) 12777c478bd9Sstevel@tonic-gate { 12787c478bd9Sstevel@tonic-gate struct _ti_lookbufs *tlbs, *next_tlbs; 12797c478bd9Sstevel@tonic-gate 12807c478bd9Sstevel@tonic-gate tlbs = &tiptr->ti_lookbufs; 12817c478bd9Sstevel@tonic-gate 12827c478bd9Sstevel@tonic-gate if (tlbs->tl_next) { 12837c478bd9Sstevel@tonic-gate /* 12847c478bd9Sstevel@tonic-gate * Free the control and data buffers 12857c478bd9Sstevel@tonic-gate */ 12867c478bd9Sstevel@tonic-gate if (tlbs->tl_lookdbuf != NULL) 12877c478bd9Sstevel@tonic-gate free(tlbs->tl_lookdbuf); 12887c478bd9Sstevel@tonic-gate free(tlbs->tl_lookcbuf); 12897c478bd9Sstevel@tonic-gate /* 12907c478bd9Sstevel@tonic-gate * Replace with next lookbuf event contents 12917c478bd9Sstevel@tonic-gate */ 12927c478bd9Sstevel@tonic-gate next_tlbs = tlbs->tl_next; 12937c478bd9Sstevel@tonic-gate tlbs->tl_next = next_tlbs->tl_next; 12947c478bd9Sstevel@tonic-gate tlbs->tl_lookcbuf = next_tlbs->tl_lookcbuf; 12957c478bd9Sstevel@tonic-gate tlbs->tl_lookclen = next_tlbs->tl_lookclen; 12967c478bd9Sstevel@tonic-gate tlbs->tl_lookdbuf = next_tlbs->tl_lookdbuf; 12977c478bd9Sstevel@tonic-gate tlbs->tl_lookdlen = next_tlbs->tl_lookdlen; 12987c478bd9Sstevel@tonic-gate free(next_tlbs); 12997c478bd9Sstevel@tonic-gate /* 13007c478bd9Sstevel@tonic-gate * Decrement the count - should never get to zero 13017c478bd9Sstevel@tonic-gate * in this path. 13027c478bd9Sstevel@tonic-gate */ 13037c478bd9Sstevel@tonic-gate tiptr->ti_lookcnt--; 13047c478bd9Sstevel@tonic-gate assert(tiptr->ti_lookcnt > 0); 13057c478bd9Sstevel@tonic-gate } else { 13067c478bd9Sstevel@tonic-gate /* 13077c478bd9Sstevel@tonic-gate * No more look buffer events - just clear the flag 13087c478bd9Sstevel@tonic-gate * and leave the buffers alone 13097c478bd9Sstevel@tonic-gate */ 13107c478bd9Sstevel@tonic-gate assert(tiptr->ti_lookcnt == 1); 13117c478bd9Sstevel@tonic-gate tiptr->ti_lookcnt = 0; 13127c478bd9Sstevel@tonic-gate } 13137c478bd9Sstevel@tonic-gate } 13147c478bd9Sstevel@tonic-gate 13157c478bd9Sstevel@tonic-gate /* 13167c478bd9Sstevel@tonic-gate * Discard lookbuffer events. 13177c478bd9Sstevel@tonic-gate * Assumes ti_lock held for MT case. 13187c478bd9Sstevel@tonic-gate */ 13197c478bd9Sstevel@tonic-gate void 13207c478bd9Sstevel@tonic-gate _t_flush_lookevents(struct _ti_user *tiptr) 13217c478bd9Sstevel@tonic-gate { 13227c478bd9Sstevel@tonic-gate struct _ti_lookbufs *tlbs, *prev_tlbs; 13237c478bd9Sstevel@tonic-gate 13247c478bd9Sstevel@tonic-gate /* 13257c478bd9Sstevel@tonic-gate * Leave the first node's buffers alone (i.e. allocated) 13267c478bd9Sstevel@tonic-gate * but reset the flag.
13277c478bd9Sstevel@tonic-gate */ 13287c478bd9Sstevel@tonic-gate assert(MUTEX_HELD(&tiptr->ti_lock)); 13297c478bd9Sstevel@tonic-gate tiptr->ti_lookcnt = 0; 13307c478bd9Sstevel@tonic-gate /* 13317c478bd9Sstevel@tonic-gate * Blow away the rest of the list 13327c478bd9Sstevel@tonic-gate */ 13337c478bd9Sstevel@tonic-gate tlbs = tiptr->ti_lookbufs.tl_next; 13347c478bd9Sstevel@tonic-gate tiptr->ti_lookbufs.tl_next = NULL; 13357c478bd9Sstevel@tonic-gate while (tlbs != NULL) { 13367c478bd9Sstevel@tonic-gate if (tlbs->tl_lookdbuf != NULL) 13377c478bd9Sstevel@tonic-gate free(tlbs->tl_lookdbuf); 13387c478bd9Sstevel@tonic-gate free(tlbs->tl_lookcbuf); 13397c478bd9Sstevel@tonic-gate prev_tlbs = tlbs; 13407c478bd9Sstevel@tonic-gate tlbs = tlbs->tl_next; 134161961e0fSrobinson free(prev_tlbs); 13427c478bd9Sstevel@tonic-gate } 13437c478bd9Sstevel@tonic-gate } 13447c478bd9Sstevel@tonic-gate 13457c478bd9Sstevel@tonic-gate 13467c478bd9Sstevel@tonic-gate /* 13477c478bd9Sstevel@tonic-gate * This routine checks if the control buffer in the instance structure 13487c478bd9Sstevel@tonic-gate * is available (non-null). If it is, the buffer is acquired and marked busy 13497c478bd9Sstevel@tonic-gate * (null). If it is busy (possible in MT programs), it allocates a new 13507c478bd9Sstevel@tonic-gate * buffer and sets a flag indicating new memory was allocated and the caller 13517c478bd9Sstevel@tonic-gate * has to free it. 13527c478bd9Sstevel@tonic-gate */ 13537c478bd9Sstevel@tonic-gate int 13547c478bd9Sstevel@tonic-gate _t_acquire_ctlbuf( 13557c478bd9Sstevel@tonic-gate struct _ti_user *tiptr, 13567c478bd9Sstevel@tonic-gate struct strbuf *ctlbufp, 13577c478bd9Sstevel@tonic-gate int *didallocp) 13587c478bd9Sstevel@tonic-gate { 13597c478bd9Sstevel@tonic-gate *didallocp = 0; 13607c478bd9Sstevel@tonic-gate 13617c478bd9Sstevel@tonic-gate ctlbufp->len = 0; 13627c478bd9Sstevel@tonic-gate if (tiptr->ti_ctlbuf) { 13637c478bd9Sstevel@tonic-gate ctlbufp->buf = tiptr->ti_ctlbuf; 13647c478bd9Sstevel@tonic-gate tiptr->ti_ctlbuf = NULL; 13657c478bd9Sstevel@tonic-gate ctlbufp->maxlen = tiptr->ti_ctlsize; 13667c478bd9Sstevel@tonic-gate } else { 13677c478bd9Sstevel@tonic-gate /* 13687c478bd9Sstevel@tonic-gate * tiptr->ti_ctlbuf is in use 13697c478bd9Sstevel@tonic-gate * allocate new buffer and free after use. 13707c478bd9Sstevel@tonic-gate */ 13717c478bd9Sstevel@tonic-gate if ((ctlbufp->maxlen = _t_cbuf_alloc(tiptr, 13727c478bd9Sstevel@tonic-gate &ctlbufp->buf)) < 0) { 13737c478bd9Sstevel@tonic-gate t_errno = TSYSERR; 13747c478bd9Sstevel@tonic-gate return (-1); 13757c478bd9Sstevel@tonic-gate } 13767c478bd9Sstevel@tonic-gate *didallocp = 1; 13777c478bd9Sstevel@tonic-gate } 13787c478bd9Sstevel@tonic-gate return (0); 13797c478bd9Sstevel@tonic-gate } 13807c478bd9Sstevel@tonic-gate 13817c478bd9Sstevel@tonic-gate /* 13827c478bd9Sstevel@tonic-gate * This routine checks if the receive buffer in the instance structure 13837c478bd9Sstevel@tonic-gate * is available (non-null). If it is, the buffer is acquired and marked busy 13847c478bd9Sstevel@tonic-gate * (null). If it is busy (possible in MT programs), it allocates a new 13857c478bd9Sstevel@tonic-gate * buffer and sets a flag indicating new memory was allocated and the caller 13867c478bd9Sstevel@tonic-gate * has to free it. 13877c478bd9Sstevel@tonic-gate * Note: The receive buffer pointer can also be null if the transport 13887c478bd9Sstevel@tonic-gate * provider does not support connect/disconnect data (e.g. TCP) - not 13897c478bd9Sstevel@tonic-gate * just when it is "busy".
In that case, ti_rcvsize will be 0 and that is 13907c478bd9Sstevel@tonic-gate * used to instantiate the databuf which points to a null buffer of 13917c478bd9Sstevel@tonic-gate * length 0 which is the right thing to do for that case. 13927c478bd9Sstevel@tonic-gate */ 13937c478bd9Sstevel@tonic-gate int 13947c478bd9Sstevel@tonic-gate _t_acquire_databuf( 13957c478bd9Sstevel@tonic-gate struct _ti_user *tiptr, 13967c478bd9Sstevel@tonic-gate struct strbuf *databufp, 13977c478bd9Sstevel@tonic-gate int *didallocp) 13987c478bd9Sstevel@tonic-gate { 13997c478bd9Sstevel@tonic-gate *didallocp = 0; 14007c478bd9Sstevel@tonic-gate 14017c478bd9Sstevel@tonic-gate databufp->len = 0; 14027c478bd9Sstevel@tonic-gate if (tiptr->ti_rcvbuf) { 14037c478bd9Sstevel@tonic-gate assert(tiptr->ti_rcvsize != 0); 14047c478bd9Sstevel@tonic-gate databufp->buf = tiptr->ti_rcvbuf; 14057c478bd9Sstevel@tonic-gate tiptr->ti_rcvbuf = NULL; 14067c478bd9Sstevel@tonic-gate databufp->maxlen = tiptr->ti_rcvsize; 14077c478bd9Sstevel@tonic-gate } else if (tiptr->ti_rcvsize == 0) { 14087c478bd9Sstevel@tonic-gate databufp->buf = NULL; 14097c478bd9Sstevel@tonic-gate databufp->maxlen = 0; 14107c478bd9Sstevel@tonic-gate } else { 14117c478bd9Sstevel@tonic-gate /* 14127c478bd9Sstevel@tonic-gate * tiptr->ti_rcvbuf is in use 14137c478bd9Sstevel@tonic-gate * allocate new buffer and free after use. 14147c478bd9Sstevel@tonic-gate */ 14157c478bd9Sstevel@tonic-gate if ((databufp->maxlen = _t_rbuf_alloc(tiptr, 14167c478bd9Sstevel@tonic-gate &databufp->buf)) < 0) { 14177c478bd9Sstevel@tonic-gate t_errno = TSYSERR; 14187c478bd9Sstevel@tonic-gate return (-1); 14197c478bd9Sstevel@tonic-gate } 14207c478bd9Sstevel@tonic-gate *didallocp = 1; 14217c478bd9Sstevel@tonic-gate } 14227c478bd9Sstevel@tonic-gate return (0); 14237c478bd9Sstevel@tonic-gate } 14247c478bd9Sstevel@tonic-gate 14257c478bd9Sstevel@tonic-gate /* 14267c478bd9Sstevel@tonic-gate * This routine requests timod to look for any expedited data 14277c478bd9Sstevel@tonic-gate * queued in the "receive buffers" in the kernel. Used for XTI 14287c478bd9Sstevel@tonic-gate * t_look() semantics for transports that send expedited 14297c478bd9Sstevel@tonic-gate * data inline (e.g. TCP).
14307c478bd9Sstevel@tonic-gate * Returns -1 for failure 14317c478bd9Sstevel@tonic-gate * Returns 0 for success 14327c478bd9Sstevel@tonic-gate * On a successful return, the location pointed by "expedited_queuedp" 14337c478bd9Sstevel@tonic-gate * contains 14347c478bd9Sstevel@tonic-gate * 0 if no expedited data is found queued in "receive buffers" 14357c478bd9Sstevel@tonic-gate * 1 if expedited data is found queued in "receive buffers" 14367c478bd9Sstevel@tonic-gate */ 14377c478bd9Sstevel@tonic-gate 14387c478bd9Sstevel@tonic-gate int 14397c478bd9Sstevel@tonic-gate _t_expinline_queued(int fd, int *expedited_queuedp) 14407c478bd9Sstevel@tonic-gate { 14417c478bd9Sstevel@tonic-gate union { 14427c478bd9Sstevel@tonic-gate struct ti_sync_req ti_req; 14437c478bd9Sstevel@tonic-gate struct ti_sync_ack ti_ack; 14447c478bd9Sstevel@tonic-gate char pad[128]; 14457c478bd9Sstevel@tonic-gate } ioctl_data; 14467c478bd9Sstevel@tonic-gate void *ioctlbuf = &ioctl_data; /* for TI_SYNC with room to grow */ 14477c478bd9Sstevel@tonic-gate /* preferred location first local variable */ 14487c478bd9Sstevel@tonic-gate /* see note in _t_create above */ 14497c478bd9Sstevel@tonic-gate struct ti_sync_req *tsrp = (struct ti_sync_req *)ioctlbuf; 14507c478bd9Sstevel@tonic-gate struct ti_sync_ack *tsap = (struct ti_sync_ack *)ioctlbuf; 14517c478bd9Sstevel@tonic-gate int rval, retlen; 14527c478bd9Sstevel@tonic-gate 14537c478bd9Sstevel@tonic-gate *expedited_queuedp = 0; 14547c478bd9Sstevel@tonic-gate /* request info on rq expinds */ 14557c478bd9Sstevel@tonic-gate tsrp->tsr_flags = TSRF_IS_EXP_IN_RCVBUF; 14567c478bd9Sstevel@tonic-gate do { 14577c478bd9Sstevel@tonic-gate rval = _t_do_ioctl(fd, ioctlbuf, 14587c478bd9Sstevel@tonic-gate (int)sizeof (struct T_info_req), TI_SYNC, &retlen); 14597c478bd9Sstevel@tonic-gate } while (rval < 0 && errno == EINTR); 14607c478bd9Sstevel@tonic-gate 14617c478bd9Sstevel@tonic-gate if (rval < 0) 14627c478bd9Sstevel@tonic-gate return (-1); 14637c478bd9Sstevel@tonic-gate 14647c478bd9Sstevel@tonic-gate /* 14657c478bd9Sstevel@tonic-gate * This is a "less than" check as "struct ti_sync_ack" returned by 14667c478bd9Sstevel@tonic-gate * TI_SYNC can grow in size in future kernels. If/when a statically 14677c478bd9Sstevel@tonic-gate * linked application is run on a future kernel, it should not fail. 14687c478bd9Sstevel@tonic-gate */ 14697c478bd9Sstevel@tonic-gate if (retlen < (int)sizeof (struct ti_sync_ack)) { 14707c478bd9Sstevel@tonic-gate t_errno = TSYSERR; 14717c478bd9Sstevel@tonic-gate errno = EIO; 14727c478bd9Sstevel@tonic-gate return (-1); 14737c478bd9Sstevel@tonic-gate } 14747c478bd9Sstevel@tonic-gate if (tsap->tsa_flags & TSAF_EXP_QUEUED) 14757c478bd9Sstevel@tonic-gate *expedited_queuedp = 1; 14767c478bd9Sstevel@tonic-gate return (0); 14777c478bd9Sstevel@tonic-gate } 14787c478bd9Sstevel@tonic-gate 14797c478bd9Sstevel@tonic-gate /* 14807c478bd9Sstevel@tonic-gate * Support functions for use by functions that do scatter/gather 14817c478bd9Sstevel@tonic-gate * like t_sndv(), t_rcvv(), etc., follow below. 14827c478bd9Sstevel@tonic-gate */ 14837c478bd9Sstevel@tonic-gate 14847c478bd9Sstevel@tonic-gate /* 14857c478bd9Sstevel@tonic-gate * _t_bytecount_upto_intmax(): 14867c478bd9Sstevel@tonic-gate * Sum of the lengths of the individual buffers in 14877c478bd9Sstevel@tonic-gate * the t_iovec array. If the sum exceeds INT_MAX 14887c478bd9Sstevel@tonic-gate * it is truncated to INT_MAX.
14897c478bd9Sstevel@tonic-gate */ 14907c478bd9Sstevel@tonic-gate unsigned int 14917c478bd9Sstevel@tonic-gate _t_bytecount_upto_intmax(const struct t_iovec *tiov, unsigned int tiovcount) 14927c478bd9Sstevel@tonic-gate { 14937c478bd9Sstevel@tonic-gate size_t nbytes; 14947c478bd9Sstevel@tonic-gate int i; 14957c478bd9Sstevel@tonic-gate 14967c478bd9Sstevel@tonic-gate nbytes = 0; 14977c478bd9Sstevel@tonic-gate for (i = 0; i < tiovcount && nbytes < INT_MAX; i++) { 14987c478bd9Sstevel@tonic-gate if (tiov[i].iov_len >= INT_MAX) { 14997c478bd9Sstevel@tonic-gate nbytes = INT_MAX; 15007c478bd9Sstevel@tonic-gate break; 15017c478bd9Sstevel@tonic-gate } 15027c478bd9Sstevel@tonic-gate nbytes += tiov[i].iov_len; 15037c478bd9Sstevel@tonic-gate } 15047c478bd9Sstevel@tonic-gate 15057c478bd9Sstevel@tonic-gate if (nbytes > INT_MAX) 15067c478bd9Sstevel@tonic-gate nbytes = INT_MAX; 15077c478bd9Sstevel@tonic-gate 15087c478bd9Sstevel@tonic-gate return ((unsigned int)nbytes); 15097c478bd9Sstevel@tonic-gate } 15107c478bd9Sstevel@tonic-gate 15117c478bd9Sstevel@tonic-gate /* 15127c478bd9Sstevel@tonic-gate * Gather the data in the t_iovec buffers into a single linear buffer 15137c478bd9Sstevel@tonic-gate * starting at dataptr. Caller must have allocated sufficient space 15147c478bd9Sstevel@tonic-gate * starting at dataptr. The total amount of data that is gathered is 15157c478bd9Sstevel@tonic-gate * limited to INT_MAX. Any remaining data in the t_iovec buffers is 15167c478bd9Sstevel@tonic-gate * not copied. 15177c478bd9Sstevel@tonic-gate */ 15187c478bd9Sstevel@tonic-gate void 15197c478bd9Sstevel@tonic-gate _t_gather(char *dataptr, const struct t_iovec *tiov, unsigned int tiovcount) 15207c478bd9Sstevel@tonic-gate { 15217c478bd9Sstevel@tonic-gate char *curptr; 15227c478bd9Sstevel@tonic-gate unsigned int cur_count; 15237c478bd9Sstevel@tonic-gate unsigned int nbytes_remaining; 15247c478bd9Sstevel@tonic-gate int i; 15257c478bd9Sstevel@tonic-gate 15267c478bd9Sstevel@tonic-gate curptr = dataptr; 15277c478bd9Sstevel@tonic-gate cur_count = 0; 15287c478bd9Sstevel@tonic-gate 15297c478bd9Sstevel@tonic-gate nbytes_remaining = _t_bytecount_upto_intmax(tiov, tiovcount); 15307c478bd9Sstevel@tonic-gate for (i = 0; i < tiovcount && nbytes_remaining != 0; i++) { 15317c478bd9Sstevel@tonic-gate if (tiov[i].iov_len <= nbytes_remaining) 15327c478bd9Sstevel@tonic-gate cur_count = (int)tiov[i].iov_len; 15337c478bd9Sstevel@tonic-gate else 15347c478bd9Sstevel@tonic-gate cur_count = nbytes_remaining; 15357c478bd9Sstevel@tonic-gate (void) memcpy(curptr, tiov[i].iov_base, cur_count); 15367c478bd9Sstevel@tonic-gate curptr += cur_count; 15377c478bd9Sstevel@tonic-gate nbytes_remaining -= cur_count; 15387c478bd9Sstevel@tonic-gate } 15397c478bd9Sstevel@tonic-gate } 15407c478bd9Sstevel@tonic-gate 15417c478bd9Sstevel@tonic-gate /* 15427c478bd9Sstevel@tonic-gate * Scatter the data from the single linear buffer at pdatabuf->buf into 15437c478bd9Sstevel@tonic-gate * the t_iovec buffers.
15447c478bd9Sstevel@tonic-gate */ 15457c478bd9Sstevel@tonic-gate void 15467c478bd9Sstevel@tonic-gate _t_scatter(struct strbuf *pdatabuf, struct t_iovec *tiov, int tiovcount) 15477c478bd9Sstevel@tonic-gate { 15487c478bd9Sstevel@tonic-gate char *curptr; 15497c478bd9Sstevel@tonic-gate unsigned int nbytes_remaining; 15507c478bd9Sstevel@tonic-gate unsigned int curlen; 15517c478bd9Sstevel@tonic-gate int i; 15527c478bd9Sstevel@tonic-gate 15537c478bd9Sstevel@tonic-gate /* 15547c478bd9Sstevel@tonic-gate * There cannot be any uncopied data leftover in pdatabuf 15557c478bd9Sstevel@tonic-gate * at the conclusion of this function. (asserted below) 15567c478bd9Sstevel@tonic-gate */ 15577c478bd9Sstevel@tonic-gate assert(pdatabuf->len <= _t_bytecount_upto_intmax(tiov, tiovcount)); 15587c478bd9Sstevel@tonic-gate curptr = pdatabuf->buf; 15597c478bd9Sstevel@tonic-gate nbytes_remaining = pdatabuf->len; 15607c478bd9Sstevel@tonic-gate for (i = 0; i < tiovcount && nbytes_remaining != 0; i++) { 15617c478bd9Sstevel@tonic-gate if (tiov[i].iov_len < nbytes_remaining) 15627c478bd9Sstevel@tonic-gate curlen = (unsigned int)tiov[i].iov_len; 15637c478bd9Sstevel@tonic-gate else 15647c478bd9Sstevel@tonic-gate curlen = nbytes_remaining; 15657c478bd9Sstevel@tonic-gate (void) memcpy(tiov[i].iov_base, curptr, curlen); 15667c478bd9Sstevel@tonic-gate curptr += curlen; 15677c478bd9Sstevel@tonic-gate nbytes_remaining -= curlen; 15687c478bd9Sstevel@tonic-gate } 15697c478bd9Sstevel@tonic-gate } 15707c478bd9Sstevel@tonic-gate 15717c478bd9Sstevel@tonic-gate /* 15727c478bd9Sstevel@tonic-gate * Adjust the iovec array for subsequent use. Examine each element in the 15737c478bd9Sstevel@tonic-gate * iovec array, and zero out the iov_len if the buffer was sent fully. 15747c478bd9Sstevel@tonic-gate * Otherwise the buffer was only partially sent, so adjust both iov_len and 15757c478bd9Sstevel@tonic-gate * iov_base. 15767c478bd9Sstevel@tonic-gate * 15777c478bd9Sstevel@tonic-gate */ 15787c478bd9Sstevel@tonic-gate void 15797c478bd9Sstevel@tonic-gate _t_adjust_iov(int bytes_sent, struct iovec *iov, int *iovcountp) 15807c478bd9Sstevel@tonic-gate { 15817c478bd9Sstevel@tonic-gate 15827c478bd9Sstevel@tonic-gate int i; 15837c478bd9Sstevel@tonic-gate 15847c478bd9Sstevel@tonic-gate for (i = 0; i < *iovcountp && bytes_sent; i++) { 15857c478bd9Sstevel@tonic-gate if (iov[i].iov_len == 0) 15867c478bd9Sstevel@tonic-gate continue; 15877c478bd9Sstevel@tonic-gate if (bytes_sent < iov[i].iov_len) 15887c478bd9Sstevel@tonic-gate break; 15897c478bd9Sstevel@tonic-gate else { 15907c478bd9Sstevel@tonic-gate bytes_sent -= iov[i].iov_len; 15917c478bd9Sstevel@tonic-gate iov[i].iov_len = 0; 15927c478bd9Sstevel@tonic-gate } 15937c478bd9Sstevel@tonic-gate } 15947c478bd9Sstevel@tonic-gate iov[i].iov_len -= bytes_sent; 15957c478bd9Sstevel@tonic-gate iov[i].iov_base += bytes_sent; 15967c478bd9Sstevel@tonic-gate } 15977c478bd9Sstevel@tonic-gate 15987c478bd9Sstevel@tonic-gate /* 15997c478bd9Sstevel@tonic-gate * Copy the t_iovec array to the iovec array while taking care to see 16007c478bd9Sstevel@tonic-gate * that the sum of the buffer lengths in the result is not more than 16017c478bd9Sstevel@tonic-gate * INT_MAX. This function requires that T_IOV_MAX is no larger than 16027c478bd9Sstevel@tonic-gate * IOV_MAX. Otherwise the resulting array is not a suitable input to 16037c478bd9Sstevel@tonic-gate * writev(). If the sum of the lengths in t_iovec is zero, so is the 16047c478bd9Sstevel@tonic-gate * resulting iovec.
16057c478bd9Sstevel@tonic-gate */ 16067c478bd9Sstevel@tonic-gate void 16077c478bd9Sstevel@tonic-gate _t_copy_tiov_to_iov(const struct t_iovec *tiov, int tiovcount, 16087c478bd9Sstevel@tonic-gate struct iovec *iov, int *iovcountp) 16097c478bd9Sstevel@tonic-gate { 16107c478bd9Sstevel@tonic-gate int i; 16117c478bd9Sstevel@tonic-gate unsigned int nbytes_remaining; 16127c478bd9Sstevel@tonic-gate 16137c478bd9Sstevel@tonic-gate nbytes_remaining = _t_bytecount_upto_intmax(tiov, tiovcount); 16147c478bd9Sstevel@tonic-gate i = 0; 16157c478bd9Sstevel@tonic-gate do { 16167c478bd9Sstevel@tonic-gate iov[i].iov_base = tiov[i].iov_base; 16177c478bd9Sstevel@tonic-gate if (tiov[i].iov_len > nbytes_remaining) 16187c478bd9Sstevel@tonic-gate iov[i].iov_len = nbytes_remaining; 16197c478bd9Sstevel@tonic-gate else 16207c478bd9Sstevel@tonic-gate iov[i].iov_len = tiov[i].iov_len; 16217c478bd9Sstevel@tonic-gate nbytes_remaining -= iov[i].iov_len; 16227c478bd9Sstevel@tonic-gate i++; 16237c478bd9Sstevel@tonic-gate } while (nbytes_remaining != 0 && i < tiovcount); 16247c478bd9Sstevel@tonic-gate 16257c478bd9Sstevel@tonic-gate *iovcountp = i; 16267c478bd9Sstevel@tonic-gate } 16277c478bd9Sstevel@tonic-gate 16287c478bd9Sstevel@tonic-gate /* 16297c478bd9Sstevel@tonic-gate * Routine called after connection establishment on transports where 16307c478bd9Sstevel@tonic-gate * connection establishment changes certain transport attributes such as 16317c478bd9Sstevel@tonic-gate * TIDU_size 16327c478bd9Sstevel@tonic-gate */ 16337c478bd9Sstevel@tonic-gate int 16347c478bd9Sstevel@tonic-gate _t_do_postconn_sync(int fd, struct _ti_user *tiptr) 16357c478bd9Sstevel@tonic-gate { 16367c478bd9Sstevel@tonic-gate union { 16377c478bd9Sstevel@tonic-gate struct T_capability_req tc_req; 16387c478bd9Sstevel@tonic-gate struct T_capability_ack tc_ack; 16397c478bd9Sstevel@tonic-gate } ioctl_data; 16407c478bd9Sstevel@tonic-gate 16417c478bd9Sstevel@tonic-gate void *ioctlbuf = &ioctl_data; 16427c478bd9Sstevel@tonic-gate int expected_acksize; 16437c478bd9Sstevel@tonic-gate int retlen, rval; 16447c478bd9Sstevel@tonic-gate struct T_capability_req *tc_reqp = (struct T_capability_req *)ioctlbuf; 16457c478bd9Sstevel@tonic-gate struct T_capability_ack *tc_ackp = (struct T_capability_ack *)ioctlbuf; 16467c478bd9Sstevel@tonic-gate struct T_info_ack *tiap; 16477c478bd9Sstevel@tonic-gate 16487c478bd9Sstevel@tonic-gate /* 16497c478bd9Sstevel@tonic-gate * This T_CAPABILITY_REQ should not fail, even if it is unsupported 16507c478bd9Sstevel@tonic-gate * by the transport provider. timod will emulate it in that case. 16517c478bd9Sstevel@tonic-gate */ 16527c478bd9Sstevel@tonic-gate tc_reqp->PRIM_type = T_CAPABILITY_REQ; 16537c478bd9Sstevel@tonic-gate tc_reqp->CAP_bits1 = TC1_INFO; 16547c478bd9Sstevel@tonic-gate rval = _t_do_ioctl(fd, (char *)ioctlbuf, 16557c478bd9Sstevel@tonic-gate (int)sizeof (struct T_capability_ack), TI_CAPABILITY, &retlen); 16567c478bd9Sstevel@tonic-gate expected_acksize = (int)sizeof (struct T_capability_ack); 16577c478bd9Sstevel@tonic-gate 16587c478bd9Sstevel@tonic-gate if (rval < 0) 16597c478bd9Sstevel@tonic-gate return (-1); 16607c478bd9Sstevel@tonic-gate 16617c478bd9Sstevel@tonic-gate /* 16627c478bd9Sstevel@tonic-gate * T_capability TPI messages are extensible and can grow in future. 
16637c478bd9Sstevel@tonic-gate * However timod will take care of returning no more information 16647c478bd9Sstevel@tonic-gate * than what was requested, and truncating the "extended" 16657c478bd9Sstevel@tonic-gate * information towards the end of the T_capability_ack, if necessary. 16667c478bd9Sstevel@tonic-gate */ 16677c478bd9Sstevel@tonic-gate if (retlen != expected_acksize) { 16687c478bd9Sstevel@tonic-gate t_errno = TSYSERR; 16697c478bd9Sstevel@tonic-gate errno = EIO; 16707c478bd9Sstevel@tonic-gate return (-1); 16717c478bd9Sstevel@tonic-gate } 16727c478bd9Sstevel@tonic-gate 16737c478bd9Sstevel@tonic-gate /* 16747c478bd9Sstevel@tonic-gate * The T_info_ack part of the T_capability_ack is guaranteed to be 16757c478bd9Sstevel@tonic-gate * present only if the corresponding TC1_INFO bit is set 16767c478bd9Sstevel@tonic-gate */ 16777c478bd9Sstevel@tonic-gate if ((tc_ackp->CAP_bits1 & TC1_INFO) == 0) { 16787c478bd9Sstevel@tonic-gate t_errno = TSYSERR; 16797c478bd9Sstevel@tonic-gate errno = EPROTO; 16807c478bd9Sstevel@tonic-gate return (-1); 16817c478bd9Sstevel@tonic-gate } 16827c478bd9Sstevel@tonic-gate 16837c478bd9Sstevel@tonic-gate tiap = &tc_ackp->INFO_ack; 16847c478bd9Sstevel@tonic-gate if (tiap->PRIM_type != T_INFO_ACK) { 16857c478bd9Sstevel@tonic-gate t_errno = TSYSERR; 16867c478bd9Sstevel@tonic-gate errno = EPROTO; 16877c478bd9Sstevel@tonic-gate return (-1); 16887c478bd9Sstevel@tonic-gate } 16897c478bd9Sstevel@tonic-gate 16907c478bd9Sstevel@tonic-gate /* 16917c478bd9Sstevel@tonic-gate * Note: Sync with the latest information returned in "struct T_info_ack", 16927c478bd9Sstevel@tonic-gate * but we deliberately do not sync the state here, as user level state 16937c478bd9Sstevel@tonic-gate * construction here is not required, only update of attributes which 16947c478bd9Sstevel@tonic-gate * may have changed because of negotiations during connection 16957c478bd9Sstevel@tonic-gate * establishment. 16967c478bd9Sstevel@tonic-gate */ 16977c478bd9Sstevel@tonic-gate assert(tiap->TIDU_size > 0); 16987c478bd9Sstevel@tonic-gate tiptr->ti_maxpsz = tiap->TIDU_size; 16997c478bd9Sstevel@tonic-gate assert(tiap->TSDU_size >= T_INVALID); 17007c478bd9Sstevel@tonic-gate tiptr->ti_tsdusize = tiap->TSDU_size; 17017c478bd9Sstevel@tonic-gate assert(tiap->ETSDU_size >= T_INVALID); 17027c478bd9Sstevel@tonic-gate tiptr->ti_etsdusize = tiap->ETSDU_size; 17037c478bd9Sstevel@tonic-gate assert(tiap->CDATA_size >= T_INVALID); 17047c478bd9Sstevel@tonic-gate tiptr->ti_cdatasize = tiap->CDATA_size; 17057c478bd9Sstevel@tonic-gate assert(tiap->DDATA_size >= T_INVALID); 17067c478bd9Sstevel@tonic-gate tiptr->ti_ddatasize = tiap->DDATA_size; 17077c478bd9Sstevel@tonic-gate tiptr->ti_prov_flag = tiap->PROVIDER_flag; 17087c478bd9Sstevel@tonic-gate 17097c478bd9Sstevel@tonic-gate return (0); 17107c478bd9Sstevel@tonic-gate }
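/*
 * Editor's illustrative sketch (not part of the original library): how a
 * t_sndv()-style caller might combine the scatter/gather helpers above
 * when the payload has to be linearized before being handed to the
 * stream.  The guard macro, function name and out-parameters are
 * hypothetical; error handling is reduced to the minimum.
 */
#ifdef TX_ILLUSTRATION_ONLY
static int
_t_example_linearize(const struct t_iovec *tiov, unsigned int tiovcount,
    char **bufp, unsigned int *nbytesp)
{
	unsigned int nbytes;
	char *buf;

	/* Total payload length, capped at INT_MAX by the helper */
	nbytes = _t_bytecount_upto_intmax(tiov, tiovcount);
	if (nbytes == 0) {
		*bufp = NULL;
		*nbytesp = 0;
		return (0);
	}
	if ((buf = malloc(nbytes)) == NULL)
		return (-1);	/* caller would map this to TSYSERR */
	/* Copy the t_iovec contents into the single linear buffer */
	_t_gather(buf, tiov, tiovcount);
	*bufp = buf;
	*nbytesp = nbytes;
	return (0);
}
#endif	/* TX_ILLUSTRATION_ONLY */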