17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 57c478bd9Sstevel@tonic-gate * Common Development and Distribution License, Version 1.0 only 67c478bd9Sstevel@tonic-gate * (the "License"). You may not use this file except in compliance 77c478bd9Sstevel@tonic-gate * with the License. 87c478bd9Sstevel@tonic-gate * 97c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 107c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 117c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 127c478bd9Sstevel@tonic-gate * and limitations under the License. 137c478bd9Sstevel@tonic-gate * 147c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 157c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 167c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 177c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 187c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 197c478bd9Sstevel@tonic-gate * 207c478bd9Sstevel@tonic-gate * CDDL HEADER END 217c478bd9Sstevel@tonic-gate */ 2261961e0fSrobinson 237c478bd9Sstevel@tonic-gate /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 247c478bd9Sstevel@tonic-gate /* All Rights Reserved */ 257c478bd9Sstevel@tonic-gate 267c478bd9Sstevel@tonic-gate /* 27*e8031f0aSraf * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 287c478bd9Sstevel@tonic-gate * Use is subject to license terms. 
297c478bd9Sstevel@tonic-gate */ 307c478bd9Sstevel@tonic-gate 317c478bd9Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 327c478bd9Sstevel@tonic-gate 337c478bd9Sstevel@tonic-gate #include "mt.h" 347c478bd9Sstevel@tonic-gate #include <stdlib.h> 357c478bd9Sstevel@tonic-gate #include <string.h> 367c478bd9Sstevel@tonic-gate #include <strings.h> 377c478bd9Sstevel@tonic-gate #include <unistd.h> 387c478bd9Sstevel@tonic-gate #include <errno.h> 397c478bd9Sstevel@tonic-gate #include <stropts.h> 407c478bd9Sstevel@tonic-gate #include <sys/stream.h> 417c478bd9Sstevel@tonic-gate #define _SUN_TPI_VERSION 2 427c478bd9Sstevel@tonic-gate #include <sys/tihdr.h> 437c478bd9Sstevel@tonic-gate #include <sys/timod.h> 447c478bd9Sstevel@tonic-gate #include <sys/stat.h> 457c478bd9Sstevel@tonic-gate #include <xti.h> 467c478bd9Sstevel@tonic-gate #include <fcntl.h> 477c478bd9Sstevel@tonic-gate #include <signal.h> 487c478bd9Sstevel@tonic-gate #include <assert.h> 497c478bd9Sstevel@tonic-gate #include <syslog.h> 507c478bd9Sstevel@tonic-gate #include <limits.h> 517c478bd9Sstevel@tonic-gate #include "tx.h" 527c478bd9Sstevel@tonic-gate 537c478bd9Sstevel@tonic-gate #define DEFSIZE 2048 547c478bd9Sstevel@tonic-gate 557c478bd9Sstevel@tonic-gate /* 567c478bd9Sstevel@tonic-gate * The following used to be in tiuser.h, but was causing too much namespace 577c478bd9Sstevel@tonic-gate * pollution. 
587c478bd9Sstevel@tonic-gate */ 597c478bd9Sstevel@tonic-gate #define ROUNDUP32(X) ((X + 0x03)&~0x03) 607c478bd9Sstevel@tonic-gate 617c478bd9Sstevel@tonic-gate static struct _ti_user *find_tilink(int s); 627c478bd9Sstevel@tonic-gate static struct _ti_user *add_tilink(int s); 637c478bd9Sstevel@tonic-gate static void _t_free_lookbufs(struct _ti_user *tiptr); 647c478bd9Sstevel@tonic-gate static unsigned int _t_setsize(t_scalar_t infosize); 657c478bd9Sstevel@tonic-gate static int _t_cbuf_alloc(struct _ti_user *tiptr, char **retbuf); 667c478bd9Sstevel@tonic-gate static int _t_rbuf_alloc(struct _ti_user *tiptr, char **retbuf); 677c478bd9Sstevel@tonic-gate static int _t_adjust_state(int fd, int instate); 687c478bd9Sstevel@tonic-gate static int _t_alloc_bufs(int fd, struct _ti_user *tiptr, 697c478bd9Sstevel@tonic-gate struct T_info_ack *tsap); 707c478bd9Sstevel@tonic-gate 717c478bd9Sstevel@tonic-gate mutex_t _ti_userlock = DEFAULTMUTEX; /* Protects hash_bucket[] */ 727c478bd9Sstevel@tonic-gate 737c478bd9Sstevel@tonic-gate /* 747c478bd9Sstevel@tonic-gate * Checkfd - checks validity of file descriptor 757c478bd9Sstevel@tonic-gate */ 767c478bd9Sstevel@tonic-gate struct _ti_user * 777c478bd9Sstevel@tonic-gate _t_checkfd(int fd, int force_sync, int api_semantics) 787c478bd9Sstevel@tonic-gate { 797c478bd9Sstevel@tonic-gate sigset_t mask; 807c478bd9Sstevel@tonic-gate struct _ti_user *tiptr; 817c478bd9Sstevel@tonic-gate int retval, timodpushed; 827c478bd9Sstevel@tonic-gate 837c478bd9Sstevel@tonic-gate if (fd < 0) { 847c478bd9Sstevel@tonic-gate t_errno = TBADF; 857c478bd9Sstevel@tonic-gate return (NULL); 867c478bd9Sstevel@tonic-gate } 877c478bd9Sstevel@tonic-gate tiptr = NULL; 887c478bd9Sstevel@tonic-gate sig_mutex_lock(&_ti_userlock); 897c478bd9Sstevel@tonic-gate if ((tiptr = find_tilink(fd)) != NULL) { 907c478bd9Sstevel@tonic-gate if (!force_sync) { 917c478bd9Sstevel@tonic-gate sig_mutex_unlock(&_ti_userlock); 927c478bd9Sstevel@tonic-gate return (tiptr); 
937c478bd9Sstevel@tonic-gate } 947c478bd9Sstevel@tonic-gate } 957c478bd9Sstevel@tonic-gate sig_mutex_unlock(&_ti_userlock); 967c478bd9Sstevel@tonic-gate 977c478bd9Sstevel@tonic-gate /* 987c478bd9Sstevel@tonic-gate * Not found or a forced sync is required. 997c478bd9Sstevel@tonic-gate * check if this is a valid TLI/XTI descriptor. 1007c478bd9Sstevel@tonic-gate */ 1017c478bd9Sstevel@tonic-gate timodpushed = 0; 1027c478bd9Sstevel@tonic-gate do { 103*e8031f0aSraf retval = ioctl(fd, I_FIND, "timod"); 1047c478bd9Sstevel@tonic-gate } while (retval < 0 && errno == EINTR); 1057c478bd9Sstevel@tonic-gate 1067c478bd9Sstevel@tonic-gate if (retval < 0 || (retval == 0 && _T_IS_TLI(api_semantics))) { 1077c478bd9Sstevel@tonic-gate /* 1087c478bd9Sstevel@tonic-gate * not a stream or a TLI endpoint with no timod 1097c478bd9Sstevel@tonic-gate * XXX Note: If it is a XTI call, we push "timod" and 1107c478bd9Sstevel@tonic-gate * try to convert it into a transport endpoint later. 1117c478bd9Sstevel@tonic-gate * We do not do it for TLI and "retain" the old buggy 1127c478bd9Sstevel@tonic-gate * behavior because ypbind and a lot of other deamons seem 1137c478bd9Sstevel@tonic-gate * to use a buggy logic test of the form 1147c478bd9Sstevel@tonic-gate * "(t_getstate(0) != -1 || t_errno != TBADF)" to see if 1157c478bd9Sstevel@tonic-gate * they we ever invoked with request on stdin and drop into 1167c478bd9Sstevel@tonic-gate * untested code. This test is in code generated by rpcgen 1177c478bd9Sstevel@tonic-gate * which is why it is replicated test in many daemons too. 
1187c478bd9Sstevel@tonic-gate * We will need to fix that test too with "IsaTLIendpoint" 1197c478bd9Sstevel@tonic-gate * test if we ever fix this for TLI 1207c478bd9Sstevel@tonic-gate */ 1217c478bd9Sstevel@tonic-gate t_errno = TBADF; 1227c478bd9Sstevel@tonic-gate return (NULL); 1237c478bd9Sstevel@tonic-gate } 1247c478bd9Sstevel@tonic-gate 1257c478bd9Sstevel@tonic-gate if (retval == 0) { 1267c478bd9Sstevel@tonic-gate /* 1277c478bd9Sstevel@tonic-gate * "timod" not already on stream, then push it 1287c478bd9Sstevel@tonic-gate */ 1297c478bd9Sstevel@tonic-gate do { 1307c478bd9Sstevel@tonic-gate /* 1317c478bd9Sstevel@tonic-gate * Assumes (correctly) that I_PUSH is 1327c478bd9Sstevel@tonic-gate * atomic w.r.t signals (EINTR error) 1337c478bd9Sstevel@tonic-gate */ 134*e8031f0aSraf retval = ioctl(fd, I_PUSH, "timod"); 1357c478bd9Sstevel@tonic-gate } while (retval < 0 && errno == EINTR); 1367c478bd9Sstevel@tonic-gate 1377c478bd9Sstevel@tonic-gate if (retval < 0) { 1387c478bd9Sstevel@tonic-gate t_errno = TSYSERR; 1397c478bd9Sstevel@tonic-gate return (NULL); 1407c478bd9Sstevel@tonic-gate } 1417c478bd9Sstevel@tonic-gate timodpushed = 1; 1427c478bd9Sstevel@tonic-gate } 1437c478bd9Sstevel@tonic-gate /* 1447c478bd9Sstevel@tonic-gate * Try to (re)constitute the info at user level from state 1457c478bd9Sstevel@tonic-gate * in the kernel. This could be information that lost due 1467c478bd9Sstevel@tonic-gate * to an exec or being instantiated at a new descriptor due 1477c478bd9Sstevel@tonic-gate * to , open(), dup2() etc. 1487c478bd9Sstevel@tonic-gate * 1497c478bd9Sstevel@tonic-gate * _t_create() requires that all signals be blocked. 1507c478bd9Sstevel@tonic-gate * Note that sig_mutex_lock() only defers signals, it does not 1517c478bd9Sstevel@tonic-gate * block them, so interruptible syscalls could still get EINTR. 
1527c478bd9Sstevel@tonic-gate */ 1537c478bd9Sstevel@tonic-gate (void) thr_sigsetmask(SIG_SETMASK, &fillset, &mask); 1547c478bd9Sstevel@tonic-gate sig_mutex_lock(&_ti_userlock); 1557c478bd9Sstevel@tonic-gate tiptr = _t_create(fd, NULL, api_semantics, NULL); 1567c478bd9Sstevel@tonic-gate if (tiptr == NULL) { 1577c478bd9Sstevel@tonic-gate int sv_errno = errno; 1587c478bd9Sstevel@tonic-gate sig_mutex_unlock(&_ti_userlock); 1597c478bd9Sstevel@tonic-gate (void) thr_sigsetmask(SIG_SETMASK, &mask, NULL); 1607c478bd9Sstevel@tonic-gate /* 1617c478bd9Sstevel@tonic-gate * restore to stream before timod pushed. It may 1627c478bd9Sstevel@tonic-gate * not have been a network transport stream. 1637c478bd9Sstevel@tonic-gate */ 1647c478bd9Sstevel@tonic-gate if (timodpushed) 165*e8031f0aSraf (void) ioctl(fd, I_POP, 0); 1667c478bd9Sstevel@tonic-gate errno = sv_errno; 1677c478bd9Sstevel@tonic-gate return (NULL); 1687c478bd9Sstevel@tonic-gate } 1697c478bd9Sstevel@tonic-gate sig_mutex_unlock(&_ti_userlock); 1707c478bd9Sstevel@tonic-gate (void) thr_sigsetmask(SIG_SETMASK, &mask, NULL); 1717c478bd9Sstevel@tonic-gate return (tiptr); 1727c478bd9Sstevel@tonic-gate } 1737c478bd9Sstevel@tonic-gate 1747c478bd9Sstevel@tonic-gate /* 1757c478bd9Sstevel@tonic-gate * copy data to output buffer making sure the output buffer is 32 bit 1767c478bd9Sstevel@tonic-gate * aligned, even though the input buffer may not be. 
1777c478bd9Sstevel@tonic-gate */ 1787c478bd9Sstevel@tonic-gate int 1797c478bd9Sstevel@tonic-gate _t_aligned_copy( 1807c478bd9Sstevel@tonic-gate struct strbuf *strbufp, 1817c478bd9Sstevel@tonic-gate int len, 1827c478bd9Sstevel@tonic-gate int init_offset, 1837c478bd9Sstevel@tonic-gate char *datap, 1847c478bd9Sstevel@tonic-gate t_scalar_t *rtn_offset) 1857c478bd9Sstevel@tonic-gate { 1867c478bd9Sstevel@tonic-gate *rtn_offset = ROUNDUP32(init_offset); 1877c478bd9Sstevel@tonic-gate if ((*rtn_offset + len) > strbufp->maxlen) { 1887c478bd9Sstevel@tonic-gate /* 1897c478bd9Sstevel@tonic-gate * Aligned copy will overflow buffer 1907c478bd9Sstevel@tonic-gate */ 1917c478bd9Sstevel@tonic-gate return (-1); 1927c478bd9Sstevel@tonic-gate } 1937c478bd9Sstevel@tonic-gate (void) memcpy(strbufp->buf + *rtn_offset, datap, (size_t)len); 1947c478bd9Sstevel@tonic-gate 1957c478bd9Sstevel@tonic-gate return (0); 1967c478bd9Sstevel@tonic-gate } 1977c478bd9Sstevel@tonic-gate 1987c478bd9Sstevel@tonic-gate 1997c478bd9Sstevel@tonic-gate /* 2007c478bd9Sstevel@tonic-gate * append data and control info in look buffer (list in the MT case) 2017c478bd9Sstevel@tonic-gate * 2027c478bd9Sstevel@tonic-gate * The only thing that can be in look buffer is a T_DISCON_IND, 2037c478bd9Sstevel@tonic-gate * T_ORDREL_IND or a T_UDERROR_IND. 2047c478bd9Sstevel@tonic-gate * 2057c478bd9Sstevel@tonic-gate * It also enforces priority of T_DISCONDs over any T_ORDREL_IND 2067c478bd9Sstevel@tonic-gate * already in the buffer. It assumes no T_ORDREL_IND is appended 2077c478bd9Sstevel@tonic-gate * when there is already something on the looklist (error case) and 2087c478bd9Sstevel@tonic-gate * that a T_ORDREL_IND if present will always be the first on the 2097c478bd9Sstevel@tonic-gate * list. 2107c478bd9Sstevel@tonic-gate * 2117c478bd9Sstevel@tonic-gate * This also assumes ti_lock is held via sig_mutex_lock(), 2127c478bd9Sstevel@tonic-gate * so signals are deferred here. 
2137c478bd9Sstevel@tonic-gate */ 2147c478bd9Sstevel@tonic-gate int 2157c478bd9Sstevel@tonic-gate _t_register_lookevent( 2167c478bd9Sstevel@tonic-gate struct _ti_user *tiptr, 2177c478bd9Sstevel@tonic-gate caddr_t dptr, 2187c478bd9Sstevel@tonic-gate int dsize, 2197c478bd9Sstevel@tonic-gate caddr_t cptr, 2207c478bd9Sstevel@tonic-gate int csize) 2217c478bd9Sstevel@tonic-gate { 2227c478bd9Sstevel@tonic-gate struct _ti_lookbufs *tlbs; 2237c478bd9Sstevel@tonic-gate int cbuf_size, dbuf_size; 2247c478bd9Sstevel@tonic-gate 2257c478bd9Sstevel@tonic-gate assert(MUTEX_HELD(&tiptr->ti_lock)); 2267c478bd9Sstevel@tonic-gate 2277c478bd9Sstevel@tonic-gate cbuf_size = tiptr->ti_ctlsize; 2287c478bd9Sstevel@tonic-gate dbuf_size = tiptr->ti_rcvsize; 2297c478bd9Sstevel@tonic-gate 2307c478bd9Sstevel@tonic-gate if ((csize > cbuf_size) || dsize > dbuf_size) { 2317c478bd9Sstevel@tonic-gate /* can't fit - return error */ 2327c478bd9Sstevel@tonic-gate return (-1); /* error */ 2337c478bd9Sstevel@tonic-gate } 2347c478bd9Sstevel@tonic-gate /* 2357c478bd9Sstevel@tonic-gate * Enforce priority of T_DISCON_IND over T_ORDREL_IND 2367c478bd9Sstevel@tonic-gate * queued earlier. 2377c478bd9Sstevel@tonic-gate * Note: Since there can be only at most one T_ORDREL_IND 2387c478bd9Sstevel@tonic-gate * queued (more than one is error case), and we look for it 2397c478bd9Sstevel@tonic-gate * on each append of T_DISCON_IND, it can only be at the 2407c478bd9Sstevel@tonic-gate * head of the list if it is there. 
2417c478bd9Sstevel@tonic-gate */ 2427c478bd9Sstevel@tonic-gate if (tiptr->ti_lookcnt > 0) { /* something already on looklist */ 2437c478bd9Sstevel@tonic-gate if (cptr && csize >= (int)sizeof (struct T_discon_ind) && 24461961e0fSrobinson /* LINTED pointer cast */ 2457c478bd9Sstevel@tonic-gate *(t_scalar_t *)cptr == T_DISCON_IND) { 2467c478bd9Sstevel@tonic-gate /* appending discon ind */ 2477c478bd9Sstevel@tonic-gate assert(tiptr->ti_servtype != T_CLTS); 24861961e0fSrobinson /* LINTED pointer cast */ 2497c478bd9Sstevel@tonic-gate if (*(t_scalar_t *)tiptr->ti_lookbufs.tl_lookcbuf == 2507c478bd9Sstevel@tonic-gate T_ORDREL_IND) { /* T_ORDREL_IND is on list */ 2517c478bd9Sstevel@tonic-gate /* 2527c478bd9Sstevel@tonic-gate * Blow away T_ORDREL_IND 2537c478bd9Sstevel@tonic-gate */ 2547c478bd9Sstevel@tonic-gate _t_free_looklist_head(tiptr); 2557c478bd9Sstevel@tonic-gate } 2567c478bd9Sstevel@tonic-gate } 2577c478bd9Sstevel@tonic-gate } 2587c478bd9Sstevel@tonic-gate tlbs = &tiptr->ti_lookbufs; 2597c478bd9Sstevel@tonic-gate if (tiptr->ti_lookcnt > 0) { 2607c478bd9Sstevel@tonic-gate int listcount = 0; 2617c478bd9Sstevel@tonic-gate /* 2627c478bd9Sstevel@tonic-gate * Allocate and append a new lookbuf to the 2637c478bd9Sstevel@tonic-gate * existing list. (Should only happen in MT case) 2647c478bd9Sstevel@tonic-gate */ 2657c478bd9Sstevel@tonic-gate while (tlbs->tl_next != NULL) { 2667c478bd9Sstevel@tonic-gate listcount++; 2677c478bd9Sstevel@tonic-gate tlbs = tlbs->tl_next; 2687c478bd9Sstevel@tonic-gate } 2697c478bd9Sstevel@tonic-gate assert(tiptr->ti_lookcnt == listcount); 2707c478bd9Sstevel@tonic-gate 2717c478bd9Sstevel@tonic-gate /* 2727c478bd9Sstevel@tonic-gate * signals are deferred, calls to malloc() are safe. 
2737c478bd9Sstevel@tonic-gate */ 27461961e0fSrobinson if ((tlbs->tl_next = malloc(sizeof (struct _ti_lookbufs))) == 27561961e0fSrobinson NULL) 2767c478bd9Sstevel@tonic-gate return (-1); /* error */ 2777c478bd9Sstevel@tonic-gate tlbs = tlbs->tl_next; 2787c478bd9Sstevel@tonic-gate /* 2797c478bd9Sstevel@tonic-gate * Allocate the buffers. The sizes derived from the 2807c478bd9Sstevel@tonic-gate * sizes of other related buffers. See _t_alloc_bufs() 2817c478bd9Sstevel@tonic-gate * for details. 2827c478bd9Sstevel@tonic-gate */ 2837c478bd9Sstevel@tonic-gate if ((tlbs->tl_lookcbuf = malloc(cbuf_size)) == NULL) { 2847c478bd9Sstevel@tonic-gate /* giving up - free other memory chunks */ 2857c478bd9Sstevel@tonic-gate free(tlbs); 2867c478bd9Sstevel@tonic-gate return (-1); /* error */ 2877c478bd9Sstevel@tonic-gate } 2887c478bd9Sstevel@tonic-gate if ((dsize > 0) && 2897c478bd9Sstevel@tonic-gate ((tlbs->tl_lookdbuf = malloc(dbuf_size)) == NULL)) { 2907c478bd9Sstevel@tonic-gate /* giving up - free other memory chunks */ 2917c478bd9Sstevel@tonic-gate free(tlbs->tl_lookcbuf); 2927c478bd9Sstevel@tonic-gate free(tlbs); 2937c478bd9Sstevel@tonic-gate return (-1); /* error */ 2947c478bd9Sstevel@tonic-gate } 2957c478bd9Sstevel@tonic-gate } 2967c478bd9Sstevel@tonic-gate 2977c478bd9Sstevel@tonic-gate (void) memcpy(tlbs->tl_lookcbuf, cptr, csize); 2987c478bd9Sstevel@tonic-gate if (dsize > 0) 2997c478bd9Sstevel@tonic-gate (void) memcpy(tlbs->tl_lookdbuf, dptr, dsize); 3007c478bd9Sstevel@tonic-gate tlbs->tl_lookdlen = dsize; 3017c478bd9Sstevel@tonic-gate tlbs->tl_lookclen = csize; 3027c478bd9Sstevel@tonic-gate tlbs->tl_next = NULL; 3037c478bd9Sstevel@tonic-gate tiptr->ti_lookcnt++; 3047c478bd9Sstevel@tonic-gate return (0); /* ok return */ 3057c478bd9Sstevel@tonic-gate } 3067c478bd9Sstevel@tonic-gate 3077c478bd9Sstevel@tonic-gate /* 3087c478bd9Sstevel@tonic-gate * Is there something that needs attention? 
 * Assumes tiptr->ti_lock held and this threads signals blocked
 * in MT case.
 */
/*
 * _t_is_event - return -1 with t_errno set (TLOOK or TSYSERR) if data
 * is pending on the stream (I_NREAD > 0) or an event is already queued
 * on the look-buffer list; return 0 if nothing needs attention.
 */
int
_t_is_event(int fd, struct _ti_user *tiptr)
{
	int size, retval;

	assert(MUTEX_HELD(&tiptr->ti_lock));
	/* I_NREAD returns the number of queued messages */
	if ((retval = ioctl(fd, I_NREAD, &size)) < 0) {
		t_errno = TSYSERR;
		return (-1);
	}

	if ((retval > 0) || (tiptr->ti_lookcnt > 0)) {
		t_errno = TLOOK;
		return (-1);
	}
	return (0);
}

/*
 * wait for T_OK_ACK
 * assumes tiptr->ti_lock held in MT case
 *
 * Reads the high-priority acknowledgement for primitive 'type' from fd.
 * Returns 0 on T_OK_ACK for 'type'; -1 with t_errno (and possibly errno)
 * set on T_ERROR_ACK or any protocol/system error.
 *
 * Buffer ownership: ctlbuf/databuf are either freshly malloc'ed
 * (didalloc/didralloc set) and freed here, or borrowed from the
 * endpoint's scratch buffers and returned to tiptr on every exit path.
 */
int
_t_is_ok(int fd, struct _ti_user *tiptr, t_scalar_t type)
{
	struct strbuf ctlbuf;
	struct strbuf databuf;
	union T_primitives *pptr;
	int retval, cntlflag;
	int size;
	int didalloc, didralloc;
	int flags = 0;

	assert(MUTEX_HELD(&tiptr->ti_lock));
	/*
	 * Acquire ctlbuf for use in sending/receiving control part
	 * of the message.
	 */
	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0)
		return (-1);
	/*
	 * Acquire databuf for use in sending/receiving data part
	 */
	if (_t_acquire_databuf(tiptr, &databuf, &didralloc) < 0) {
		/* give ctlbuf back (free it, or return it to tiptr) */
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		return (-1);
	}

	/*
	 * Temporarily convert a non blocking endpoint to a
	 * blocking one and restore status later
	 */
	cntlflag = fcntl(fd, F_GETFL, 0);
	if (cntlflag & (O_NDELAY | O_NONBLOCK))
		(void) fcntl(fd, F_SETFL, cntlflag & ~(O_NDELAY | O_NONBLOCK));

	flags = RS_HIPRI;

	/* retry getmsg() if interrupted by a signal */
	while ((retval = getmsg(fd, &ctlbuf, &databuf, &flags)) < 0) {
		if (errno == EINTR)
			continue;
		if (cntlflag & (O_NDELAY | O_NONBLOCK))
			(void) fcntl(fd, F_SETFL, cntlflag);
		t_errno = TSYSERR;
		goto err_out;
	}

	/* did I get entire message */
	if (retval > 0) {
		if (cntlflag & (O_NDELAY | O_NONBLOCK))
			(void) fcntl(fd, F_SETFL, cntlflag);
		t_errno = TSYSERR;
		errno = EIO;
		goto err_out;
	}

	/*
	 * is ctl part large enough to determine type?
	 */
	if (ctlbuf.len < (int)sizeof (t_scalar_t)) {
		if (cntlflag & (O_NDELAY | O_NONBLOCK))
			(void) fcntl(fd, F_SETFL, cntlflag);
		t_errno = TSYSERR;
		errno = EPROTO;
		goto err_out;
	}

	/* restore the original blocking status before inspecting the ack */
	if (cntlflag & (O_NDELAY | O_NONBLOCK))
		(void) fcntl(fd, F_SETFL, cntlflag);

	/* LINTED pointer cast */
	pptr = (union T_primitives *)ctlbuf.buf;

	switch (pptr->type) {
	case T_OK_ACK:
		/* ack must be complete and for the primitive we sent */
		if ((ctlbuf.len < (int)sizeof (struct T_ok_ack)) ||
		    (pptr->ok_ack.CORRECT_prim != type)) {
			t_errno = TSYSERR;
			errno = EPROTO;
			goto err_out;
		}
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		if (didralloc)
			free(databuf.buf);
		else
			tiptr->ti_rcvbuf = databuf.buf;
		return (0);

	case T_ERROR_ACK:
		if ((ctlbuf.len < (int)sizeof (struct T_error_ack)) ||
		    (pptr->error_ack.ERROR_prim != type)) {
			t_errno = TSYSERR;
			errno = EPROTO;
			goto err_out;
		}
		/*
		 * if error is out of state and there is something
		 * on read queue, then indicate to user that
		 * there is something that needs attention
		 */
		if (pptr->error_ack.TLI_error == TOUTSTATE) {
			if ((retval = ioctl(fd, I_NREAD, &size)) < 0) {
				t_errno = TSYSERR;
				goto err_out;
			}
			if (retval > 0)
				t_errno = TLOOK;
			else
				t_errno = TOUTSTATE;
		} else {
			t_errno = pptr->error_ack.TLI_error;
			if (t_errno == TSYSERR)
				errno = pptr->error_ack.UNIX_error;
		}
		goto err_out;
	default:
		t_errno = TSYSERR;
		errno = EPROTO;
		/* fallthru to err_out: */
	}
err_out:
	/* common failure exit: hand both buffers back before returning */
	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	if (didralloc)
		free(databuf.buf);
	else
		tiptr->ti_rcvbuf = databuf.buf;
	return (-1);
}

/*
 * timod ioctl
 *
 * Issues TI_* ioctls to the timod module via I_STR. A positive return
 * from ioctl() encodes a t_errno value in the low byte and, for
 * TSYSERR, a UNIX errno in the next byte. On success the (possibly
 * updated) ic_len is stored through retlenp if non-NULL.
 */
int
_t_do_ioctl(int fd, char *buf, int size, int cmd, int *retlenp)
{
	int retval;
	struct strioctl strioc;

	strioc.ic_cmd = cmd;
	strioc.ic_timout = -1;	/* infinite timeout */
	strioc.ic_len = size;
	strioc.ic_dp = buf;

	if ((retval = ioctl(fd, I_STR, &strioc)) < 0) {
		t_errno = TSYSERR;
		return (-1);
	}

	if (retval > 0) {
		/* timod encodes t_errno (and errno) in the return value */
		t_errno = retval&0xff;
		if (t_errno == TSYSERR)
			errno = (retval >> 8)&0xff;
		return (-1);
	}
	if (retlenp)
		*retlenp = strioc.ic_len;
	return (0);
}

/*
 * alloc scratch buffers and look buffers
 *
 * Sizes all per-endpoint buffers from the transport's T_info_ack and
 * installs them in tiptr. Returns 0 on success, -1 on allocation
 * failure (all partial allocations are released).
 */
/* ARGSUSED */
static int
_t_alloc_bufs(int fd, struct _ti_user *tiptr, struct T_info_ack *tsap)
{
	unsigned int size1, size2;
	t_scalar_t optsize;
	unsigned int csize, dsize, asize, osize;
	char *ctlbuf, *rcvbuf;
	char *lookdbuf, *lookcbuf;

	/* receive/look data buffers sized by max of connect/datagram data */
	csize = _t_setsize(tsap->CDATA_size);
	dsize = _t_setsize(tsap->DDATA_size);

	size1 = _T_MAX(csize, dsize);

	if (size1 != 0) {
		if ((rcvbuf = malloc(size1)) == NULL)
			return (-1);
		if ((lookdbuf = malloc(size1)) == NULL) {
			free(rcvbuf);
			return (-1);
		}
	} else {
		rcvbuf = NULL;
		lookdbuf = NULL;
	}

	asize = _t_setsize(tsap->ADDR_size);
	if (tsap->OPT_size >= 0)
		/* compensate for XTI level options */
		optsize = tsap->OPT_size + TX_XTI_LEVEL_MAX_OPTBUF;
	else
		optsize = tsap->OPT_size;
	osize = _t_setsize(optsize);

	/*
	 * We compute the largest buffer size needed for this provider by
	 * adding the components. [ An extra sizeof (t_scalar_t) is added to
	 * take care of rounding off for alignment) for each buffer ]
	 * The goal here is compute the size of largest possible buffer that
	 * might be needed to hold a TPI message for the transport provider
	 * on this endpoint.
	 * Note: T_ADDR_ACK contains potentially two address buffers.
	 */

	size2 = (unsigned int)sizeof (union T_primitives) /* TPI struct */
	    + asize + (unsigned int)sizeof (t_scalar_t) +
	    /* first addr buffer plus alignment */
	    asize + (unsigned int)sizeof (t_scalar_t) +
	    /* second addr buffer plus alignment */
	    osize + (unsigned int)sizeof (t_scalar_t);
	    /* option buffer plus alignment */

	if ((ctlbuf = malloc(size2)) == NULL) {
		if (size1 != 0) {
			free(rcvbuf);
			free(lookdbuf);
		}
		return (-1);
	}

	if ((lookcbuf = malloc(size2)) == NULL) {
		if (size1 != 0) {
			free(rcvbuf);
			free(lookdbuf);
		}
		free(ctlbuf);
		return (-1);
	}

	tiptr->ti_rcvsize = size1;
	tiptr->ti_rcvbuf = rcvbuf;
	tiptr->ti_ctlsize = size2;
	tiptr->ti_ctlbuf = ctlbuf;

	/*
	 * Note: The head of the lookbuffers list (and associated buffers)
	 * is allocated here on initialization.
	 * More allocated on demand.
	 */
	tiptr->ti_lookbufs.tl_lookclen = 0;
	tiptr->ti_lookbufs.tl_lookcbuf = lookcbuf;
	tiptr->ti_lookbufs.tl_lookdlen = 0;
	tiptr->ti_lookbufs.tl_lookdbuf = lookdbuf;

	return (0);
}


/*
 * set sizes of buffers
 *
 * Maps the provider-advertised size to an allocation size:
 * T_INFINITE -> DEFSIZE default, T_INVALID -> 0, otherwise the value
 * itself (NOTE(review): other negative values would convert to a huge
 * unsigned size here — presumably providers never report them; verify).
 */
static unsigned int
_t_setsize(t_scalar_t infosize)
{
	switch (infosize) {
	case T_INFINITE /* -1 */:
		return (DEFSIZE);
	case T_INVALID /* -2 */:
		return (0);
	default:
		return ((unsigned int) infosize);
	}
}

/*
 * Reset all per-endpoint state in tiptr to "empty" values prior to a
 * re-sync with the kernel. Does not free any buffers the fields may
 * still reference — callers are expected to have released them first.
 */
static void
_t_reinit_tiptr(struct _ti_user *tiptr)
{
	/*
	 * Note: This routine is designed for a "reinitialization"
	 * Following fields are not modified here and preserved.
	 *	 - ti_fd field
	 *	 - ti_lock
	 *	 - ti_next
	 *	 - ti_prev
	 * The above fields have to be separately initialized if this
	 * is used for a fresh initialization.
	 */

	tiptr->ti_flags = 0;
	tiptr->ti_rcvsize = 0;
	tiptr->ti_rcvbuf = NULL;
	tiptr->ti_ctlsize = 0;
	tiptr->ti_ctlbuf = NULL;
	tiptr->ti_lookbufs.tl_lookdbuf = NULL;
	tiptr->ti_lookbufs.tl_lookcbuf = NULL;
	tiptr->ti_lookbufs.tl_lookdlen = 0;
	tiptr->ti_lookbufs.tl_lookclen = 0;
	tiptr->ti_lookbufs.tl_next = NULL;
	tiptr->ti_maxpsz = 0;
	tiptr->ti_tsdusize = 0;
	tiptr->ti_etsdusize = 0;
	tiptr->ti_cdatasize = 0;
	tiptr->ti_ddatasize = 0;
	tiptr->ti_servtype = 0;
	tiptr->ti_lookcnt = 0;
	tiptr->ti_state = 0;
	tiptr->ti_ocnt = 0;
	tiptr->ti_prov_flag = 0;
	tiptr->ti_qlen = 0;
}

/*
 * Link manipulation routines.
 *
 * NBUCKETS hash buckets are used to give fast
 * access. The number is derived from the file descriptor softlimit
 * number (64).
 */

#define	NBUCKETS	64
static struct _ti_user		*hash_bucket[NBUCKETS];

/*
 * Allocates a new link and returns a pointer to it.
 * Assumes that the caller is holding _ti_userlock via sig_mutex_lock(),
 * so signals are deferred here.
 *
 * Returns the (possibly reused) instance structure for descriptor s, or
 * NULL if s is invalid, cannot be fstat()'ed, or memory cannot be
 * allocated for a new link.
 */
static struct _ti_user *
add_tilink(int s)
{
	struct _ti_user	*tiptr;
	struct _ti_user	*prevptr;
	struct _ti_user	*curptr;
	int	x;
	struct stat stbuf;

	assert(MUTEX_HELD(&_ti_userlock));

	/* stbuf's dev/ino identify the underlying device for this fd */
	if (s < 0 || fstat(s, &stbuf) != 0)
		return (NULL);

	x = s % NBUCKETS;
	if (hash_bucket[x] != NULL) {
		/*
		 * Walk along the bucket looking for
		 * duplicate entry or the end.
		 */
		for (curptr = hash_bucket[x]; curptr != NULL;
		    curptr = curptr->ti_next) {
			if (curptr->ti_fd == s) {
				/*
				 * This can happen when the user has close(2)'ed
				 * a descriptor and then been allocated it again
				 * via t_open().
				 *
				 * We re-use the existing _ti_user struct
				 * in this case. If there are buffers
				 * associated with the existing _ti_user
				 * struct, they may not be the correct size,
				 * so we can not use them. We free them
				 * here and re-allocate new ones
				 * later on.
				 */
				if (curptr->ti_rcvbuf != NULL)
					free(curptr->ti_rcvbuf);
				free(curptr->ti_ctlbuf);
				_t_free_lookbufs(curptr);
				_t_reinit_tiptr(curptr);
				curptr->ti_rdev = stbuf.st_rdev;
				curptr->ti_ino = stbuf.st_ino;
				return (curptr);
			}
			prevptr = curptr;
		}
		/*
		 * No duplicate found: allocate and link in a new entry at
		 * the tail of the bucket (prevptr is the current tail).
		 */
		if ((tiptr = malloc(sizeof (*tiptr))) == NULL)
			return (NULL);
		/*
		 * First initialize fields common with reinitialization and
		 * then other fields too
		 */
		_t_reinit_tiptr(tiptr);
		prevptr->ti_next = tiptr;
		tiptr->ti_prev = prevptr;
	} else {
		/*
		 * First entry in this bucket.
		 */
		if ((tiptr = malloc(sizeof (*tiptr))) == NULL)
			return (NULL);
		_t_reinit_tiptr(tiptr);
		hash_bucket[x] = tiptr;
		tiptr->ti_prev = NULL;
	}
	tiptr->ti_next = NULL;
	tiptr->ti_fd = s;
	tiptr->ti_rdev = stbuf.st_rdev;
	tiptr->ti_ino = stbuf.st_ino;
	(void) mutex_init(&tiptr->ti_lock, USYNC_THREAD, NULL);
	return (tiptr);
}

/*
 * Find a link by descriptor.
 * Returns NULL when no current entry exists for s; a stale entry whose
 * device/inode no longer match the descriptor is deleted on the way.
 * Assumes that the caller is holding _ti_userlock.
 */
static struct _ti_user *
find_tilink(int s)
{
	struct _ti_user	*curptr;
	int	x;
	struct stat stbuf;

	assert(MUTEX_HELD(&_ti_userlock));

	if (s < 0 || fstat(s, &stbuf) != 0)
		return (NULL);

	x = s % NBUCKETS;
	/*
	 * Walk along the bucket looking for the descriptor.
7587c478bd9Sstevel@tonic-gate */ 7597c478bd9Sstevel@tonic-gate for (curptr = hash_bucket[x]; curptr; curptr = curptr->ti_next) { 7607c478bd9Sstevel@tonic-gate if (curptr->ti_fd == s) { 7617c478bd9Sstevel@tonic-gate if (curptr->ti_rdev == stbuf.st_rdev && 7627c478bd9Sstevel@tonic-gate curptr->ti_ino == stbuf.st_ino) 7637c478bd9Sstevel@tonic-gate return (curptr); 7647c478bd9Sstevel@tonic-gate (void) _t_delete_tilink(s); 7657c478bd9Sstevel@tonic-gate } 7667c478bd9Sstevel@tonic-gate } 7677c478bd9Sstevel@tonic-gate return (NULL); 7687c478bd9Sstevel@tonic-gate } 7697c478bd9Sstevel@tonic-gate 7707c478bd9Sstevel@tonic-gate /* 7717c478bd9Sstevel@tonic-gate * Assumes that the caller is holding _ti_userlock. 7727c478bd9Sstevel@tonic-gate * Also assumes that all signals are blocked. 7737c478bd9Sstevel@tonic-gate */ 7747c478bd9Sstevel@tonic-gate int 7757c478bd9Sstevel@tonic-gate _t_delete_tilink(int s) 7767c478bd9Sstevel@tonic-gate { 7777c478bd9Sstevel@tonic-gate struct _ti_user *curptr; 7787c478bd9Sstevel@tonic-gate int x; 7797c478bd9Sstevel@tonic-gate 7807c478bd9Sstevel@tonic-gate /* 7817c478bd9Sstevel@tonic-gate * Find the link. 7827c478bd9Sstevel@tonic-gate */ 7837c478bd9Sstevel@tonic-gate assert(MUTEX_HELD(&_ti_userlock)); 7847c478bd9Sstevel@tonic-gate if (s < 0) 7857c478bd9Sstevel@tonic-gate return (-1); 7867c478bd9Sstevel@tonic-gate x = s % NBUCKETS; 7877c478bd9Sstevel@tonic-gate /* 7887c478bd9Sstevel@tonic-gate * Walk along the bucket looking for 7897c478bd9Sstevel@tonic-gate * the descriptor. 
7907c478bd9Sstevel@tonic-gate */ 7917c478bd9Sstevel@tonic-gate for (curptr = hash_bucket[x]; curptr; curptr = curptr->ti_next) { 7927c478bd9Sstevel@tonic-gate if (curptr->ti_fd == s) { 7937c478bd9Sstevel@tonic-gate struct _ti_user *nextptr; 7947c478bd9Sstevel@tonic-gate struct _ti_user *prevptr; 7957c478bd9Sstevel@tonic-gate 7967c478bd9Sstevel@tonic-gate nextptr = curptr->ti_next; 7977c478bd9Sstevel@tonic-gate prevptr = curptr->ti_prev; 7987c478bd9Sstevel@tonic-gate if (prevptr) 7997c478bd9Sstevel@tonic-gate prevptr->ti_next = nextptr; 8007c478bd9Sstevel@tonic-gate else 8017c478bd9Sstevel@tonic-gate hash_bucket[x] = nextptr; 8027c478bd9Sstevel@tonic-gate if (nextptr) 8037c478bd9Sstevel@tonic-gate nextptr->ti_prev = prevptr; 8047c478bd9Sstevel@tonic-gate 8057c478bd9Sstevel@tonic-gate /* 8067c478bd9Sstevel@tonic-gate * free resource associated with the curptr 8077c478bd9Sstevel@tonic-gate */ 8087c478bd9Sstevel@tonic-gate if (curptr->ti_rcvbuf != NULL) 8097c478bd9Sstevel@tonic-gate free(curptr->ti_rcvbuf); 8107c478bd9Sstevel@tonic-gate free(curptr->ti_ctlbuf); 8117c478bd9Sstevel@tonic-gate _t_free_lookbufs(curptr); 81261961e0fSrobinson (void) mutex_destroy(&curptr->ti_lock); 8137c478bd9Sstevel@tonic-gate free(curptr); 8147c478bd9Sstevel@tonic-gate return (0); 8157c478bd9Sstevel@tonic-gate } 8167c478bd9Sstevel@tonic-gate } 8177c478bd9Sstevel@tonic-gate return (-1); 8187c478bd9Sstevel@tonic-gate } 8197c478bd9Sstevel@tonic-gate 8207c478bd9Sstevel@tonic-gate /* 8217c478bd9Sstevel@tonic-gate * Allocate a TLI state structure and synch it with the kernel 8227c478bd9Sstevel@tonic-gate * *tiptr is returned 8237c478bd9Sstevel@tonic-gate * Assumes that the caller is holding the _ti_userlock and has blocked signals. 8247c478bd9Sstevel@tonic-gate * 8257c478bd9Sstevel@tonic-gate * This function may fail the first time it is called with given transport if it 8267c478bd9Sstevel@tonic-gate * doesn't support T_CAPABILITY_REQ TPI message. 
8277c478bd9Sstevel@tonic-gate */ 8287c478bd9Sstevel@tonic-gate struct _ti_user * 8297c478bd9Sstevel@tonic-gate _t_create(int fd, struct t_info *info, int api_semantics, int *t_capreq_failed) 8307c478bd9Sstevel@tonic-gate { 8317c478bd9Sstevel@tonic-gate /* 8327c478bd9Sstevel@tonic-gate * Aligned data buffer for ioctl. 8337c478bd9Sstevel@tonic-gate */ 8347c478bd9Sstevel@tonic-gate union { 8357c478bd9Sstevel@tonic-gate struct ti_sync_req ti_req; 8367c478bd9Sstevel@tonic-gate struct ti_sync_ack ti_ack; 8377c478bd9Sstevel@tonic-gate union T_primitives t_prim; 8387c478bd9Sstevel@tonic-gate char pad[128]; 8397c478bd9Sstevel@tonic-gate } ioctl_data; 8407c478bd9Sstevel@tonic-gate void *ioctlbuf = &ioctl_data; /* TI_SYNC/GETINFO with room to grow */ 8417c478bd9Sstevel@tonic-gate /* preferred location first local variable */ 8427c478bd9Sstevel@tonic-gate /* see note below */ 8437c478bd9Sstevel@tonic-gate /* 8447c478bd9Sstevel@tonic-gate * Note: We use "ioctlbuf" allocated on stack above with 8457c478bd9Sstevel@tonic-gate * room to grow since (struct ti_sync_ack) can grow in size 8467c478bd9Sstevel@tonic-gate * on future kernels. (We do not use malloc'd "ti_ctlbuf" as that 8477c478bd9Sstevel@tonic-gate * part of instance structure which may not exist yet) 8487c478bd9Sstevel@tonic-gate * Its preferred declaration location is first local variable in this 8497c478bd9Sstevel@tonic-gate * procedure as bugs causing overruns will be detectable on 8507c478bd9Sstevel@tonic-gate * platforms where procedure calling conventions place return 8517c478bd9Sstevel@tonic-gate * address on stack (such as x86) instead of causing silent 8527c478bd9Sstevel@tonic-gate * memory corruption. 
8537c478bd9Sstevel@tonic-gate */ 8547c478bd9Sstevel@tonic-gate struct ti_sync_req *tsrp = (struct ti_sync_req *)ioctlbuf; 8557c478bd9Sstevel@tonic-gate struct ti_sync_ack *tsap = (struct ti_sync_ack *)ioctlbuf; 8567c478bd9Sstevel@tonic-gate struct T_capability_req *tcrp = (struct T_capability_req *)ioctlbuf; 8577c478bd9Sstevel@tonic-gate struct T_capability_ack *tcap = (struct T_capability_ack *)ioctlbuf; 8587c478bd9Sstevel@tonic-gate struct T_info_ack *tiap = &tcap->INFO_ack; 8597c478bd9Sstevel@tonic-gate struct _ti_user *ntiptr; 8607c478bd9Sstevel@tonic-gate int expected_acksize; 8617c478bd9Sstevel@tonic-gate int retlen, rstate, sv_errno, rval; 8627c478bd9Sstevel@tonic-gate 8637c478bd9Sstevel@tonic-gate assert(MUTEX_HELD(&_ti_userlock)); 8647c478bd9Sstevel@tonic-gate 8657c478bd9Sstevel@tonic-gate /* 8667c478bd9Sstevel@tonic-gate * Use ioctl required for sync'ing state with kernel. 8677c478bd9Sstevel@tonic-gate * We use two ioctls. TI_CAPABILITY is used to get TPI information and 8687c478bd9Sstevel@tonic-gate * TI_SYNC is used to synchronise state with timod. Statically linked 8697c478bd9Sstevel@tonic-gate * TLI applications will no longer work on older releases where there 8707c478bd9Sstevel@tonic-gate * are no TI_SYNC and TI_CAPABILITY. 8717c478bd9Sstevel@tonic-gate */ 8727c478bd9Sstevel@tonic-gate 8737c478bd9Sstevel@tonic-gate /* 8747c478bd9Sstevel@tonic-gate * Request info about transport. 8757c478bd9Sstevel@tonic-gate * Assumes that TC1_INFO should always be implemented. 8767c478bd9Sstevel@tonic-gate * For TI_CAPABILITY size argument to ioctl specifies maximum buffer 8777c478bd9Sstevel@tonic-gate * size. 
8787c478bd9Sstevel@tonic-gate */ 8797c478bd9Sstevel@tonic-gate tcrp->PRIM_type = T_CAPABILITY_REQ; 8807c478bd9Sstevel@tonic-gate tcrp->CAP_bits1 = TC1_INFO | TC1_ACCEPTOR_ID; 8817c478bd9Sstevel@tonic-gate rval = _t_do_ioctl(fd, (char *)ioctlbuf, 8827c478bd9Sstevel@tonic-gate (int)sizeof (struct T_capability_ack), TI_CAPABILITY, &retlen); 8837c478bd9Sstevel@tonic-gate expected_acksize = (int)sizeof (struct T_capability_ack); 8847c478bd9Sstevel@tonic-gate 8857c478bd9Sstevel@tonic-gate if (rval < 0) { 8867c478bd9Sstevel@tonic-gate /* 8877c478bd9Sstevel@tonic-gate * TI_CAPABILITY may fail when transport provider doesn't 8887c478bd9Sstevel@tonic-gate * support T_CAPABILITY_REQ message type. In this case file 8897c478bd9Sstevel@tonic-gate * descriptor may be unusable (when transport provider sent 8907c478bd9Sstevel@tonic-gate * M_ERROR in response to T_CAPABILITY_REQ). This should only 8917c478bd9Sstevel@tonic-gate * happen once during system lifetime for given transport 8927c478bd9Sstevel@tonic-gate * provider since timod will emulate TI_CAPABILITY after it 8937c478bd9Sstevel@tonic-gate * detected the failure. 
8947c478bd9Sstevel@tonic-gate */ 8957c478bd9Sstevel@tonic-gate if (t_capreq_failed != NULL) 8967c478bd9Sstevel@tonic-gate *t_capreq_failed = 1; 8977c478bd9Sstevel@tonic-gate return (NULL); 8987c478bd9Sstevel@tonic-gate } 8997c478bd9Sstevel@tonic-gate 9007c478bd9Sstevel@tonic-gate if (retlen != expected_acksize) { 9017c478bd9Sstevel@tonic-gate t_errno = TSYSERR; 9027c478bd9Sstevel@tonic-gate errno = EIO; 9037c478bd9Sstevel@tonic-gate return (NULL); 9047c478bd9Sstevel@tonic-gate } 9057c478bd9Sstevel@tonic-gate 9067c478bd9Sstevel@tonic-gate if ((tcap->CAP_bits1 & TC1_INFO) == 0) { 9077c478bd9Sstevel@tonic-gate t_errno = TSYSERR; 9087c478bd9Sstevel@tonic-gate errno = EPROTO; 9097c478bd9Sstevel@tonic-gate return (NULL); 9107c478bd9Sstevel@tonic-gate } 9117c478bd9Sstevel@tonic-gate if (info != NULL) { 9127c478bd9Sstevel@tonic-gate if (tiap->PRIM_type != T_INFO_ACK) { 9137c478bd9Sstevel@tonic-gate t_errno = TSYSERR; 9147c478bd9Sstevel@tonic-gate errno = EPROTO; 9157c478bd9Sstevel@tonic-gate return (NULL); 9167c478bd9Sstevel@tonic-gate } 9177c478bd9Sstevel@tonic-gate info->addr = tiap->ADDR_size; 9187c478bd9Sstevel@tonic-gate info->options = tiap->OPT_size; 9197c478bd9Sstevel@tonic-gate info->tsdu = tiap->TSDU_size; 9207c478bd9Sstevel@tonic-gate info->etsdu = tiap->ETSDU_size; 9217c478bd9Sstevel@tonic-gate info->connect = tiap->CDATA_size; 9227c478bd9Sstevel@tonic-gate info->discon = tiap->DDATA_size; 9237c478bd9Sstevel@tonic-gate info->servtype = tiap->SERV_type; 9247c478bd9Sstevel@tonic-gate if (_T_IS_XTI(api_semantics)) { 9257c478bd9Sstevel@tonic-gate /* 9267c478bd9Sstevel@tonic-gate * XTI ONLY - TLI "struct t_info" does not 9277c478bd9Sstevel@tonic-gate * have "flags" 9287c478bd9Sstevel@tonic-gate */ 9297c478bd9Sstevel@tonic-gate info->flags = 0; 9307c478bd9Sstevel@tonic-gate if (tiap->PROVIDER_flag & (SENDZERO|OLD_SENDZERO)) 9317c478bd9Sstevel@tonic-gate info->flags |= T_SENDZERO; 9327c478bd9Sstevel@tonic-gate /* 9337c478bd9Sstevel@tonic-gate * Some day there MAY be a 
NEW bit in T_info_ack 9347c478bd9Sstevel@tonic-gate * PROVIDER_flag namespace exposed by TPI header 9357c478bd9Sstevel@tonic-gate * <sys/tihdr.h> which will functionally correspond to 9367c478bd9Sstevel@tonic-gate * role played by T_ORDRELDATA in info->flags namespace 9377c478bd9Sstevel@tonic-gate * When that bit exists, we can add a test to see if 9387c478bd9Sstevel@tonic-gate * it is set and set T_ORDRELDATA. 9397c478bd9Sstevel@tonic-gate * Note: Currently only mOSI ("minimal OSI") provider 9407c478bd9Sstevel@tonic-gate * is specified to use T_ORDRELDATA so probability of 9417c478bd9Sstevel@tonic-gate * needing it is minimal. 9427c478bd9Sstevel@tonic-gate */ 9437c478bd9Sstevel@tonic-gate } 9447c478bd9Sstevel@tonic-gate } 9457c478bd9Sstevel@tonic-gate 9467c478bd9Sstevel@tonic-gate /* 9477c478bd9Sstevel@tonic-gate * if first time or no instance (after fork/exec, dup etc, 9487c478bd9Sstevel@tonic-gate * then create initialize data structure 9497c478bd9Sstevel@tonic-gate * and allocate buffers 9507c478bd9Sstevel@tonic-gate */ 9517c478bd9Sstevel@tonic-gate ntiptr = add_tilink(fd); 9527c478bd9Sstevel@tonic-gate if (ntiptr == NULL) { 9537c478bd9Sstevel@tonic-gate t_errno = TSYSERR; 9547c478bd9Sstevel@tonic-gate errno = ENOMEM; 9557c478bd9Sstevel@tonic-gate return (NULL); 9567c478bd9Sstevel@tonic-gate } 9577c478bd9Sstevel@tonic-gate sig_mutex_lock(&ntiptr->ti_lock); 9587c478bd9Sstevel@tonic-gate 9597c478bd9Sstevel@tonic-gate /* 9607c478bd9Sstevel@tonic-gate * Allocate buffers for the new descriptor 9617c478bd9Sstevel@tonic-gate */ 9627c478bd9Sstevel@tonic-gate if (_t_alloc_bufs(fd, ntiptr, tiap) < 0) { 9637c478bd9Sstevel@tonic-gate sv_errno = errno; 9647c478bd9Sstevel@tonic-gate (void) _t_delete_tilink(fd); 9657c478bd9Sstevel@tonic-gate t_errno = TSYSERR; 9667c478bd9Sstevel@tonic-gate sig_mutex_unlock(&ntiptr->ti_lock); 9677c478bd9Sstevel@tonic-gate errno = sv_errno; 9687c478bd9Sstevel@tonic-gate return (NULL); 9697c478bd9Sstevel@tonic-gate } 
9707c478bd9Sstevel@tonic-gate 9717c478bd9Sstevel@tonic-gate /* Fill instance structure */ 9727c478bd9Sstevel@tonic-gate 9737c478bd9Sstevel@tonic-gate ntiptr->ti_lookcnt = 0; 9747c478bd9Sstevel@tonic-gate ntiptr->ti_flags = USED; 9757c478bd9Sstevel@tonic-gate ntiptr->ti_state = T_UNINIT; 9767c478bd9Sstevel@tonic-gate ntiptr->ti_ocnt = 0; 9777c478bd9Sstevel@tonic-gate 9787c478bd9Sstevel@tonic-gate assert(tiap->TIDU_size > 0); 9797c478bd9Sstevel@tonic-gate ntiptr->ti_maxpsz = tiap->TIDU_size; 9807c478bd9Sstevel@tonic-gate assert(tiap->TSDU_size >= -2); 9817c478bd9Sstevel@tonic-gate ntiptr->ti_tsdusize = tiap->TSDU_size; 9827c478bd9Sstevel@tonic-gate assert(tiap->ETSDU_size >= -2); 9837c478bd9Sstevel@tonic-gate ntiptr->ti_etsdusize = tiap->ETSDU_size; 9847c478bd9Sstevel@tonic-gate assert(tiap->CDATA_size >= -2); 9857c478bd9Sstevel@tonic-gate ntiptr->ti_cdatasize = tiap->CDATA_size; 9867c478bd9Sstevel@tonic-gate assert(tiap->DDATA_size >= -2); 9877c478bd9Sstevel@tonic-gate ntiptr->ti_ddatasize = tiap->DDATA_size; 9887c478bd9Sstevel@tonic-gate ntiptr->ti_servtype = tiap->SERV_type; 9897c478bd9Sstevel@tonic-gate ntiptr->ti_prov_flag = tiap->PROVIDER_flag; 9907c478bd9Sstevel@tonic-gate 9917c478bd9Sstevel@tonic-gate if ((tcap->CAP_bits1 & TC1_ACCEPTOR_ID) != 0) { 9927c478bd9Sstevel@tonic-gate ntiptr->acceptor_id = tcap->ACCEPTOR_id; 9937c478bd9Sstevel@tonic-gate ntiptr->ti_flags |= V_ACCEPTOR_ID; 9947c478bd9Sstevel@tonic-gate } 9957c478bd9Sstevel@tonic-gate else 9967c478bd9Sstevel@tonic-gate ntiptr->ti_flags &= ~V_ACCEPTOR_ID; 9977c478bd9Sstevel@tonic-gate 9987c478bd9Sstevel@tonic-gate /* 9997c478bd9Sstevel@tonic-gate * Restore state from kernel (caveat some heuristics) 10007c478bd9Sstevel@tonic-gate */ 10017c478bd9Sstevel@tonic-gate switch (tiap->CURRENT_state) { 10027c478bd9Sstevel@tonic-gate 10037c478bd9Sstevel@tonic-gate case TS_UNBND: 10047c478bd9Sstevel@tonic-gate ntiptr->ti_state = T_UNBND; 10057c478bd9Sstevel@tonic-gate break; 10067c478bd9Sstevel@tonic-gate 
10077c478bd9Sstevel@tonic-gate case TS_IDLE: 10087c478bd9Sstevel@tonic-gate if ((rstate = _t_adjust_state(fd, T_IDLE)) < 0) { 10097c478bd9Sstevel@tonic-gate sv_errno = errno; 10107c478bd9Sstevel@tonic-gate (void) _t_delete_tilink(fd); 10117c478bd9Sstevel@tonic-gate sig_mutex_unlock(&ntiptr->ti_lock); 10127c478bd9Sstevel@tonic-gate errno = sv_errno; 10137c478bd9Sstevel@tonic-gate return (NULL); 10147c478bd9Sstevel@tonic-gate } 10157c478bd9Sstevel@tonic-gate ntiptr->ti_state = rstate; 10167c478bd9Sstevel@tonic-gate break; 10177c478bd9Sstevel@tonic-gate 10187c478bd9Sstevel@tonic-gate case TS_WRES_CIND: 10197c478bd9Sstevel@tonic-gate ntiptr->ti_state = T_INCON; 10207c478bd9Sstevel@tonic-gate break; 10217c478bd9Sstevel@tonic-gate 10227c478bd9Sstevel@tonic-gate case TS_WCON_CREQ: 10237c478bd9Sstevel@tonic-gate ntiptr->ti_state = T_OUTCON; 10247c478bd9Sstevel@tonic-gate break; 10257c478bd9Sstevel@tonic-gate 10267c478bd9Sstevel@tonic-gate case TS_DATA_XFER: 10277c478bd9Sstevel@tonic-gate if ((rstate = _t_adjust_state(fd, T_DATAXFER)) < 0) { 10287c478bd9Sstevel@tonic-gate sv_errno = errno; 10297c478bd9Sstevel@tonic-gate (void) _t_delete_tilink(fd); 10307c478bd9Sstevel@tonic-gate sig_mutex_unlock(&ntiptr->ti_lock); 10317c478bd9Sstevel@tonic-gate errno = sv_errno; 10327c478bd9Sstevel@tonic-gate return (NULL); 10337c478bd9Sstevel@tonic-gate } 10347c478bd9Sstevel@tonic-gate ntiptr->ti_state = rstate; 10357c478bd9Sstevel@tonic-gate break; 10367c478bd9Sstevel@tonic-gate 10377c478bd9Sstevel@tonic-gate case TS_WIND_ORDREL: 10387c478bd9Sstevel@tonic-gate ntiptr->ti_state = T_OUTREL; 10397c478bd9Sstevel@tonic-gate break; 10407c478bd9Sstevel@tonic-gate 10417c478bd9Sstevel@tonic-gate case TS_WREQ_ORDREL: 10427c478bd9Sstevel@tonic-gate if ((rstate = _t_adjust_state(fd, T_INREL)) < 0) { 10437c478bd9Sstevel@tonic-gate sv_errno = errno; 10447c478bd9Sstevel@tonic-gate (void) _t_delete_tilink(fd); 10457c478bd9Sstevel@tonic-gate sig_mutex_unlock(&ntiptr->ti_lock); 
10467c478bd9Sstevel@tonic-gate errno = sv_errno; 10477c478bd9Sstevel@tonic-gate return (NULL); 10487c478bd9Sstevel@tonic-gate } 10497c478bd9Sstevel@tonic-gate ntiptr->ti_state = rstate; 10507c478bd9Sstevel@tonic-gate break; 10517c478bd9Sstevel@tonic-gate default: 10527c478bd9Sstevel@tonic-gate t_errno = TSTATECHNG; 10537c478bd9Sstevel@tonic-gate (void) _t_delete_tilink(fd); 10547c478bd9Sstevel@tonic-gate sig_mutex_unlock(&ntiptr->ti_lock); 10557c478bd9Sstevel@tonic-gate return (NULL); 10567c478bd9Sstevel@tonic-gate } 10577c478bd9Sstevel@tonic-gate 10587c478bd9Sstevel@tonic-gate /* 10597c478bd9Sstevel@tonic-gate * Sync information with timod. 10607c478bd9Sstevel@tonic-gate */ 10617c478bd9Sstevel@tonic-gate tsrp->tsr_flags = TSRF_QLEN_REQ; 10627c478bd9Sstevel@tonic-gate 10637c478bd9Sstevel@tonic-gate rval = _t_do_ioctl(fd, ioctlbuf, 10647c478bd9Sstevel@tonic-gate (int)sizeof (struct ti_sync_req), TI_SYNC, &retlen); 10657c478bd9Sstevel@tonic-gate expected_acksize = (int)sizeof (struct ti_sync_ack); 10667c478bd9Sstevel@tonic-gate 10677c478bd9Sstevel@tonic-gate if (rval < 0) { 10687c478bd9Sstevel@tonic-gate sv_errno = errno; 10697c478bd9Sstevel@tonic-gate (void) _t_delete_tilink(fd); 10707c478bd9Sstevel@tonic-gate t_errno = TSYSERR; 10717c478bd9Sstevel@tonic-gate sig_mutex_unlock(&ntiptr->ti_lock); 10727c478bd9Sstevel@tonic-gate errno = sv_errno; 10737c478bd9Sstevel@tonic-gate return (NULL); 10747c478bd9Sstevel@tonic-gate } 10757c478bd9Sstevel@tonic-gate 10767c478bd9Sstevel@tonic-gate /* 10777c478bd9Sstevel@tonic-gate * This is a "less than" check as "struct ti_sync_ack" returned by 10787c478bd9Sstevel@tonic-gate * TI_SYNC can grow in size in future kernels. If/when a statically 10797c478bd9Sstevel@tonic-gate * linked application is run on a future kernel, it should not fail. 
10807c478bd9Sstevel@tonic-gate */ 10817c478bd9Sstevel@tonic-gate if (retlen < expected_acksize) { 10827c478bd9Sstevel@tonic-gate sv_errno = errno; 10837c478bd9Sstevel@tonic-gate (void) _t_delete_tilink(fd); 10847c478bd9Sstevel@tonic-gate t_errno = TSYSERR; 10857c478bd9Sstevel@tonic-gate sig_mutex_unlock(&ntiptr->ti_lock); 10867c478bd9Sstevel@tonic-gate errno = sv_errno; 10877c478bd9Sstevel@tonic-gate return (NULL); 10887c478bd9Sstevel@tonic-gate } 10897c478bd9Sstevel@tonic-gate 10907c478bd9Sstevel@tonic-gate if (_T_IS_TLI(api_semantics)) 10917c478bd9Sstevel@tonic-gate tsap->tsa_qlen = 0; /* not needed for TLI */ 10927c478bd9Sstevel@tonic-gate 10937c478bd9Sstevel@tonic-gate ntiptr->ti_qlen = tsap->tsa_qlen; 10947c478bd9Sstevel@tonic-gate sig_mutex_unlock(&ntiptr->ti_lock); 10957c478bd9Sstevel@tonic-gate return (ntiptr); 10967c478bd9Sstevel@tonic-gate } 10977c478bd9Sstevel@tonic-gate 10987c478bd9Sstevel@tonic-gate 10997c478bd9Sstevel@tonic-gate static int 11007c478bd9Sstevel@tonic-gate _t_adjust_state(int fd, int instate) 11017c478bd9Sstevel@tonic-gate { 11027c478bd9Sstevel@tonic-gate char ctlbuf[sizeof (t_scalar_t)]; 11037c478bd9Sstevel@tonic-gate char databuf[sizeof (int)]; /* size unimportant - anything > 0 */ 11047c478bd9Sstevel@tonic-gate struct strpeek arg; 11057c478bd9Sstevel@tonic-gate int outstate, retval; 11067c478bd9Sstevel@tonic-gate 11077c478bd9Sstevel@tonic-gate /* 11087c478bd9Sstevel@tonic-gate * Peek at message on stream head (if any) 11097c478bd9Sstevel@tonic-gate * and see if it is data 11107c478bd9Sstevel@tonic-gate */ 11117c478bd9Sstevel@tonic-gate arg.ctlbuf.buf = ctlbuf; 11127c478bd9Sstevel@tonic-gate arg.ctlbuf.maxlen = (int)sizeof (ctlbuf); 11137c478bd9Sstevel@tonic-gate arg.ctlbuf.len = 0; 11147c478bd9Sstevel@tonic-gate 11157c478bd9Sstevel@tonic-gate arg.databuf.buf = databuf; 11167c478bd9Sstevel@tonic-gate arg.databuf.maxlen = (int)sizeof (databuf); 11177c478bd9Sstevel@tonic-gate arg.databuf.len = 0; 11187c478bd9Sstevel@tonic-gate 
	arg.flags = 0;

	if ((retval = ioctl(fd, I_PEEK, &arg)) < 0) {
		/* I_PEEK itself failed; caller maps this to an error exit */
		t_errno = TSYSERR;
		return (-1);
	}
	outstate = instate;
	/*
	 * If peek shows something at stream head, then
	 * adjust "outstate" based on some heuristics.
	 * (retval == 0 means nothing was waiting; keep instate as-is.)
	 */
	if (retval > 0) {
		switch (instate) {
		case T_IDLE:
			/*
			 * The following heuristic is to handle data
			 * ahead of T_DISCON_IND indications that might
			 * be at the stream head waiting to be
			 * read (T_DATA_IND or M_DATA)
			 */
			if (((arg.ctlbuf.len == 4) &&
			    /* LINTED pointer cast */
			    ((*(int32_t *)arg.ctlbuf.buf) == T_DATA_IND)) ||
			    ((arg.ctlbuf.len == 0) && arg.databuf.len)) {
				outstate = T_DATAXFER;
			}
			break;
		case T_DATAXFER:
			/*
			 * The following heuristic is to handle
			 * the case where the connection is established
			 * and in data transfer state at the provider
			 * but the T_CONN_CON has not yet been read
			 * from the stream head.
			 */
			if ((arg.ctlbuf.len == 4) &&
			    /* LINTED pointer cast */
			    ((*(int32_t *)arg.ctlbuf.buf) == T_CONN_CON))
				outstate = T_OUTCON;
			break;
		case T_INREL:
			/*
			 * The following heuristic is to handle data
			 * ahead of T_ORDREL_IND indications that might
			 * be at the stream head waiting to be
			 * read (T_DATA_IND or M_DATA)
			 */
			if (((arg.ctlbuf.len == 4) &&
			    /* LINTED pointer cast */
			    ((*(int32_t *)arg.ctlbuf.buf) == T_DATA_IND)) ||
			    ((arg.ctlbuf.len == 0) && arg.databuf.len)) {
				outstate = T_DATAXFER;
			}
			break;
		default:
			break;
		}
	}
	return (outstate);
}

/*
 * Allocate a control buffer of the instance's default control-buffer
 * size (ti_ctlsize) into *retbuf.
 * Returns the buffer size on success, -1 on allocation failure.
 * Assumes caller has blocked signals at least in this thread (for safe
 * malloc/free operations)
 */
static int
_t_cbuf_alloc(struct _ti_user *tiptr, char **retbuf)
{
	unsigned size2;

	assert(MUTEX_HELD(&tiptr->ti_lock));
	size2 = tiptr->ti_ctlsize; /* same size as default ctlbuf */

	if ((*retbuf = malloc(size2)) == NULL) {
		return (-1);
	}
	return (size2);
}


/*
 * Allocate a receive buffer of the instance's default receive-buffer
 * size (ti_rcvsize) into *retbuf.
 * Returns the buffer size on success, -1 on allocation failure.
 * Assumes caller has blocked signals at least in this thread (for safe
 * malloc/free operations)
 */
int
_t_rbuf_alloc(struct _ti_user *tiptr, char **retbuf)
{
	unsigned size1;

	assert(MUTEX_HELD(&tiptr->ti_lock));
	size1 = tiptr->ti_rcvsize; /* same size as default rcvbuf */

	if ((*retbuf = malloc(size1)) == NULL) {
		return (-1);
	}
	return (size1);
}

/*
 * Free lookbuffer structures and associated resources.
 * The embedded head node of the list (part of *tiptr) is not freed,
 * only its buffers; the dynamically allocated tail nodes are freed
 * along with their buffers.
 * Assumes ti_lock held for MT case.
 */
static void
_t_free_lookbufs(struct _ti_user *tiptr)
{
	struct _ti_lookbufs *tlbs, *prev_tlbs, *head_tlbs;

	/*
	 * Assertion:
	 * The structure lock should be held or the global list
	 * manipulation lock. The assumption is that nothing
	 * else can access the descriptor since global list manipulation
	 * lock is held so it is OK to manipulate fields without the
	 * structure lock
	 */
	assert(MUTEX_HELD(&tiptr->ti_lock) || MUTEX_HELD(&_ti_userlock));

	/*
	 * Free only the buffers in the first lookbuf
	 * (the node itself is embedded in the instance structure)
	 */
	head_tlbs = &tiptr->ti_lookbufs;
	if (head_tlbs->tl_lookdbuf != NULL) {
		free(head_tlbs->tl_lookdbuf);
		head_tlbs->tl_lookdbuf = NULL;
	}
	free(head_tlbs->tl_lookcbuf);
	head_tlbs->tl_lookcbuf = NULL;
	/*
	 * Free the node and the buffers in the rest of the
	 * list
	 */

	tlbs = head_tlbs->tl_next;
	head_tlbs->tl_next = NULL;

	while (tlbs != NULL) {
		if (tlbs->tl_lookdbuf != NULL)
			free(tlbs->tl_lookdbuf);
		free(tlbs->tl_lookcbuf);
		prev_tlbs = tlbs;
		tlbs = tlbs->tl_next;
		free(prev_tlbs);
	}
}

/*
 * Free lookbuffer event list head.
/*
 * Consume current lookbuffer event.
 * Assumes ti_lock held for MT case.
 * Note: The head of this list is part of the instance
 * structure so the code is a little unorthodox: the head node is
 * never freed; instead the next node's contents are copied into it.
 */
void
_t_free_looklist_head(struct _ti_user *tiptr)
{
	struct _ti_lookbufs *tlbs, *next_tlbs;

	tlbs = &tiptr->ti_lookbufs;

	if (tlbs->tl_next) {
		/*
		 * Free the control and data buffers
		 */
		if (tlbs->tl_lookdbuf != NULL)
			free(tlbs->tl_lookdbuf);
		free(tlbs->tl_lookcbuf);
		/*
		 * Replace with next lookbuf event contents
		 */
		next_tlbs = tlbs->tl_next;
		tlbs->tl_next = next_tlbs->tl_next;
		tlbs->tl_lookcbuf = next_tlbs->tl_lookcbuf;
		tlbs->tl_lookclen = next_tlbs->tl_lookclen;
		tlbs->tl_lookdbuf = next_tlbs->tl_lookdbuf;
		tlbs->tl_lookdlen = next_tlbs->tl_lookdlen;
		free(next_tlbs);
		/*
		 * Decrement the count - should never get to zero
		 * in this path (another event remains queued).
		 */
		tiptr->ti_lookcnt--;
		assert(tiptr->ti_lookcnt > 0);
	} else {
		/*
		 * No more look buffer events - just clear the flag
		 * and leave the buffers alone
		 */
		assert(tiptr->ti_lookcnt == 1);
		tiptr->ti_lookcnt = 0;
	}
}

/*
 * Discard lookbuffer events.
 * Assumes ti_lock held for MT case.
 */
void
_t_flush_lookevents(struct _ti_user *tiptr)
{
	struct _ti_lookbufs *tlbs, *prev_tlbs;

	/*
	 * Leave the first nodes buffers alone (i.e. allocated)
	 * but reset the flag.
	 */
	assert(MUTEX_HELD(&tiptr->ti_lock));
	tiptr->ti_lookcnt = 0;
	/*
	 * Blow away the rest of the list
	 */
	tlbs = tiptr->ti_lookbufs.tl_next;
	tiptr->ti_lookbufs.tl_next = NULL;
	while (tlbs != NULL) {
		if (tlbs->tl_lookdbuf != NULL)
			free(tlbs->tl_lookdbuf);
		free(tlbs->tl_lookcbuf);
		prev_tlbs = tlbs;
		tlbs = tlbs->tl_next;
		free(prev_tlbs);
	}
}


/*
 * This routine checks if the control buffer in the instance structure
 * is available (non-null). If it is, the buffer is acquired and marked busy
 * (null). If it is busy (possible in MT programs), it allocates a new
 * buffer and sets a flag indicating new memory was allocated and the caller
 * has to free it.
 */
13487c478bd9Sstevel@tonic-gate */ 13497c478bd9Sstevel@tonic-gate int 13507c478bd9Sstevel@tonic-gate _t_acquire_ctlbuf( 13517c478bd9Sstevel@tonic-gate struct _ti_user *tiptr, 13527c478bd9Sstevel@tonic-gate struct strbuf *ctlbufp, 13537c478bd9Sstevel@tonic-gate int *didallocp) 13547c478bd9Sstevel@tonic-gate { 13557c478bd9Sstevel@tonic-gate *didallocp = 0; 13567c478bd9Sstevel@tonic-gate 13577c478bd9Sstevel@tonic-gate ctlbufp->len = 0; 13587c478bd9Sstevel@tonic-gate if (tiptr->ti_ctlbuf) { 13597c478bd9Sstevel@tonic-gate ctlbufp->buf = tiptr->ti_ctlbuf; 13607c478bd9Sstevel@tonic-gate tiptr->ti_ctlbuf = NULL; 13617c478bd9Sstevel@tonic-gate ctlbufp->maxlen = tiptr->ti_ctlsize; 13627c478bd9Sstevel@tonic-gate } else { 13637c478bd9Sstevel@tonic-gate /* 13647c478bd9Sstevel@tonic-gate * tiptr->ti_ctlbuf is in use 13657c478bd9Sstevel@tonic-gate * allocate new buffer and free after use. 13667c478bd9Sstevel@tonic-gate */ 13677c478bd9Sstevel@tonic-gate if ((ctlbufp->maxlen = _t_cbuf_alloc(tiptr, 13687c478bd9Sstevel@tonic-gate &ctlbufp->buf)) < 0) { 13697c478bd9Sstevel@tonic-gate t_errno = TSYSERR; 13707c478bd9Sstevel@tonic-gate return (-1); 13717c478bd9Sstevel@tonic-gate } 13727c478bd9Sstevel@tonic-gate *didallocp = 1; 13737c478bd9Sstevel@tonic-gate } 13747c478bd9Sstevel@tonic-gate return (0); 13757c478bd9Sstevel@tonic-gate } 13767c478bd9Sstevel@tonic-gate 13777c478bd9Sstevel@tonic-gate /* 13787c478bd9Sstevel@tonic-gate * This routine checks if the receive buffer in the instance structure 13797c478bd9Sstevel@tonic-gate * is available (non-null). If it is, the buffer is acquired and marked busy 13807c478bd9Sstevel@tonic-gate * (null). If it is busy (possible in MT programs), it allocates a new 13817c478bd9Sstevel@tonic-gate * buffer and sets a flag indicating new memory was allocated and the caller 13827c478bd9Sstevel@tonic-gate * has to free it. 
/*
 * Note: The receive buffer pointer can also be null if the transport
 * provider does not support connect/disconnect data, (e.g. TCP) - not
 * just when it is "busy". In that case, ti_rcvsize will be 0 and that is
 * used to instantiate the databuf which points to a null buffer of
 * length 0 which is the right thing to do for that case.
 */
int
_t_acquire_databuf(
	struct _ti_user *tiptr,
	struct strbuf *databufp,
	int *didallocp)
{
	*didallocp = 0;

	databufp->len = 0;
	if (tiptr->ti_rcvbuf) {
		/* cached buffer free: take ownership and mark it busy */
		assert(tiptr->ti_rcvsize != 0);
		databufp->buf = tiptr->ti_rcvbuf;
		tiptr->ti_rcvbuf = NULL;
		databufp->maxlen = tiptr->ti_rcvsize;
	} else if (tiptr->ti_rcvsize == 0) {
		/* transport has no connect/disconnect data (see Note above) */
		databufp->buf = NULL;
		databufp->maxlen = 0;
	} else {
		/*
		 * tiptr->ti_rcvbuf is in use
		 * allocate new buffer and free after use.
		 */
		if ((databufp->maxlen = _t_rbuf_alloc(tiptr,
		    &databufp->buf)) < 0) {
			t_errno = TSYSERR;
			return (-1);
		}
		*didallocp = 1;
	}
	return (0);
}

/*
 * This routine requests timod to look for any expedited data
 * queued in the "receive buffers" in the kernel. Used for XTI
 * t_look() semantics for transports that send expedited data
 * inline (e.g TCP).
 * Returns -1 for failure
 * Returns 0 for success
 * On a successful return, the location pointed by "expedited_queuedp"
 * contains
 * 0 if no expedited data is found queued in "receive buffers"
 * 1 if expedited data is found queued in "receive buffers"
 */

int
_t_expinline_queued(int fd, int *expedited_queuedp)
{
	union {
		struct ti_sync_req ti_req;
		struct ti_sync_ack ti_ack;
		char pad[128];
	} ioctl_data;
	void *ioctlbuf = &ioctl_data; /* for TI_SYNC with room to grow */
			/* preferred location first local variable */
			/* see note in _t_create above */
	/* req and ack deliberately alias the same buffer (TI_SYNC ABI) */
	struct ti_sync_req *tsrp = (struct ti_sync_req *)ioctlbuf;
	struct ti_sync_ack *tsap = (struct ti_sync_ack *)ioctlbuf;
	int rval, retlen;

	*expedited_queuedp = 0;
	/* request info on rq expinds  */
	tsrp->tsr_flags = TSRF_IS_EXP_IN_RCVBUF;
	do {
		/*
		 * NOTE(review): request length uses sizeof (struct
		 * T_info_req); presumably this matches the ti_sync_req
		 * size required by TI_SYNC - confirm against timod.
		 */
		rval = _t_do_ioctl(fd, ioctlbuf,
		    (int)sizeof (struct T_info_req), TI_SYNC, &retlen);
	} while (rval < 0 && errno == EINTR);

	if (rval < 0)
		return (-1);

	/*
	 * This is a "less than" check as "struct ti_sync_ack" returned by
	 * TI_SYNC can grow in size in future kernels. If/when a statically
	 * linked application is run on a future kernel, it should not fail.
	 */
	if (retlen < (int)sizeof (struct ti_sync_ack)) {
		t_errno = TSYSERR;
		errno = EIO;
		return (-1);
	}
	if (tsap->tsa_flags & TSAF_EXP_QUEUED)
		*expedited_queuedp = 1;
	return (0);
}

/*
 * Support functions for use by functions that do scatter/gather
 * like t_sndv(), t_rcvv() etc. follow below.
 */
14787c478bd9Sstevel@tonic-gate */ 14797c478bd9Sstevel@tonic-gate 14807c478bd9Sstevel@tonic-gate /* 14817c478bd9Sstevel@tonic-gate * _t_bytecount_upto_intmax() : 14827c478bd9Sstevel@tonic-gate * Sum of the lengths of the individual buffers in 14837c478bd9Sstevel@tonic-gate * the t_iovec array. If the sum exceeds INT_MAX 14847c478bd9Sstevel@tonic-gate * it is truncated to INT_MAX. 14857c478bd9Sstevel@tonic-gate */ 14867c478bd9Sstevel@tonic-gate unsigned int 14877c478bd9Sstevel@tonic-gate _t_bytecount_upto_intmax(const struct t_iovec *tiov, unsigned int tiovcount) 14887c478bd9Sstevel@tonic-gate { 14897c478bd9Sstevel@tonic-gate size_t nbytes; 14907c478bd9Sstevel@tonic-gate int i; 14917c478bd9Sstevel@tonic-gate 14927c478bd9Sstevel@tonic-gate nbytes = 0; 14937c478bd9Sstevel@tonic-gate for (i = 0; i < tiovcount && nbytes < INT_MAX; i++) { 14947c478bd9Sstevel@tonic-gate if (tiov[i].iov_len >= INT_MAX) { 14957c478bd9Sstevel@tonic-gate nbytes = INT_MAX; 14967c478bd9Sstevel@tonic-gate break; 14977c478bd9Sstevel@tonic-gate } 14987c478bd9Sstevel@tonic-gate nbytes += tiov[i].iov_len; 14997c478bd9Sstevel@tonic-gate } 15007c478bd9Sstevel@tonic-gate 15017c478bd9Sstevel@tonic-gate if (nbytes > INT_MAX) 15027c478bd9Sstevel@tonic-gate nbytes = INT_MAX; 15037c478bd9Sstevel@tonic-gate 15047c478bd9Sstevel@tonic-gate return ((unsigned int)nbytes); 15057c478bd9Sstevel@tonic-gate } 15067c478bd9Sstevel@tonic-gate 15077c478bd9Sstevel@tonic-gate /* 15087c478bd9Sstevel@tonic-gate * Gather the data in the t_iovec buffers, into a single linear buffer 15097c478bd9Sstevel@tonic-gate * starting at dataptr. Caller must have allocated sufficient space 15107c478bd9Sstevel@tonic-gate * starting at dataptr. The total amount of data that is gathered is 15117c478bd9Sstevel@tonic-gate * limited to INT_MAX. Any remaining data in the t_iovec buffers is 15127c478bd9Sstevel@tonic-gate * not copied. 
15137c478bd9Sstevel@tonic-gate */ 15147c478bd9Sstevel@tonic-gate void 15157c478bd9Sstevel@tonic-gate _t_gather(char *dataptr, const struct t_iovec *tiov, unsigned int tiovcount) 15167c478bd9Sstevel@tonic-gate { 15177c478bd9Sstevel@tonic-gate char *curptr; 15187c478bd9Sstevel@tonic-gate unsigned int cur_count; 15197c478bd9Sstevel@tonic-gate unsigned int nbytes_remaining; 15207c478bd9Sstevel@tonic-gate int i; 15217c478bd9Sstevel@tonic-gate 15227c478bd9Sstevel@tonic-gate curptr = dataptr; 15237c478bd9Sstevel@tonic-gate cur_count = 0; 15247c478bd9Sstevel@tonic-gate 15257c478bd9Sstevel@tonic-gate nbytes_remaining = _t_bytecount_upto_intmax(tiov, tiovcount); 15267c478bd9Sstevel@tonic-gate for (i = 0; i < tiovcount && nbytes_remaining != 0; i++) { 15277c478bd9Sstevel@tonic-gate if (tiov[i].iov_len <= nbytes_remaining) 15287c478bd9Sstevel@tonic-gate cur_count = (int)tiov[i].iov_len; 15297c478bd9Sstevel@tonic-gate else 15307c478bd9Sstevel@tonic-gate cur_count = nbytes_remaining; 15317c478bd9Sstevel@tonic-gate (void) memcpy(curptr, tiov[i].iov_base, cur_count); 15327c478bd9Sstevel@tonic-gate curptr += cur_count; 15337c478bd9Sstevel@tonic-gate nbytes_remaining -= cur_count; 15347c478bd9Sstevel@tonic-gate } 15357c478bd9Sstevel@tonic-gate } 15367c478bd9Sstevel@tonic-gate 15377c478bd9Sstevel@tonic-gate /* 15387c478bd9Sstevel@tonic-gate * Scatter the data from the single linear buffer at pdatabuf->buf into 15397c478bd9Sstevel@tonic-gate * the t_iovec buffers. 
15407c478bd9Sstevel@tonic-gate */ 15417c478bd9Sstevel@tonic-gate void 15427c478bd9Sstevel@tonic-gate _t_scatter(struct strbuf *pdatabuf, struct t_iovec *tiov, int tiovcount) 15437c478bd9Sstevel@tonic-gate { 15447c478bd9Sstevel@tonic-gate char *curptr; 15457c478bd9Sstevel@tonic-gate unsigned int nbytes_remaining; 15467c478bd9Sstevel@tonic-gate unsigned int curlen; 15477c478bd9Sstevel@tonic-gate int i; 15487c478bd9Sstevel@tonic-gate 15497c478bd9Sstevel@tonic-gate /* 15507c478bd9Sstevel@tonic-gate * There cannot be any uncopied data leftover in pdatabuf 15517c478bd9Sstevel@tonic-gate * at the conclusion of this function. (asserted below) 15527c478bd9Sstevel@tonic-gate */ 15537c478bd9Sstevel@tonic-gate assert(pdatabuf->len <= _t_bytecount_upto_intmax(tiov, tiovcount)); 15547c478bd9Sstevel@tonic-gate curptr = pdatabuf->buf; 15557c478bd9Sstevel@tonic-gate nbytes_remaining = pdatabuf->len; 15567c478bd9Sstevel@tonic-gate for (i = 0; i < tiovcount && nbytes_remaining != 0; i++) { 15577c478bd9Sstevel@tonic-gate if (tiov[i].iov_len < nbytes_remaining) 15587c478bd9Sstevel@tonic-gate curlen = (unsigned int)tiov[i].iov_len; 15597c478bd9Sstevel@tonic-gate else 15607c478bd9Sstevel@tonic-gate curlen = nbytes_remaining; 15617c478bd9Sstevel@tonic-gate (void) memcpy(tiov[i].iov_base, curptr, curlen); 15627c478bd9Sstevel@tonic-gate curptr += curlen; 15637c478bd9Sstevel@tonic-gate nbytes_remaining -= curlen; 15647c478bd9Sstevel@tonic-gate } 15657c478bd9Sstevel@tonic-gate } 15667c478bd9Sstevel@tonic-gate 15677c478bd9Sstevel@tonic-gate /* 15687c478bd9Sstevel@tonic-gate * Adjust the iovec array, for subsequent use. Examine each element in the 15697c478bd9Sstevel@tonic-gate * iovec array,and zero out the iov_len if the buffer was sent fully. 15707c478bd9Sstevel@tonic-gate * otherwise the buffer was only partially sent, so adjust both iov_len and 15717c478bd9Sstevel@tonic-gate * iov_base. 
/* (see block comment above) */

/*
 * Consume "bytes_sent" bytes from the front of the iovec array:
 * fully-sent buffers get iov_len zeroed; a partially-sent buffer has
 * its iov_base advanced and iov_len reduced, so the array can be
 * passed to a subsequent writev()-style call.
 *
 * Fix: the original adjusted iov[i] unconditionally after the loop.
 * When every buffer was sent completely the loop exits with
 * i == *iovcountp, and that access reads/writes one element past the
 * end of the array (out of bounds).  The adjustment is now guarded.
 */
void
_t_adjust_iov(int bytes_sent, struct iovec *iov, int *iovcountp)
{
	int i;

	/* zero out the entries that were transmitted in full */
	for (i = 0; i < *iovcountp && bytes_sent; i++) {
		if (iov[i].iov_len == 0)
			continue;
		if (bytes_sent < iov[i].iov_len)
			break;
		bytes_sent -= iov[i].iov_len;
		iov[i].iov_len = 0;
	}
	/*
	 * Partially-sent entry (if any): advance its base and shrink its
	 * length.  The i < *iovcountp guard prevents the out-of-bounds
	 * access of the original code; the (char *) cast avoids arithmetic
	 * on void * (non-portable) where iov_base is not declared caddr_t.
	 */
	if (i < *iovcountp && bytes_sent > 0) {
		iov[i].iov_len -= bytes_sent;
		iov[i].iov_base = (char *)iov[i].iov_base + bytes_sent;
	}
}

/*
 * Copy the t_iovec array to the iovec array while taking care to see
 * that the sum of the buffer lengths in the result is not more than
 * INT_MAX. This function requires that T_IOV_MAX is no larger than
 * IOV_MAX. Otherwise the resulting array is not a suitable input to
 * writev(). If the sum of the lengths in t_iovec is zero, so is the
 * resulting iovec.
 */
/* (see block comment above) */
void
_t_copy_tiov_to_iov(const struct t_iovec *tiov, int tiovcount,
    struct iovec *iov, int *iovcountp)
{
	int i;
	unsigned int nbytes_remaining;

	/* budget: total bytes to expose through the result, capped at INT_MAX */
	nbytes_remaining = _t_bytecount_upto_intmax(tiov, tiovcount);
	i = 0;
	/*
	 * do-while: even when the budget is zero, one zero-length entry is
	 * emitted so the result is a valid (if empty) writev() input.
	 */
	do {
		iov[i].iov_base = tiov[i].iov_base;
		if (tiov[i].iov_len > nbytes_remaining)
			iov[i].iov_len = nbytes_remaining;
		else
			iov[i].iov_len = tiov[i].iov_len;
		nbytes_remaining -= iov[i].iov_len;
		i++;
	} while (nbytes_remaining != 0 && i < tiovcount);

	/* number of entries actually populated in iov[] */
	*iovcountp = i;
}

/*
 * Routine called after connection establishment on transports where
 * connection establishment changes certain transport attributes such as
 * TIDU_size
 */
int
_t_do_postconn_sync(int fd, struct _ti_user *tiptr)
{
	union {
		struct T_capability_req tc_req;
		struct T_capability_ack tc_ack;
	} ioctl_data;

	void *ioctlbuf = &ioctl_data;
	int expected_acksize;
	int retlen, rval;
	/* req and ack deliberately alias the same buffer (TPI ioctl ABI) */
	struct T_capability_req *tc_reqp = (struct T_capability_req *)ioctlbuf;
	struct T_capability_ack *tc_ackp = (struct T_capability_ack *)ioctlbuf;
	struct T_info_ack *tiap;

	/*
	 * This T_CAPABILITY_REQ should not fail, even if it is unsupported
	 * by the transport provider. timod will emulate it in that case.
	 */
	tc_reqp->PRIM_type = T_CAPABILITY_REQ;
	tc_reqp->CAP_bits1 = TC1_INFO;
	rval = _t_do_ioctl(fd, (char *)ioctlbuf,
	    (int)sizeof (struct T_capability_ack), TI_CAPABILITY, &retlen);
	expected_acksize = (int)sizeof (struct T_capability_ack);

	if (rval < 0)
		return (-1);

	/*
	 * T_capability TPI messages are extensible and can grow in future.
	 * However timod will take care of returning no more information
	 * than what was requested, and truncating the "extended"
	 * information towards the end of the T_capability_ack, if necessary.
	 */
	if (retlen != expected_acksize) {
		t_errno = TSYSERR;
		errno = EIO;
		return (-1);
	}

	/*
	 * The T_info_ack part of the T_capability_ack is guaranteed to be
	 * present only if the corresponding TC1_INFO bit is set
	 */
	if ((tc_ackp->CAP_bits1 & TC1_INFO) == 0) {
		t_errno = TSYSERR;
		errno = EPROTO;
		return (-1);
	}

	tiap = &tc_ackp->INFO_ack;
	if (tiap->PRIM_type != T_INFO_ACK) {
		t_errno = TSYSERR;
		errno = EPROTO;
		return (-1);
	}

	/*
	 * Note: Sync with latest information returned in
	 * "struct T_info_ack", but we deliberately do not sync the state
	 * here as user level state construction here is not required,
	 * only update of attributes which may have changed because of
	 * negotiations during connection establishment.
	 */
	assert(tiap->TIDU_size > 0);
	tiptr->ti_maxpsz = tiap->TIDU_size;
	assert(tiap->TSDU_size >= T_INVALID);
	tiptr->ti_tsdusize = tiap->TSDU_size;
	assert(tiap->ETSDU_size >= T_INVALID);
	tiptr->ti_etsdusize = tiap->ETSDU_size;
	assert(tiap->CDATA_size >= T_INVALID);
	tiptr->ti_cdatasize = tiap->CDATA_size;
	assert(tiap->DDATA_size >= T_INVALID);
	tiptr->ti_ddatasize = tiap->DDATA_size;
	tiptr->ti_prov_flag = tiap->PROVIDER_flag;

	return (0);
}