17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 545916cd2Sjpk * Common Development and Distribution License (the "License"). 645916cd2Sjpk * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 21*cf98b944SMarcel Telka 22*cf98b944SMarcel Telka /* 23*cf98b944SMarcel Telka * Copyright 2015 Nexenta Systems, Inc. All rights reserved. 24*cf98b944SMarcel Telka */ 25*cf98b944SMarcel Telka 267c478bd9Sstevel@tonic-gate /* 2751e44b2bSDai Ngo * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 287c478bd9Sstevel@tonic-gate * Use is subject to license terms. 
297c478bd9Sstevel@tonic-gate */ 307c478bd9Sstevel@tonic-gate 317c478bd9Sstevel@tonic-gate /* 327c478bd9Sstevel@tonic-gate * Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T 337c478bd9Sstevel@tonic-gate * All Rights Reserved 347c478bd9Sstevel@tonic-gate */ 357c478bd9Sstevel@tonic-gate 367c478bd9Sstevel@tonic-gate /* 377c478bd9Sstevel@tonic-gate * Portions of this source code were derived from Berkeley 4.3 BSD 387c478bd9Sstevel@tonic-gate * under license from the Regents of the University of California. 397c478bd9Sstevel@tonic-gate */ 407c478bd9Sstevel@tonic-gate 417c478bd9Sstevel@tonic-gate 427c478bd9Sstevel@tonic-gate /* 437c478bd9Sstevel@tonic-gate * Implements a kernel based, client side RPC. 447c478bd9Sstevel@tonic-gate */ 457c478bd9Sstevel@tonic-gate 467c478bd9Sstevel@tonic-gate #include <sys/param.h> 477c478bd9Sstevel@tonic-gate #include <sys/types.h> 487c478bd9Sstevel@tonic-gate #include <sys/systm.h> 497c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h> 507c478bd9Sstevel@tonic-gate #include <sys/stream.h> 517c478bd9Sstevel@tonic-gate #include <sys/strsubr.h> 527c478bd9Sstevel@tonic-gate #include <sys/ddi.h> 537c478bd9Sstevel@tonic-gate #include <sys/tiuser.h> 547c478bd9Sstevel@tonic-gate #include <sys/tihdr.h> 557c478bd9Sstevel@tonic-gate #include <sys/t_kuser.h> 567c478bd9Sstevel@tonic-gate #include <sys/errno.h> 577c478bd9Sstevel@tonic-gate #include <sys/kmem.h> 587c478bd9Sstevel@tonic-gate #include <sys/debug.h> 597c478bd9Sstevel@tonic-gate #include <sys/kstat.h> 607c478bd9Sstevel@tonic-gate #include <sys/t_lock.h> 617c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 627c478bd9Sstevel@tonic-gate #include <sys/conf.h> 637c478bd9Sstevel@tonic-gate #include <sys/disp.h> 647c478bd9Sstevel@tonic-gate #include <sys/taskq.h> 657c478bd9Sstevel@tonic-gate #include <sys/list.h> 667c478bd9Sstevel@tonic-gate #include <sys/atomic.h> 677c478bd9Sstevel@tonic-gate #include <sys/zone.h> 687c478bd9Sstevel@tonic-gate #include <netinet/in.h> 
697c478bd9Sstevel@tonic-gate #include <rpc/types.h> 707c478bd9Sstevel@tonic-gate #include <rpc/xdr.h> 717c478bd9Sstevel@tonic-gate #include <rpc/auth.h> 727c478bd9Sstevel@tonic-gate #include <rpc/clnt.h> 737c478bd9Sstevel@tonic-gate #include <rpc/rpc_msg.h> 747c478bd9Sstevel@tonic-gate 7551e44b2bSDai Ngo #include <sys/sdt.h> 7651e44b2bSDai Ngo 777c478bd9Sstevel@tonic-gate static enum clnt_stat clnt_clts_kcallit(CLIENT *, rpcproc_t, xdrproc_t, 787c478bd9Sstevel@tonic-gate caddr_t, xdrproc_t, caddr_t, struct timeval); 797c478bd9Sstevel@tonic-gate static void clnt_clts_kabort(CLIENT *); 807c478bd9Sstevel@tonic-gate static void clnt_clts_kerror(CLIENT *, struct rpc_err *); 817c478bd9Sstevel@tonic-gate static bool_t clnt_clts_kfreeres(CLIENT *, xdrproc_t, caddr_t); 827c478bd9Sstevel@tonic-gate static bool_t clnt_clts_kcontrol(CLIENT *, int, char *); 837c478bd9Sstevel@tonic-gate static void clnt_clts_kdestroy(CLIENT *); 847c478bd9Sstevel@tonic-gate static int clnt_clts_ksettimers(CLIENT *, struct rpc_timers *, 857c478bd9Sstevel@tonic-gate struct rpc_timers *, int, void (*)(), caddr_t, uint32_t); 867c478bd9Sstevel@tonic-gate 877c478bd9Sstevel@tonic-gate /* 887c478bd9Sstevel@tonic-gate * Operations vector for CLTS based RPC 897c478bd9Sstevel@tonic-gate */ 907c478bd9Sstevel@tonic-gate static struct clnt_ops clts_ops = { 917c478bd9Sstevel@tonic-gate clnt_clts_kcallit, /* do rpc call */ 927c478bd9Sstevel@tonic-gate clnt_clts_kabort, /* abort call */ 937c478bd9Sstevel@tonic-gate clnt_clts_kerror, /* return error status */ 947c478bd9Sstevel@tonic-gate clnt_clts_kfreeres, /* free results */ 957c478bd9Sstevel@tonic-gate clnt_clts_kdestroy, /* destroy rpc handle */ 967c478bd9Sstevel@tonic-gate clnt_clts_kcontrol, /* the ioctl() of rpc */ 977c478bd9Sstevel@tonic-gate clnt_clts_ksettimers /* set retry timers */ 987c478bd9Sstevel@tonic-gate }; 997c478bd9Sstevel@tonic-gate 1007c478bd9Sstevel@tonic-gate /* 1017c478bd9Sstevel@tonic-gate * Endpoint for CLTS (INET, INET6, loopback, etc.) 
1027c478bd9Sstevel@tonic-gate */ 1037c478bd9Sstevel@tonic-gate typedef struct endpnt_type { 1047c478bd9Sstevel@tonic-gate struct endpnt_type *e_next; /* pointer to next endpoint type */ 1057c478bd9Sstevel@tonic-gate list_t e_pool; /* list of available endpoints */ 10651e44b2bSDai Ngo list_t e_ilist; /* list of idle endpoints */ 1077c478bd9Sstevel@tonic-gate struct endpnt *e_pcurr; /* pointer to current endpoint */ 1087c478bd9Sstevel@tonic-gate char e_protofmly[KNC_STRSIZE]; /* protocol family */ 1097c478bd9Sstevel@tonic-gate dev_t e_rdev; /* device */ 1107c478bd9Sstevel@tonic-gate kmutex_t e_plock; /* pool lock */ 1117c478bd9Sstevel@tonic-gate kmutex_t e_ilock; /* idle list lock */ 1127c478bd9Sstevel@tonic-gate timeout_id_t e_itimer; /* timer to dispatch the taskq */ 1137c478bd9Sstevel@tonic-gate uint_t e_cnt; /* number of endpoints in the pool */ 1147c478bd9Sstevel@tonic-gate zoneid_t e_zoneid; /* zoneid of endpoint type */ 1157c478bd9Sstevel@tonic-gate kcondvar_t e_async_cv; /* cv for asynchronous reap threads */ 1167c478bd9Sstevel@tonic-gate uint_t e_async_count; /* count of asynchronous reap threads */ 1177c478bd9Sstevel@tonic-gate } endpnt_type_t; 1187c478bd9Sstevel@tonic-gate 1197c478bd9Sstevel@tonic-gate typedef struct endpnt { 1207c478bd9Sstevel@tonic-gate list_node_t e_node; /* link to the pool */ 1217c478bd9Sstevel@tonic-gate list_node_t e_idle; /* link to the idle list */ 1227c478bd9Sstevel@tonic-gate endpnt_type_t *e_type; /* back pointer to endpoint type */ 1237c478bd9Sstevel@tonic-gate TIUSER *e_tiptr; /* pointer to transport endpoint */ 1247c478bd9Sstevel@tonic-gate queue_t *e_wq; /* write queue */ 1257c478bd9Sstevel@tonic-gate uint_t e_flags; /* endpoint flags */ 1267c478bd9Sstevel@tonic-gate uint_t e_ref; /* ref count on endpoint */ 1277c478bd9Sstevel@tonic-gate kcondvar_t e_cv; /* condition variable */ 1287c478bd9Sstevel@tonic-gate kmutex_t e_lock; /* protects cv and flags */ 1297c478bd9Sstevel@tonic-gate time_t e_itime; /* time when rele'd */ 
1307c478bd9Sstevel@tonic-gate } endpnt_t; 1317c478bd9Sstevel@tonic-gate 1327c478bd9Sstevel@tonic-gate #define ENDPNT_ESTABLISHED 0x1 /* endpoint is established */ 1337c478bd9Sstevel@tonic-gate #define ENDPNT_WAITING 0x2 /* thread waiting for endpoint */ 1347c478bd9Sstevel@tonic-gate #define ENDPNT_BOUND 0x4 /* endpoint is bound */ 1357c478bd9Sstevel@tonic-gate #define ENDPNT_STALE 0x8 /* endpoint is dead */ 1367c478bd9Sstevel@tonic-gate #define ENDPNT_ONIDLE 0x10 /* endpoint is on the idle list */ 1377c478bd9Sstevel@tonic-gate 1387c478bd9Sstevel@tonic-gate static krwlock_t endpnt_type_lock; /* protects endpnt_type_list */ 1397c478bd9Sstevel@tonic-gate static endpnt_type_t *endpnt_type_list = NULL; /* list of CLTS endpoints */ 1407c478bd9Sstevel@tonic-gate static struct kmem_cache *endpnt_cache; /* cache of endpnt_t's */ 1417c478bd9Sstevel@tonic-gate static taskq_t *endpnt_taskq; /* endpnt_t reaper thread */ 1427c478bd9Sstevel@tonic-gate static bool_t taskq_created; /* flag for endpnt_taskq */ 1437c478bd9Sstevel@tonic-gate static kmutex_t endpnt_taskq_lock; /* taskq lock */ 1447c478bd9Sstevel@tonic-gate static zone_key_t endpnt_destructor_key; 1457c478bd9Sstevel@tonic-gate 1467c478bd9Sstevel@tonic-gate #define DEFAULT_ENDPOINT_REAP_INTERVAL 60 /* 1 minute */ 1477c478bd9Sstevel@tonic-gate #define DEFAULT_INTERVAL_SHIFT 30 /* 30 seconds */ 1487c478bd9Sstevel@tonic-gate 1497c478bd9Sstevel@tonic-gate /* 1507c478bd9Sstevel@tonic-gate * Endpoint tunables 1517c478bd9Sstevel@tonic-gate */ 1527c478bd9Sstevel@tonic-gate static int clnt_clts_max_endpoints = -1; 1537c478bd9Sstevel@tonic-gate static int clnt_clts_hash_size = DEFAULT_HASH_SIZE; 1547c478bd9Sstevel@tonic-gate static time_t clnt_clts_endpoint_reap_interval = -1; 1557c478bd9Sstevel@tonic-gate static clock_t clnt_clts_taskq_dispatch_interval; 1567c478bd9Sstevel@tonic-gate 1577c478bd9Sstevel@tonic-gate /* 1587c478bd9Sstevel@tonic-gate * Response completion hash queue 1597c478bd9Sstevel@tonic-gate */ 
1607c478bd9Sstevel@tonic-gate static call_table_t *clts_call_ht; 1617c478bd9Sstevel@tonic-gate 1627c478bd9Sstevel@tonic-gate /* 1637c478bd9Sstevel@tonic-gate * Routines for the endpoint manager 1647c478bd9Sstevel@tonic-gate */ 1657c478bd9Sstevel@tonic-gate static struct endpnt_type *endpnt_type_create(struct knetconfig *); 1667c478bd9Sstevel@tonic-gate static void endpnt_type_free(struct endpnt_type *); 1677c478bd9Sstevel@tonic-gate static int check_endpnt(struct endpnt *, struct endpnt **); 16878598ee3Snd150628 static struct endpnt *endpnt_get(struct knetconfig *, int); 1697c478bd9Sstevel@tonic-gate static void endpnt_rele(struct endpnt *); 1707c478bd9Sstevel@tonic-gate static void endpnt_reap_settimer(endpnt_type_t *); 1717c478bd9Sstevel@tonic-gate static void endpnt_reap(endpnt_type_t *); 1727c478bd9Sstevel@tonic-gate static void endpnt_reap_dispatch(void *); 1737c478bd9Sstevel@tonic-gate static void endpnt_reclaim(zoneid_t); 1747c478bd9Sstevel@tonic-gate 1757c478bd9Sstevel@tonic-gate 1767c478bd9Sstevel@tonic-gate /* 1777c478bd9Sstevel@tonic-gate * Request dipatching function. 1787c478bd9Sstevel@tonic-gate */ 1797c478bd9Sstevel@tonic-gate static int clnt_clts_dispatch_send(queue_t *q, mblk_t *, struct netbuf *addr, 180de8c4a14SErik Nordmark calllist_t *, uint_t, cred_t *); 1817c478bd9Sstevel@tonic-gate 1827c478bd9Sstevel@tonic-gate /* 1837c478bd9Sstevel@tonic-gate * The size of the preserialized RPC header information. 1847c478bd9Sstevel@tonic-gate */ 1857c478bd9Sstevel@tonic-gate #define CKU_HDRSIZE 20 1867c478bd9Sstevel@tonic-gate /* 1877c478bd9Sstevel@tonic-gate * The initial allocation size. It is small to reduce space requirements. 1887c478bd9Sstevel@tonic-gate */ 1897c478bd9Sstevel@tonic-gate #define CKU_INITSIZE 2048 1907c478bd9Sstevel@tonic-gate /* 1917c478bd9Sstevel@tonic-gate * The size of additional allocations, if required. It is larger to 1927c478bd9Sstevel@tonic-gate * reduce the number of actual allocations. 
1937c478bd9Sstevel@tonic-gate */ 1947c478bd9Sstevel@tonic-gate #define CKU_ALLOCSIZE 8192 1957c478bd9Sstevel@tonic-gate 1967c478bd9Sstevel@tonic-gate /* 1977c478bd9Sstevel@tonic-gate * Private data per rpc handle. This structure is allocated by 1987c478bd9Sstevel@tonic-gate * clnt_clts_kcreate, and freed by clnt_clts_kdestroy. 1997c478bd9Sstevel@tonic-gate */ 2007c478bd9Sstevel@tonic-gate struct cku_private { 2017c478bd9Sstevel@tonic-gate CLIENT cku_client; /* client handle */ 2027c478bd9Sstevel@tonic-gate int cku_retrys; /* request retrys */ 2037c478bd9Sstevel@tonic-gate calllist_t cku_call; 2047c478bd9Sstevel@tonic-gate struct endpnt *cku_endpnt; /* open end point */ 2057c478bd9Sstevel@tonic-gate struct knetconfig cku_config; 2067c478bd9Sstevel@tonic-gate struct netbuf cku_addr; /* remote address */ 2077c478bd9Sstevel@tonic-gate struct rpc_err cku_err; /* error status */ 2087c478bd9Sstevel@tonic-gate XDR cku_outxdr; /* xdr stream for output */ 2097c478bd9Sstevel@tonic-gate XDR cku_inxdr; /* xdr stream for input */ 2107c478bd9Sstevel@tonic-gate char cku_rpchdr[CKU_HDRSIZE + 4]; /* rpc header */ 2117c478bd9Sstevel@tonic-gate struct cred *cku_cred; /* credentials */ 2127c478bd9Sstevel@tonic-gate struct rpc_timers *cku_timers; /* for estimating RTT */ 2137c478bd9Sstevel@tonic-gate struct rpc_timers *cku_timeall; /* for estimating RTT */ 2147c478bd9Sstevel@tonic-gate void (*cku_feedback)(int, int, caddr_t); 2157c478bd9Sstevel@tonic-gate /* ptr to feedback rtn */ 2167c478bd9Sstevel@tonic-gate caddr_t cku_feedarg; /* argument for feedback func */ 2177c478bd9Sstevel@tonic-gate uint32_t cku_xid; /* current XID */ 2187c478bd9Sstevel@tonic-gate bool_t cku_bcast; /* RPC broadcast hint */ 21978598ee3Snd150628 int cku_useresvport; /* Use reserved port */ 2207c478bd9Sstevel@tonic-gate struct rpc_clts_client *cku_stats; /* counters for the zone */ 2217c478bd9Sstevel@tonic-gate }; 2227c478bd9Sstevel@tonic-gate 2237c478bd9Sstevel@tonic-gate static const struct rpc_clts_client { 
2247c478bd9Sstevel@tonic-gate kstat_named_t rccalls; 2257c478bd9Sstevel@tonic-gate kstat_named_t rcbadcalls; 2267c478bd9Sstevel@tonic-gate kstat_named_t rcretrans; 2277c478bd9Sstevel@tonic-gate kstat_named_t rcbadxids; 2287c478bd9Sstevel@tonic-gate kstat_named_t rctimeouts; 2297c478bd9Sstevel@tonic-gate kstat_named_t rcnewcreds; 2307c478bd9Sstevel@tonic-gate kstat_named_t rcbadverfs; 2317c478bd9Sstevel@tonic-gate kstat_named_t rctimers; 2327c478bd9Sstevel@tonic-gate kstat_named_t rcnomem; 2337c478bd9Sstevel@tonic-gate kstat_named_t rccantsend; 2347c478bd9Sstevel@tonic-gate } clts_rcstat_tmpl = { 2357c478bd9Sstevel@tonic-gate { "calls", KSTAT_DATA_UINT64 }, 2367c478bd9Sstevel@tonic-gate { "badcalls", KSTAT_DATA_UINT64 }, 2377c478bd9Sstevel@tonic-gate { "retrans", KSTAT_DATA_UINT64 }, 2387c478bd9Sstevel@tonic-gate { "badxids", KSTAT_DATA_UINT64 }, 2397c478bd9Sstevel@tonic-gate { "timeouts", KSTAT_DATA_UINT64 }, 2407c478bd9Sstevel@tonic-gate { "newcreds", KSTAT_DATA_UINT64 }, 2417c478bd9Sstevel@tonic-gate { "badverfs", KSTAT_DATA_UINT64 }, 2427c478bd9Sstevel@tonic-gate { "timers", KSTAT_DATA_UINT64 }, 2437c478bd9Sstevel@tonic-gate { "nomem", KSTAT_DATA_UINT64 }, 2447c478bd9Sstevel@tonic-gate { "cantsend", KSTAT_DATA_UINT64 }, 2457c478bd9Sstevel@tonic-gate }; 2467c478bd9Sstevel@tonic-gate 2477c478bd9Sstevel@tonic-gate static uint_t clts_rcstat_ndata = 2487c478bd9Sstevel@tonic-gate sizeof (clts_rcstat_tmpl) / sizeof (kstat_named_t); 2497c478bd9Sstevel@tonic-gate 2507c478bd9Sstevel@tonic-gate #define RCSTAT_INCR(s, x) \ 2511a5e258fSJosef 'Jeff' Sipek atomic_inc_64(&(s)->x.value.ui64) 2527c478bd9Sstevel@tonic-gate 2537c478bd9Sstevel@tonic-gate #define ptoh(p) (&((p)->cku_client)) 2547c478bd9Sstevel@tonic-gate #define htop(h) ((struct cku_private *)((h)->cl_private)) 2557c478bd9Sstevel@tonic-gate 2567c478bd9Sstevel@tonic-gate /* 2577c478bd9Sstevel@tonic-gate * Times to retry 2587c478bd9Sstevel@tonic-gate */ 2597c478bd9Sstevel@tonic-gate #define SNDTRIES 4 
2607c478bd9Sstevel@tonic-gate #define REFRESHES 2 /* authentication refreshes */ 2617c478bd9Sstevel@tonic-gate 2625bd9f8f1Srg137905 /* 2635bd9f8f1Srg137905 * The following is used to determine the global default behavior for 2645bd9f8f1Srg137905 * CLTS when binding to a local port. 2655bd9f8f1Srg137905 * 2665bd9f8f1Srg137905 * If the value is set to 1 the default will be to select a reserved 2675bd9f8f1Srg137905 * (aka privileged) port, if the value is zero the default will be to 2685bd9f8f1Srg137905 * use non-reserved ports. Users of kRPC may override this by using 2695bd9f8f1Srg137905 * CLNT_CONTROL() and CLSET_BINDRESVPORT. 2705bd9f8f1Srg137905 */ 2715bd9f8f1Srg137905 static int clnt_clts_do_bindresvport = 1; 2725bd9f8f1Srg137905 2737c478bd9Sstevel@tonic-gate #define BINDRESVPORT_RETRIES 5 2747c478bd9Sstevel@tonic-gate 2757c478bd9Sstevel@tonic-gate void 2767c478bd9Sstevel@tonic-gate clnt_clts_stats_init(zoneid_t zoneid, struct rpc_clts_client **statsp) 2777c478bd9Sstevel@tonic-gate { 2787c478bd9Sstevel@tonic-gate kstat_t *ksp; 2797c478bd9Sstevel@tonic-gate kstat_named_t *knp; 2807c478bd9Sstevel@tonic-gate 2817c478bd9Sstevel@tonic-gate knp = rpcstat_zone_init_common(zoneid, "unix", "rpc_clts_client", 2827c478bd9Sstevel@tonic-gate (const kstat_named_t *)&clts_rcstat_tmpl, 2837c478bd9Sstevel@tonic-gate sizeof (clts_rcstat_tmpl)); 2847c478bd9Sstevel@tonic-gate /* 2857c478bd9Sstevel@tonic-gate * Backwards compatibility for old kstat clients 2867c478bd9Sstevel@tonic-gate */ 2877c478bd9Sstevel@tonic-gate ksp = kstat_create_zone("unix", 0, "rpc_client", "rpc", 2887c478bd9Sstevel@tonic-gate KSTAT_TYPE_NAMED, clts_rcstat_ndata, 2897c478bd9Sstevel@tonic-gate KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_WRITABLE, zoneid); 2907c478bd9Sstevel@tonic-gate if (ksp) { 2917c478bd9Sstevel@tonic-gate ksp->ks_data = knp; 2927c478bd9Sstevel@tonic-gate kstat_install(ksp); 2937c478bd9Sstevel@tonic-gate } 2947c478bd9Sstevel@tonic-gate *statsp = (struct rpc_clts_client *)knp; 
2957c478bd9Sstevel@tonic-gate } 2967c478bd9Sstevel@tonic-gate 2977c478bd9Sstevel@tonic-gate void 2987c478bd9Sstevel@tonic-gate clnt_clts_stats_fini(zoneid_t zoneid, struct rpc_clts_client **statsp) 2997c478bd9Sstevel@tonic-gate { 3007c478bd9Sstevel@tonic-gate rpcstat_zone_fini_common(zoneid, "unix", "rpc_clts_client"); 3017c478bd9Sstevel@tonic-gate kstat_delete_byname_zone("unix", 0, "rpc_client", zoneid); 3027c478bd9Sstevel@tonic-gate kmem_free(*statsp, sizeof (clts_rcstat_tmpl)); 3037c478bd9Sstevel@tonic-gate } 3047c478bd9Sstevel@tonic-gate 3057c478bd9Sstevel@tonic-gate /* 3067c478bd9Sstevel@tonic-gate * Create an rpc handle for a clts rpc connection. 3077c478bd9Sstevel@tonic-gate * Allocates space for the handle structure and the private data. 3087c478bd9Sstevel@tonic-gate */ 3097c478bd9Sstevel@tonic-gate /* ARGSUSED */ 3107c478bd9Sstevel@tonic-gate int 3117c478bd9Sstevel@tonic-gate clnt_clts_kcreate(struct knetconfig *config, struct netbuf *addr, 3127c478bd9Sstevel@tonic-gate rpcprog_t pgm, rpcvers_t vers, int retrys, struct cred *cred, 3137c478bd9Sstevel@tonic-gate CLIENT **cl) 3147c478bd9Sstevel@tonic-gate { 3157c478bd9Sstevel@tonic-gate CLIENT *h; 3167c478bd9Sstevel@tonic-gate struct cku_private *p; 3177c478bd9Sstevel@tonic-gate struct rpc_msg call_msg; 3187c478bd9Sstevel@tonic-gate int error; 3197c478bd9Sstevel@tonic-gate int plen; 3207c478bd9Sstevel@tonic-gate 3217c478bd9Sstevel@tonic-gate if (cl == NULL) 3227c478bd9Sstevel@tonic-gate return (EINVAL); 3237c478bd9Sstevel@tonic-gate 3247c478bd9Sstevel@tonic-gate *cl = NULL; 3257c478bd9Sstevel@tonic-gate error = 0; 3267c478bd9Sstevel@tonic-gate 3277c478bd9Sstevel@tonic-gate p = kmem_zalloc(sizeof (*p), KM_SLEEP); 3287c478bd9Sstevel@tonic-gate 3297c478bd9Sstevel@tonic-gate h = ptoh(p); 3307c478bd9Sstevel@tonic-gate 3317c478bd9Sstevel@tonic-gate /* handle */ 3327c478bd9Sstevel@tonic-gate h->cl_ops = &clts_ops; 3337c478bd9Sstevel@tonic-gate h->cl_private = (caddr_t)p; 3347c478bd9Sstevel@tonic-gate h->cl_auth = 
authkern_create(); 3357c478bd9Sstevel@tonic-gate 3367c478bd9Sstevel@tonic-gate /* call message, just used to pre-serialize below */ 3377c478bd9Sstevel@tonic-gate call_msg.rm_xid = 0; 3387c478bd9Sstevel@tonic-gate call_msg.rm_direction = CALL; 3397c478bd9Sstevel@tonic-gate call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION; 3407c478bd9Sstevel@tonic-gate call_msg.rm_call.cb_prog = pgm; 3417c478bd9Sstevel@tonic-gate call_msg.rm_call.cb_vers = vers; 3427c478bd9Sstevel@tonic-gate 3437c478bd9Sstevel@tonic-gate /* private */ 3447c478bd9Sstevel@tonic-gate clnt_clts_kinit(h, addr, retrys, cred); 3457c478bd9Sstevel@tonic-gate 3467c478bd9Sstevel@tonic-gate xdrmem_create(&p->cku_outxdr, p->cku_rpchdr, CKU_HDRSIZE, XDR_ENCODE); 3477c478bd9Sstevel@tonic-gate 3487c478bd9Sstevel@tonic-gate /* pre-serialize call message header */ 3497c478bd9Sstevel@tonic-gate if (!xdr_callhdr(&p->cku_outxdr, &call_msg)) { 350*cf98b944SMarcel Telka XDR_DESTROY(&p->cku_outxdr); 3517c478bd9Sstevel@tonic-gate error = EINVAL; /* XXX */ 3527c478bd9Sstevel@tonic-gate goto bad; 3537c478bd9Sstevel@tonic-gate } 354*cf98b944SMarcel Telka XDR_DESTROY(&p->cku_outxdr); 3557c478bd9Sstevel@tonic-gate 3567c478bd9Sstevel@tonic-gate p->cku_config.knc_rdev = config->knc_rdev; 3577c478bd9Sstevel@tonic-gate p->cku_config.knc_semantics = config->knc_semantics; 3587c478bd9Sstevel@tonic-gate plen = strlen(config->knc_protofmly) + 1; 3597c478bd9Sstevel@tonic-gate p->cku_config.knc_protofmly = kmem_alloc(plen, KM_SLEEP); 3607c478bd9Sstevel@tonic-gate bcopy(config->knc_protofmly, p->cku_config.knc_protofmly, plen); 36178598ee3Snd150628 p->cku_useresvport = -1; /* value is has not been set */ 3627c478bd9Sstevel@tonic-gate 3637c478bd9Sstevel@tonic-gate cv_init(&p->cku_call.call_cv, NULL, CV_DEFAULT, NULL); 3647c478bd9Sstevel@tonic-gate mutex_init(&p->cku_call.call_lock, NULL, MUTEX_DEFAULT, NULL); 3657c478bd9Sstevel@tonic-gate 3667c478bd9Sstevel@tonic-gate *cl = h; 3677c478bd9Sstevel@tonic-gate return (0); 
3687c478bd9Sstevel@tonic-gate 3697c478bd9Sstevel@tonic-gate bad: 3707c478bd9Sstevel@tonic-gate auth_destroy(h->cl_auth); 3717c478bd9Sstevel@tonic-gate kmem_free(p->cku_addr.buf, addr->maxlen); 3727c478bd9Sstevel@tonic-gate kmem_free(p, sizeof (struct cku_private)); 3737c478bd9Sstevel@tonic-gate 3747c478bd9Sstevel@tonic-gate return (error); 3757c478bd9Sstevel@tonic-gate } 3767c478bd9Sstevel@tonic-gate 3777c478bd9Sstevel@tonic-gate void 3787c478bd9Sstevel@tonic-gate clnt_clts_kinit(CLIENT *h, struct netbuf *addr, int retrys, cred_t *cred) 3797c478bd9Sstevel@tonic-gate { 3807c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */ 3817c478bd9Sstevel@tonic-gate struct cku_private *p = htop(h); 3827c478bd9Sstevel@tonic-gate struct rpcstat *rsp; 3837c478bd9Sstevel@tonic-gate 384108322fbScarlsonj rsp = zone_getspecific(rpcstat_zone_key, rpc_zone()); 3857c478bd9Sstevel@tonic-gate ASSERT(rsp != NULL); 3867c478bd9Sstevel@tonic-gate 3877c478bd9Sstevel@tonic-gate p->cku_retrys = retrys; 3887c478bd9Sstevel@tonic-gate 3897c478bd9Sstevel@tonic-gate if (p->cku_addr.maxlen < addr->len) { 3907c478bd9Sstevel@tonic-gate if (p->cku_addr.maxlen != 0 && p->cku_addr.buf != NULL) 3917c478bd9Sstevel@tonic-gate kmem_free(p->cku_addr.buf, p->cku_addr.maxlen); 3927c478bd9Sstevel@tonic-gate 3937c478bd9Sstevel@tonic-gate p->cku_addr.buf = kmem_zalloc(addr->maxlen, KM_SLEEP); 3947c478bd9Sstevel@tonic-gate p->cku_addr.maxlen = addr->maxlen; 3957c478bd9Sstevel@tonic-gate } 3967c478bd9Sstevel@tonic-gate 3977c478bd9Sstevel@tonic-gate p->cku_addr.len = addr->len; 3987c478bd9Sstevel@tonic-gate bcopy(addr->buf, p->cku_addr.buf, addr->len); 3997c478bd9Sstevel@tonic-gate 4007c478bd9Sstevel@tonic-gate p->cku_cred = cred; 4017c478bd9Sstevel@tonic-gate p->cku_xid = 0; 4027c478bd9Sstevel@tonic-gate p->cku_timers = NULL; 4037c478bd9Sstevel@tonic-gate p->cku_timeall = NULL; 4047c478bd9Sstevel@tonic-gate p->cku_feedback = NULL; 4057c478bd9Sstevel@tonic-gate p->cku_bcast = FALSE; 4067c478bd9Sstevel@tonic-gate 
p->cku_call.call_xid = 0; 4077c478bd9Sstevel@tonic-gate p->cku_call.call_hash = 0; 4087c478bd9Sstevel@tonic-gate p->cku_call.call_notified = FALSE; 4097c478bd9Sstevel@tonic-gate p->cku_call.call_next = NULL; 4107c478bd9Sstevel@tonic-gate p->cku_call.call_prev = NULL; 4117c478bd9Sstevel@tonic-gate p->cku_call.call_reply = NULL; 4127c478bd9Sstevel@tonic-gate p->cku_call.call_wq = NULL; 4137c478bd9Sstevel@tonic-gate p->cku_stats = rsp->rpc_clts_client; 4147c478bd9Sstevel@tonic-gate } 4157c478bd9Sstevel@tonic-gate 4167c478bd9Sstevel@tonic-gate /* 4177c478bd9Sstevel@tonic-gate * set the timers. Return current retransmission timeout. 4187c478bd9Sstevel@tonic-gate */ 4197c478bd9Sstevel@tonic-gate static int 4207c478bd9Sstevel@tonic-gate clnt_clts_ksettimers(CLIENT *h, struct rpc_timers *t, struct rpc_timers *all, 4217c478bd9Sstevel@tonic-gate int minimum, void (*feedback)(int, int, caddr_t), caddr_t arg, 4227c478bd9Sstevel@tonic-gate uint32_t xid) 4237c478bd9Sstevel@tonic-gate { 4247c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */ 4257c478bd9Sstevel@tonic-gate struct cku_private *p = htop(h); 4267c478bd9Sstevel@tonic-gate int value; 4277c478bd9Sstevel@tonic-gate 4287c478bd9Sstevel@tonic-gate p->cku_feedback = feedback; 4297c478bd9Sstevel@tonic-gate p->cku_feedarg = arg; 4307c478bd9Sstevel@tonic-gate p->cku_timers = t; 4317c478bd9Sstevel@tonic-gate p->cku_timeall = all; 4327c478bd9Sstevel@tonic-gate if (xid) 4337c478bd9Sstevel@tonic-gate p->cku_xid = xid; 4347c478bd9Sstevel@tonic-gate value = all->rt_rtxcur; 4357c478bd9Sstevel@tonic-gate value += t->rt_rtxcur; 4367c478bd9Sstevel@tonic-gate if (value < minimum) 4377c478bd9Sstevel@tonic-gate return (minimum); 4387c478bd9Sstevel@tonic-gate RCSTAT_INCR(p->cku_stats, rctimers); 4397c478bd9Sstevel@tonic-gate return (value); 4407c478bd9Sstevel@tonic-gate } 4417c478bd9Sstevel@tonic-gate 4427c478bd9Sstevel@tonic-gate /* 4437c478bd9Sstevel@tonic-gate * Time out back off function. 
tim is in HZ 4447c478bd9Sstevel@tonic-gate */ 4457c478bd9Sstevel@tonic-gate #define MAXTIMO (20 * hz) 4467c478bd9Sstevel@tonic-gate #define backoff(tim) (((tim) < MAXTIMO) ? dobackoff(tim) : (tim)) 4477c478bd9Sstevel@tonic-gate #define dobackoff(tim) ((((tim) << 1) > MAXTIMO) ? MAXTIMO : ((tim) << 1)) 4487c478bd9Sstevel@tonic-gate 4497c478bd9Sstevel@tonic-gate #define RETRY_POLL_TIMO 30 4507c478bd9Sstevel@tonic-gate 4517c478bd9Sstevel@tonic-gate /* 4527c478bd9Sstevel@tonic-gate * Call remote procedure. 4537c478bd9Sstevel@tonic-gate * Most of the work of rpc is done here. We serialize what is left 4547c478bd9Sstevel@tonic-gate * of the header (some was pre-serialized in the handle), serialize 4557c478bd9Sstevel@tonic-gate * the arguments, and send it off. We wait for a reply or a time out. 4567c478bd9Sstevel@tonic-gate * Timeout causes an immediate return, other packet problems may cause 4577c478bd9Sstevel@tonic-gate * a retry on the receive. When a good packet is received we deserialize 4587c478bd9Sstevel@tonic-gate * it, and check verification. A bad reply code will cause one retry 4597c478bd9Sstevel@tonic-gate * with full (longhand) credentials. 
4607c478bd9Sstevel@tonic-gate */ 4617c478bd9Sstevel@tonic-gate enum clnt_stat 4627c478bd9Sstevel@tonic-gate clnt_clts_kcallit_addr(CLIENT *h, rpcproc_t procnum, xdrproc_t xdr_args, 4637c478bd9Sstevel@tonic-gate caddr_t argsp, xdrproc_t xdr_results, caddr_t resultsp, 4647c478bd9Sstevel@tonic-gate struct timeval wait, struct netbuf *sin) 4657c478bd9Sstevel@tonic-gate { 4667c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */ 4677c478bd9Sstevel@tonic-gate struct cku_private *p = htop(h); 4687c478bd9Sstevel@tonic-gate XDR *xdrs; 4697c478bd9Sstevel@tonic-gate int stries = p->cku_retrys; 4707c478bd9Sstevel@tonic-gate int refreshes = REFRESHES; /* number of times to refresh cred */ 4717c478bd9Sstevel@tonic-gate int round_trip; /* time the RPC */ 4727c478bd9Sstevel@tonic-gate int error; 4737c478bd9Sstevel@tonic-gate mblk_t *mp; 4747c478bd9Sstevel@tonic-gate mblk_t *mpdup; 4757c478bd9Sstevel@tonic-gate mblk_t *resp = NULL; 4767c478bd9Sstevel@tonic-gate mblk_t *tmp; 4777c478bd9Sstevel@tonic-gate calllist_t *call = &p->cku_call; 47851e44b2bSDai Ngo clock_t ori_timout, timout; 4797c478bd9Sstevel@tonic-gate bool_t interrupted; 4807c478bd9Sstevel@tonic-gate enum clnt_stat status; 4817c478bd9Sstevel@tonic-gate struct rpc_msg reply_msg; 4827c478bd9Sstevel@tonic-gate enum clnt_stat re_status; 48378598ee3Snd150628 endpnt_t *endpt; 4847c478bd9Sstevel@tonic-gate 4857c478bd9Sstevel@tonic-gate RCSTAT_INCR(p->cku_stats, rccalls); 4867c478bd9Sstevel@tonic-gate 4877c478bd9Sstevel@tonic-gate RPCLOG(2, "clnt_clts_kcallit_addr: wait.tv_sec: %ld\n", wait.tv_sec); 4887c478bd9Sstevel@tonic-gate RPCLOG(2, "clnt_clts_kcallit_addr: wait.tv_usec: %ld\n", wait.tv_usec); 4897c478bd9Sstevel@tonic-gate 4907c478bd9Sstevel@tonic-gate timout = TIMEVAL_TO_TICK(&wait); 49151e44b2bSDai Ngo ori_timout = timout; 4927c478bd9Sstevel@tonic-gate 4937c478bd9Sstevel@tonic-gate if (p->cku_xid == 0) { 4947c478bd9Sstevel@tonic-gate p->cku_xid = alloc_xid(); 4957c478bd9Sstevel@tonic-gate if (p->cku_endpnt != NULL) 
4967c478bd9Sstevel@tonic-gate endpnt_rele(p->cku_endpnt); 4977c478bd9Sstevel@tonic-gate p->cku_endpnt = NULL; 4987c478bd9Sstevel@tonic-gate } 4998ffff9fdSgt29601 call->call_zoneid = rpc_zoneid(); 5007c478bd9Sstevel@tonic-gate 5017c478bd9Sstevel@tonic-gate mpdup = NULL; 5027c478bd9Sstevel@tonic-gate call_again: 5037c478bd9Sstevel@tonic-gate 5047c478bd9Sstevel@tonic-gate if (mpdup == NULL) { 5057c478bd9Sstevel@tonic-gate 5067c478bd9Sstevel@tonic-gate while ((mp = allocb(CKU_INITSIZE, BPRI_LO)) == NULL) { 5077c478bd9Sstevel@tonic-gate if (strwaitbuf(CKU_INITSIZE, BPRI_LO)) { 5087c478bd9Sstevel@tonic-gate p->cku_err.re_status = RPC_SYSTEMERROR; 5097c478bd9Sstevel@tonic-gate p->cku_err.re_errno = ENOSR; 5107c478bd9Sstevel@tonic-gate goto done; 5117c478bd9Sstevel@tonic-gate } 5127c478bd9Sstevel@tonic-gate } 5137c478bd9Sstevel@tonic-gate 5147c478bd9Sstevel@tonic-gate xdrs = &p->cku_outxdr; 5157c478bd9Sstevel@tonic-gate xdrmblk_init(xdrs, mp, XDR_ENCODE, CKU_ALLOCSIZE); 5167c478bd9Sstevel@tonic-gate 5177c478bd9Sstevel@tonic-gate if (h->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) { 5187c478bd9Sstevel@tonic-gate /* 5197c478bd9Sstevel@tonic-gate * Copy in the preserialized RPC header 5207c478bd9Sstevel@tonic-gate * information. 5217c478bd9Sstevel@tonic-gate */ 5227c478bd9Sstevel@tonic-gate bcopy(p->cku_rpchdr, mp->b_rptr, CKU_HDRSIZE); 5237c478bd9Sstevel@tonic-gate 5247c478bd9Sstevel@tonic-gate /* 5257c478bd9Sstevel@tonic-gate * transaction id is the 1st thing in the output 5267c478bd9Sstevel@tonic-gate * buffer. 5277c478bd9Sstevel@tonic-gate */ 5287c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */ 5297c478bd9Sstevel@tonic-gate (*(uint32_t *)(mp->b_rptr)) = p->cku_xid; 5307c478bd9Sstevel@tonic-gate 5317c478bd9Sstevel@tonic-gate /* Skip the preserialized stuff. */ 5327c478bd9Sstevel@tonic-gate XDR_SETPOS(xdrs, CKU_HDRSIZE); 5337c478bd9Sstevel@tonic-gate 5347c478bd9Sstevel@tonic-gate /* Serialize dynamic stuff into the output buffer. 
*/ 5357c478bd9Sstevel@tonic-gate if ((!XDR_PUTINT32(xdrs, (int32_t *)&procnum)) || 5367c478bd9Sstevel@tonic-gate (!AUTH_MARSHALL(h->cl_auth, xdrs, p->cku_cred)) || 5377c478bd9Sstevel@tonic-gate (!(*xdr_args)(xdrs, argsp))) { 538*cf98b944SMarcel Telka XDR_DESTROY(xdrs); 5397c478bd9Sstevel@tonic-gate freemsg(mp); 5407c478bd9Sstevel@tonic-gate p->cku_err.re_status = RPC_CANTENCODEARGS; 5417c478bd9Sstevel@tonic-gate p->cku_err.re_errno = EIO; 5427c478bd9Sstevel@tonic-gate goto done; 5437c478bd9Sstevel@tonic-gate } 5447c478bd9Sstevel@tonic-gate } else { 5457c478bd9Sstevel@tonic-gate uint32_t *uproc = (uint32_t *) 5467c478bd9Sstevel@tonic-gate &p->cku_rpchdr[CKU_HDRSIZE]; 5477c478bd9Sstevel@tonic-gate IXDR_PUT_U_INT32(uproc, procnum); 5487c478bd9Sstevel@tonic-gate 5497c478bd9Sstevel@tonic-gate (*(uint32_t *)(&p->cku_rpchdr[0])) = p->cku_xid; 5507c478bd9Sstevel@tonic-gate XDR_SETPOS(xdrs, 0); 5517c478bd9Sstevel@tonic-gate 5527c478bd9Sstevel@tonic-gate /* Serialize the procedure number and the arguments. 
*/ 5537c478bd9Sstevel@tonic-gate if (!AUTH_WRAP(h->cl_auth, (caddr_t)p->cku_rpchdr, 5547c478bd9Sstevel@tonic-gate CKU_HDRSIZE+4, xdrs, xdr_args, argsp)) { 555*cf98b944SMarcel Telka XDR_DESTROY(xdrs); 5567c478bd9Sstevel@tonic-gate freemsg(mp); 5577c478bd9Sstevel@tonic-gate p->cku_err.re_status = RPC_CANTENCODEARGS; 5587c478bd9Sstevel@tonic-gate p->cku_err.re_errno = EIO; 5597c478bd9Sstevel@tonic-gate goto done; 5607c478bd9Sstevel@tonic-gate } 5617c478bd9Sstevel@tonic-gate } 562*cf98b944SMarcel Telka 563*cf98b944SMarcel Telka XDR_DESTROY(xdrs); 5647c478bd9Sstevel@tonic-gate } else 5657c478bd9Sstevel@tonic-gate mp = mpdup; 5667c478bd9Sstevel@tonic-gate 5677c478bd9Sstevel@tonic-gate mpdup = dupmsg(mp); 5687c478bd9Sstevel@tonic-gate if (mpdup == NULL) { 5697c478bd9Sstevel@tonic-gate freemsg(mp); 5707c478bd9Sstevel@tonic-gate p->cku_err.re_status = RPC_SYSTEMERROR; 5717c478bd9Sstevel@tonic-gate p->cku_err.re_errno = ENOSR; 5727c478bd9Sstevel@tonic-gate goto done; 5737c478bd9Sstevel@tonic-gate } 5747c478bd9Sstevel@tonic-gate 5757c478bd9Sstevel@tonic-gate /* 5767c478bd9Sstevel@tonic-gate * Grab an endpnt only if the endpoint is NULL. We could be retrying 5777c478bd9Sstevel@tonic-gate * the request and in this case we want to go through the same 5787c478bd9Sstevel@tonic-gate * source port, so that the duplicate request cache may detect a 5797c478bd9Sstevel@tonic-gate * retry. 
5807c478bd9Sstevel@tonic-gate */ 58178598ee3Snd150628 5827c478bd9Sstevel@tonic-gate if (p->cku_endpnt == NULL) 58378598ee3Snd150628 p->cku_endpnt = endpnt_get(&p->cku_config, p->cku_useresvport); 5847c478bd9Sstevel@tonic-gate 5857c478bd9Sstevel@tonic-gate if (p->cku_endpnt == NULL) { 5867c478bd9Sstevel@tonic-gate freemsg(mp); 5877c478bd9Sstevel@tonic-gate p->cku_err.re_status = RPC_SYSTEMERROR; 5887c478bd9Sstevel@tonic-gate p->cku_err.re_errno = ENOSR; 5897c478bd9Sstevel@tonic-gate goto done; 5907c478bd9Sstevel@tonic-gate } 5917c478bd9Sstevel@tonic-gate 592d3d50737SRafael Vanoni round_trip = ddi_get_lbolt(); 5937c478bd9Sstevel@tonic-gate 5947c478bd9Sstevel@tonic-gate error = clnt_clts_dispatch_send(p->cku_endpnt->e_wq, mp, 595de8c4a14SErik Nordmark &p->cku_addr, call, p->cku_xid, p->cku_cred); 5967c478bd9Sstevel@tonic-gate 5977c478bd9Sstevel@tonic-gate if (error != 0) { 5987c478bd9Sstevel@tonic-gate freemsg(mp); 5997c478bd9Sstevel@tonic-gate p->cku_err.re_status = RPC_CANTSEND; 6007c478bd9Sstevel@tonic-gate p->cku_err.re_errno = error; 6017c478bd9Sstevel@tonic-gate RCSTAT_INCR(p->cku_stats, rccantsend); 6027c478bd9Sstevel@tonic-gate goto done1; 6037c478bd9Sstevel@tonic-gate } 6047c478bd9Sstevel@tonic-gate 6057c478bd9Sstevel@tonic-gate RPCLOG(64, "clnt_clts_kcallit_addr: sent call for xid 0x%x\n", 6067c478bd9Sstevel@tonic-gate p->cku_xid); 6077c478bd9Sstevel@tonic-gate 6087c478bd9Sstevel@tonic-gate /* 6097c478bd9Sstevel@tonic-gate * There are two reasons for which we go back to to tryread. 6107c478bd9Sstevel@tonic-gate * 6117c478bd9Sstevel@tonic-gate * a) In case the status is RPC_PROCUNAVAIL and we sent out a 6127c478bd9Sstevel@tonic-gate * broadcast we should not get any invalid messages with the 6137c478bd9Sstevel@tonic-gate * RPC_PROCUNAVAIL error back. 
Some broken RPC implementations 6147c478bd9Sstevel@tonic-gate * send them and for this we have to ignore them ( as we would 6157c478bd9Sstevel@tonic-gate * have never received them ) and look for another message 6167c478bd9Sstevel@tonic-gate * which might contain the valid response because we don't know 6177c478bd9Sstevel@tonic-gate * how many broken implementations are in the network. So we are 6187c478bd9Sstevel@tonic-gate * going to loop until 6197c478bd9Sstevel@tonic-gate * - we received a valid response 6207c478bd9Sstevel@tonic-gate * - we have processed all invalid responses and 6217c478bd9Sstevel@tonic-gate * got a time out when we try to receive again a 6227c478bd9Sstevel@tonic-gate * message. 6237c478bd9Sstevel@tonic-gate * 6247c478bd9Sstevel@tonic-gate * b) We will jump back to tryread also in case we failed 6257c478bd9Sstevel@tonic-gate * within the AUTH_VALIDATE. In this case we should move 6267c478bd9Sstevel@tonic-gate * on and loop until we received a valid response or we 6277c478bd9Sstevel@tonic-gate * have processed all responses with broken authentication 6287c478bd9Sstevel@tonic-gate * and we got a time out when we try to receive a message. 
6297c478bd9Sstevel@tonic-gate */ 6307c478bd9Sstevel@tonic-gate tryread: 6317c478bd9Sstevel@tonic-gate mutex_enter(&call->call_lock); 6327c478bd9Sstevel@tonic-gate interrupted = FALSE; 6337c478bd9Sstevel@tonic-gate if (call->call_notified == FALSE) { 6347c478bd9Sstevel@tonic-gate klwp_t *lwp = ttolwp(curthread); 6357c478bd9Sstevel@tonic-gate clock_t cv_wait_ret = 1; /* init to > 0 */ 6367c478bd9Sstevel@tonic-gate clock_t cv_timout = timout; 6377c478bd9Sstevel@tonic-gate 6387c478bd9Sstevel@tonic-gate if (lwp != NULL) 6397c478bd9Sstevel@tonic-gate lwp->lwp_nostop++; 6407c478bd9Sstevel@tonic-gate 641d3d50737SRafael Vanoni cv_timout += ddi_get_lbolt(); 6427c478bd9Sstevel@tonic-gate 6437c478bd9Sstevel@tonic-gate if (h->cl_nosignal) 6447c478bd9Sstevel@tonic-gate while ((cv_wait_ret = 6457c478bd9Sstevel@tonic-gate cv_timedwait(&call->call_cv, 6467c478bd9Sstevel@tonic-gate &call->call_lock, cv_timout)) > 0 && 6478ffff9fdSgt29601 call->call_notified == FALSE) 6488ffff9fdSgt29601 ; 6497c478bd9Sstevel@tonic-gate else 6507c478bd9Sstevel@tonic-gate while ((cv_wait_ret = 6517c478bd9Sstevel@tonic-gate cv_timedwait_sig(&call->call_cv, 6527c478bd9Sstevel@tonic-gate &call->call_lock, cv_timout)) > 0 && 6538ffff9fdSgt29601 call->call_notified == FALSE) 6548ffff9fdSgt29601 ; 6557c478bd9Sstevel@tonic-gate 6567c478bd9Sstevel@tonic-gate if (cv_wait_ret == 0) 6577c478bd9Sstevel@tonic-gate interrupted = TRUE; 6587c478bd9Sstevel@tonic-gate 6597c478bd9Sstevel@tonic-gate if (lwp != NULL) 6607c478bd9Sstevel@tonic-gate lwp->lwp_nostop--; 6617c478bd9Sstevel@tonic-gate } 6627c478bd9Sstevel@tonic-gate resp = call->call_reply; 6637c478bd9Sstevel@tonic-gate call->call_reply = NULL; 6647c478bd9Sstevel@tonic-gate status = call->call_status; 6657c478bd9Sstevel@tonic-gate /* 6667c478bd9Sstevel@tonic-gate * We have to reset the call_notified here. In case we have 6677c478bd9Sstevel@tonic-gate * to do a retry ( e.g. 
in case we got a RPC_PROCUNAVAIL 6687c478bd9Sstevel@tonic-gate * error ) we need to set this to false to ensure that 6697c478bd9Sstevel@tonic-gate * we will wait for the next message. When the next message 6707c478bd9Sstevel@tonic-gate * is going to arrive the function clnt_clts_dispatch_notify 6717c478bd9Sstevel@tonic-gate * will set this to true again. 6727c478bd9Sstevel@tonic-gate */ 6737c478bd9Sstevel@tonic-gate call->call_notified = FALSE; 67428a15eaaSMarcel Telka call->call_status = RPC_TIMEDOUT; 6757c478bd9Sstevel@tonic-gate mutex_exit(&call->call_lock); 6767c478bd9Sstevel@tonic-gate 6777c478bd9Sstevel@tonic-gate if (status == RPC_TIMEDOUT) { 6787c478bd9Sstevel@tonic-gate if (interrupted) { 6797c478bd9Sstevel@tonic-gate /* 6807c478bd9Sstevel@tonic-gate * We got interrupted, bail out 6817c478bd9Sstevel@tonic-gate */ 6827c478bd9Sstevel@tonic-gate p->cku_err.re_status = RPC_INTR; 6837c478bd9Sstevel@tonic-gate p->cku_err.re_errno = EINTR; 6847c478bd9Sstevel@tonic-gate goto done1; 6857c478bd9Sstevel@tonic-gate } else { 6867c478bd9Sstevel@tonic-gate RPCLOG(8, "clnt_clts_kcallit_addr: " 6877c478bd9Sstevel@tonic-gate "request w/xid 0x%x timedout " 6887c478bd9Sstevel@tonic-gate "waiting for reply\n", p->cku_xid); 6897c478bd9Sstevel@tonic-gate #if 0 /* XXX not yet */ 6907c478bd9Sstevel@tonic-gate /* 6917c478bd9Sstevel@tonic-gate * Timeout may be due to a dead gateway. Send 6927c478bd9Sstevel@tonic-gate * an ioctl downstream advising deletion of 6937c478bd9Sstevel@tonic-gate * route when we reach the half-way point to 6947c478bd9Sstevel@tonic-gate * timing out. 
6957c478bd9Sstevel@tonic-gate */ 6967c478bd9Sstevel@tonic-gate if (stries == p->cku_retrys/2) { 6977c478bd9Sstevel@tonic-gate t_kadvise(p->cku_endpnt->e_tiptr, 6987c478bd9Sstevel@tonic-gate (uchar_t *)p->cku_addr.buf, 6997c478bd9Sstevel@tonic-gate p->cku_addr.len); 7007c478bd9Sstevel@tonic-gate } 7017c478bd9Sstevel@tonic-gate #endif /* not yet */ 7027c478bd9Sstevel@tonic-gate p->cku_err.re_status = RPC_TIMEDOUT; 7037c478bd9Sstevel@tonic-gate p->cku_err.re_errno = ETIMEDOUT; 7047c478bd9Sstevel@tonic-gate RCSTAT_INCR(p->cku_stats, rctimeouts); 7057c478bd9Sstevel@tonic-gate goto done1; 7067c478bd9Sstevel@tonic-gate } 7077c478bd9Sstevel@tonic-gate } 7087c478bd9Sstevel@tonic-gate 70928a15eaaSMarcel Telka ASSERT(resp != NULL); 7107c478bd9Sstevel@tonic-gate 7117c478bd9Sstevel@tonic-gate /* 7127c478bd9Sstevel@tonic-gate * Prepare the message for further processing. We need to remove 7137c478bd9Sstevel@tonic-gate * the datagram header and copy the source address if necessary. No 7147c478bd9Sstevel@tonic-gate * need to verify the header since rpcmod took care of that. 7157c478bd9Sstevel@tonic-gate */ 7167c478bd9Sstevel@tonic-gate /* 7177c478bd9Sstevel@tonic-gate * Copy the source address if the caller has supplied a netbuf. 7187c478bd9Sstevel@tonic-gate */ 7197c478bd9Sstevel@tonic-gate if (sin != NULL) { 7207c478bd9Sstevel@tonic-gate union T_primitives *pptr; 7217c478bd9Sstevel@tonic-gate 7227c478bd9Sstevel@tonic-gate pptr = (union T_primitives *)resp->b_rptr; 7237c478bd9Sstevel@tonic-gate bcopy(resp->b_rptr + pptr->unitdata_ind.SRC_offset, sin->buf, 7247c478bd9Sstevel@tonic-gate pptr->unitdata_ind.SRC_length); 7257c478bd9Sstevel@tonic-gate sin->len = pptr->unitdata_ind.SRC_length; 7267c478bd9Sstevel@tonic-gate } 7277c478bd9Sstevel@tonic-gate 7287c478bd9Sstevel@tonic-gate /* 7297c478bd9Sstevel@tonic-gate * Pop off the datagram header. 73028a15eaaSMarcel Telka * It was retained in rpcmodrput(). 
7317c478bd9Sstevel@tonic-gate */ 7327c478bd9Sstevel@tonic-gate tmp = resp; 7337c478bd9Sstevel@tonic-gate resp = resp->b_cont; 7347c478bd9Sstevel@tonic-gate tmp->b_cont = NULL; 7357c478bd9Sstevel@tonic-gate freeb(tmp); 7367c478bd9Sstevel@tonic-gate 737d3d50737SRafael Vanoni round_trip = ddi_get_lbolt() - round_trip; 7387c478bd9Sstevel@tonic-gate /* 7397c478bd9Sstevel@tonic-gate * Van Jacobson timer algorithm here, only if NOT a retransmission. 7407c478bd9Sstevel@tonic-gate */ 7417c478bd9Sstevel@tonic-gate if (p->cku_timers != NULL && stries == p->cku_retrys) { 7427c478bd9Sstevel@tonic-gate int rt; 7437c478bd9Sstevel@tonic-gate 7447c478bd9Sstevel@tonic-gate rt = round_trip; 7457c478bd9Sstevel@tonic-gate rt -= (p->cku_timers->rt_srtt >> 3); 7467c478bd9Sstevel@tonic-gate p->cku_timers->rt_srtt += rt; 7477c478bd9Sstevel@tonic-gate if (rt < 0) 7487c478bd9Sstevel@tonic-gate rt = - rt; 7497c478bd9Sstevel@tonic-gate rt -= (p->cku_timers->rt_deviate >> 2); 7507c478bd9Sstevel@tonic-gate p->cku_timers->rt_deviate += rt; 7517c478bd9Sstevel@tonic-gate p->cku_timers->rt_rtxcur = 7527c478bd9Sstevel@tonic-gate (clock_t)((p->cku_timers->rt_srtt >> 2) + 7537c478bd9Sstevel@tonic-gate p->cku_timers->rt_deviate) >> 1; 7547c478bd9Sstevel@tonic-gate 7557c478bd9Sstevel@tonic-gate rt = round_trip; 7567c478bd9Sstevel@tonic-gate rt -= (p->cku_timeall->rt_srtt >> 3); 7577c478bd9Sstevel@tonic-gate p->cku_timeall->rt_srtt += rt; 7587c478bd9Sstevel@tonic-gate if (rt < 0) 7597c478bd9Sstevel@tonic-gate rt = - rt; 7607c478bd9Sstevel@tonic-gate rt -= (p->cku_timeall->rt_deviate >> 2); 7617c478bd9Sstevel@tonic-gate p->cku_timeall->rt_deviate += rt; 7627c478bd9Sstevel@tonic-gate p->cku_timeall->rt_rtxcur = 7637c478bd9Sstevel@tonic-gate (clock_t)((p->cku_timeall->rt_srtt >> 2) + 7647c478bd9Sstevel@tonic-gate p->cku_timeall->rt_deviate) >> 1; 7657c478bd9Sstevel@tonic-gate if (p->cku_feedback != NULL) { 7667c478bd9Sstevel@tonic-gate (*p->cku_feedback)(FEEDBACK_OK, procnum, 7677c478bd9Sstevel@tonic-gate 
p->cku_feedarg); 7687c478bd9Sstevel@tonic-gate } 7697c478bd9Sstevel@tonic-gate } 7707c478bd9Sstevel@tonic-gate 7717c478bd9Sstevel@tonic-gate /* 7727c478bd9Sstevel@tonic-gate * Process reply 7737c478bd9Sstevel@tonic-gate */ 7747c478bd9Sstevel@tonic-gate xdrs = &(p->cku_inxdr); 7757c478bd9Sstevel@tonic-gate xdrmblk_init(xdrs, resp, XDR_DECODE, 0); 7767c478bd9Sstevel@tonic-gate 7777c478bd9Sstevel@tonic-gate reply_msg.rm_direction = REPLY; 7787c478bd9Sstevel@tonic-gate reply_msg.rm_reply.rp_stat = MSG_ACCEPTED; 7797c478bd9Sstevel@tonic-gate reply_msg.acpted_rply.ar_stat = SUCCESS; 7807c478bd9Sstevel@tonic-gate reply_msg.acpted_rply.ar_verf = _null_auth; 7817c478bd9Sstevel@tonic-gate /* 7827c478bd9Sstevel@tonic-gate * xdr_results will be done in AUTH_UNWRAP. 7837c478bd9Sstevel@tonic-gate */ 7847c478bd9Sstevel@tonic-gate reply_msg.acpted_rply.ar_results.where = NULL; 7857c478bd9Sstevel@tonic-gate reply_msg.acpted_rply.ar_results.proc = xdr_void; 7867c478bd9Sstevel@tonic-gate 7877c478bd9Sstevel@tonic-gate /* 7887c478bd9Sstevel@tonic-gate * Decode and validate the response. 7897c478bd9Sstevel@tonic-gate */ 7907c478bd9Sstevel@tonic-gate if (!xdr_replymsg(xdrs, &reply_msg)) { 7917c478bd9Sstevel@tonic-gate p->cku_err.re_status = RPC_CANTDECODERES; 7927c478bd9Sstevel@tonic-gate p->cku_err.re_errno = EIO; 7937c478bd9Sstevel@tonic-gate (void) xdr_rpc_free_verifier(xdrs, &reply_msg); 794*cf98b944SMarcel Telka XDR_DESTROY(xdrs); 7957c478bd9Sstevel@tonic-gate goto done1; 7967c478bd9Sstevel@tonic-gate } 7977c478bd9Sstevel@tonic-gate 7987c478bd9Sstevel@tonic-gate _seterr_reply(&reply_msg, &(p->cku_err)); 7997c478bd9Sstevel@tonic-gate 8007c478bd9Sstevel@tonic-gate re_status = p->cku_err.re_status; 8017c478bd9Sstevel@tonic-gate if (re_status == RPC_SUCCESS) { 8027c478bd9Sstevel@tonic-gate /* 8037c478bd9Sstevel@tonic-gate * Reply is good, check auth. 
8047c478bd9Sstevel@tonic-gate */ 8057c478bd9Sstevel@tonic-gate if (!AUTH_VALIDATE(h->cl_auth, 8067c478bd9Sstevel@tonic-gate &reply_msg.acpted_rply.ar_verf)) { 8077c478bd9Sstevel@tonic-gate p->cku_err.re_status = RPC_AUTHERROR; 8087c478bd9Sstevel@tonic-gate p->cku_err.re_why = AUTH_INVALIDRESP; 8097c478bd9Sstevel@tonic-gate RCSTAT_INCR(p->cku_stats, rcbadverfs); 8107c478bd9Sstevel@tonic-gate (void) xdr_rpc_free_verifier(xdrs, &reply_msg); 811*cf98b944SMarcel Telka XDR_DESTROY(xdrs); 8127c478bd9Sstevel@tonic-gate goto tryread; 8137c478bd9Sstevel@tonic-gate } 8147c478bd9Sstevel@tonic-gate if (!AUTH_UNWRAP(h->cl_auth, xdrs, xdr_results, resultsp)) { 8157c478bd9Sstevel@tonic-gate p->cku_err.re_status = RPC_CANTDECODERES; 8167c478bd9Sstevel@tonic-gate p->cku_err.re_errno = EIO; 8177c478bd9Sstevel@tonic-gate } 8187c478bd9Sstevel@tonic-gate (void) xdr_rpc_free_verifier(xdrs, &reply_msg); 819*cf98b944SMarcel Telka XDR_DESTROY(xdrs); 8207c478bd9Sstevel@tonic-gate goto done1; 8217c478bd9Sstevel@tonic-gate } 8227c478bd9Sstevel@tonic-gate /* set errno in case we can't recover */ 8237c478bd9Sstevel@tonic-gate if (re_status != RPC_VERSMISMATCH && 8248ffff9fdSgt29601 re_status != RPC_AUTHERROR && re_status != RPC_PROGVERSMISMATCH) 8257c478bd9Sstevel@tonic-gate p->cku_err.re_errno = EIO; 8267c478bd9Sstevel@tonic-gate /* 8277c478bd9Sstevel@tonic-gate * Determine whether or not we're doing an RPC 8287c478bd9Sstevel@tonic-gate * broadcast. Some server implementations don't 8297c478bd9Sstevel@tonic-gate * follow RFC 1050, section 7.4.2 in that they 8307c478bd9Sstevel@tonic-gate * don't remain silent when they see a proc 8317c478bd9Sstevel@tonic-gate * they don't support. Therefore we keep trying 8327c478bd9Sstevel@tonic-gate * to receive on RPC_PROCUNAVAIL, hoping to get 8337c478bd9Sstevel@tonic-gate * a valid response from a compliant server. 
8347c478bd9Sstevel@tonic-gate */ 8357c478bd9Sstevel@tonic-gate if (re_status == RPC_PROCUNAVAIL && p->cku_bcast) { 8367c478bd9Sstevel@tonic-gate (void) xdr_rpc_free_verifier(xdrs, &reply_msg); 837*cf98b944SMarcel Telka XDR_DESTROY(xdrs); 8387c478bd9Sstevel@tonic-gate goto tryread; 8397c478bd9Sstevel@tonic-gate } 8407c478bd9Sstevel@tonic-gate if (re_status == RPC_AUTHERROR) { 84128a15eaaSMarcel Telka 84228a15eaaSMarcel Telka (void) xdr_rpc_free_verifier(xdrs, &reply_msg); 843*cf98b944SMarcel Telka XDR_DESTROY(xdrs); 84428a15eaaSMarcel Telka call_table_remove(call); 84528a15eaaSMarcel Telka if (call->call_reply != NULL) { 84628a15eaaSMarcel Telka freemsg(call->call_reply); 84728a15eaaSMarcel Telka call->call_reply = NULL; 84828a15eaaSMarcel Telka } 84928a15eaaSMarcel Telka 8507c478bd9Sstevel@tonic-gate /* 8517c478bd9Sstevel@tonic-gate * Maybe our credential need to be refreshed 8527c478bd9Sstevel@tonic-gate */ 8537c478bd9Sstevel@tonic-gate if (refreshes > 0 && 8547c478bd9Sstevel@tonic-gate AUTH_REFRESH(h->cl_auth, &reply_msg, p->cku_cred)) { 8557c478bd9Sstevel@tonic-gate /* 8567c478bd9Sstevel@tonic-gate * The credential is refreshed. Try the request again. 8577c478bd9Sstevel@tonic-gate * Even if stries == 0, we still retry as long as 8587c478bd9Sstevel@tonic-gate * refreshes > 0. This prevents a soft authentication 8597c478bd9Sstevel@tonic-gate * error turning into a hard one at an upper level. 
8607c478bd9Sstevel@tonic-gate */ 8617c478bd9Sstevel@tonic-gate refreshes--; 8627c478bd9Sstevel@tonic-gate RCSTAT_INCR(p->cku_stats, rcbadcalls); 8637c478bd9Sstevel@tonic-gate RCSTAT_INCR(p->cku_stats, rcnewcreds); 8647c478bd9Sstevel@tonic-gate 8657c478bd9Sstevel@tonic-gate freemsg(mpdup); 8667c478bd9Sstevel@tonic-gate mpdup = NULL; 86728a15eaaSMarcel Telka freemsg(resp); 86828a15eaaSMarcel Telka resp = NULL; 8697c478bd9Sstevel@tonic-gate goto call_again; 8707c478bd9Sstevel@tonic-gate } 8717c478bd9Sstevel@tonic-gate /* 8727c478bd9Sstevel@tonic-gate * We have used the client handle to do an AUTH_REFRESH 8737c478bd9Sstevel@tonic-gate * and the RPC status may be set to RPC_SUCCESS; 8747c478bd9Sstevel@tonic-gate * Let's make sure to set it to RPC_AUTHERROR. 8757c478bd9Sstevel@tonic-gate */ 8767c478bd9Sstevel@tonic-gate p->cku_err.re_status = RPC_CANTDECODERES; 8777c478bd9Sstevel@tonic-gate 8787c478bd9Sstevel@tonic-gate /* 8797c478bd9Sstevel@tonic-gate * Map recoverable and unrecoverable 8807c478bd9Sstevel@tonic-gate * authentication errors to appropriate errno 8817c478bd9Sstevel@tonic-gate */ 8827c478bd9Sstevel@tonic-gate switch (p->cku_err.re_why) { 88378598ee3Snd150628 case AUTH_TOOWEAK: 88478598ee3Snd150628 /* 88578598ee3Snd150628 * Could be an nfsportmon failure, set 88678598ee3Snd150628 * useresvport and try again. 
88778598ee3Snd150628 */ 88878598ee3Snd150628 if (p->cku_useresvport != 1) { 88978598ee3Snd150628 p->cku_useresvport = 1; 89028a15eaaSMarcel Telka 89178598ee3Snd150628 freemsg(mpdup); 89278598ee3Snd150628 mpdup = NULL; 89328a15eaaSMarcel Telka freemsg(resp); 89428a15eaaSMarcel Telka resp = NULL; 89528a15eaaSMarcel Telka 89678598ee3Snd150628 endpt = p->cku_endpnt; 89778598ee3Snd150628 if (endpt->e_tiptr != NULL) { 89878598ee3Snd150628 mutex_enter(&endpt->e_lock); 89978598ee3Snd150628 endpt->e_flags &= ~ENDPNT_BOUND; 90078598ee3Snd150628 (void) t_kclose(endpt->e_tiptr, 1); 90178598ee3Snd150628 endpt->e_tiptr = NULL; 90278598ee3Snd150628 mutex_exit(&endpt->e_lock); 90378598ee3Snd150628 90478598ee3Snd150628 } 90578598ee3Snd150628 90678598ee3Snd150628 p->cku_xid = alloc_xid(); 90778598ee3Snd150628 endpnt_rele(p->cku_endpnt); 90878598ee3Snd150628 p->cku_endpnt = NULL; 90978598ee3Snd150628 goto call_again; 91078598ee3Snd150628 } 91178598ee3Snd150628 /* FALLTHRU */ 9127c478bd9Sstevel@tonic-gate case AUTH_BADCRED: 9137c478bd9Sstevel@tonic-gate case AUTH_BADVERF: 9147c478bd9Sstevel@tonic-gate case AUTH_INVALIDRESP: 9157c478bd9Sstevel@tonic-gate case AUTH_FAILED: 9167c478bd9Sstevel@tonic-gate case RPCSEC_GSS_NOCRED: 9177c478bd9Sstevel@tonic-gate case RPCSEC_GSS_FAILED: 9187c478bd9Sstevel@tonic-gate p->cku_err.re_errno = EACCES; 9197c478bd9Sstevel@tonic-gate break; 9207c478bd9Sstevel@tonic-gate case AUTH_REJECTEDCRED: 9217c478bd9Sstevel@tonic-gate case AUTH_REJECTEDVERF: 9227c478bd9Sstevel@tonic-gate default: 9237c478bd9Sstevel@tonic-gate p->cku_err.re_errno = EIO; 9247c478bd9Sstevel@tonic-gate break; 9257c478bd9Sstevel@tonic-gate } 9267c478bd9Sstevel@tonic-gate RPCLOG(1, "clnt_clts_kcallit : authentication failed " 9277c478bd9Sstevel@tonic-gate "with RPC_AUTHERROR of type %d\n", 9287c478bd9Sstevel@tonic-gate p->cku_err.re_why); 92928a15eaaSMarcel Telka goto done; 9307c478bd9Sstevel@tonic-gate } 9317c478bd9Sstevel@tonic-gate 9327c478bd9Sstevel@tonic-gate (void) 
xdr_rpc_free_verifier(xdrs, &reply_msg); 933*cf98b944SMarcel Telka XDR_DESTROY(xdrs); 9347c478bd9Sstevel@tonic-gate 9357c478bd9Sstevel@tonic-gate done1: 9367c478bd9Sstevel@tonic-gate call_table_remove(call); 9377c478bd9Sstevel@tonic-gate if (call->call_reply != NULL) { 9387c478bd9Sstevel@tonic-gate freemsg(call->call_reply); 9397c478bd9Sstevel@tonic-gate call->call_reply = NULL; 9407c478bd9Sstevel@tonic-gate } 9417c478bd9Sstevel@tonic-gate RPCLOG(64, "clnt_clts_kcallit_addr: xid 0x%x taken off dispatch list", 9427c478bd9Sstevel@tonic-gate p->cku_xid); 9437c478bd9Sstevel@tonic-gate 9447c478bd9Sstevel@tonic-gate done: 9457c478bd9Sstevel@tonic-gate if (resp != NULL) { 9467c478bd9Sstevel@tonic-gate freemsg(resp); 9477c478bd9Sstevel@tonic-gate resp = NULL; 9487c478bd9Sstevel@tonic-gate } 9497c478bd9Sstevel@tonic-gate 9507c478bd9Sstevel@tonic-gate if ((p->cku_err.re_status != RPC_SUCCESS) && 9517c478bd9Sstevel@tonic-gate (p->cku_err.re_status != RPC_INTR) && 9527c478bd9Sstevel@tonic-gate (p->cku_err.re_status != RPC_UDERROR) && 9537c478bd9Sstevel@tonic-gate !IS_UNRECOVERABLE_RPC(p->cku_err.re_status)) { 9547c478bd9Sstevel@tonic-gate if (p->cku_feedback != NULL && stries == p->cku_retrys) { 9557c478bd9Sstevel@tonic-gate (*p->cku_feedback)(FEEDBACK_REXMIT1, procnum, 9567c478bd9Sstevel@tonic-gate p->cku_feedarg); 9577c478bd9Sstevel@tonic-gate } 9587c478bd9Sstevel@tonic-gate 9597c478bd9Sstevel@tonic-gate timout = backoff(timout); 9607c478bd9Sstevel@tonic-gate if (p->cku_timeall != (struct rpc_timers *)0) 9617c478bd9Sstevel@tonic-gate p->cku_timeall->rt_rtxcur = timout; 9627c478bd9Sstevel@tonic-gate 9637c478bd9Sstevel@tonic-gate if (p->cku_err.re_status == RPC_SYSTEMERROR || 9647c478bd9Sstevel@tonic-gate p->cku_err.re_status == RPC_CANTSEND) { 9657c478bd9Sstevel@tonic-gate /* 9667c478bd9Sstevel@tonic-gate * Errors due to lack of resources, wait a bit 9677c478bd9Sstevel@tonic-gate * and try again. 
9687c478bd9Sstevel@tonic-gate */ 9697c478bd9Sstevel@tonic-gate (void) delay(hz/10); 9707c478bd9Sstevel@tonic-gate } 9717c478bd9Sstevel@tonic-gate if (stries-- > 0) { 9727c478bd9Sstevel@tonic-gate RCSTAT_INCR(p->cku_stats, rcretrans); 9737c478bd9Sstevel@tonic-gate goto call_again; 9747c478bd9Sstevel@tonic-gate } 9757c478bd9Sstevel@tonic-gate } 9767c478bd9Sstevel@tonic-gate 9777c478bd9Sstevel@tonic-gate if (mpdup != NULL) 9787c478bd9Sstevel@tonic-gate freemsg(mpdup); 9797c478bd9Sstevel@tonic-gate 9807c478bd9Sstevel@tonic-gate if (p->cku_err.re_status != RPC_SUCCESS) { 9817c478bd9Sstevel@tonic-gate RCSTAT_INCR(p->cku_stats, rcbadcalls); 9827c478bd9Sstevel@tonic-gate } 9837c478bd9Sstevel@tonic-gate 9847c478bd9Sstevel@tonic-gate /* 9857c478bd9Sstevel@tonic-gate * Allow the endpoint to be held by the client handle in case this 9867c478bd9Sstevel@tonic-gate * RPC was not successful. A retry may occur at a higher level and 9877c478bd9Sstevel@tonic-gate * in this case we may want to send the request over the same 9887c478bd9Sstevel@tonic-gate * source port. 98951e44b2bSDai Ngo * Endpoint is also released for one-way RPC: no reply, nor retransmit 99051e44b2bSDai Ngo * is expected. 
9917c478bd9Sstevel@tonic-gate */ 99251e44b2bSDai Ngo if ((p->cku_err.re_status == RPC_SUCCESS || 99351e44b2bSDai Ngo (p->cku_err.re_status == RPC_TIMEDOUT && ori_timout == 0)) && 99451e44b2bSDai Ngo p->cku_endpnt != NULL) { 9957c478bd9Sstevel@tonic-gate endpnt_rele(p->cku_endpnt); 9967c478bd9Sstevel@tonic-gate p->cku_endpnt = NULL; 99751e44b2bSDai Ngo } else { 99851e44b2bSDai Ngo DTRACE_PROBE2(clnt_clts_kcallit_done, int, p->cku_err.re_status, 99951e44b2bSDai Ngo struct endpnt *, p->cku_endpnt); 10007c478bd9Sstevel@tonic-gate } 10017c478bd9Sstevel@tonic-gate 10027c478bd9Sstevel@tonic-gate return (p->cku_err.re_status); 10037c478bd9Sstevel@tonic-gate } 10047c478bd9Sstevel@tonic-gate 10057c478bd9Sstevel@tonic-gate static enum clnt_stat 10067c478bd9Sstevel@tonic-gate clnt_clts_kcallit(CLIENT *h, rpcproc_t procnum, xdrproc_t xdr_args, 10077c478bd9Sstevel@tonic-gate caddr_t argsp, xdrproc_t xdr_results, caddr_t resultsp, 10087c478bd9Sstevel@tonic-gate struct timeval wait) 10097c478bd9Sstevel@tonic-gate { 10107c478bd9Sstevel@tonic-gate return (clnt_clts_kcallit_addr(h, procnum, xdr_args, argsp, 10117c478bd9Sstevel@tonic-gate xdr_results, resultsp, wait, NULL)); 10127c478bd9Sstevel@tonic-gate } 10137c478bd9Sstevel@tonic-gate 10147c478bd9Sstevel@tonic-gate /* 10157c478bd9Sstevel@tonic-gate * Return error info on this handle. 
10167c478bd9Sstevel@tonic-gate */ 10177c478bd9Sstevel@tonic-gate static void 10187c478bd9Sstevel@tonic-gate clnt_clts_kerror(CLIENT *h, struct rpc_err *err) 10197c478bd9Sstevel@tonic-gate { 10207c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */ 10217c478bd9Sstevel@tonic-gate struct cku_private *p = htop(h); 10227c478bd9Sstevel@tonic-gate 10237c478bd9Sstevel@tonic-gate *err = p->cku_err; 10247c478bd9Sstevel@tonic-gate } 10257c478bd9Sstevel@tonic-gate 1026*cf98b944SMarcel Telka /*ARGSUSED*/ 10277c478bd9Sstevel@tonic-gate static bool_t 10287c478bd9Sstevel@tonic-gate clnt_clts_kfreeres(CLIENT *h, xdrproc_t xdr_res, caddr_t res_ptr) 10297c478bd9Sstevel@tonic-gate { 1030*cf98b944SMarcel Telka xdr_free(xdr_res, res_ptr); 10317c478bd9Sstevel@tonic-gate 1032*cf98b944SMarcel Telka return (TRUE); 10337c478bd9Sstevel@tonic-gate } 10347c478bd9Sstevel@tonic-gate 10357c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 10367c478bd9Sstevel@tonic-gate static void 10377c478bd9Sstevel@tonic-gate clnt_clts_kabort(CLIENT *h) 10387c478bd9Sstevel@tonic-gate { 10397c478bd9Sstevel@tonic-gate } 10407c478bd9Sstevel@tonic-gate 10417c478bd9Sstevel@tonic-gate static bool_t 10427c478bd9Sstevel@tonic-gate clnt_clts_kcontrol(CLIENT *h, int cmd, char *arg) 10437c478bd9Sstevel@tonic-gate { 10447c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */ 10457c478bd9Sstevel@tonic-gate struct cku_private *p = htop(h); 10467c478bd9Sstevel@tonic-gate 10477c478bd9Sstevel@tonic-gate switch (cmd) { 10487c478bd9Sstevel@tonic-gate case CLSET_XID: 10497c478bd9Sstevel@tonic-gate p->cku_xid = *((uint32_t *)arg); 10507c478bd9Sstevel@tonic-gate return (TRUE); 10517c478bd9Sstevel@tonic-gate 10527c478bd9Sstevel@tonic-gate case CLGET_XID: 10537c478bd9Sstevel@tonic-gate *((uint32_t *)arg) = p->cku_xid; 10547c478bd9Sstevel@tonic-gate return (TRUE); 10557c478bd9Sstevel@tonic-gate 10567c478bd9Sstevel@tonic-gate case CLSET_BCAST: 10577c478bd9Sstevel@tonic-gate p->cku_bcast = *((uint32_t *)arg); 10587c478bd9Sstevel@tonic-gate 
return (TRUE); 10597c478bd9Sstevel@tonic-gate 10607c478bd9Sstevel@tonic-gate case CLGET_BCAST: 10617c478bd9Sstevel@tonic-gate *((uint32_t *)arg) = p->cku_bcast; 10627c478bd9Sstevel@tonic-gate return (TRUE); 106378598ee3Snd150628 case CLSET_BINDRESVPORT: 106478598ee3Snd150628 if (arg == NULL) 106578598ee3Snd150628 return (FALSE); 106678598ee3Snd150628 106778598ee3Snd150628 if (*(int *)arg != 1 && *(int *)arg != 0) 106878598ee3Snd150628 return (FALSE); 106978598ee3Snd150628 107078598ee3Snd150628 p->cku_useresvport = *(int *)arg; 107178598ee3Snd150628 107278598ee3Snd150628 return (TRUE); 107378598ee3Snd150628 107478598ee3Snd150628 case CLGET_BINDRESVPORT: 107578598ee3Snd150628 if (arg == NULL) 107678598ee3Snd150628 return (FALSE); 107778598ee3Snd150628 107878598ee3Snd150628 *(int *)arg = p->cku_useresvport; 107978598ee3Snd150628 108078598ee3Snd150628 return (TRUE); 10817c478bd9Sstevel@tonic-gate 10827c478bd9Sstevel@tonic-gate default: 10837c478bd9Sstevel@tonic-gate return (FALSE); 10847c478bd9Sstevel@tonic-gate } 10857c478bd9Sstevel@tonic-gate } 10867c478bd9Sstevel@tonic-gate 10877c478bd9Sstevel@tonic-gate /* 10887c478bd9Sstevel@tonic-gate * Destroy rpc handle. 10897c478bd9Sstevel@tonic-gate * Frees the space used for output buffer, private data, and handle 10907c478bd9Sstevel@tonic-gate * structure, and the file pointer/TLI data on last reference. 
10917c478bd9Sstevel@tonic-gate */ 10927c478bd9Sstevel@tonic-gate static void 10937c478bd9Sstevel@tonic-gate clnt_clts_kdestroy(CLIENT *h) 10947c478bd9Sstevel@tonic-gate { 10957c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */ 10967c478bd9Sstevel@tonic-gate struct cku_private *p = htop(h); 10977c478bd9Sstevel@tonic-gate calllist_t *call = &p->cku_call; 10987c478bd9Sstevel@tonic-gate 10997c478bd9Sstevel@tonic-gate int plen; 11007c478bd9Sstevel@tonic-gate 11017c478bd9Sstevel@tonic-gate RPCLOG(8, "clnt_clts_kdestroy h: %p\n", (void *)h); 11027c478bd9Sstevel@tonic-gate RPCLOG(8, "clnt_clts_kdestroy h: xid=0x%x\n", p->cku_xid); 11037c478bd9Sstevel@tonic-gate 11047c478bd9Sstevel@tonic-gate if (p->cku_endpnt != NULL) 11057c478bd9Sstevel@tonic-gate endpnt_rele(p->cku_endpnt); 11067c478bd9Sstevel@tonic-gate 11077c478bd9Sstevel@tonic-gate cv_destroy(&call->call_cv); 11087c478bd9Sstevel@tonic-gate mutex_destroy(&call->call_lock); 11097c478bd9Sstevel@tonic-gate 11107c478bd9Sstevel@tonic-gate plen = strlen(p->cku_config.knc_protofmly) + 1; 11117c478bd9Sstevel@tonic-gate kmem_free(p->cku_config.knc_protofmly, plen); 11127c478bd9Sstevel@tonic-gate kmem_free(p->cku_addr.buf, p->cku_addr.maxlen); 11137c478bd9Sstevel@tonic-gate kmem_free(p, sizeof (*p)); 11147c478bd9Sstevel@tonic-gate } 11157c478bd9Sstevel@tonic-gate 11167c478bd9Sstevel@tonic-gate /* 11177c478bd9Sstevel@tonic-gate * The connectionless (CLTS) kRPC endpoint management subsystem. 11187c478bd9Sstevel@tonic-gate * 11197c478bd9Sstevel@tonic-gate * Because endpoints are potentially shared among threads making RPC calls, 11207c478bd9Sstevel@tonic-gate * they are managed in a pool according to type (endpnt_type_t). Each 11217c478bd9Sstevel@tonic-gate * endpnt_type_t points to a list of usable endpoints through the e_pool 11227c478bd9Sstevel@tonic-gate * field, which is of type list_t. list_t is a doubly-linked list. 
11237c478bd9Sstevel@tonic-gate  * The number of endpoints in the pool is stored in the e_cnt field of
11247c478bd9Sstevel@tonic-gate  * endpnt_type_t and the endpoints are reference counted using the e_ref field
11257c478bd9Sstevel@tonic-gate  * in the endpnt_t structure.
11267c478bd9Sstevel@tonic-gate  *
11277c478bd9Sstevel@tonic-gate  * As an optimization, endpoints that have no references are also linked
11287c478bd9Sstevel@tonic-gate  * to an idle list via e_ilist which is also of type list_t. When a thread
11297c478bd9Sstevel@tonic-gate  * calls endpnt_get() to obtain a transport endpoint, the idle list is first
11307c478bd9Sstevel@tonic-gate  * consulted and if such an endpoint exists, it is removed from the idle list
11317c478bd9Sstevel@tonic-gate  * and returned to the caller.
11327c478bd9Sstevel@tonic-gate  *
11337c478bd9Sstevel@tonic-gate  * If the idle list is empty, then a check is made to see if more endpoints
11347c478bd9Sstevel@tonic-gate  * can be created. If so, we proceed and create a new endpoint which is added
11357c478bd9Sstevel@tonic-gate  * to the pool and returned to the caller. If we have reached the limit and
11367c478bd9Sstevel@tonic-gate  * cannot make a new endpoint then one is returned to the caller via round-
11377c478bd9Sstevel@tonic-gate  * robin policy.
11387c478bd9Sstevel@tonic-gate  *
11397c478bd9Sstevel@tonic-gate  * When an endpoint is placed on the idle list by a thread calling
11407c478bd9Sstevel@tonic-gate  * endpnt_rele(), it is timestamped and then a reaper taskq is scheduled to
11417c478bd9Sstevel@tonic-gate  * be dispatched if one hasn't already been. When the timer fires, the
11427c478bd9Sstevel@tonic-gate  * taskq traverses the idle list and checks to see which endpoints are
11437c478bd9Sstevel@tonic-gate  * eligible to be closed. It determines this by checking if the timestamp
11447c478bd9Sstevel@tonic-gate  * when the endpoint was released has exceeded the threshold for how long
11457c478bd9Sstevel@tonic-gate  * it should stay alive.
11467c478bd9Sstevel@tonic-gate * 11477c478bd9Sstevel@tonic-gate * endpnt_t structures remain persistent until the memory reclaim callback, 11487c478bd9Sstevel@tonic-gate * endpnt_reclaim(), is invoked. 11497c478bd9Sstevel@tonic-gate * 11507c478bd9Sstevel@tonic-gate * Here is an example of how the data structures would be laid out by the 11517c478bd9Sstevel@tonic-gate * subsystem: 11527c478bd9Sstevel@tonic-gate * 11537c478bd9Sstevel@tonic-gate * endpnt_type_t 11547c478bd9Sstevel@tonic-gate * 11557c478bd9Sstevel@tonic-gate * loopback inet 11567c478bd9Sstevel@tonic-gate * _______________ ______________ 11577c478bd9Sstevel@tonic-gate * | e_next |----------------------->| e_next |---->> 11587c478bd9Sstevel@tonic-gate * | e_pool |<---+ | e_pool |<----+ 11597c478bd9Sstevel@tonic-gate * | e_ilist |<---+--+ | e_ilist |<----+--+ 11607c478bd9Sstevel@tonic-gate * +->| e_pcurr |----+--+--+ +->| e_pcurr |-----+--+--+ 11617c478bd9Sstevel@tonic-gate * | | ... | | | | | | ... | | | | 11627c478bd9Sstevel@tonic-gate * | | e_itimer (90) | | | | | | e_itimer (0) | | | | 11637c478bd9Sstevel@tonic-gate * | | e_cnt (1) | | | | | | e_cnt (3) | | | | 11647c478bd9Sstevel@tonic-gate * | +---------------+ | | | | +--------------+ | | | 11657c478bd9Sstevel@tonic-gate * | | | | | | | | 11667c478bd9Sstevel@tonic-gate * | endpnt_t | | | | | | | 11677c478bd9Sstevel@tonic-gate * | ____________ | | | | ____________ | | | 11687c478bd9Sstevel@tonic-gate * | | e_node |<------+ | | | | e_node |<------+ | | 11697c478bd9Sstevel@tonic-gate * | | e_idle |<---------+ | | | e_idle | | | | 11707c478bd9Sstevel@tonic-gate * +--| e_type |<------------+ +--| e_type | | | | 11717c478bd9Sstevel@tonic-gate * | e_tiptr | | | e_tiptr | | | | 11727c478bd9Sstevel@tonic-gate * | ... | | | ... | | | | 11737c478bd9Sstevel@tonic-gate * | e_lock | | | e_lock | | | | 11747c478bd9Sstevel@tonic-gate * | ... | | | ... 
| | | | 11757c478bd9Sstevel@tonic-gate * | e_ref (0) | | | e_ref (2) | | | | 11767c478bd9Sstevel@tonic-gate * | e_itime | | | e_itime | | | | 11777c478bd9Sstevel@tonic-gate * +------------+ | +------------+ | | | 11787c478bd9Sstevel@tonic-gate * | | | | 11797c478bd9Sstevel@tonic-gate * | | | | 11807c478bd9Sstevel@tonic-gate * | ____________ | | | 11817c478bd9Sstevel@tonic-gate * | | e_node |<------+ | | 11827c478bd9Sstevel@tonic-gate * | | e_idle |<------+--+ | 11837c478bd9Sstevel@tonic-gate * +--| e_type | | | 11847c478bd9Sstevel@tonic-gate * | | e_tiptr | | | 11857c478bd9Sstevel@tonic-gate * | | ... | | | 11867c478bd9Sstevel@tonic-gate * | | e_lock | | | 11877c478bd9Sstevel@tonic-gate * | | ... | | | 11887c478bd9Sstevel@tonic-gate * | | e_ref (0) | | | 11897c478bd9Sstevel@tonic-gate * | | e_itime | | | 11907c478bd9Sstevel@tonic-gate * | +------------+ | | 11917c478bd9Sstevel@tonic-gate * | | | 11927c478bd9Sstevel@tonic-gate * | | | 11937c478bd9Sstevel@tonic-gate * | ____________ | | 11947c478bd9Sstevel@tonic-gate * | | e_node |<------+ | 11957c478bd9Sstevel@tonic-gate * | | e_idle | | 11967c478bd9Sstevel@tonic-gate * +--| e_type |<------------+ 11977c478bd9Sstevel@tonic-gate * | e_tiptr | 11987c478bd9Sstevel@tonic-gate * | ... | 11997c478bd9Sstevel@tonic-gate * | e_lock | 12007c478bd9Sstevel@tonic-gate * | ... 
| 12017c478bd9Sstevel@tonic-gate * | e_ref (1) | 12027c478bd9Sstevel@tonic-gate * | e_itime | 12037c478bd9Sstevel@tonic-gate * +------------+ 12047c478bd9Sstevel@tonic-gate * 12057c478bd9Sstevel@tonic-gate * Endpoint locking strategy: 12067c478bd9Sstevel@tonic-gate * 12077c478bd9Sstevel@tonic-gate * The following functions manipulate lists which hold the endpoint and the 12087c478bd9Sstevel@tonic-gate * endpoints themselves: 12097c478bd9Sstevel@tonic-gate * 12107c478bd9Sstevel@tonic-gate * endpnt_get()/check_endpnt()/endpnt_rele()/endpnt_reap()/do_endpnt_reclaim() 12117c478bd9Sstevel@tonic-gate * 12127c478bd9Sstevel@tonic-gate * Lock description follows: 12137c478bd9Sstevel@tonic-gate * 12147c478bd9Sstevel@tonic-gate * endpnt_type_lock: Global reader/writer lock which protects accesses to the 12157c478bd9Sstevel@tonic-gate * endpnt_type_list. 12167c478bd9Sstevel@tonic-gate * 12177c478bd9Sstevel@tonic-gate * e_plock: Lock defined in the endpnt_type_t. It is intended to 12187c478bd9Sstevel@tonic-gate * protect accesses to the pool of endopints (e_pool) for a given 12197c478bd9Sstevel@tonic-gate * endpnt_type_t. 12207c478bd9Sstevel@tonic-gate * 12217c478bd9Sstevel@tonic-gate * e_ilock: Lock defined in endpnt_type_t. It is intended to protect accesses 12227c478bd9Sstevel@tonic-gate * to the idle list (e_ilist) of available endpoints for a given 12237c478bd9Sstevel@tonic-gate * endpnt_type_t. It also protects access to the e_itimer, e_async_cv, 12247c478bd9Sstevel@tonic-gate * and e_async_count fields in endpnt_type_t. 12257c478bd9Sstevel@tonic-gate * 12267c478bd9Sstevel@tonic-gate * e_lock: Lock defined in the endpnt structure. It is intended to protect 12277c478bd9Sstevel@tonic-gate * flags, cv, and ref count. 12287c478bd9Sstevel@tonic-gate * 12297c478bd9Sstevel@tonic-gate * The order goes as follows so as not to induce deadlock. 
12307c478bd9Sstevel@tonic-gate * 12317c478bd9Sstevel@tonic-gate * endpnt_type_lock -> e_plock -> e_ilock -> e_lock 12327c478bd9Sstevel@tonic-gate * 12337c478bd9Sstevel@tonic-gate * Interaction with Zones and shutting down: 12347c478bd9Sstevel@tonic-gate * 12357c478bd9Sstevel@tonic-gate * endpnt_type_ts are uniquely identified by the (e_zoneid, e_rdev, e_protofmly) 12367c478bd9Sstevel@tonic-gate * tuple, which means that a zone may not reuse another zone's idle endpoints 12377c478bd9Sstevel@tonic-gate * without first doing a t_kclose(). 12387c478bd9Sstevel@tonic-gate * 12397c478bd9Sstevel@tonic-gate * A zone's endpnt_type_ts are destroyed when a zone is shut down; e_async_cv 12407c478bd9Sstevel@tonic-gate * and e_async_count are used to keep track of the threads in endpnt_taskq 12417c478bd9Sstevel@tonic-gate * trying to reap endpnt_ts in the endpnt_type_t. 12427c478bd9Sstevel@tonic-gate */ 12437c478bd9Sstevel@tonic-gate 12447c478bd9Sstevel@tonic-gate /* 12457c478bd9Sstevel@tonic-gate * Allocate and initialize an endpnt_type_t 12467c478bd9Sstevel@tonic-gate */ 12477c478bd9Sstevel@tonic-gate static struct endpnt_type * 12487c478bd9Sstevel@tonic-gate endpnt_type_create(struct knetconfig *config) 12497c478bd9Sstevel@tonic-gate { 12507c478bd9Sstevel@tonic-gate struct endpnt_type *etype; 12517c478bd9Sstevel@tonic-gate 12527c478bd9Sstevel@tonic-gate /* 12537c478bd9Sstevel@tonic-gate * Allocate a new endpoint type to hang a list of 12547c478bd9Sstevel@tonic-gate * endpoints off of it. 
12557c478bd9Sstevel@tonic-gate */ 12567c478bd9Sstevel@tonic-gate etype = kmem_alloc(sizeof (struct endpnt_type), KM_SLEEP); 12577c478bd9Sstevel@tonic-gate etype->e_next = NULL; 12587c478bd9Sstevel@tonic-gate etype->e_pcurr = NULL; 12597c478bd9Sstevel@tonic-gate etype->e_itimer = 0; 12607c478bd9Sstevel@tonic-gate etype->e_cnt = 0; 12617c478bd9Sstevel@tonic-gate 12627c478bd9Sstevel@tonic-gate (void) strncpy(etype->e_protofmly, config->knc_protofmly, KNC_STRSIZE); 12637c478bd9Sstevel@tonic-gate mutex_init(&etype->e_plock, NULL, MUTEX_DEFAULT, NULL); 12647c478bd9Sstevel@tonic-gate mutex_init(&etype->e_ilock, NULL, MUTEX_DEFAULT, NULL); 12657c478bd9Sstevel@tonic-gate etype->e_rdev = config->knc_rdev; 1266108322fbScarlsonj etype->e_zoneid = rpc_zoneid(); 12677c478bd9Sstevel@tonic-gate etype->e_async_count = 0; 12687c478bd9Sstevel@tonic-gate cv_init(&etype->e_async_cv, NULL, CV_DEFAULT, NULL); 12697c478bd9Sstevel@tonic-gate 12707c478bd9Sstevel@tonic-gate list_create(&etype->e_pool, sizeof (endpnt_t), 12717c478bd9Sstevel@tonic-gate offsetof(endpnt_t, e_node)); 12727c478bd9Sstevel@tonic-gate list_create(&etype->e_ilist, sizeof (endpnt_t), 12737c478bd9Sstevel@tonic-gate offsetof(endpnt_t, e_idle)); 12747c478bd9Sstevel@tonic-gate 12757c478bd9Sstevel@tonic-gate /* 12767c478bd9Sstevel@tonic-gate * Check to see if we need to create a taskq for endpoint 12777c478bd9Sstevel@tonic-gate * reaping 12787c478bd9Sstevel@tonic-gate */ 12797c478bd9Sstevel@tonic-gate mutex_enter(&endpnt_taskq_lock); 12807c478bd9Sstevel@tonic-gate if (taskq_created == FALSE) { 12817c478bd9Sstevel@tonic-gate taskq_created = TRUE; 12827c478bd9Sstevel@tonic-gate mutex_exit(&endpnt_taskq_lock); 12837c478bd9Sstevel@tonic-gate ASSERT(endpnt_taskq == NULL); 12847c478bd9Sstevel@tonic-gate endpnt_taskq = taskq_create("clts_endpnt_taskq", 1, 12857c478bd9Sstevel@tonic-gate minclsyspri, 200, INT_MAX, 0); 12867c478bd9Sstevel@tonic-gate } else 12877c478bd9Sstevel@tonic-gate mutex_exit(&endpnt_taskq_lock); 
12887c478bd9Sstevel@tonic-gate 12897c478bd9Sstevel@tonic-gate return (etype); 12907c478bd9Sstevel@tonic-gate } 12917c478bd9Sstevel@tonic-gate 12927c478bd9Sstevel@tonic-gate /* 12937c478bd9Sstevel@tonic-gate * Free an endpnt_type_t 12947c478bd9Sstevel@tonic-gate */ 12957c478bd9Sstevel@tonic-gate static void 12967c478bd9Sstevel@tonic-gate endpnt_type_free(struct endpnt_type *etype) 12977c478bd9Sstevel@tonic-gate { 12987c478bd9Sstevel@tonic-gate mutex_destroy(&etype->e_plock); 12997c478bd9Sstevel@tonic-gate mutex_destroy(&etype->e_ilock); 13007c478bd9Sstevel@tonic-gate list_destroy(&etype->e_pool); 13017c478bd9Sstevel@tonic-gate list_destroy(&etype->e_ilist); 13027c478bd9Sstevel@tonic-gate kmem_free(etype, sizeof (endpnt_type_t)); 13037c478bd9Sstevel@tonic-gate } 13047c478bd9Sstevel@tonic-gate 13057c478bd9Sstevel@tonic-gate /* 13067c478bd9Sstevel@tonic-gate * Check the endpoint to ensure that it is suitable for use. 13077c478bd9Sstevel@tonic-gate * 13087c478bd9Sstevel@tonic-gate * Possible return values: 13097c478bd9Sstevel@tonic-gate * 13107c478bd9Sstevel@tonic-gate * return (1) - Endpoint is established, but needs to be re-opened. 13117c478bd9Sstevel@tonic-gate * return (0) && *newp == NULL - Endpoint is established, but unusable. 13127c478bd9Sstevel@tonic-gate * return (0) && *newp != NULL - Endpoint is established and usable. 
13137c478bd9Sstevel@tonic-gate */ 13147c478bd9Sstevel@tonic-gate static int 13157c478bd9Sstevel@tonic-gate check_endpnt(struct endpnt *endp, struct endpnt **newp) 13167c478bd9Sstevel@tonic-gate { 13177c478bd9Sstevel@tonic-gate *newp = endp; 13187c478bd9Sstevel@tonic-gate 13197c478bd9Sstevel@tonic-gate mutex_enter(&endp->e_lock); 13207c478bd9Sstevel@tonic-gate ASSERT(endp->e_ref >= 1); 13217c478bd9Sstevel@tonic-gate 13227c478bd9Sstevel@tonic-gate /* 13237c478bd9Sstevel@tonic-gate * The first condition we check for is if the endpoint has been 13247c478bd9Sstevel@tonic-gate * allocated, but is unusable either because it has been closed or 13257c478bd9Sstevel@tonic-gate * has been marked stale. Only *one* thread will be allowed to 132651e44b2bSDai Ngo * execute the then clause. This is enforced because the first thread 13277c478bd9Sstevel@tonic-gate * to check this condition will clear the flags, so that subsequent 13287c478bd9Sstevel@tonic-gate * thread(s) checking this endpoint will move on. 13297c478bd9Sstevel@tonic-gate */ 13307c478bd9Sstevel@tonic-gate if ((endp->e_flags & ENDPNT_ESTABLISHED) && 13317c478bd9Sstevel@tonic-gate (!(endp->e_flags & ENDPNT_BOUND) || 13327c478bd9Sstevel@tonic-gate (endp->e_flags & ENDPNT_STALE))) { 13337c478bd9Sstevel@tonic-gate /* 13347c478bd9Sstevel@tonic-gate * Clear the flags here since they will be 13357c478bd9Sstevel@tonic-gate * set again by this thread. They need to be 13367c478bd9Sstevel@tonic-gate * individually cleared because we want to maintain 13377c478bd9Sstevel@tonic-gate * the state for ENDPNT_ONIDLE. 
13387c478bd9Sstevel@tonic-gate */ 13397c478bd9Sstevel@tonic-gate endp->e_flags &= ~(ENDPNT_ESTABLISHED | 13407c478bd9Sstevel@tonic-gate ENDPNT_WAITING | ENDPNT_BOUND | ENDPNT_STALE); 13417c478bd9Sstevel@tonic-gate mutex_exit(&endp->e_lock); 13427c478bd9Sstevel@tonic-gate return (1); 13437c478bd9Sstevel@tonic-gate } 13447c478bd9Sstevel@tonic-gate 13457c478bd9Sstevel@tonic-gate /* 13467c478bd9Sstevel@tonic-gate * The second condition is meant for any thread that is waiting for 13477c478bd9Sstevel@tonic-gate * an endpoint to become established. It will cv_wait() until 13487c478bd9Sstevel@tonic-gate * the condition for the endpoint has been changed to ENDPNT_BOUND or 13497c478bd9Sstevel@tonic-gate * ENDPNT_STALE. 13507c478bd9Sstevel@tonic-gate */ 13517c478bd9Sstevel@tonic-gate while (!(endp->e_flags & ENDPNT_BOUND) && 13527c478bd9Sstevel@tonic-gate !(endp->e_flags & ENDPNT_STALE)) { 13537c478bd9Sstevel@tonic-gate endp->e_flags |= ENDPNT_WAITING; 13547c478bd9Sstevel@tonic-gate cv_wait(&endp->e_cv, &endp->e_lock); 13557c478bd9Sstevel@tonic-gate } 13567c478bd9Sstevel@tonic-gate 13577c478bd9Sstevel@tonic-gate ASSERT(endp->e_flags & ENDPNT_ESTABLISHED); 13587c478bd9Sstevel@tonic-gate 13597c478bd9Sstevel@tonic-gate /* 13607c478bd9Sstevel@tonic-gate * The last case we check for is if the endpoint has been marked stale. 13617c478bd9Sstevel@tonic-gate * If this is the case then set *newp to NULL and return, so that the 13627c478bd9Sstevel@tonic-gate * caller is notified of the error and can take appropriate action. 
13637c478bd9Sstevel@tonic-gate */ 13647c478bd9Sstevel@tonic-gate if (endp->e_flags & ENDPNT_STALE) { 13657c478bd9Sstevel@tonic-gate endp->e_ref--; 13667c478bd9Sstevel@tonic-gate *newp = NULL; 13677c478bd9Sstevel@tonic-gate } 13687c478bd9Sstevel@tonic-gate mutex_exit(&endp->e_lock); 13697c478bd9Sstevel@tonic-gate return (0); 13707c478bd9Sstevel@tonic-gate } 13717c478bd9Sstevel@tonic-gate 13727c478bd9Sstevel@tonic-gate #ifdef DEBUG 13737c478bd9Sstevel@tonic-gate /* 13747c478bd9Sstevel@tonic-gate * Provide a fault injection setting to test error conditions. 13757c478bd9Sstevel@tonic-gate */ 13767c478bd9Sstevel@tonic-gate static int endpnt_get_return_null = 0; 13777c478bd9Sstevel@tonic-gate #endif 13787c478bd9Sstevel@tonic-gate 13797c478bd9Sstevel@tonic-gate /* 13807c478bd9Sstevel@tonic-gate * Returns a handle (struct endpnt *) to an open and bound endpoint 13817c478bd9Sstevel@tonic-gate * specified by the knetconfig passed in. Returns NULL if no valid endpoint 13827c478bd9Sstevel@tonic-gate * can be obtained. 
13837c478bd9Sstevel@tonic-gate */ 13847c478bd9Sstevel@tonic-gate static struct endpnt * 138578598ee3Snd150628 endpnt_get(struct knetconfig *config, int useresvport) 13867c478bd9Sstevel@tonic-gate { 13877c478bd9Sstevel@tonic-gate struct endpnt_type *n_etype = NULL; 13887c478bd9Sstevel@tonic-gate struct endpnt_type *np = NULL; 13897c478bd9Sstevel@tonic-gate struct endpnt *new = NULL; 13907c478bd9Sstevel@tonic-gate struct endpnt *endp = NULL; 13917c478bd9Sstevel@tonic-gate struct endpnt *next = NULL; 13927c478bd9Sstevel@tonic-gate TIUSER *tiptr = NULL; 13937c478bd9Sstevel@tonic-gate int rtries = BINDRESVPORT_RETRIES; 13947c478bd9Sstevel@tonic-gate int i = 0; 13957c478bd9Sstevel@tonic-gate int error; 13967c478bd9Sstevel@tonic-gate int retval; 1397108322fbScarlsonj zoneid_t zoneid = rpc_zoneid(); 139845916cd2Sjpk cred_t *cr; 13997c478bd9Sstevel@tonic-gate 14007c478bd9Sstevel@tonic-gate RPCLOG(1, "endpnt_get: protofmly %s, ", config->knc_protofmly); 14017c478bd9Sstevel@tonic-gate RPCLOG(1, "rdev %ld\n", config->knc_rdev); 14027c478bd9Sstevel@tonic-gate 14037c478bd9Sstevel@tonic-gate #ifdef DEBUG 14047c478bd9Sstevel@tonic-gate /* 14057c478bd9Sstevel@tonic-gate * Inject fault if desired. Pretend we have a stale endpoint 14067c478bd9Sstevel@tonic-gate * and return NULL. 
14077c478bd9Sstevel@tonic-gate */ 14087c478bd9Sstevel@tonic-gate if (endpnt_get_return_null > 0) { 14097c478bd9Sstevel@tonic-gate endpnt_get_return_null--; 14107c478bd9Sstevel@tonic-gate return (NULL); 14117c478bd9Sstevel@tonic-gate } 14127c478bd9Sstevel@tonic-gate #endif 14137c478bd9Sstevel@tonic-gate rw_enter(&endpnt_type_lock, RW_READER); 14147c478bd9Sstevel@tonic-gate 14157c478bd9Sstevel@tonic-gate top: 14167c478bd9Sstevel@tonic-gate for (np = endpnt_type_list; np != NULL; np = np->e_next) 14177c478bd9Sstevel@tonic-gate if ((np->e_zoneid == zoneid) && 14187c478bd9Sstevel@tonic-gate (np->e_rdev == config->knc_rdev) && 14197c478bd9Sstevel@tonic-gate (strcmp(np->e_protofmly, 14207c478bd9Sstevel@tonic-gate config->knc_protofmly) == 0)) 14217c478bd9Sstevel@tonic-gate break; 14227c478bd9Sstevel@tonic-gate 14237c478bd9Sstevel@tonic-gate if (np == NULL && n_etype != NULL) { 14247c478bd9Sstevel@tonic-gate ASSERT(rw_write_held(&endpnt_type_lock)); 14257c478bd9Sstevel@tonic-gate 14267c478bd9Sstevel@tonic-gate /* 14277c478bd9Sstevel@tonic-gate * Link the endpoint type onto the list 14287c478bd9Sstevel@tonic-gate */ 14297c478bd9Sstevel@tonic-gate n_etype->e_next = endpnt_type_list; 14307c478bd9Sstevel@tonic-gate endpnt_type_list = n_etype; 14317c478bd9Sstevel@tonic-gate np = n_etype; 14327c478bd9Sstevel@tonic-gate n_etype = NULL; 14337c478bd9Sstevel@tonic-gate } 14347c478bd9Sstevel@tonic-gate 14357c478bd9Sstevel@tonic-gate if (np == NULL) { 14367c478bd9Sstevel@tonic-gate /* 14377c478bd9Sstevel@tonic-gate * The logic here is that we were unable to find an 14387c478bd9Sstevel@tonic-gate * endpnt_type_t that matched our criteria, so we allocate a 14397c478bd9Sstevel@tonic-gate * new one. Because kmem_alloc() needs to be called with 14407c478bd9Sstevel@tonic-gate * KM_SLEEP, we drop our locks so that we don't induce 14417c478bd9Sstevel@tonic-gate * deadlock. 
After allocating and initializing the 14427c478bd9Sstevel@tonic-gate * endpnt_type_t, we reaquire the lock and go back to check 14437c478bd9Sstevel@tonic-gate * if this entry needs to be added to the list. Since we do 14447c478bd9Sstevel@tonic-gate * some operations without any locking other threads may 14457c478bd9Sstevel@tonic-gate * have been looking for the same endpnt_type_t and gone 14467c478bd9Sstevel@tonic-gate * through this code path. We check for this case and allow 14477c478bd9Sstevel@tonic-gate * one thread to link its endpnt_type_t to the list and the 14487c478bd9Sstevel@tonic-gate * other threads will simply free theirs. 14497c478bd9Sstevel@tonic-gate */ 14507c478bd9Sstevel@tonic-gate rw_exit(&endpnt_type_lock); 14517c478bd9Sstevel@tonic-gate n_etype = endpnt_type_create(config); 14527c478bd9Sstevel@tonic-gate 14537c478bd9Sstevel@tonic-gate /* 14547c478bd9Sstevel@tonic-gate * We need to reaquire the lock with RW_WRITER here so that 14557c478bd9Sstevel@tonic-gate * we can safely link the new endpoint type onto the list. 14567c478bd9Sstevel@tonic-gate */ 14577c478bd9Sstevel@tonic-gate rw_enter(&endpnt_type_lock, RW_WRITER); 14587c478bd9Sstevel@tonic-gate goto top; 14597c478bd9Sstevel@tonic-gate } 14607c478bd9Sstevel@tonic-gate 14617c478bd9Sstevel@tonic-gate rw_exit(&endpnt_type_lock); 14627c478bd9Sstevel@tonic-gate /* 14637c478bd9Sstevel@tonic-gate * If n_etype is not NULL, then another thread was able to 14647c478bd9Sstevel@tonic-gate * insert an endpnt_type_t of this type onto the list before 14657c478bd9Sstevel@tonic-gate * we did. Go ahead and free ours. 
14667c478bd9Sstevel@tonic-gate */ 14677c478bd9Sstevel@tonic-gate if (n_etype != NULL) 14687c478bd9Sstevel@tonic-gate endpnt_type_free(n_etype); 14697c478bd9Sstevel@tonic-gate 14707c478bd9Sstevel@tonic-gate mutex_enter(&np->e_ilock); 14717c478bd9Sstevel@tonic-gate /* 14727c478bd9Sstevel@tonic-gate * The algorithm to hand out endpoints is to first 14737c478bd9Sstevel@tonic-gate * give out those that are idle if such endpoints 14747c478bd9Sstevel@tonic-gate * exist. Otherwise, create a new one if we haven't 14757c478bd9Sstevel@tonic-gate * reached the max threshold. Finally, we give out 14767c478bd9Sstevel@tonic-gate * endpoints in a pseudo LRU fashion (round-robin). 14777c478bd9Sstevel@tonic-gate * 14787c478bd9Sstevel@tonic-gate * Note: The idle list is merely a hint of those endpoints 14797c478bd9Sstevel@tonic-gate * that should be idle. There exists a window after the 14807c478bd9Sstevel@tonic-gate * endpoint is released and before it is linked back onto the 14817c478bd9Sstevel@tonic-gate * idle list where a thread could get a reference to it and 14827c478bd9Sstevel@tonic-gate * use it. This is okay, since the reference counts will 14837c478bd9Sstevel@tonic-gate * still be consistent. 
14847c478bd9Sstevel@tonic-gate */ 14857c478bd9Sstevel@tonic-gate if ((endp = (endpnt_t *)list_head(&np->e_ilist)) != NULL) { 14867c478bd9Sstevel@tonic-gate timeout_id_t t_id = 0; 14877c478bd9Sstevel@tonic-gate 14887c478bd9Sstevel@tonic-gate mutex_enter(&endp->e_lock); 14897c478bd9Sstevel@tonic-gate endp->e_ref++; 14907c478bd9Sstevel@tonic-gate endp->e_itime = 0; 14917c478bd9Sstevel@tonic-gate endp->e_flags &= ~ENDPNT_ONIDLE; 14927c478bd9Sstevel@tonic-gate mutex_exit(&endp->e_lock); 14937c478bd9Sstevel@tonic-gate 14947c478bd9Sstevel@tonic-gate /* 14957c478bd9Sstevel@tonic-gate * Pop the endpoint off the idle list and hand it off 14967c478bd9Sstevel@tonic-gate */ 14977c478bd9Sstevel@tonic-gate list_remove(&np->e_ilist, endp); 14987c478bd9Sstevel@tonic-gate 14997c478bd9Sstevel@tonic-gate if (np->e_itimer != 0) { 15007c478bd9Sstevel@tonic-gate t_id = np->e_itimer; 15017c478bd9Sstevel@tonic-gate np->e_itimer = 0; 15027c478bd9Sstevel@tonic-gate } 15037c478bd9Sstevel@tonic-gate mutex_exit(&np->e_ilock); 15047c478bd9Sstevel@tonic-gate /* 15057c478bd9Sstevel@tonic-gate * Reset the idle timer if it has been set 15067c478bd9Sstevel@tonic-gate */ 15077c478bd9Sstevel@tonic-gate if (t_id != (timeout_id_t)0) 15087c478bd9Sstevel@tonic-gate (void) untimeout(t_id); 15097c478bd9Sstevel@tonic-gate 15107c478bd9Sstevel@tonic-gate if (check_endpnt(endp, &new) == 0) 15117c478bd9Sstevel@tonic-gate return (new); 15127c478bd9Sstevel@tonic-gate } else if (np->e_cnt >= clnt_clts_max_endpoints) { 15137c478bd9Sstevel@tonic-gate /* 15147c478bd9Sstevel@tonic-gate * There are no idle endpoints currently, so 15157c478bd9Sstevel@tonic-gate * create a new one if we have not reached the maximum or 15167c478bd9Sstevel@tonic-gate * hand one out in round-robin. 
15177c478bd9Sstevel@tonic-gate */ 15187c478bd9Sstevel@tonic-gate mutex_exit(&np->e_ilock); 15197c478bd9Sstevel@tonic-gate mutex_enter(&np->e_plock); 15207c478bd9Sstevel@tonic-gate endp = np->e_pcurr; 15217c478bd9Sstevel@tonic-gate mutex_enter(&endp->e_lock); 15227c478bd9Sstevel@tonic-gate endp->e_ref++; 15237c478bd9Sstevel@tonic-gate mutex_exit(&endp->e_lock); 15247c478bd9Sstevel@tonic-gate 15257c478bd9Sstevel@tonic-gate ASSERT(endp != NULL); 15267c478bd9Sstevel@tonic-gate /* 15277c478bd9Sstevel@tonic-gate * Advance the pointer to the next eligible endpoint, if 15287c478bd9Sstevel@tonic-gate * necessary. 15297c478bd9Sstevel@tonic-gate */ 15307c478bd9Sstevel@tonic-gate if (np->e_cnt > 1) { 15317c478bd9Sstevel@tonic-gate next = (endpnt_t *)list_next(&np->e_pool, np->e_pcurr); 15327c478bd9Sstevel@tonic-gate if (next == NULL) 15337c478bd9Sstevel@tonic-gate next = (endpnt_t *)list_head(&np->e_pool); 15347c478bd9Sstevel@tonic-gate np->e_pcurr = next; 15357c478bd9Sstevel@tonic-gate } 15367c478bd9Sstevel@tonic-gate 15377c478bd9Sstevel@tonic-gate mutex_exit(&np->e_plock); 15387c478bd9Sstevel@tonic-gate 15397c478bd9Sstevel@tonic-gate /* 15407c478bd9Sstevel@tonic-gate * We need to check to see if this endpoint is bound or 15417c478bd9Sstevel@tonic-gate * not. If it is in progress then just wait until 15427c478bd9Sstevel@tonic-gate * the set up is complete 15437c478bd9Sstevel@tonic-gate */ 15447c478bd9Sstevel@tonic-gate if (check_endpnt(endp, &new) == 0) 15457c478bd9Sstevel@tonic-gate return (new); 15467c478bd9Sstevel@tonic-gate } else { 15477c478bd9Sstevel@tonic-gate mutex_exit(&np->e_ilock); 15487c478bd9Sstevel@tonic-gate mutex_enter(&np->e_plock); 15497c478bd9Sstevel@tonic-gate 15507c478bd9Sstevel@tonic-gate /* 15517c478bd9Sstevel@tonic-gate * Allocate a new endpoint to use. If we can't allocate any 15527c478bd9Sstevel@tonic-gate * more memory then use one that is already established if any 15537c478bd9Sstevel@tonic-gate * such endpoints exist. 
15547c478bd9Sstevel@tonic-gate */ 15557c478bd9Sstevel@tonic-gate new = kmem_cache_alloc(endpnt_cache, KM_NOSLEEP); 15567c478bd9Sstevel@tonic-gate if (new == NULL) { 15577c478bd9Sstevel@tonic-gate RPCLOG0(1, "endpnt_get: kmem_cache_alloc failed\n"); 15587c478bd9Sstevel@tonic-gate /* 15597c478bd9Sstevel@tonic-gate * Try to recover by using an existing endpoint. 15607c478bd9Sstevel@tonic-gate */ 15617c478bd9Sstevel@tonic-gate if (np->e_cnt <= 0) { 15627c478bd9Sstevel@tonic-gate mutex_exit(&np->e_plock); 15637c478bd9Sstevel@tonic-gate return (NULL); 15647c478bd9Sstevel@tonic-gate } 15657c478bd9Sstevel@tonic-gate endp = np->e_pcurr; 15667c478bd9Sstevel@tonic-gate if ((next = list_next(&np->e_pool, np->e_pcurr)) != 15677c478bd9Sstevel@tonic-gate NULL) 15687c478bd9Sstevel@tonic-gate np->e_pcurr = next; 15697c478bd9Sstevel@tonic-gate ASSERT(endp != NULL); 15707c478bd9Sstevel@tonic-gate mutex_enter(&endp->e_lock); 15717c478bd9Sstevel@tonic-gate endp->e_ref++; 15727c478bd9Sstevel@tonic-gate mutex_exit(&endp->e_lock); 15737c478bd9Sstevel@tonic-gate mutex_exit(&np->e_plock); 15747c478bd9Sstevel@tonic-gate 15757c478bd9Sstevel@tonic-gate if (check_endpnt(endp, &new) == 0) 15767c478bd9Sstevel@tonic-gate return (new); 15777c478bd9Sstevel@tonic-gate } else { 15787c478bd9Sstevel@tonic-gate /* 15797c478bd9Sstevel@tonic-gate * Partially init an endpoint structure and put 15807c478bd9Sstevel@tonic-gate * it on the list, so that other interested threads 15817c478bd9Sstevel@tonic-gate * know that one is being created 15827c478bd9Sstevel@tonic-gate */ 15837c478bd9Sstevel@tonic-gate bzero(new, sizeof (struct endpnt)); 15847c478bd9Sstevel@tonic-gate 15857c478bd9Sstevel@tonic-gate cv_init(&new->e_cv, NULL, CV_DEFAULT, NULL); 15867c478bd9Sstevel@tonic-gate mutex_init(&new->e_lock, NULL, MUTEX_DEFAULT, NULL); 15877c478bd9Sstevel@tonic-gate new->e_ref = 1; 15887c478bd9Sstevel@tonic-gate new->e_type = np; 15897c478bd9Sstevel@tonic-gate 15907c478bd9Sstevel@tonic-gate /* 
15917c478bd9Sstevel@tonic-gate * Link the endpoint into the pool. 15927c478bd9Sstevel@tonic-gate */ 15937c478bd9Sstevel@tonic-gate list_insert_head(&np->e_pool, new); 15947c478bd9Sstevel@tonic-gate np->e_cnt++; 15957c478bd9Sstevel@tonic-gate if (np->e_pcurr == NULL) 15967c478bd9Sstevel@tonic-gate np->e_pcurr = new; 15977c478bd9Sstevel@tonic-gate mutex_exit(&np->e_plock); 15987c478bd9Sstevel@tonic-gate } 15997c478bd9Sstevel@tonic-gate } 16007c478bd9Sstevel@tonic-gate 16017c478bd9Sstevel@tonic-gate /* 16027c478bd9Sstevel@tonic-gate * The transport should be opened with sufficient privs 16037c478bd9Sstevel@tonic-gate */ 160445916cd2Sjpk cr = zone_kcred(); 16057c478bd9Sstevel@tonic-gate error = t_kopen(NULL, config->knc_rdev, FREAD|FWRITE|FNDELAY, &tiptr, 160645916cd2Sjpk cr); 16077c478bd9Sstevel@tonic-gate if (error) { 16087c478bd9Sstevel@tonic-gate RPCLOG(1, "endpnt_get: t_kopen: %d\n", error); 16097c478bd9Sstevel@tonic-gate goto bad; 16107c478bd9Sstevel@tonic-gate } 16117c478bd9Sstevel@tonic-gate 16127c478bd9Sstevel@tonic-gate new->e_tiptr = tiptr; 16137c478bd9Sstevel@tonic-gate rpc_poptimod(tiptr->fp->f_vnode); 16147c478bd9Sstevel@tonic-gate 16157c478bd9Sstevel@tonic-gate /* 16167c478bd9Sstevel@tonic-gate * Allow the kernel to push the module on behalf of the user. 
16177c478bd9Sstevel@tonic-gate */ 16187c478bd9Sstevel@tonic-gate error = strioctl(tiptr->fp->f_vnode, I_PUSH, (intptr_t)"rpcmod", 0, 161945916cd2Sjpk K_TO_K, cr, &retval); 16207c478bd9Sstevel@tonic-gate if (error) { 16217c478bd9Sstevel@tonic-gate RPCLOG(1, "endpnt_get: kstr_push on rpcmod failed %d\n", error); 16227c478bd9Sstevel@tonic-gate goto bad; 16237c478bd9Sstevel@tonic-gate } 16247c478bd9Sstevel@tonic-gate 16257c478bd9Sstevel@tonic-gate error = strioctl(tiptr->fp->f_vnode, RPC_CLIENT, 0, 0, K_TO_K, 162645916cd2Sjpk cr, &retval); 16277c478bd9Sstevel@tonic-gate if (error) { 16287c478bd9Sstevel@tonic-gate RPCLOG(1, "endpnt_get: strioctl failed %d\n", error); 16297c478bd9Sstevel@tonic-gate goto bad; 16307c478bd9Sstevel@tonic-gate } 16317c478bd9Sstevel@tonic-gate 16327c478bd9Sstevel@tonic-gate /* 16337c478bd9Sstevel@tonic-gate * Connectionless data flow should bypass the stream head. 16347c478bd9Sstevel@tonic-gate */ 16357c478bd9Sstevel@tonic-gate new->e_wq = tiptr->fp->f_vnode->v_stream->sd_wrq->q_next; 16367c478bd9Sstevel@tonic-gate 16377c478bd9Sstevel@tonic-gate error = strioctl(tiptr->fp->f_vnode, I_PUSH, (intptr_t)"timod", 0, 163845916cd2Sjpk K_TO_K, cr, &retval); 16397c478bd9Sstevel@tonic-gate if (error) { 16407c478bd9Sstevel@tonic-gate RPCLOG(1, "endpnt_get: kstr_push on timod failed %d\n", error); 16417c478bd9Sstevel@tonic-gate goto bad; 16427c478bd9Sstevel@tonic-gate } 16437c478bd9Sstevel@tonic-gate 16447c478bd9Sstevel@tonic-gate /* 16457c478bd9Sstevel@tonic-gate * Attempt to bind the endpoint. If we fail then propogate 16467c478bd9Sstevel@tonic-gate * error back to calling subsystem, so that it can be handled 16477c478bd9Sstevel@tonic-gate * appropriately. 164878598ee3Snd150628 * If the caller has not specified reserved port usage then 164978598ee3Snd150628 * take the system default. 
16507c478bd9Sstevel@tonic-gate */ 165178598ee3Snd150628 if (useresvport == -1) 165278598ee3Snd150628 useresvport = clnt_clts_do_bindresvport; 165378598ee3Snd150628 165478598ee3Snd150628 if (useresvport && 16557c478bd9Sstevel@tonic-gate (strcmp(config->knc_protofmly, NC_INET) == 0 || 16567c478bd9Sstevel@tonic-gate strcmp(config->knc_protofmly, NC_INET6) == 0)) { 16577c478bd9Sstevel@tonic-gate 16587c478bd9Sstevel@tonic-gate while ((error = 16597c478bd9Sstevel@tonic-gate bindresvport(new->e_tiptr, NULL, NULL, FALSE)) != 0) { 16607c478bd9Sstevel@tonic-gate RPCLOG(1, 16618ffff9fdSgt29601 "endpnt_get: bindresvport error %d\n", error); 16627c478bd9Sstevel@tonic-gate if (error != EPROTO) { 16637c478bd9Sstevel@tonic-gate if (rtries-- <= 0) 16647c478bd9Sstevel@tonic-gate goto bad; 16657c478bd9Sstevel@tonic-gate 16667c478bd9Sstevel@tonic-gate delay(hz << i++); 16677c478bd9Sstevel@tonic-gate continue; 16687c478bd9Sstevel@tonic-gate } 16697c478bd9Sstevel@tonic-gate 16707c478bd9Sstevel@tonic-gate (void) t_kclose(new->e_tiptr, 1); 16717c478bd9Sstevel@tonic-gate /* 16727c478bd9Sstevel@tonic-gate * reopen with all privileges 16737c478bd9Sstevel@tonic-gate */ 16747c478bd9Sstevel@tonic-gate error = t_kopen(NULL, config->knc_rdev, 16757c478bd9Sstevel@tonic-gate FREAD|FWRITE|FNDELAY, 167645916cd2Sjpk &new->e_tiptr, cr); 16777c478bd9Sstevel@tonic-gate if (error) { 16787c478bd9Sstevel@tonic-gate RPCLOG(1, "endpnt_get: t_kopen: %d\n", error); 16797c478bd9Sstevel@tonic-gate new->e_tiptr = NULL; 16807c478bd9Sstevel@tonic-gate goto bad; 16817c478bd9Sstevel@tonic-gate } 16827c478bd9Sstevel@tonic-gate } 16837c478bd9Sstevel@tonic-gate } else if ((error = t_kbind(new->e_tiptr, NULL, NULL)) != 0) { 16847c478bd9Sstevel@tonic-gate RPCLOG(1, "endpnt_get: t_kbind failed: %d\n", error); 16857c478bd9Sstevel@tonic-gate goto bad; 16867c478bd9Sstevel@tonic-gate } 16877c478bd9Sstevel@tonic-gate 16887c478bd9Sstevel@tonic-gate /* 16897c478bd9Sstevel@tonic-gate * Set the flags and notify and waiters that we 
have an established 16907c478bd9Sstevel@tonic-gate * endpoint. 16917c478bd9Sstevel@tonic-gate */ 16927c478bd9Sstevel@tonic-gate mutex_enter(&new->e_lock); 16937c478bd9Sstevel@tonic-gate new->e_flags |= ENDPNT_ESTABLISHED; 16947c478bd9Sstevel@tonic-gate new->e_flags |= ENDPNT_BOUND; 16957c478bd9Sstevel@tonic-gate if (new->e_flags & ENDPNT_WAITING) { 16967c478bd9Sstevel@tonic-gate cv_broadcast(&new->e_cv); 16977c478bd9Sstevel@tonic-gate new->e_flags &= ~ENDPNT_WAITING; 16987c478bd9Sstevel@tonic-gate } 16997c478bd9Sstevel@tonic-gate mutex_exit(&new->e_lock); 17007c478bd9Sstevel@tonic-gate 17017c478bd9Sstevel@tonic-gate return (new); 17027c478bd9Sstevel@tonic-gate 17037c478bd9Sstevel@tonic-gate bad: 17047c478bd9Sstevel@tonic-gate ASSERT(new != NULL); 17057c478bd9Sstevel@tonic-gate /* 17067c478bd9Sstevel@tonic-gate * mark this endpoint as stale and notify any threads waiting 17077c478bd9Sstevel@tonic-gate * on this endpoint that it will be going away. 17087c478bd9Sstevel@tonic-gate */ 17097c478bd9Sstevel@tonic-gate mutex_enter(&new->e_lock); 17107c478bd9Sstevel@tonic-gate if (new->e_ref > 0) { 17117c478bd9Sstevel@tonic-gate new->e_flags |= ENDPNT_ESTABLISHED; 17127c478bd9Sstevel@tonic-gate new->e_flags |= ENDPNT_STALE; 17137c478bd9Sstevel@tonic-gate if (new->e_flags & ENDPNT_WAITING) { 17147c478bd9Sstevel@tonic-gate cv_broadcast(&new->e_cv); 17157c478bd9Sstevel@tonic-gate new->e_flags &= ~ENDPNT_WAITING; 17167c478bd9Sstevel@tonic-gate } 17177c478bd9Sstevel@tonic-gate } 17187c478bd9Sstevel@tonic-gate new->e_ref--; 17197c478bd9Sstevel@tonic-gate new->e_tiptr = NULL; 17207c478bd9Sstevel@tonic-gate mutex_exit(&new->e_lock); 17217c478bd9Sstevel@tonic-gate 17227c478bd9Sstevel@tonic-gate /* 17237c478bd9Sstevel@tonic-gate * If there was a transport endopoint opened, then close it. 
17247c478bd9Sstevel@tonic-gate */ 17257c478bd9Sstevel@tonic-gate if (tiptr != NULL) 17267c478bd9Sstevel@tonic-gate (void) t_kclose(tiptr, 1); 17277c478bd9Sstevel@tonic-gate 17287c478bd9Sstevel@tonic-gate return (NULL); 17297c478bd9Sstevel@tonic-gate } 17307c478bd9Sstevel@tonic-gate 17317c478bd9Sstevel@tonic-gate /* 17327c478bd9Sstevel@tonic-gate * Release a referece to the endpoint 17337c478bd9Sstevel@tonic-gate */ 17347c478bd9Sstevel@tonic-gate static void 17357c478bd9Sstevel@tonic-gate endpnt_rele(struct endpnt *sp) 17367c478bd9Sstevel@tonic-gate { 17377c478bd9Sstevel@tonic-gate mutex_enter(&sp->e_lock); 17387c478bd9Sstevel@tonic-gate ASSERT(sp->e_ref > 0); 17397c478bd9Sstevel@tonic-gate sp->e_ref--; 17407c478bd9Sstevel@tonic-gate /* 17417c478bd9Sstevel@tonic-gate * If the ref count is zero, then start the idle timer and link 17427c478bd9Sstevel@tonic-gate * the endpoint onto the idle list. 17437c478bd9Sstevel@tonic-gate */ 17447c478bd9Sstevel@tonic-gate if (sp->e_ref == 0) { 17457c478bd9Sstevel@tonic-gate sp->e_itime = gethrestime_sec(); 17467c478bd9Sstevel@tonic-gate 17477c478bd9Sstevel@tonic-gate /* 17487c478bd9Sstevel@tonic-gate * Check to see if the endpoint is already linked to the idle 17497c478bd9Sstevel@tonic-gate * list, so that we don't try to reinsert it. 
/*
 * Arm the idle-endpoint reap timer for this endpoint type if one is not
 * already pending.  Callers hold etp->e_ilock (see endpnt_rele()), which
 * serializes the check-and-set of e_itimer.
 */
static void
endpnt_reap_settimer(endpnt_type_t *etp)
{
	if (etp->e_itimer == (timeout_id_t)0)
		etp->e_itimer = timeout(endpnt_reap_dispatch, (void *)etp,
		    clnt_clts_taskq_dispatch_interval);
}

/*
 * timeout(9F) callback for the idle timer.
 */
static void
endpnt_reap_dispatch(void *a)
{
	endpnt_type_t *etp = a;

	/*
	 * The idle timer has fired, so dispatch the taskq to close the
	 * endpoint.  If the TQ_NOSLEEP dispatch fails we simply return;
	 * the timer will be re-armed the next time an endpoint goes idle.
	 *
	 * NOTE(review): taskq_dispatch() returns a taskqid_t, not a
	 * pointer; the comparison against NULL works only because the
	 * invalid id is 0 -- confirm against the taskq(9F) headers in
	 * this gate before changing.
	 */
	if (taskq_dispatch(endpnt_taskq, (task_func_t *)endpnt_reap, etp,
	    TQ_NOSLEEP) == NULL)
		return;
	/* Account the in-flight reap so endpnt_destructor() can wait for it */
	mutex_enter(&etp->e_ilock);
	etp->e_async_count++;
	mutex_exit(&etp->e_ilock);
}
/*
 * Traverse the idle list and close those endpoints that have reached their
 * timeout interval.
 *
 * Runs from endpnt_taskq (dispatched by endpnt_reap_dispatch()).  The
 * endpnt_t structures themselves are NOT freed here -- only the underlying
 * transport is closed; the cached endpnt_t stays on the idle list with
 * e_tiptr == NULL for possible reuse.
 */
static void
endpnt_reap(endpnt_type_t *etp)
{
	struct endpnt *e;
	struct endpnt *next_node = NULL;

	mutex_enter(&etp->e_ilock);
	e = list_head(&etp->e_ilist);
	while (e != NULL) {
		next_node = list_next(&etp->e_ilist, e);

		mutex_enter(&e->e_lock);
		if (e->e_ref > 0) {
			/* Endpoint was re-acquired; leave it alone. */
			mutex_exit(&e->e_lock);
			e = next_node;
			continue;
		}

		ASSERT(e->e_ref == 0);
		if (e->e_itime > 0 &&
		    (e->e_itime + clnt_clts_endpoint_reap_interval) <
		    gethrestime_sec()) {
			/*
			 * Idle past the reap interval: tear down the
			 * transport but keep the endpnt_t cached.
			 */
			e->e_flags &= ~ENDPNT_BOUND;
			(void) t_kclose(e->e_tiptr, 1);
			e->e_tiptr = NULL;
			e->e_itime = 0;
		}
		mutex_exit(&e->e_lock);
		e = next_node;
	}
	/* Allow a new timer to be set, and wake any waiter in the destructor */
	etp->e_itimer = 0;
	if (--etp->e_async_count == 0)
		cv_signal(&etp->e_async_cv);
	mutex_exit(&etp->e_ilock);
}
/*
 * Reclaim all idle (refcount-zero) endpoints belonging to the given zone,
 * or to every zone when zoneid == ALL_ZONES.  Unlike endpnt_reap(), this
 * unlinks the endpnt_t structures and frees them back to the kmem cache.
 * Called from the kmem reclaim callback (via endpnt_repossess()) and from
 * the zone destructor (endpnt_destructor()).
 */
static void
endpnt_reclaim(zoneid_t zoneid)
{
	struct endpnt_type *np;
	struct endpnt *e;
	struct endpnt *next_node = NULL;
	list_t free_list;
	int rcnt = 0;

	list_create(&free_list, sizeof (endpnt_t), offsetof(endpnt_t, e_node));

	RPCLOG0(1, "endpnt_reclaim: reclaim callback started\n");
	rw_enter(&endpnt_type_lock, RW_READER);
	for (np = endpnt_type_list; np != NULL; np = np->e_next) {
		if (zoneid != ALL_ZONES && zoneid != np->e_zoneid)
			continue;

		mutex_enter(&np->e_plock);
		RPCLOG(1, "endpnt_reclaim: protofmly %s, ",
		    np->e_protofmly);
		RPCLOG(1, "rdev %ld\n", np->e_rdev);
		RPCLOG(1, "endpnt_reclaim: found %d endpoint(s)\n",
		    np->e_cnt);

		if (np->e_cnt == 0) {
			mutex_exit(&np->e_plock);
			continue;
		}

		/*
		 * The nice thing about maintaining an idle list is that if
		 * there are any endpoints to reclaim, they are going to be
		 * on this list.  Just go through and reap the one's that
		 * have ref counts of zero.
		 */
		mutex_enter(&np->e_ilock);
		e = list_head(&np->e_ilist);
		while (e != NULL) {
			next_node = list_next(&np->e_ilist, e);
			mutex_enter(&e->e_lock);
			if (e->e_ref > 0) {
				mutex_exit(&e->e_lock);
				e = next_node;
				continue;
			}
			ASSERT(e->e_ref == 0);
			mutex_exit(&e->e_lock);

			/*
			 * Move the endpoint to a private free list so the
			 * expensive teardown (t_kclose etc.) can happen
			 * after all locks are dropped.
			 */
			list_remove(&np->e_ilist, e);
			list_remove(&np->e_pool, e);
			list_insert_head(&free_list, e);

			rcnt++;
			np->e_cnt--;
			e = next_node;
		}
		mutex_exit(&np->e_ilock);
		/*
		 * Reset the current pointer to be safe
		 */
		if ((e = (struct endpnt *)list_head(&np->e_pool)) != NULL)
			np->e_pcurr = e;
		else {
			ASSERT(np->e_cnt == 0);
			np->e_pcurr = NULL;
		}

		mutex_exit(&np->e_plock);
	}
	rw_exit(&endpnt_type_lock);

	/* Locks dropped: now close transports and free the endpoints. */
	while ((e = list_head(&free_list)) != NULL) {
		list_remove(&free_list, e);
		if (e->e_tiptr != NULL)
			(void) t_kclose(e->e_tiptr, 1);

		cv_destroy(&e->e_cv);
		mutex_destroy(&e->e_lock);
		kmem_cache_free(endpnt_cache, e);
	}
	list_destroy(&free_list);
	RPCLOG(1, "endpnt_reclaim: reclaimed %d endpoint(s)\n", rcnt);
}
18997c478bd9Sstevel@tonic-gate } 19007c478bd9Sstevel@tonic-gate 19017c478bd9Sstevel@tonic-gate mutex_exit(&np->e_plock); 19027c478bd9Sstevel@tonic-gate } 19037c478bd9Sstevel@tonic-gate rw_exit(&endpnt_type_lock); 19047c478bd9Sstevel@tonic-gate 19057c478bd9Sstevel@tonic-gate while ((e = list_head(&free_list)) != NULL) { 19067c478bd9Sstevel@tonic-gate list_remove(&free_list, e); 19077c478bd9Sstevel@tonic-gate if (e->e_tiptr != NULL) 19087c478bd9Sstevel@tonic-gate (void) t_kclose(e->e_tiptr, 1); 19097c478bd9Sstevel@tonic-gate 19107c478bd9Sstevel@tonic-gate cv_destroy(&e->e_cv); 19117c478bd9Sstevel@tonic-gate mutex_destroy(&e->e_lock); 19127c478bd9Sstevel@tonic-gate kmem_cache_free(endpnt_cache, e); 19137c478bd9Sstevel@tonic-gate } 19147c478bd9Sstevel@tonic-gate list_destroy(&free_list); 19157c478bd9Sstevel@tonic-gate RPCLOG(1, "endpnt_reclaim: reclaimed %d endpoint(s)\n", rcnt); 19167c478bd9Sstevel@tonic-gate } 19177c478bd9Sstevel@tonic-gate 19187c478bd9Sstevel@tonic-gate /* 19197c478bd9Sstevel@tonic-gate * Endpoint reclaim zones destructor callback routine. 19207c478bd9Sstevel@tonic-gate * 19217c478bd9Sstevel@tonic-gate * After reclaiming any cached entries, we basically go through the endpnt_type 19227c478bd9Sstevel@tonic-gate * list, canceling outstanding timeouts and free'ing data structures. 
19237c478bd9Sstevel@tonic-gate */ 19247c478bd9Sstevel@tonic-gate /* ARGSUSED */ 19257c478bd9Sstevel@tonic-gate static void 19267c478bd9Sstevel@tonic-gate endpnt_destructor(zoneid_t zoneid, void *a) 19277c478bd9Sstevel@tonic-gate { 19287c478bd9Sstevel@tonic-gate struct endpnt_type **npp; 19297c478bd9Sstevel@tonic-gate struct endpnt_type *np; 19307c478bd9Sstevel@tonic-gate struct endpnt_type *free_list = NULL; 19317c478bd9Sstevel@tonic-gate timeout_id_t t_id = 0; 19327c478bd9Sstevel@tonic-gate extern void clcleanup_zone(zoneid_t); 19337c478bd9Sstevel@tonic-gate extern void clcleanup4_zone(zoneid_t); 19347c478bd9Sstevel@tonic-gate 19357c478bd9Sstevel@tonic-gate /* Make sure NFS client handles are released. */ 19367c478bd9Sstevel@tonic-gate clcleanup_zone(zoneid); 19377c478bd9Sstevel@tonic-gate clcleanup4_zone(zoneid); 19387c478bd9Sstevel@tonic-gate 19397c478bd9Sstevel@tonic-gate endpnt_reclaim(zoneid); 19407c478bd9Sstevel@tonic-gate /* 19417c478bd9Sstevel@tonic-gate * We don't need to be holding on to any locks across the call to 19427c478bd9Sstevel@tonic-gate * endpnt_reclaim() and the code below; we know that no-one can 19437c478bd9Sstevel@tonic-gate * be holding open connections for this zone (all processes and kernel 19447c478bd9Sstevel@tonic-gate * threads are gone), so nothing could be adding anything to the list. 
19457c478bd9Sstevel@tonic-gate */ 19467c478bd9Sstevel@tonic-gate rw_enter(&endpnt_type_lock, RW_WRITER); 19477c478bd9Sstevel@tonic-gate npp = &endpnt_type_list; 19487c478bd9Sstevel@tonic-gate while ((np = *npp) != NULL) { 19497c478bd9Sstevel@tonic-gate if (np->e_zoneid != zoneid) { 19507c478bd9Sstevel@tonic-gate npp = &np->e_next; 19517c478bd9Sstevel@tonic-gate continue; 19527c478bd9Sstevel@tonic-gate } 19537c478bd9Sstevel@tonic-gate mutex_enter(&np->e_plock); 19547c478bd9Sstevel@tonic-gate mutex_enter(&np->e_ilock); 19557c478bd9Sstevel@tonic-gate if (np->e_itimer != 0) { 19567c478bd9Sstevel@tonic-gate t_id = np->e_itimer; 19577c478bd9Sstevel@tonic-gate np->e_itimer = 0; 19587c478bd9Sstevel@tonic-gate } 19597c478bd9Sstevel@tonic-gate ASSERT(np->e_cnt == 0); 19607c478bd9Sstevel@tonic-gate ASSERT(list_head(&np->e_pool) == NULL); 19617c478bd9Sstevel@tonic-gate ASSERT(list_head(&np->e_ilist) == NULL); 19627c478bd9Sstevel@tonic-gate 19637c478bd9Sstevel@tonic-gate mutex_exit(&np->e_ilock); 19647c478bd9Sstevel@tonic-gate mutex_exit(&np->e_plock); 19657c478bd9Sstevel@tonic-gate 19667c478bd9Sstevel@tonic-gate /* 19677c478bd9Sstevel@tonic-gate * untimeout() any outstanding timers that have not yet fired. 19687c478bd9Sstevel@tonic-gate */ 19697c478bd9Sstevel@tonic-gate if (t_id != (timeout_id_t)0) 19707c478bd9Sstevel@tonic-gate (void) untimeout(t_id); 19717c478bd9Sstevel@tonic-gate *npp = np->e_next; 19727c478bd9Sstevel@tonic-gate np->e_next = free_list; 19737c478bd9Sstevel@tonic-gate free_list = np; 19747c478bd9Sstevel@tonic-gate } 19757c478bd9Sstevel@tonic-gate rw_exit(&endpnt_type_lock); 19767c478bd9Sstevel@tonic-gate 19777c478bd9Sstevel@tonic-gate while (free_list != NULL) { 19787c478bd9Sstevel@tonic-gate np = free_list; 19797c478bd9Sstevel@tonic-gate free_list = free_list->e_next; 19807c478bd9Sstevel@tonic-gate /* 19817c478bd9Sstevel@tonic-gate * Wait for threads in endpnt_taskq trying to reap endpnt_ts in 19827c478bd9Sstevel@tonic-gate * the endpnt_type_t. 
19837c478bd9Sstevel@tonic-gate */ 19847c478bd9Sstevel@tonic-gate mutex_enter(&np->e_ilock); 19857c478bd9Sstevel@tonic-gate while (np->e_async_count > 0) 19867c478bd9Sstevel@tonic-gate cv_wait(&np->e_async_cv, &np->e_ilock); 19877c478bd9Sstevel@tonic-gate cv_destroy(&np->e_async_cv); 19887c478bd9Sstevel@tonic-gate mutex_destroy(&np->e_plock); 19897c478bd9Sstevel@tonic-gate mutex_destroy(&np->e_ilock); 19907c478bd9Sstevel@tonic-gate list_destroy(&np->e_pool); 19917c478bd9Sstevel@tonic-gate list_destroy(&np->e_ilist); 19927c478bd9Sstevel@tonic-gate kmem_free(np, sizeof (endpnt_type_t)); 19937c478bd9Sstevel@tonic-gate } 19947c478bd9Sstevel@tonic-gate } 19957c478bd9Sstevel@tonic-gate 19967c478bd9Sstevel@tonic-gate /* 19977c478bd9Sstevel@tonic-gate * Endpoint reclaim kmem callback routine. 19987c478bd9Sstevel@tonic-gate */ 19997c478bd9Sstevel@tonic-gate /* ARGSUSED */ 20007c478bd9Sstevel@tonic-gate static void 20017c478bd9Sstevel@tonic-gate endpnt_repossess(void *a) 20027c478bd9Sstevel@tonic-gate { 20037c478bd9Sstevel@tonic-gate /* 20047c478bd9Sstevel@tonic-gate * Reclaim idle endpnt's from all zones. 20057c478bd9Sstevel@tonic-gate */ 20067c478bd9Sstevel@tonic-gate if (endpnt_taskq != NULL) 20077c478bd9Sstevel@tonic-gate (void) taskq_dispatch(endpnt_taskq, 20087c478bd9Sstevel@tonic-gate (task_func_t *)endpnt_reclaim, (void *)ALL_ZONES, 20097c478bd9Sstevel@tonic-gate TQ_NOSLEEP); 20107c478bd9Sstevel@tonic-gate } 20117c478bd9Sstevel@tonic-gate 20127c478bd9Sstevel@tonic-gate /* 20137c478bd9Sstevel@tonic-gate * RPC request dispatch routine. Constructs a datagram message and wraps it 20147c478bd9Sstevel@tonic-gate * around the RPC request to pass downstream. 
/*
 * RPC request dispatch routine. Constructs a datagram message and wraps it
 * around the RPC request to pass downstream.
 *
 * Returns 0 on success, ENOSR if STREAMS buffer resources could not be
 * obtained, or EIO if the write queue is flow-controlled.  The calllist
 * entry cp is placed on the dispatch hash table in all cases.
 */
static int
clnt_clts_dispatch_send(queue_t *q, mblk_t *mp, struct netbuf *addr,
	calllist_t *cp, uint_t xid, cred_t *cr)
{
	mblk_t *bp;
	int msgsz;
	struct T_unitdata_req *udreq;

	/*
	 * Set up the call record.
	 */
	cp->call_wq = q;
	cp->call_xid = xid;
	cp->call_status = RPC_TIMEDOUT;
	cp->call_notified = FALSE;
	RPCLOG(64,
	    "clnt_clts_dispatch_send: putting xid 0x%x on "
	    "dispatch list\n", xid);
	cp->call_hash = call_hash(xid, clnt_clts_hash_size);
	cp->call_bucket = &clts_call_ht[cp->call_hash];
	call_table_enter(cp);

	/*
	 * Construct the datagram
	 */
	msgsz = (int)TUNITDATAREQSZ;
	/*
	 * Note: if the receiver uses SCM_UCRED/getpeerucred the pid will
	 * appear as -1.
	 */
	while (!(bp = allocb_cred(msgsz + addr->len, cr, NOPID))) {
		/* Block until buffers are available, or fail with ENOSR. */
		if (strwaitbuf(msgsz + addr->len, BPRI_LO))
			return (ENOSR);
	}

	udreq = (struct T_unitdata_req *)bp->b_wptr;
	udreq->PRIM_type = T_UNITDATA_REQ;
	udreq->DEST_length = addr->len;

	if (addr->len) {
		/* Destination address follows the T_unitdata_req header. */
		bcopy(addr->buf, bp->b_wptr + msgsz, addr->len);
		udreq->DEST_offset = (t_scalar_t)msgsz;
		msgsz += addr->len;
	} else
		udreq->DEST_offset = 0;
	udreq->OPT_length = 0;
	udreq->OPT_offset = 0;

	bp->b_datap->db_type = M_PROTO;
	bp->b_wptr += msgsz;

	/*
	 * Link the datagram header with the actual data
	 */
	linkb(bp, mp);

	/*
	 * Send downstream.
	 */
	if (canput(cp->call_wq)) {
		put(cp->call_wq, bp);
		return (0);
	}

	/*
	 * NOTE(review): on this flow-control path bp (with mp linked into
	 * it) is not freed here and cp remains on the dispatch table;
	 * presumably the caller owns cleanup of both -- verify against
	 * clnt_clts_kcallit().
	 */
	return (EIO);
}
/*
 * RPC response delivery routine.  Deliver the response to the waiting
 * thread by matching the xid.
 *
 * mp is the received message, resp_off the offset of the RPC reply within
 * it, and zoneid the zone the datagram arrived in.  On a match the reply
 * is handed to the waiter via its calllist entry; otherwise it is freed
 * and the zone's "rcbadxids" statistic is incremented.
 */
void
clnt_clts_dispatch_notify(mblk_t *mp, int resp_off, zoneid_t zoneid)
{
	calllist_t *e = NULL;
	call_table_t *chtp;
	uint32_t xid;
	uint_t hash;
	unsigned char *hdr_offset;
	mblk_t *resp;

	/*
	 * If the RPC response is not contained in the same mblk as the
	 * datagram header, then move to the next mblk.
	 */
	hdr_offset = mp->b_rptr;
	resp = mp;
	if ((mp->b_wptr - (mp->b_rptr + resp_off)) == 0)
		resp = mp->b_cont;
	else
		resp->b_rptr += resp_off;

	ASSERT(resp != NULL);

	/*
	 * Fast path: the xid is aligned and wholly contained in this mblk.
	 */
	if ((IS_P2ALIGNED(resp->b_rptr, sizeof (uint32_t))) &&
	    (resp->b_wptr - resp->b_rptr) >= sizeof (xid))
		xid = *((uint32_t *)resp->b_rptr);
	else {
		int i = 0;
		unsigned char *p = (unsigned char *)&xid;
		unsigned char *rptr;
		mblk_t *tmp = resp;

		/*
		 * Copy the xid, byte-by-byte into xid.
		 */
		while (tmp) {
			rptr = tmp->b_rptr;
			while (rptr < tmp->b_wptr) {
				*p++ = *rptr++;
				if (++i >= sizeof (xid))
					goto done_xid_copy;
			}
			tmp = tmp->b_cont;
		}

		/*
		 * If we got here, we ran out of mblk space before the
		 * xid could be copied.
		 */
		ASSERT(tmp == NULL && i < sizeof (xid));

		RPCLOG0(1,
		    "clnt_dispatch_notify(clts): message less than "
		    "size of xid\n");

		freemsg(mp);
		return;
	}

done_xid_copy:

	/*
	 * Reset the read pointer back to the beginning of the protocol
	 * header if we moved it.
	 */
	if (mp->b_rptr != hdr_offset)
		mp->b_rptr = hdr_offset;

	hash = call_hash(xid, clnt_clts_hash_size);
	chtp = &clts_call_ht[hash];
	/* call_table_find returns with the hash bucket locked */
	call_table_find(chtp, xid, e);

	if (e != NULL) {
		mutex_enter(&e->call_lock);

		/*
		 * verify that the reply is coming in on
		 * the same zone that it was sent from.
		 */
		if (e->call_zoneid != zoneid) {
			mutex_exit(&e->call_lock);
			mutex_exit(&chtp->ct_lock);
			RPCLOG0(8, "clnt_dispatch_notify (clts): incorrect "
			    "zoneid\n");
			freemsg(mp);
			return;
		}

		/*
		 * found thread waiting for this reply.
		 */
		if (e->call_reply) {
			/* A previous (retransmit) reply is still queued. */
			RPCLOG(8,
			    "clnt_dispatch_notify (clts): discarding old "
			    "reply for xid 0x%x\n",
			    xid);
			freemsg(e->call_reply);
		}
		e->call_notified = TRUE;
		e->call_reply = mp;
		e->call_status = RPC_SUCCESS;
		cv_signal(&e->call_cv);
		mutex_exit(&e->call_lock);
		mutex_exit(&chtp->ct_lock);
	} else {
		zone_t *zone;
		struct rpcstat *rpcstat;

		mutex_exit(&chtp->ct_lock);
		RPCLOG(8, "clnt_dispatch_notify (clts): no caller for reply "
		    "0x%x\n", xid);
		freemsg(mp);
		/*
		 * This is unfortunate, but we need to lookup the zone so we
		 * can increment its "rcbadxids" counter.
		 */
		zone = zone_find_by_id(zoneid);
		if (zone == NULL) {
			/*
			 * The zone went away...
			 */
			return;
		}
		rpcstat = zone_getspecific(rpcstat_zone_key, zone);
		if (zone_status_get(zone) >= ZONE_IS_SHUTTING_DOWN) {
			/*
			 * Not interested
			 */
			zone_rele(zone);
			return;
		}
		RCSTAT_INCR(rpcstat->rpc_clts_client, rcbadxids);
		zone_rele(zone);
	}
}
/*
 * Init routine. Called when rpcmod is loaded.
 *
 * Creates the endpoint kmem cache (with endpnt_repossess as the reclaim
 * callback), clamps the endpoint/hash tunables to sane ranges, initializes
 * the dispatch hash table, and registers the per-zone destructor.
 */
void
clnt_clts_init(void)
{
	endpnt_cache = kmem_cache_create("clnt_clts_endpnt_cache",
	    sizeof (struct endpnt), 0, NULL, NULL, endpnt_repossess, NULL,
	    NULL, 0);

	rw_init(&endpnt_type_lock, NULL, RW_DEFAULT, NULL);

	/*
	 * Perform simple bounds checking to make sure that the setting is
	 * reasonable
	 */
	if (clnt_clts_max_endpoints <= 0) {
		if (clnt_clts_do_bindresvport)
			clnt_clts_max_endpoints = RESERVED_PORTSPACE;
		else
			clnt_clts_max_endpoints = NONRESERVED_PORTSPACE;
	}

	/* Cap the endpoint count at the applicable port space size. */
	if (clnt_clts_do_bindresvport &&
	    clnt_clts_max_endpoints > RESERVED_PORTSPACE)
		clnt_clts_max_endpoints = RESERVED_PORTSPACE;
	else if (clnt_clts_max_endpoints > NONRESERVED_PORTSPACE)
		clnt_clts_max_endpoints = NONRESERVED_PORTSPACE;

	if (clnt_clts_hash_size < DEFAULT_MIN_HASH_SIZE)
		clnt_clts_hash_size = DEFAULT_MIN_HASH_SIZE;

	/*
	 * Defer creating the taskq until rpcmod gets pushed.  If we are
	 * in diskless boot mode, rpcmod will get loaded early even before
	 * thread_create() is available.
	 */
	endpnt_taskq = NULL;
	taskq_created = FALSE;
	mutex_init(&endpnt_taskq_lock, NULL, MUTEX_DEFAULT, NULL);

	if (clnt_clts_endpoint_reap_interval < DEFAULT_ENDPOINT_REAP_INTERVAL)
		clnt_clts_endpoint_reap_interval =
		    DEFAULT_ENDPOINT_REAP_INTERVAL;

	/*
	 * Dispatch the taskq at an interval which is offset from the
	 * interval that the endpoints should be reaped.
	 */
	clnt_clts_taskq_dispatch_interval =
	    (clnt_clts_endpoint_reap_interval + DEFAULT_INTERVAL_SHIFT) * hz;

	/*
	 * Initialize the completion queue
	 */
	clts_call_ht = call_table_init(clnt_clts_hash_size);
	/*
	 * Initialize the zone destructor callback.
	 */
	zone_key_create(&endpnt_destructor_key, NULL, NULL, endpnt_destructor);
}
/*
 * Fini routine.  Unregisters the zone destructor installed by
 * clnt_clts_init(); zone_key_delete() runs the destructor for any
 * remaining zones.
 */
void
clnt_clts_fini(void)
{
	(void) zone_key_delete(endpnt_destructor_key);
}