/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	All Rights Reserved	*/
/*
 * Portions of this source code were derived from Berkeley
 * 4.3 BSD under license from the Regents of the University of
 * California.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"


/*
 * svc_dg.c, Server side for connectionless RPC.
 *
 * Does some caching in the hopes of achieving execute-at-most-once semantics.
 */

#include "mt.h"
#include "rpc_mt.h"
#include <stdio.h>
#include <sys/types.h>
#include <rpc/trace.h>
#include <rpc/rpc.h>
#include <errno.h>
#include <syslog.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#ifdef RPC_CACHE_DEBUG
#include <netconfig.h>
#include <netdir.h>
#endif

#ifndef MAX
#define	MAX(a, b)	(((a) > (b)) ? (a) : (b))
#endif

static struct xp_ops *svc_dg_ops();
static void cache_set();
static int cache_get();

#define	MAX_OPT_WORDS	128	/* needs to fit a ucred */

/*
 * kept in xprt->xp_p2
 */
struct svc_dg_data {
	/* XXX: optbuf should be the first field, used by ti_opts.c code */
	struct netbuf optbuf;			/* netbuf for options */
	int	opts[MAX_OPT_WORDS];		/* options */
	uint_t	su_iosz;			/* size of send/recv buffer */
	uint32_t	su_xid;			/* transaction id */
	XDR	su_xdrs;			/* XDR handle */
	char	su_verfbody[MAX_AUTH_BYTES];	/* verifier body */
	char	*su_cache;			/* cached data, NULL if none */
	struct t_unitdata su_tudata;		/* tu_data for recv */
};
#define	su_data(xprt)	((struct svc_dg_data *)(xprt->xp_p2))
#define	rpc_buffer(xprt) ((xprt)->xp_p1)

/*
 * Usage:
 *	xprt = svc_dg_create(sock, sendsize, recvsize);
 * Does other connectionless specific initializations.
 * Once *xprt is initialized, it is registered.
 * see (svc.h, xprt_register). If recvsize or sendsize are 0 suitable
 * system defaults are chosen.
 * The routine returns NULL if a problem occurred.
 */
static const char svc_dg_str[] = "svc_dg_create: %s";
static const char svc_dg_err1[] = "could not get transport information";
static const char svc_dg_err2[] = " transport does not support data transfer";
static const char svc_dg_err3[] =
	"fd > FD_SETSIZE; Use rpc_control(RPC_SVC_USE_POLLFD,...);";
static const char __no_mem_str[] = "out of memory";

/* Structure used to initialize SVC_XP_AUTH(xprt).svc_ah_ops. */
extern struct svc_auth_ops svc_auth_any_ops;
extern int __rpc_get_ltaddr(struct netbuf *, struct netbuf *);

void
svc_dg_xprtfree(xprt)
	SVCXPRT	*xprt;
{
/* LINTED pointer alignment */
	SVCXPRT_EXT	*xt = xprt ? SVCEXT(xprt) : NULL;
/* LINTED pointer alignment */
	struct svc_dg_data	*su = xprt ? su_data(xprt) : NULL;

	if (xprt == NULL)
		return;
	if (xprt->xp_netid)
		free((char *)xprt->xp_netid);
	if (xprt->xp_tp)
		free((char *)xprt->xp_tp);
	if (xt->parent == NULL)
		if (xprt->xp_ltaddr.buf)
			free(xprt->xp_ltaddr.buf);
	if (xprt->xp_rtaddr.buf)
		free(xprt->xp_rtaddr.buf);
	if (su != NULL) {
		XDR_DESTROY(&(su->su_xdrs));
		free((char *)su);
	}
	if (rpc_buffer(xprt))
		free((char *)rpc_buffer(xprt));
	svc_xprt_free(xprt);
}

SVCXPRT *
svc_dg_create_private(fd, sendsize, recvsize)
	int fd;
	uint_t sendsize;
	uint_t recvsize;
{
	SVCXPRT *xprt;
	struct svc_dg_data *su = NULL;
	struct t_info tinfo;

	trace4(TR_svc_dg_create, 0, fd, sendsize, recvsize);
	if (RPC_FD_NOTIN_FDSET(fd)) {
		errno = EBADF;
		t_errno = TBADF;
		syslog(LOG_ERR, svc_dg_str, svc_dg_err3);
		trace2(TR_svc_dg_create, 1, fd);
		return ((SVCXPRT *)NULL);
	}

	if (t_getinfo(fd, &tinfo) == -1) {
		syslog(LOG_ERR, svc_dg_str, svc_dg_err1);
		trace2(TR_svc_dg_create, 1, fd);
		return ((SVCXPRT *)NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	sendsize = __rpc_get_t_size((int)sendsize, tinfo.tsdu);
	recvsize = __rpc_get_t_size((int)recvsize, tinfo.tsdu);
	if ((sendsize == 0) || (recvsize == 0)) {
		syslog(LOG_ERR, svc_dg_str, svc_dg_err2);
		trace2(TR_svc_dg_create, 1, fd);
		return ((SVCXPRT *)NULL);
	}

	if ((xprt = svc_xprt_alloc()) == NULL)
		goto freedata;
/* LINTED pointer alignment */
	svc_flags(xprt) |= SVC_DGRAM;

	su = (struct svc_dg_data *)mem_alloc(sizeof (*su));
	if (su == NULL)
		goto freedata;
	su->su_iosz = ((MAX(sendsize, recvsize) + 3) / 4) * 4;
	if ((rpc_buffer(xprt) = (char *)mem_alloc(su->su_iosz)) == NULL)
		goto freedata;
	xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt), su->su_iosz,
		XDR_DECODE);
	su->su_cache = NULL;
	xprt->xp_fd = fd;
	xprt->xp_p2 = (caddr_t)su;
	xprt->xp_verf.oa_base = su->su_verfbody;
	xprt->xp_ops = svc_dg_ops();

	su->su_tudata.addr.maxlen = 0; /* Fill in later */

	su->su_tudata.udata.buf = (char *)rpc_buffer(xprt);
	su->su_tudata.opt.buf = (char *)su->opts;
	su->su_tudata.udata.maxlen = su->su_iosz;
	su->su_tudata.opt.maxlen = MAX_OPT_WORDS << 2;	/* no of bytes */
/* LINTED pointer alignment */
	SVC_XP_AUTH(xprt).svc_ah_ops = svc_auth_any_ops;
/* LINTED pointer alignment */
	SVC_XP_AUTH(xprt).svc_ah_private = NULL;
	trace2(TR_svc_dg_create, 1, fd);
	return (xprt);
freedata:
	(void) syslog(LOG_ERR, svc_dg_str, __no_mem_str);
	if (xprt)
		svc_dg_xprtfree(xprt);
	trace2(TR_svc_dg_create, 1, fd);
	return ((SVCXPRT *)NULL);
}

SVCXPRT *
svc_dg_create(fd, sendsize, recvsize)
	int fd;
	uint_t sendsize;
	uint_t recvsize;
{
	SVCXPRT *xprt;

	if ((xprt = svc_dg_create_private(fd, sendsize, recvsize)) != NULL)
		xprt_register(xprt);
	return (xprt);
}
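
/*
 * The example below is not part of the original file. It is a minimal,
 * illustrative sketch of how a caller might use svc_dg_create() to set up
 * a connectionless transport. The guard macro SVC_DG_EXAMPLE, the program
 * and version numbers, the dispatch routine and the choice of "/dev/udp"
 * are all assumptions made for the example only.
 */
#ifdef SVC_DG_EXAMPLE
#include <fcntl.h>
#include <tiuser.h>

#define	EXAMPLE_PROG	((rpcprog_t)0x20000099)	/* hypothetical program no. */
#define	EXAMPLE_VERS	((rpcvers_t)1)

static void
example_dispatch(struct svc_req *rqstp, SVCXPRT *transp)
{
	/* decode arguments with svc_getargs(), answer with svc_sendreply() */
}

static int
example_server(void)
{
	int fd;
	SVCXPRT *xprt;

	/* open and bind a connectionless transport endpoint */
	if ((fd = t_open("/dev/udp", O_RDWR, NULL)) == -1)
		return (-1);
	if (t_bind(fd, NULL, NULL) == -1)
		return (-1);

	/* sizes of 0 let the library choose suitable system defaults */
	if ((xprt = svc_dg_create(fd, 0, 0)) == NULL)
		return (-1);

	/* NULL netconfig: skip the rpcbind registration in this sketch */
	if (!svc_reg(xprt, EXAMPLE_PROG, EXAMPLE_VERS, example_dispatch, NULL))
		return (-1);

	svc_run();	/* dispatches requests; normally does not return */
	return (0);
}
#endif	/* SVC_DG_EXAMPLE */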

SVCXPRT *
svc_dg_xprtcopy(parent)
	SVCXPRT	*parent;
{
	SVCXPRT			*xprt;
	struct svc_dg_data	*su;

	if ((xprt = svc_xprt_alloc()) == NULL)
		return (NULL);

/* LINTED pointer alignment */
	SVCEXT(xprt)->parent = parent;
/* LINTED pointer alignment */
	SVCEXT(xprt)->flags = SVCEXT(parent)->flags;

	xprt->xp_fd = parent->xp_fd;
	xprt->xp_port = parent->xp_port;
	xprt->xp_ops = svc_dg_ops();
	if (parent->xp_tp) {
		xprt->xp_tp = (char *)strdup(parent->xp_tp);
		if (xprt->xp_tp == NULL) {
			syslog(LOG_ERR, "svc_dg_xprtcopy: strdup failed");
			svc_dg_xprtfree(xprt);
			return (NULL);
		}
	}
	if (parent->xp_netid) {
		xprt->xp_netid = (char *)strdup(parent->xp_netid);
		if (xprt->xp_netid == NULL) {
			syslog(LOG_ERR, "svc_dg_xprtcopy: strdup failed");
			if (parent->xp_tp)
				free(parent->xp_tp);
			svc_dg_xprtfree(xprt);
			return (NULL);
		}
	}
	xprt->xp_ltaddr = parent->xp_ltaddr;	/* shared with parent */

	xprt->xp_rtaddr = parent->xp_rtaddr;
	xprt->xp_rtaddr.buf = (char *)malloc(xprt->xp_rtaddr.maxlen);
	if (xprt->xp_rtaddr.buf == NULL) {
		svc_dg_xprtfree(xprt);
		return (NULL);
	}
	memcpy(xprt->xp_rtaddr.buf, parent->xp_rtaddr.buf,
			xprt->xp_rtaddr.maxlen);
	xprt->xp_type = parent->xp_type;

	if ((su = (struct svc_dg_data *)malloc(sizeof (struct svc_dg_data)))
			== NULL) {
		svc_dg_xprtfree(xprt);
		return (NULL);
	}
/* LINTED pointer alignment */
	su->su_iosz = su_data(parent)->su_iosz;
	if ((rpc_buffer(xprt) = (char *)mem_alloc(su->su_iosz)) == NULL) {
		svc_dg_xprtfree(xprt);
		free((char *)su);
		return (NULL);
	}
	xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt), su->su_iosz,
			XDR_DECODE);
	su->su_cache = NULL;
	su->su_tudata.addr.maxlen = 0; /* Fill in later */
	su->su_tudata.udata.buf = (char *)rpc_buffer(xprt);
	su->su_tudata.opt.buf = (char *)su->opts;
	su->su_tudata.udata.maxlen = su->su_iosz;
	su->su_tudata.opt.maxlen = MAX_OPT_WORDS << 2;	/* no of bytes */
	xprt->xp_p2 = (caddr_t)su;	/* su_data(xprt) = su */
	xprt->xp_verf.oa_base = su->su_verfbody;

	return (xprt);
}

/*ARGSUSED*/
static enum xprt_stat
svc_dg_stat(xprt)
	SVCXPRT *xprt;
{
	trace1(TR_svc_dg_stat, 0);
	trace1(TR_svc_dg_stat, 1);
	return (XPRT_IDLE);
}

static bool_t
svc_dg_recv(xprt, msg)
	SVCXPRT *xprt;
	struct rpc_msg *msg;
{
/* LINTED pointer alignment */
	struct svc_dg_data *su = su_data(xprt);
	XDR *xdrs = &(su->su_xdrs);
	struct t_unitdata *tu_data = &(su->su_tudata);
	int moreflag;
	struct netbuf *nbufp;
	struct netconfig *nconf;

	/* XXX: tudata should have been made a part of the server handle */
	trace1(TR_svc_dg_recv, 0);

	if (tu_data->addr.maxlen == 0)
		tu_data->addr = xprt->xp_rtaddr;
again:
	tu_data->addr.len = 0;
	tu_data->opt.len = 0;
	tu_data->udata.len = 0;

	moreflag = 0;
	if (t_rcvudata(xprt->xp_fd, tu_data, &moreflag) == -1) {
#ifdef RPC_DEBUG
		syslog(LOG_ERR, "svc_dg_recv: t_rcvudata t_errno=%d errno=%d\n",
			t_errno, errno);
#endif
		if (t_errno == TLOOK) {
			int lookres;

			lookres = t_look(xprt->xp_fd);
			if ((lookres & T_UDERR) &&
				(t_rcvuderr(xprt->xp_fd,
					(struct t_uderr *)0) < 0)) {
				/*EMPTY*/
#ifdef RPC_DEBUG
				syslog(LOG_ERR,
				"svc_dg_recv: t_rcvuderr t_errno = %d\n",
					t_errno);
#endif
			}
			if (lookres & T_DATA)
				goto again;
		} else if ((errno == EINTR) && (t_errno == TSYSERR))
			goto again;
		else {
			trace1(TR_svc_dg_recv, 1);
			return (FALSE);
		}
	}

	if ((moreflag) ||
		(tu_data->udata.len < 4 * (uint_t)sizeof (uint32_t))) {
		/*
		 * If moreflag is set, drop that data packet. Something wrong
		 */
		trace1(TR_svc_dg_recv, 1);
		return (FALSE);
	}
	su->optbuf = tu_data->opt;
	xprt->xp_rtaddr.len = tu_data->addr.len;
	xdrs->x_op = XDR_DECODE;
	XDR_SETPOS(xdrs, 0);
	if (! xdr_callmsg(xdrs, msg)) {
		trace1(TR_svc_dg_recv, 1);
		return (FALSE);
	}
	su->su_xid = msg->rm_xid;
	if (su->su_cache != NULL) {
		char *reply;
		uint32_t replylen;

		if (cache_get(xprt, msg, &reply, &replylen)) {
			/* tu_data.addr is already set */
			tu_data->udata.buf = reply;
			tu_data->udata.len = (uint_t)replylen;
			tu_data->opt.len = 0;
			(void) t_sndudata(xprt->xp_fd, tu_data);
			tu_data->udata.buf = (char *)rpc_buffer(xprt);
			trace1(TR_svc_dg_recv, 1);
			return (FALSE);
		}
	}

	/*
	 * get local ip address
	 */

	if ((nconf = getnetconfigent(xprt->xp_netid)) != NULL) {
		if (strcmp(nconf->nc_protofmly, NC_INET) == 0 ||
			strcmp(nconf->nc_protofmly, NC_INET6) == 0) {
			if (nconf->nc_semantics == NC_TPI_CLTS) {
				nbufp = (struct netbuf *)(xprt->xp_p2);
				if (__rpc_get_ltaddr(nbufp, &xprt->xp_ltaddr) < 0) {
					if (strcmp(nconf->nc_protofmly, NC_INET) == 0) {
						syslog(LOG_ERR,
					"svc_dg_recv: ip(udp), t_errno=%d, errno=%d",
							t_errno, errno);
					}
					if (strcmp(nconf->nc_protofmly, NC_INET6) == 0) {
						syslog(LOG_ERR,
					"svc_dg_recv: ip (udp6), t_errno=%d, errno=%d",
							t_errno, errno);
					}
					freenetconfigent(nconf);
					trace1(TR_svc_dg_recv, 1);
					return (FALSE);
				}
			}
		}
		freenetconfigent(nconf);
	}
	trace1(TR_svc_dg_recv, 1);
	return (TRUE);
}

static bool_t
svc_dg_reply(xprt, msg)
	SVCXPRT *xprt;
	struct rpc_msg *msg;
{
/* LINTED pointer alignment */
	struct svc_dg_data *su = su_data(xprt);
	XDR *xdrs = &(su->su_xdrs);
	bool_t stat = FALSE;
	xdrproc_t xdr_results;
	caddr_t xdr_location;
	bool_t has_args;

	trace1(TR_svc_dg_reply, 0);
	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
		msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		has_args = TRUE;
		xdr_results = msg->acpted_rply.ar_results.proc;
		xdr_location = msg->acpted_rply.ar_results.where;
		msg->acpted_rply.ar_results.proc = xdr_void;
		msg->acpted_rply.ar_results.where = NULL;
	} else
		has_args = FALSE;

	xdrs->x_op = XDR_ENCODE;
	XDR_SETPOS(xdrs, 0);
	msg->rm_xid = su->su_xid;
	if (xdr_replymsg(xdrs, msg) && (!has_args ||
/* LINTED pointer alignment */
		SVCAUTH_WRAP(&SVC_XP_AUTH(xprt), xdrs, xdr_results,
			xdr_location))) {
		int slen;
		struct t_unitdata *tu_data = &(su->su_tudata);

		slen = (int)XDR_GETPOS(xdrs);
		tu_data->udata.len = slen;
		tu_data->opt.len = 0;
try_again:
		if (t_sndudata(xprt->xp_fd, tu_data) == 0) {
			stat = TRUE;
			if (su->su_cache && slen >= 0) {
				cache_set(xprt, (uint32_t)slen);
			}
		} else {
			if (errno == EINTR)
				goto try_again;

			syslog(LOG_ERR,
			"svc_dg_reply: t_sndudata error t_errno=%d errno=%d\n",
				t_errno, errno);
		}
	}
	trace1(TR_svc_dg_reply, 1);
	return (stat);
}

static bool_t
svc_dg_getargs(xprt, xdr_args, args_ptr)
	SVCXPRT *xprt;
	xdrproc_t xdr_args;
	caddr_t args_ptr;
{
	bool_t dummy_stat1;

	trace1(TR_svc_dg_getargs, 0);
	if (svc_mt_mode != RPC_SVC_MT_NONE)
		svc_args_done(xprt);
/* LINTED pointer alignment */
	dummy_stat1 = SVCAUTH_UNWRAP(&SVC_XP_AUTH(xprt),
			&(su_data(xprt)->su_xdrs), xdr_args, args_ptr);
	trace1(TR_svc_dg_getargs, 1);
	return (dummy_stat1);
}

static bool_t
svc_dg_freeargs(xprt, xdr_args, args_ptr)
	SVCXPRT *xprt;
	xdrproc_t xdr_args;
	caddr_t args_ptr;
{
/* LINTED pointer alignment */
	XDR *xdrs = &(su_data(xprt)->su_xdrs);
	bool_t dummy_stat2;

	trace1(TR_svc_dg_freeargs, 0);
	xdrs->x_op = XDR_FREE;
	dummy_stat2 = (*xdr_args)(xdrs, args_ptr);
	trace1(TR_svc_dg_freeargs, 1);
	return (dummy_stat2);
}

static void
svc_dg_destroy(xprt)
	SVCXPRT *xprt;
{
	trace1(TR_svc_dg_destroy, 0);
	mutex_lock(&svc_mutex);
	_svc_dg_destroy_private(xprt);
	mutex_unlock(&svc_mutex);
	trace1(TR_svc_dg_destroy, 1);
}

void
_svc_dg_destroy_private(xprt)
	SVCXPRT *xprt;
{
	if (svc_mt_mode != RPC_SVC_MT_NONE) {
/* LINTED pointer alignment */
		if (SVCEXT(xprt)->parent)
/* LINTED pointer alignment */
			xprt = SVCEXT(xprt)->parent;
/* LINTED pointer alignment */
		svc_flags(xprt) |= SVC_DEFUNCT;
/* LINTED pointer alignment */
		if (SVCEXT(xprt)->refcnt > 0)
			return;
	}

	xprt_unregister(xprt);
	(void) t_close(xprt->xp_fd);

	if (svc_mt_mode != RPC_SVC_MT_NONE)
		svc_xprt_destroy(xprt);
	else
		svc_dg_xprtfree(xprt);
}

/*ARGSUSED*/
static bool_t
svc_dg_control(xprt, rq, in)
	SVCXPRT *xprt;
	const uint_t rq;
	void *in;
{
	trace3(TR_svc_dg_control, 0, xprt, rq);
	switch (rq) {
	case SVCGET_XID:
		if (xprt->xp_p2 == NULL) {
			trace1(TR_svc_dg_control, 1);
			return (FALSE);
		} else {
			*(uint32_t *)in =
/* LINTED pointer alignment */
			    ((struct svc_dg_data *)(xprt->xp_p2))->su_xid;
			trace1(TR_svc_dg_control, 1);
			return (TRUE);
		}
	default:
		trace1(TR_svc_dg_control, 1);
		return (FALSE);
	}
}

static struct xp_ops *
svc_dg_ops()
{
	static struct xp_ops ops;
	extern mutex_t ops_lock;

/* VARIABLES PROTECTED BY ops_lock: ops */

	trace1(TR_svc_dg_ops, 0);
	mutex_lock(&ops_lock);
	if (ops.xp_recv == NULL) {
		ops.xp_recv = svc_dg_recv;
		ops.xp_stat = svc_dg_stat;
		ops.xp_getargs = svc_dg_getargs;
		ops.xp_reply = svc_dg_reply;
		ops.xp_freeargs = svc_dg_freeargs;
		ops.xp_destroy = svc_dg_destroy;
		ops.xp_control = svc_dg_control;
	}
	mutex_unlock(&ops_lock);
	trace1(TR_svc_dg_ops, 1);
	return (&ops);
}

/* The CACHING COMPONENT */

/*
 * Could have been a separate file, but some part of it depends upon the
 * private structure of the client handle.
 *
 * Fifo cache for cl server
 * Copies pointers to reply buffers into fifo cache
 * Buffers are sent again if retransmissions are detected.
 */

#define	SPARSENESS 4	/* 75% sparse */

#define	ALLOC(type, size)	\
	(type *)mem_alloc((unsigned)(sizeof (type) * (size)))

#define	MEMZERO(addr, type, size)	\
	(void) memset((char *)(addr), 0, sizeof (type) * (int)(size))

#define	FREE(addr, type, size)	\
	mem_free((char *)(addr), (sizeof (type) * (size)))

/*
 * An entry in the cache
 */
typedef struct cache_node *cache_ptr;
struct cache_node {
	/*
	 * Index into cache is xid, proc, vers, prog and address
	 */
	uint32_t cache_xid;
	rpcproc_t cache_proc;
	rpcvers_t cache_vers;
	rpcprog_t cache_prog;
	struct netbuf cache_addr;
	/*
	 * The cached reply and length
	 */
	char *cache_reply;
	uint32_t cache_replylen;
	/*
	 * Next node on the list, if there is a collision
	 */
	cache_ptr cache_next;
};

/*
 * The entire cache
 */
struct cl_cache {
	uint32_t uc_size;	/* size of cache */
	cache_ptr *uc_entries;	/* hash table of entries in cache */
	cache_ptr *uc_fifo;	/* fifo list of entries in cache */
	uint32_t uc_nextvictim;	/* points to next victim in fifo list */
	rpcprog_t uc_prog;	/* saved program number */
	rpcvers_t uc_vers;	/* saved version number */
	rpcproc_t uc_proc;	/* saved procedure number */
};


/*
 * the hashing function
 */
#define	CACHE_LOC(transp, xid)	\
	(xid % (SPARSENESS * ((struct cl_cache *) \
		su_data(transp)->su_cache)->uc_size))
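
/*
 * Illustrative note (not part of the original file): with a cache created
 * by svc_dg_enablecache(xprt, 64), uc_size is 64, so the hash table holds
 * SPARSENESS * 64 = 256 buckets and CACHE_LOC() maps an xid of 0x12345 to
 * bucket 0x12345 % 256 = 0x45.
 */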

extern mutex_t	dupreq_lock;

/*
 * Enable use of the cache. Returns 1 on success, 0 on failure.
 * Note: there is no disable.
 */
static const char cache_enable_str[] = "svc_enablecache: %s %s";
static const char alloc_err[] = "could not allocate cache ";
static const char enable_err[] = "cache already enabled";

int
svc_dg_enablecache(xprt, size)
	SVCXPRT *xprt;
	uint_t size;
{
	SVCXPRT *transp;
	struct svc_dg_data *su;
	struct cl_cache *uc;

/* LINTED pointer alignment */
	if (svc_mt_mode != RPC_SVC_MT_NONE && SVCEXT(xprt)->parent != NULL)
/* LINTED pointer alignment */
		transp = SVCEXT(xprt)->parent;
	else
		transp = xprt;
/* LINTED pointer alignment */
	su = su_data(transp);

	trace2(TR_svc_dg_enablecache, 0, size);
	mutex_lock(&dupreq_lock);
	if (su->su_cache != NULL) {
		(void) syslog(LOG_ERR, cache_enable_str,
				enable_err, " ");
		mutex_unlock(&dupreq_lock);
		trace2(TR_svc_dg_enablecache, 1, size);
		return (0);
	}
	uc = ALLOC(struct cl_cache, 1);
	if (uc == NULL) {
		(void) syslog(LOG_ERR, cache_enable_str,
				alloc_err, " ");
		mutex_unlock(&dupreq_lock);
		trace2(TR_svc_dg_enablecache, 1, size);
		return (0);
	}
	uc->uc_size = size;
	uc->uc_nextvictim = 0;
	uc->uc_entries = ALLOC(cache_ptr, size * SPARSENESS);
	if (uc->uc_entries == NULL) {
		(void) syslog(LOG_ERR, cache_enable_str,
				alloc_err, "data");
		FREE(uc, struct cl_cache, 1);
		mutex_unlock(&dupreq_lock);
		trace2(TR_svc_dg_enablecache, 1, size);
		return (0);
	}
	MEMZERO(uc->uc_entries, cache_ptr, size * SPARSENESS);
	uc->uc_fifo = ALLOC(cache_ptr, size);
	if (uc->uc_fifo == NULL) {
		(void) syslog(LOG_ERR, cache_enable_str,
				alloc_err, "fifo");
		FREE(uc->uc_entries, cache_ptr, size * SPARSENESS);
		FREE(uc, struct cl_cache, 1);
		mutex_unlock(&dupreq_lock);
		trace2(TR_svc_dg_enablecache, 1, size);
		return (0);
	}
	MEMZERO(uc->uc_fifo, cache_ptr, size);
	su->su_cache = (char *)uc;
	mutex_unlock(&dupreq_lock);
	trace2(TR_svc_dg_enablecache, 1, size);
	return (1);
}
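
/*
 * Illustrative usage note (not part of the original file). A server would
 * typically enable the duplicate-request cache once, right after creating
 * the transport, e.g.
 *
 *	xprt = svc_dg_create(fd, 0, 0);
 *	if (xprt != NULL && !svc_dg_enablecache(xprt, 64))
 *		syslog(LOG_ERR, "could not enable duplicate-request cache");
 *
 * The cache size of 64 entries is an arbitrary value chosen for the example.
 */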

/*
 * Set an entry in the cache. It assumes that the uc entry is set from
 * the earlier call to cache_get() for the same procedure. This will always
 * happen because cache_get() is called by svc_dg_recv and cache_set() is
 * called by svc_dg_reply(). All this hoopla because the right RPC parameters
 * are not available at svc_dg_reply time.
 */

static const char cache_set_str[] = "cache_set: %s";
static const char cache_set_err1[] = "victim not found";
static const char cache_set_err2[] = "victim alloc failed";
static const char cache_set_err3[] = "could not allocate new rpc buffer";

static void
cache_set(xprt, replylen)
	SVCXPRT *xprt;
	uint32_t replylen;
{
	SVCXPRT *parent;
	cache_ptr victim;
	cache_ptr *vicp;
	struct svc_dg_data *su;
	struct cl_cache *uc;
	uint_t loc;
	char *newbuf, *newbuf2;
	int my_mallocs = 0;
#ifdef RPC_CACHE_DEBUG
	struct netconfig *nconf;
	char *uaddr;
#endif

/* LINTED pointer alignment */
	if (svc_mt_mode != RPC_SVC_MT_NONE && SVCEXT(xprt)->parent != NULL)
/* LINTED pointer alignment */
		parent = SVCEXT(xprt)->parent;
	else
		parent = xprt;
/* LINTED pointer alignment */
	su = su_data(xprt);
/* LINTED pointer alignment */
	uc = (struct cl_cache *)su_data(parent)->su_cache;

	mutex_lock(&dupreq_lock);
	/*
	 * Find space for the new entry, either by
	 * reusing an old entry, or by mallocing a new one
	 */
	trace2(TR_cache_set, 0, replylen);
	victim = uc->uc_fifo[uc->uc_nextvictim];
	if (victim != NULL) {
/* LINTED pointer alignment */
		loc = CACHE_LOC(parent, victim->cache_xid);
		for (vicp = &uc->uc_entries[loc];
			*vicp != NULL && *vicp != victim;
			vicp = &(*vicp)->cache_next)
			;
		if (*vicp == NULL) {
			(void) syslog(LOG_ERR, cache_set_str, cache_set_err1);
			mutex_unlock(&dupreq_lock);
			trace2(TR_cache_set, 1, replylen);
			return;
		}
		*vicp = victim->cache_next;	/* remove from cache */
		newbuf = victim->cache_reply;
	} else {
		victim = ALLOC(struct cache_node, 1);
		if (victim == NULL) {
			(void) syslog(LOG_ERR, cache_set_str, cache_set_err2);
			mutex_unlock(&dupreq_lock);
			trace2(TR_cache_set, 1, replylen);
			return;
		}
		newbuf = (char *)mem_alloc(su->su_iosz);
		if (newbuf == NULL) {
			(void) syslog(LOG_ERR, cache_set_str, cache_set_err3);
			FREE(victim, struct cache_node, 1);
			mutex_unlock(&dupreq_lock);
			trace2(TR_cache_set, 1, replylen);
			return;
		}
		my_mallocs = 1;
	}

	/*
	 * Store it away
	 */
#ifdef RPC_CACHE_DEBUG
	if (nconf = getnetconfigent(xprt->xp_netid)) {
		uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
		freenetconfigent(nconf);
		printf(
	"cache set for xid= %x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
			su->su_xid, uc->uc_prog, uc->uc_vers,
			uc->uc_proc, uaddr);
		free(uaddr);
	}
#endif
	newbuf2 = ALLOC(char, xprt->xp_rtaddr.len);
	if (newbuf2 == NULL) {
		syslog(LOG_ERR, "cache_set : out of memory");
		if (my_mallocs) {
			FREE(victim, struct cache_node, 1);
			mem_free(newbuf, su->su_iosz);
		}
		mutex_unlock(&dupreq_lock);
		trace2(TR_cache_set, 1, replylen);
		return;
	}
	victim->cache_replylen = replylen;
	victim->cache_reply = rpc_buffer(xprt);
	rpc_buffer(xprt) = newbuf;
	xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt),
			su->su_iosz, XDR_ENCODE);
	su->su_tudata.udata.buf = (char *)rpc_buffer(xprt);
	victim->cache_xid = su->su_xid;
	victim->cache_proc = uc->uc_proc;
	victim->cache_vers = uc->uc_vers;
	victim->cache_prog = uc->uc_prog;
	victim->cache_addr = xprt->xp_rtaddr;
	victim->cache_addr.buf = newbuf2;
	(void) memcpy(victim->cache_addr.buf, xprt->xp_rtaddr.buf,
		(int)xprt->xp_rtaddr.len);
/* LINTED pointer alignment */
	loc = CACHE_LOC(parent, victim->cache_xid);
	victim->cache_next = uc->uc_entries[loc];
	uc->uc_entries[loc] = victim;
	uc->uc_fifo[uc->uc_nextvictim++] = victim;
	uc->uc_nextvictim %= uc->uc_size;
	mutex_unlock(&dupreq_lock);
	trace2(TR_cache_set, 1, replylen);
}

/*
 * Try to get an entry from the cache
 * return 1 if found, 0 if not found and set the stage for cache_set()
 */
static int
cache_get(xprt, msg, replyp, replylenp)
	SVCXPRT *xprt;
	struct rpc_msg *msg;
	char **replyp;
	uint32_t *replylenp;
{
	SVCXPRT *parent;
	uint_t loc;
	cache_ptr ent;
	struct svc_dg_data *su;
	struct cl_cache *uc;
#ifdef RPC_CACHE_DEBUG
	struct netconfig *nconf;
	char *uaddr;
#endif

	trace1(TR_cache_get, 0);

/* LINTED pointer alignment */
	if (svc_mt_mode != RPC_SVC_MT_NONE && SVCEXT(xprt)->parent != NULL)
/* LINTED pointer alignment */
		parent = SVCEXT(xprt)->parent;
	else
		parent = xprt;
/* LINTED pointer alignment */
	su = su_data(xprt);
/* LINTED pointer alignment */
	uc = (struct cl_cache *)su_data(parent)->su_cache;

	mutex_lock(&dupreq_lock);
/* LINTED pointer alignment */
	loc = CACHE_LOC(parent, su->su_xid);
	for (ent = uc->uc_entries[loc]; ent != NULL; ent = ent->cache_next) {
		if (ent->cache_xid == su->su_xid &&
			ent->cache_proc == msg->rm_call.cb_proc &&
			ent->cache_vers == msg->rm_call.cb_vers &&
			ent->cache_prog == msg->rm_call.cb_prog &&
			ent->cache_addr.len == xprt->xp_rtaddr.len &&
			(memcmp(ent->cache_addr.buf, xprt->xp_rtaddr.buf,
				xprt->xp_rtaddr.len) == 0)) {
#ifdef RPC_CACHE_DEBUG
			if (nconf = getnetconfigent(xprt->xp_netid)) {
				uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
				freenetconfigent(nconf);
				printf(
	"cache entry found for xid=%x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
					su->su_xid, msg->rm_call.cb_prog,
					msg->rm_call.cb_vers,
					msg->rm_call.cb_proc, uaddr);
				free(uaddr);
			}
#endif
			*replyp = ent->cache_reply;
			*replylenp = ent->cache_replylen;
			mutex_unlock(&dupreq_lock);
			trace1(TR_cache_get, 1);
			return (1);
		}
	}
	/*
	 * Failed to find entry
	 * Remember a few things so we can do a set later
	 */
	uc->uc_proc = msg->rm_call.cb_proc;
	uc->uc_vers = msg->rm_call.cb_vers;
	uc->uc_prog = msg->rm_call.cb_prog;
	mutex_unlock(&dupreq_lock);
	trace1(TR_cache_get, 1);
	return (0);
}