/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Server side RPC handler.
 */

#include <sys/byteorder.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <thread.h>
#include <synch.h>
#include <stdlib.h>
#include <strings.h>
#include <string.h>
#include <time.h>

#include <smbsrv/libsmb.h>
#include <smbsrv/libmlrpc.h>
#include <smbsrv/mlsvc.h>
#include <smbsrv/ndr.h>
#include <smbsrv/mlrpc.h>
#include <smbsrv/mlsvc_util.h>


#define	SMB_CTXT_BUFSZ		65536

/*
 * Fragment size (5680: NT style).
 */
#define	MLRPC_FRAG_SZ		5680
static unsigned long mlrpc_frag_size = MLRPC_FRAG_SZ;

/*
 * Service context table.
 */
#define	CTXT_TABLE_ENTRIES	128
static struct mlsvc_rpc_context context_table[CTXT_TABLE_ENTRIES];
static mutex_t mlrpc_context_lock;

static int ndr_s_transact(struct mlsvc_rpc_context *);
static struct mlsvc_rpc_context *ndr_s_lookup(int);
static void ndr_s_release(struct mlsvc_rpc_context *);
static struct mlsvc_rpc_context *ndr_s_allocate(int);
static void ndr_s_deallocate(struct mlsvc_rpc_context *);
static void ndr_s_rewind(struct mlsvc_rpc_context *);
static void ndr_s_flush(struct mlsvc_rpc_context *);

static int mlrpc_s_process(struct mlrpc_xaction *);
static int mlrpc_s_bind(struct mlrpc_xaction *);
static int mlrpc_s_request(struct mlrpc_xaction *);
static void mlrpc_reply_prepare_hdr(struct mlrpc_xaction *);
static int mlrpc_s_alter_context(struct mlrpc_xaction *);
static void mlrpc_reply_fault(struct mlrpc_xaction *, unsigned long);
static int mlrpc_build_reply(struct mlrpc_xaction *);
static void mlrpc_build_frag(struct mlndr_stream *, uint8_t *, uint32_t);

/*
 * Allocate and associate a service context with a fid.
 */
int
ndr_s_open(int fid, uint8_t *data, uint32_t datalen)
{
	struct mlsvc_rpc_context *svc;

	(void) mutex_lock(&mlrpc_context_lock);

	if ((svc = ndr_s_lookup(fid)) != NULL) {
		ndr_s_release(svc);
		(void) mutex_unlock(&mlrpc_context_lock);
		return (EEXIST);
	}

	if ((svc = ndr_s_allocate(fid)) == NULL) {
		(void) mutex_unlock(&mlrpc_context_lock);
		return (ENOMEM);
	}

	if (smb_opipe_context_decode(&svc->svc_ctx, data, datalen) == -1) {
		ndr_s_release(svc);
		(void) mutex_unlock(&mlrpc_context_lock);
		return (EINVAL);
	}

	mlrpc_binding_pool_initialize(&svc->binding, svc->binding_pool,
	    CTXT_N_BINDING_POOL);

	(void) mutex_unlock(&mlrpc_context_lock);
	return (0);
}

/*
 * Release the context associated with a fid when an opipe is closed.
 */
int
ndr_s_close(int fid)
{
	struct mlsvc_rpc_context *svc;

	(void) mutex_lock(&mlrpc_context_lock);

	if ((svc = ndr_s_lookup(fid)) == NULL) {
		(void) mutex_unlock(&mlrpc_context_lock);
		return (ENOENT);
	}

	/*
	 * Release twice: once for the lookup above
	 * and again to close the fid.
	 */
	ndr_s_release(svc);
	ndr_s_release(svc);
	(void) mutex_unlock(&mlrpc_context_lock);
	return (0);
}

/*
 * Write RPC request data to the input stream.  Input data is buffered
 * until the response is requested.
 */
int
ndr_s_write(int fid, uint8_t *buf, uint32_t len)
{
	struct mlsvc_rpc_context *svc;
	ssize_t nbytes;

	if (len == 0)
		return (0);

	(void) mutex_lock(&mlrpc_context_lock);

	if ((svc = ndr_s_lookup(fid)) == NULL) {
		(void) mutex_unlock(&mlrpc_context_lock);
		return (ENOENT);
	}

	nbytes = ndr_uiomove((caddr_t)buf, len, UIO_READ, &svc->in_uio);

	ndr_s_release(svc);
	(void) mutex_unlock(&mlrpc_context_lock);
	return ((nbytes == len) ? 0 : EIO);
}

/*
 * Read RPC response data.  If the input stream contains an RPC request,
 * we need to process the RPC transaction, which will place the RPC
 * response in the output (frags) stream.  Otherwise, read data from
 * the output stream.
 */
int
ndr_s_read(int fid, uint8_t *buf, uint32_t *len, uint32_t *resid)
{
	struct mlsvc_rpc_context *svc;
	ssize_t nbytes = *len;
	int rc;

	if (nbytes == 0) {
		*resid = 0;
		return (0);
	}

	(void) mutex_lock(&mlrpc_context_lock);
	if ((svc = ndr_s_lookup(fid)) == NULL) {
		(void) mutex_unlock(&mlrpc_context_lock);
		return (ENOENT);
	}
	(void) mutex_unlock(&mlrpc_context_lock);

	if (svc->in_uio.uio_offset) {
		if ((rc = ndr_s_transact(svc)) != 0) {
			ndr_s_flush(svc);
			(void) mutex_lock(&mlrpc_context_lock);
			ndr_s_release(svc);
			(void) mutex_unlock(&mlrpc_context_lock);
			return (rc);
		}
	}

	*len = ndr_uiomove((caddr_t)buf, nbytes, UIO_WRITE, &svc->frags.uio);
	*resid = svc->frags.uio.uio_resid;

	if (*resid == 0) {
		/*
		 * Nothing left, cleanup the output stream.
		 */
		ndr_s_flush(svc);
	}

	(void) mutex_lock(&mlrpc_context_lock);
	ndr_s_release(svc);
	(void) mutex_unlock(&mlrpc_context_lock);
	return (0);
}

/*
 * Process a server-side RPC request.
 */
static int
ndr_s_transact(struct mlsvc_rpc_context *svc)
{
	ndr_xa_t *mxa;
	struct mlndr_stream *recv_mlnds;
	struct mlndr_stream *send_mlnds;
	char *data;
	int datalen;

	data = svc->in_buf;
	datalen = svc->in_uio.uio_offset;

	if ((mxa = (ndr_xa_t *)malloc(sizeof (ndr_xa_t))) == NULL)
		return (ENOMEM);

	bzero(mxa, sizeof (struct mlrpc_xaction));
	mxa->fid = svc->fid;
	mxa->context = svc;
	mxa->binding_list = svc->binding;

	if ((mxa->heap = mlrpc_heap_create()) == NULL) {
		free(mxa);
		return (ENOMEM);
	}

	recv_mlnds = &mxa->recv_mlnds;
	mlnds_initialize(recv_mlnds, datalen, NDR_MODE_CALL_RECV, mxa->heap);

	/*
	 * Copy the input data and reset the input stream.
	 */
	bcopy(data, recv_mlnds->pdu_base_addr, datalen);
	ndr_s_rewind(svc);

	send_mlnds = &mxa->send_mlnds;
	mlnds_initialize(send_mlnds, 0, NDR_MODE_RETURN_SEND, mxa->heap);

	(void) mlrpc_s_process(mxa);

	mlnds_finalize(send_mlnds, &svc->frags);
	mlnds_destruct(&mxa->recv_mlnds);
	mlnds_destruct(&mxa->send_mlnds);
	mlrpc_heap_destroy(mxa->heap);
	free(mxa);
	return (0);
}

/*
 * Must be called with mlrpc_context_lock held.
 */
static struct mlsvc_rpc_context *
ndr_s_lookup(int fid)
{
	struct mlsvc_rpc_context *svc;
	int i;

	for (i = 0; i < CTXT_TABLE_ENTRIES; ++i) {
		svc = &context_table[i];

		if (svc->fid == fid) {
			if (svc->refcnt == 0)
				return (NULL);

			svc->refcnt++;
			return (svc);
		}
	}

	return (NULL);
}

/*
 * Must be called with mlrpc_context_lock held.
 */
static void
ndr_s_release(struct mlsvc_rpc_context *svc)
{
	svc->refcnt--;
	ndr_s_deallocate(svc);
}

/*
 * Must be called with mlrpc_context_lock held.
 */
static struct mlsvc_rpc_context *
ndr_s_allocate(int fid)
{
	struct mlsvc_rpc_context *svc = NULL;
	int i;

	for (i = 0; i < CTXT_TABLE_ENTRIES; ++i) {
		svc = &context_table[i];

		if (svc->fid == 0) {
			bzero(svc, sizeof (struct mlsvc_rpc_context));

			if ((svc->in_buf = malloc(SMB_CTXT_BUFSZ)) == NULL)
				return (NULL);

			ndr_s_rewind(svc);
			svc->fid = fid;
			svc->refcnt = 1;
			return (svc);
		}
	}

	return (NULL);
}

/*
 * Must be called with mlrpc_context_lock held.
 */
static void
ndr_s_deallocate(struct mlsvc_rpc_context *svc)
{
	if (svc->refcnt == 0) {
		/*
		 * Ensure that there are no RPC service policy handles
		 * (associated with this fid) left around.
		 */
		ndr_hdclose(svc->fid);

		ndr_s_rewind(svc);
		ndr_s_flush(svc);
		free(svc->in_buf);
		free(svc->svc_ctx.oc_domain);
		free(svc->svc_ctx.oc_account);
		free(svc->svc_ctx.oc_workstation);
		bzero(svc, sizeof (struct mlsvc_rpc_context));
	}
}

/*
 * Rewind the input data stream, ready for the next write.
 */
static void
ndr_s_rewind(struct mlsvc_rpc_context *svc)
{
	svc->in_uio.uio_iov = &svc->in_iov;
	svc->in_uio.uio_iovcnt = 1;
	svc->in_uio.uio_offset = 0;
	svc->in_uio.uio_segflg = UIO_USERSPACE;
	svc->in_uio.uio_resid = SMB_CTXT_BUFSZ;
	svc->in_iov.iov_base = svc->in_buf;
	svc->in_iov.iov_len = SMB_CTXT_BUFSZ;
}

/*
 * Flush the output data stream.
 */
static void
ndr_s_flush(struct mlsvc_rpc_context *svc)
{
	ndr_frag_t *frag;

	while ((frag = svc->frags.head) != NULL) {
		svc->frags.head = frag->next;
		free(frag);
	}

	free(svc->frags.iov);
	bzero(&svc->frags, sizeof (ndr_fraglist_t));
}

/*
 * Check whether or not the specified user has administrator privileges,
 * i.e. is a member of Domain Admins or Administrators.
 * Returns true if the user is an administrator, otherwise returns false.
 */
boolean_t
ndr_is_admin(ndr_xa_t *xa)
{
	smb_opipe_context_t *svc = &xa->context->svc_ctx;

	return (svc->oc_flags & SMB_ATF_ADMIN);
}

/*
 * Check whether or not the specified user has power-user privileges,
 * i.e. is a member of Domain Admins, Administrators or Power Users.
 * This is typically required for operations such as managing shares.
 * Returns true if the user is a power user, otherwise returns false.
 */
boolean_t
ndr_is_poweruser(ndr_xa_t *xa)
{
	smb_opipe_context_t *svc = &xa->context->svc_ctx;

	return ((svc->oc_flags & SMB_ATF_ADMIN) ||
	    (svc->oc_flags & SMB_ATF_POWERUSER));
}

int32_t
ndr_native_os(ndr_xa_t *xa)
{
	smb_opipe_context_t *svc = &xa->context->svc_ctx;

	return (svc->oc_native_os);
}

/*
 * This is the entry point for all server-side RPC processing.
 * It is assumed that the PDU has already been received.
 */
static int
mlrpc_s_process(struct mlrpc_xaction *mxa)
{
	int rc;

	rc = mlrpc_decode_pdu_hdr(mxa);
	if (!MLRPC_DRC_IS_OK(rc))
		return (-1);

	(void) mlrpc_reply_prepare_hdr(mxa);

	switch (mxa->ptype) {
	case MLRPC_PTYPE_BIND:
		rc = mlrpc_s_bind(mxa);
		break;

	case MLRPC_PTYPE_REQUEST:
		rc = mlrpc_s_request(mxa);
		break;

	case MLRPC_PTYPE_ALTER_CONTEXT:
		rc = mlrpc_s_alter_context(mxa);
		break;

	default:
		rc = MLRPC_DRC_FAULT_RPCHDR_PTYPE_INVALID;
		break;
	}

	if (MLRPC_DRC_IS_FAULT(rc))
		mlrpc_reply_fault(mxa, rc);

	(void) mlrpc_build_reply(mxa);
	return (rc);
}

/*
 * Multiple p_cont_elem[]s, multiple transfer_syntaxes[] and multiple
 * p_results[] are not supported.
 */
static int
mlrpc_s_bind(struct mlrpc_xaction *mxa)
{
	mlrpc_p_cont_list_t *cont_list;
	mlrpc_p_result_list_t *result_list;
	mlrpc_p_result_t *result;
	unsigned p_cont_id;
	struct mlrpc_binding *mbind;
	ndr_uuid_t *as_uuid;
	ndr_uuid_t *ts_uuid;
	char as_buf[64];
	char ts_buf[64];
	int as_vers;
	int ts_vers;
	struct mlndr_stream *send_mlnds;
	struct mlrpc_service *msvc;
	int rc;
	mlrpc_port_any_t *sec_addr;

	/* acquire targets */
	cont_list = &mxa->recv_hdr.bind_hdr.p_context_elem;
	result_list = &mxa->send_hdr.bind_ack_hdr.p_result_list;
	result = &result_list->p_results[0];

	/*
	 * Set up a temporary secondary address port.
	 * We will correct this later (below).
	 */
	send_mlnds = &mxa->send_mlnds;
	sec_addr = &mxa->send_hdr.bind_ack_hdr.sec_addr;
	sec_addr->length = 13;
	(void) strcpy((char *)sec_addr->port_spec, "\\PIPE\\ntsvcs");

	result_list->n_results = 1;
	result_list->reserved = 0;
	result_list->reserved2 = 0;
	result->result = MLRPC_PCDR_ACCEPTANCE;
	result->reason = 0;
	bzero(&result->transfer_syntax, sizeof (result->transfer_syntax));

	/* sanity check */
	if (cont_list->n_context_elem != 1 ||
	    cont_list->p_cont_elem[0].n_transfer_syn != 1) {
		mlndo_trace("mlrpc_s_bind: warning: multiple p_cont_elem");
	}

	p_cont_id = cont_list->p_cont_elem[0].p_cont_id;

	if ((mbind = mlrpc_find_binding(mxa, p_cont_id)) != NULL) {
		/*
		 * Duplicate p_cont_id.
		 * Send a bind_ack with a better error.
		 */
		mlndo_trace("mlrpc_s_bind: duplicate binding");
		return (MLRPC_DRC_FAULT_BIND_PCONT_BUSY);
	}

	if ((mbind = mlrpc_new_binding(mxa)) == NULL) {
		/*
		 * No free binding slot.
		 */
		result->result = MLRPC_PCDR_PROVIDER_REJECTION;
		result->reason = MLRPC_PPR_LOCAL_LIMIT_EXCEEDED;
		mlndo_trace("mlrpc_s_bind: no resources");
		return (MLRPC_DRC_OK);
	}

	as_uuid = &cont_list->p_cont_elem[0].abstract_syntax.if_uuid;
	as_vers = cont_list->p_cont_elem[0].abstract_syntax.if_version;

	ts_uuid = &cont_list->p_cont_elem[0].transfer_syntaxes[0].if_uuid;
	ts_vers = cont_list->p_cont_elem[0].transfer_syntaxes[0].if_version;

	msvc = mlrpc_find_service_by_uuids(as_uuid, as_vers, ts_uuid, ts_vers);
	if (!msvc) {
		mlrpc_uuid_to_str(as_uuid, as_buf);
		mlrpc_uuid_to_str(ts_uuid, ts_buf);

		mlndo_printf(send_mlnds, 0, "mlrpc_s_bind: unknown service");
		mlndo_printf(send_mlnds, 0, "abs=%s v%d, xfer=%s v%d",
		    as_buf, as_vers, ts_buf, ts_vers);

		result->result = MLRPC_PCDR_PROVIDER_REJECTION;
		result->reason = MLRPC_PPR_ABSTRACT_SYNTAX_NOT_SUPPORTED;
		return (MLRPC_DRC_OK);
	}

	/*
	 * We can now use the correct secondary address port.
	 */
	sec_addr = &mxa->send_hdr.bind_ack_hdr.sec_addr;
	sec_addr->length = strlen(msvc->sec_addr_port) + 1;
	(void) strlcpy((char *)sec_addr->port_spec, msvc->sec_addr_port,
	    MLRPC_PORT_ANY_MAX_PORT_SPEC);

	mbind->p_cont_id = p_cont_id;
	mbind->which_side = MLRPC_BIND_SIDE_SERVER;
	/* mbind->context set by app */
	mbind->service = msvc;
	mbind->instance_specific = 0;

	mxa->binding = mbind;

	if (msvc->bind_req) {
		/*
		 * Call the service-specific bind() handler.  If
		 * this fails, we should send a specific error
		 * on the bind ack.
		 */
		rc = (msvc->bind_req)(mxa);
		if (MLRPC_DRC_IS_FAULT(rc)) {
			mbind->service = 0;	/* free binding slot */
			mbind->which_side = 0;
			mbind->p_cont_id = 0;
			mbind->instance_specific = 0;
			return (rc);
		}
	}

	result->transfer_syntax =
	    cont_list->p_cont_elem[0].transfer_syntaxes[0];

	return (MLRPC_DRC_BINDING_MADE);
}

/*
 * mlrpc_s_alter_context
 *
 * The alter context request is used to request additional presentation
 * context for another interface and/or version.  It is very similar to
 * a bind request.
 */
static int
mlrpc_s_alter_context(struct mlrpc_xaction *mxa)
{
	mlrpc_p_result_list_t *result_list;
	mlrpc_p_result_t *result;
	mlrpc_p_cont_list_t *cont_list;
	struct mlrpc_binding *mbind;
	struct mlrpc_service *msvc;
	unsigned p_cont_id;
	ndr_uuid_t *as_uuid;
	ndr_uuid_t *ts_uuid;
	int as_vers;
	int ts_vers;
	mlrpc_port_any_t *sec_addr;

	result_list = &mxa->send_hdr.alter_context_rsp_hdr.p_result_list;
	result_list->n_results = 1;
	result_list->reserved = 0;
	result_list->reserved2 = 0;

	result = &result_list->p_results[0];
	result->result = MLRPC_PCDR_ACCEPTANCE;
	result->reason = 0;
	bzero(&result->transfer_syntax, sizeof (result->transfer_syntax));

	cont_list = &mxa->recv_hdr.alter_context_hdr.p_context_elem;
	p_cont_id = cont_list->p_cont_elem[0].p_cont_id;

	if (mlrpc_find_binding(mxa, p_cont_id) != NULL)
		return (MLRPC_DRC_FAULT_BIND_PCONT_BUSY);

	if ((mbind = mlrpc_new_binding(mxa)) == NULL) {
		result->result = MLRPC_PCDR_PROVIDER_REJECTION;
		result->reason = MLRPC_PPR_LOCAL_LIMIT_EXCEEDED;
		return (MLRPC_DRC_OK);
	}

	as_uuid = &cont_list->p_cont_elem[0].abstract_syntax.if_uuid;
	as_vers = cont_list->p_cont_elem[0].abstract_syntax.if_version;

	ts_uuid = &cont_list->p_cont_elem[0].transfer_syntaxes[0].if_uuid;
	ts_vers = cont_list->p_cont_elem[0].transfer_syntaxes[0].if_version;

	msvc = mlrpc_find_service_by_uuids(as_uuid, as_vers, ts_uuid, ts_vers);
	if (msvc == 0) {
		result->result = MLRPC_PCDR_PROVIDER_REJECTION;
		result->reason = MLRPC_PPR_ABSTRACT_SYNTAX_NOT_SUPPORTED;
		return (MLRPC_DRC_OK);
	}

	mbind->p_cont_id = p_cont_id;
	mbind->which_side = MLRPC_BIND_SIDE_SERVER;
	/* mbind->context set by app */
	mbind->service = msvc;
	mbind->instance_specific = 0;
	mxa->binding = mbind;

	sec_addr = &mxa->send_hdr.alter_context_rsp_hdr.sec_addr;
	sec_addr->length = 0;
	bzero(sec_addr->port_spec, MLRPC_PORT_ANY_MAX_PORT_SPEC);

	result->transfer_syntax =
	    cont_list->p_cont_elem[0].transfer_syntaxes[0];

	return (MLRPC_DRC_BINDING_MADE);
}

static int
mlrpc_s_request(struct mlrpc_xaction *mxa)
{
	struct mlrpc_binding *mbind;
	struct mlrpc_service *msvc;
	unsigned p_cont_id;
	int rc;

	mxa->opnum = mxa->recv_hdr.request_hdr.opnum;
	p_cont_id = mxa->recv_hdr.request_hdr.p_cont_id;

	if ((mbind = mlrpc_find_binding(mxa, p_cont_id)) == NULL)
		return (MLRPC_DRC_FAULT_REQUEST_PCONT_INVALID);

	mxa->binding = mbind;
	msvc = mbind->service;

	/*
	 * Make room for the response hdr.
	 */
	mxa->send_mlnds.pdu_scan_offset = MLRPC_RSP_HDR_SIZE;

	if (msvc->call_stub)
		rc = (*msvc->call_stub)(mxa);
	else
		rc = mlrpc_generic_call_stub(mxa);

	if (MLRPC_DRC_IS_FAULT(rc)) {
		mlndo_printf(0, 0, "%s[0x%02x]: 0x%04x",
		    msvc->name, mxa->opnum, rc);
	}

	return (rc);
}

/*
 * The transaction and the two mlnds streams use the same heap, which
 * should already exist at this point.  The heap will also be available
 * to the stub.
 */
int
mlrpc_generic_call_stub(struct mlrpc_xaction *mxa)
{
	struct mlrpc_binding *mbind = mxa->binding;
	struct mlrpc_service *msvc = mbind->service;
	struct ndr_typeinfo *intf_ti = msvc->interface_ti;
	struct mlrpc_stub_table *ste;
	int opnum = mxa->opnum;
	unsigned p_len = intf_ti->c_size_fixed_part;
	char *param;
	int rc;

	if (mxa->heap == NULL) {
		mlndo_printf(0, 0, "%s[0x%02x]: no heap", msvc->name, opnum);
		return (MLRPC_DRC_FAULT_OUT_OF_MEMORY);
	}

	if ((ste = mlrpc_find_stub_in_svc(msvc, opnum)) == NULL) {
		mlndo_printf(0, 0, "%s[0x%02x]: invalid opnum",
		    msvc->name, opnum);
		return (MLRPC_DRC_FAULT_REQUEST_OPNUM_INVALID);
	}

	if ((param = mlrpc_heap_malloc(mxa->heap, p_len)) == NULL)
		return (MLRPC_DRC_FAULT_OUT_OF_MEMORY);

	bzero(param, p_len);

	rc = mlrpc_decode_call(mxa, param);
	if (!MLRPC_DRC_IS_OK(rc))
		return (rc);

	rc = (*ste->func)(param, mxa);
	if (rc == MLRPC_DRC_OK)
		rc = mlrpc_encode_return(mxa, param);

	return (rc);
}

/*
 * We can perform some initial setup of the response header here.
 * We also need to cache some of the information from the bind
 * negotiation for use during subsequent RPC calls.
 */
static void
mlrpc_reply_prepare_hdr(struct mlrpc_xaction *mxa)
{
	ndr_common_header_t *rhdr = &mxa->recv_hdr.common_hdr;
	ndr_common_header_t *hdr = &mxa->send_hdr.common_hdr;

	hdr->rpc_vers = 5;
	hdr->rpc_vers_minor = 0;
	hdr->pfc_flags = MLRPC_PFC_FIRST_FRAG + MLRPC_PFC_LAST_FRAG;
	hdr->packed_drep = rhdr->packed_drep;
	hdr->frag_length = 0;
	hdr->auth_length = 0;
	hdr->call_id = rhdr->call_id;
#ifdef _BIG_ENDIAN
	hdr->packed_drep.intg_char_rep = MLRPC_REPLAB_CHAR_ASCII
	    | MLRPC_REPLAB_INTG_BIG_ENDIAN;
#else
	hdr->packed_drep.intg_char_rep = MLRPC_REPLAB_CHAR_ASCII
	    | MLRPC_REPLAB_INTG_LITTLE_ENDIAN;
#endif

	switch (mxa->ptype) {
	case MLRPC_PTYPE_BIND:
		hdr->ptype = MLRPC_PTYPE_BIND_ACK;
		mxa->send_hdr.bind_ack_hdr.max_xmit_frag =
		    mxa->recv_hdr.bind_hdr.max_xmit_frag;
		mxa->send_hdr.bind_ack_hdr.max_recv_frag =
		    mxa->recv_hdr.bind_hdr.max_recv_frag;
		mxa->send_hdr.bind_ack_hdr.assoc_group_id =
		    mxa->recv_hdr.bind_hdr.assoc_group_id;

		if (mxa->send_hdr.bind_ack_hdr.assoc_group_id == 0)
			mxa->send_hdr.bind_ack_hdr.assoc_group_id = time(0);

		/*
		 * Save the maximum fragment sizes
		 * for use with subsequent requests.
		 */
		mxa->context->max_xmit_frag =
		    mxa->recv_hdr.bind_hdr.max_xmit_frag;

		mxa->context->max_recv_frag =
		    mxa->recv_hdr.bind_hdr.max_recv_frag;

		break;

	case MLRPC_PTYPE_REQUEST:
		hdr->ptype = MLRPC_PTYPE_RESPONSE;
		/* mxa->send_hdr.response_hdr.alloc_hint */
		mxa->send_hdr.response_hdr.p_cont_id =
		    mxa->recv_hdr.request_hdr.p_cont_id;
		mxa->send_hdr.response_hdr.cancel_count = 0;
		mxa->send_hdr.response_hdr.reserved = 0;
		break;

	case MLRPC_PTYPE_ALTER_CONTEXT:
		hdr->ptype = MLRPC_PTYPE_ALTER_CONTEXT_RESP;
		/*
		 * The max_xmit_frag, max_recv_frag and assoc_group_id are
		 * ignored by the client but it's useful to fill them in.
		 */
		mxa->send_hdr.alter_context_rsp_hdr.max_xmit_frag =
		    mxa->recv_hdr.alter_context_hdr.max_xmit_frag;
		mxa->send_hdr.alter_context_rsp_hdr.max_recv_frag =
		    mxa->recv_hdr.alter_context_hdr.max_recv_frag;
		mxa->send_hdr.alter_context_rsp_hdr.assoc_group_id =
		    mxa->recv_hdr.alter_context_hdr.assoc_group_id;
		break;

	default:
		hdr->ptype = 0xFF;
	}
}

/*
 * Signal an RPC fault.  The stream is reset and we overwrite whatever
 * was in the response header with the fault information.
 */
static void
mlrpc_reply_fault(struct mlrpc_xaction *mxa, unsigned long drc)
{
	ndr_common_header_t *rhdr = &mxa->recv_hdr.common_hdr;
	ndr_common_header_t *hdr = &mxa->send_hdr.common_hdr;
	struct mlndr_stream *mlnds = &mxa->send_mlnds;
	unsigned long fault_status;

	MLNDS_RESET(mlnds);

	hdr->rpc_vers = 5;
	hdr->rpc_vers_minor = 0;
	hdr->pfc_flags = MLRPC_PFC_FIRST_FRAG + MLRPC_PFC_LAST_FRAG;
	hdr->packed_drep = rhdr->packed_drep;
	hdr->frag_length = sizeof (mxa->send_hdr.fault_hdr);
	hdr->auth_length = 0;
	hdr->call_id = rhdr->call_id;
#ifdef _BIG_ENDIAN
	hdr->packed_drep.intg_char_rep = MLRPC_REPLAB_CHAR_ASCII
	    | MLRPC_REPLAB_INTG_BIG_ENDIAN;
#else
	hdr->packed_drep.intg_char_rep = MLRPC_REPLAB_CHAR_ASCII
	    | MLRPC_REPLAB_INTG_LITTLE_ENDIAN;
#endif

	switch (drc & MLRPC_DRC_MASK_SPECIFIER) {
	case MLRPC_DRC_FAULT_OUT_OF_MEMORY:
	case MLRPC_DRC_FAULT_ENCODE_TOO_BIG:
		fault_status = MLRPC_FAULT_NCA_OUT_ARGS_TOO_BIG;
		break;

	case MLRPC_DRC_FAULT_REQUEST_PCONT_INVALID:
		fault_status = MLRPC_FAULT_NCA_INVALID_PRES_CONTEXT_ID;
		break;

	case MLRPC_DRC_FAULT_REQUEST_OPNUM_INVALID:
		fault_status = MLRPC_FAULT_NCA_OP_RNG_ERROR;
		break;

	case MLRPC_DRC_FAULT_DECODE_FAILED:
	case MLRPC_DRC_FAULT_ENCODE_FAILED:
		fault_status = MLRPC_FAULT_NCA_PROTO_ERROR;
		break;

	default:
		fault_status = MLRPC_FAULT_NCA_UNSPEC_REJECT;
		break;
	}

	mxa->send_hdr.fault_hdr.common_hdr.ptype = MLRPC_PTYPE_FAULT;
	mxa->send_hdr.fault_hdr.status = fault_status;
	mxa->send_hdr.response_hdr.alloc_hint = hdr->frag_length;
}

/*
 * Note that the frag_length for bind ack and alter context is
 * non-standard.
 */
static int
mlrpc_build_reply(struct mlrpc_xaction *mxa)
{
	ndr_common_header_t *hdr = &mxa->send_hdr.common_hdr;
	struct mlndr_stream *mlnds = &mxa->send_mlnds;
	uint8_t *pdu_buf;
	unsigned long pdu_size;
	unsigned long frag_size;
	unsigned long pdu_data_size;
	unsigned long frag_data_size;

	frag_size = mlrpc_frag_size;
	pdu_size = mlnds->pdu_size;
	pdu_buf = mlnds->pdu_base_addr;

	if (pdu_size <= frag_size) {
		/*
		 * Single fragment response.  The PDU size may be zero
		 * here (i.e. bind or fault response).  So don't make
		 * any assumptions about it until after the header is
		 * encoded.
		 */
		switch (hdr->ptype) {
		case MLRPC_PTYPE_BIND_ACK:
			hdr->frag_length = mlrpc_bind_ack_hdr_size(mxa);
			break;

		case MLRPC_PTYPE_FAULT:
			/* already setup */
			break;

		case MLRPC_PTYPE_RESPONSE:
			hdr->frag_length = pdu_size;
			mxa->send_hdr.response_hdr.alloc_hint =
			    hdr->frag_length;
			break;

		case MLRPC_PTYPE_ALTER_CONTEXT_RESP:
			hdr->frag_length = mlrpc_alter_context_rsp_hdr_size();
			break;

		default:
			hdr->frag_length = pdu_size;
			break;
		}

		mlnds->pdu_scan_offset = 0;
		(void) mlrpc_encode_pdu_hdr(mxa);
		pdu_size = mlnds->pdu_size;
		mlrpc_build_frag(mlnds, pdu_buf, pdu_size);
		return (0);
	}

	/*
	 * Multiple fragment response.
	 */
	hdr->pfc_flags = MLRPC_PFC_FIRST_FRAG;
	hdr->frag_length = frag_size;
	mxa->send_hdr.response_hdr.alloc_hint = pdu_size - MLRPC_RSP_HDR_SIZE;
	mlnds->pdu_scan_offset = 0;
	(void) mlrpc_encode_pdu_hdr(mxa);
	mlrpc_build_frag(mlnds, pdu_buf, frag_size);

	/*
	 * We need to update the 24-byte header in subsequent fragments.
	 *
	 * pdu_data_size:  total data remaining to be handled
	 * frag_size:      total fragment size including header
	 * frag_data_size: data in fragment
	 *                 (i.e. frag_size - MLRPC_RSP_HDR_SIZE)
	 */
	pdu_data_size = pdu_size - MLRPC_RSP_HDR_SIZE;
	frag_data_size = frag_size - MLRPC_RSP_HDR_SIZE;

	while (pdu_data_size) {
		mxa->send_hdr.response_hdr.alloc_hint -= frag_data_size;
		pdu_data_size -= frag_data_size;
		pdu_buf += frag_data_size;

		if (pdu_data_size <= frag_data_size) {
			frag_data_size = pdu_data_size;
			frag_size = frag_data_size + MLRPC_RSP_HDR_SIZE;
			hdr->pfc_flags = MLRPC_PFC_LAST_FRAG;
		} else {
			hdr->pfc_flags = 0;
		}

		hdr->frag_length = frag_size;
		mlnds->pdu_scan_offset = 0;
		(void) mlrpc_encode_pdu_hdr(mxa);
		bcopy(mlnds->pdu_base_addr, pdu_buf, MLRPC_RSP_HDR_SIZE);

		mlrpc_build_frag(mlnds, pdu_buf, frag_size);

		if (hdr->pfc_flags & MLRPC_PFC_LAST_FRAG)
			break;
	}

	return (0);
}

/*
 * mlrpc_build_frag
 *
 * Build an RPC PDU fragment from the specified buffer.
 * If malloc fails, the client will see a header/pdu inconsistency
 * and report an error.
 */
static void
mlrpc_build_frag(struct mlndr_stream *mlnds, uint8_t *buf, uint32_t len)
{
	ndr_frag_t *frag;
	int size = sizeof (ndr_frag_t) + len;

	if ((frag = (ndr_frag_t *)malloc(size)) == NULL)
		return;

	frag->next = NULL;
	frag->buf = (uint8_t *)frag + sizeof (ndr_frag_t);
	frag->len = len;
	bcopy(buf, frag->buf, len);

	if (mlnds->frags.head == NULL) {
		mlnds->frags.head = frag;
		mlnds->frags.tail = frag;
		mlnds->frags.nfrag = 1;
	} else {
		mlnds->frags.tail->next = frag;
		mlnds->frags.tail = frag;
		++mlnds->frags.nfrag;
	}
}