/*
 * Implementation of SCSI Processor Target Peripheral driver for CAM.
 *
 * Copyright (c) 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id$
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/buf.h>
#include <sys/devicestat.h>
#include <sys/malloc.h>
#include <sys/conf.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_extend.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pt.h>

typedef enum {
	PT_STATE_PROBE,
	PT_STATE_NORMAL
} pt_state;

typedef enum {
	PT_FLAG_NONE		= 0x00,
	PT_FLAG_OPEN		= 0x01,
	PT_FLAG_DEVICE_INVALID	= 0x02,
	PT_FLAG_RETRY_UA	= 0x04
} pt_flags;

typedef enum {
	PT_CCB_BUFFER_IO	= 0x01,
	PT_CCB_WAITING		= 0x02,
	PT_CCB_RETRY_UA		= 0x04,
	PT_CCB_BUFFER_IO_UA	= PT_CCB_BUFFER_IO|PT_CCB_RETRY_UA
} pt_ccb_state;

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

struct pt_softc {
	struct buf_queue_head buf_queue;
	struct devstat device_stats;
	LIST_HEAD(, ccb_hdr) pending_ccbs;
	pt_state state;
	pt_flags flags;
	union ccb saved_ccb;
};

static	d_open_t	ptopen;
static	d_read_t	ptread;
static	d_write_t	ptwrite;
static	d_close_t	ptclose;
static	d_strategy_t	ptstrategy;
static	periph_init_t	ptinit;
static	void		ptasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	periph_ctor_t	ptctor;
static	periph_dtor_t	ptdtor;
static	periph_start_t	ptstart;
static	void		ptdone(struct cam_periph *periph,
				union ccb *done_ccb);
static	int		pterror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);

void	scsi_send_receive(struct ccb_scsiio *csio, u_int32_t retries,
			  void (*cbfcnp)(struct cam_periph *, union ccb *),
			  u_int tag_action, int readop, u_int byte2,
			  u_int32_t xfer_len, u_int8_t *data_ptr,
			  u_int8_t sense_len, u_int32_t timeout);
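/*
 * Driver registration: the periph_driver entry below gives CAM our init
 * routine and unit list, and DATA_SET() places it in the periphdriver
 * linker set so that ptinit() is invoked when CAM configures its
 * peripheral drivers.
 */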
static struct periph_driver ptdriver =
{
	ptinit, "pt",
	TAILQ_HEAD_INITIALIZER(ptdriver.units), /* generation */ 0
};

DATA_SET(periphdriver_set, ptdriver);

#define PT_CDEV_MAJOR	61

static struct cdevsw pt_cdevsw =
{
	/*d_open*/	ptopen,
	/*d_close*/	ptclose,
	/*d_read*/	ptread,
	/*d_write*/	ptwrite,
	/*d_ioctl*/	noioctl,
	/*d_stop*/	nostop,
	/*d_reset*/	noreset,
	/*d_devtotty*/	nodevtotty,
	/*d_poll*/	seltrue,
	/*d_mmap*/	nommap,
	/*d_strategy*/	ptstrategy,
	/*d_name*/	"pt",
	/*d_spare*/	NULL,
	/*d_maj*/	-1,
	/*d_dump*/	nodump,
	/*d_psize*/	nopsize,
	/*d_flags*/	0,
	/*d_maxio*/	0,
	/*b_maj*/	-1
};

static struct extend_array *ptperiphs;

static int
ptopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	struct cam_periph *periph;
	struct pt_softc *softc;
	int unit;
	int error;

	unit = minor(dev);
	periph = cam_extend_get(ptperiphs, unit);
	if (periph == NULL)
		return (ENXIO);

	softc = (struct pt_softc *)periph->softc;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
	    ("ptopen: dev=0x%x (unit %d)\n", dev, unit));

	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0)
		return (error); /* error code from tsleep */

	if ((softc->flags & PT_FLAG_OPEN) == 0) {
		if (cam_periph_acquire(periph) != CAM_REQ_CMP)
			error = ENXIO;
		else
			softc->flags |= PT_FLAG_OPEN;
	} else
		error = EBUSY;

	cam_periph_unlock(periph);
	return (error);
}

static int
ptclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct cam_periph *periph;
	struct pt_softc *softc;
	union ccb *ccb;
	int unit;
	int error;

	unit = minor(dev);
	periph = cam_extend_get(ptperiphs, unit);
	if (periph == NULL)
		return (ENXIO);

	softc = (struct pt_softc *)periph->softc;

	if ((error = cam_periph_lock(periph, PRIBIO)) != 0)
		return (error); /* error code from tsleep */

	softc->flags &= ~PT_FLAG_OPEN;
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);
}

static int
ptread(dev_t dev, struct uio *uio, int ioflag)
{
	return (physio(ptstrategy, NULL, dev, 1, minphys, uio));
}

static int
ptwrite(dev_t dev, struct uio *uio, int ioflag)
{
	return (physio(ptstrategy, NULL, dev, 0, minphys, uio));
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
ptstrategy(struct buf *bp)
{
	struct cam_periph *periph;
	struct pt_softc *softc;
	u_int unit;
	int s;

	unit = minor(bp->b_dev);
	periph = cam_extend_get(ptperiphs, unit);
	if (periph == NULL) {
		bp->b_error = ENXIO;
		goto bad;
	}
	softc = (struct pt_softc *)periph->softc;

	/*
	 * Mask interrupts so that the device cannot be invalidated until
	 * after we are in the queue.  Otherwise, we might not properly
	 * clean up one of the buffers.
	 */
	s = splbio();

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & PT_FLAG_DEVICE_INVALID)) {
		splx(s);
		bp->b_error = ENXIO;
		goto bad;
	}

	/*
	 * Place it in the queue of activities for this device
	 */
	bufq_insert_tail(&softc->buf_queue, bp);

	splx(s);

	/*
	 * Schedule ourselves for performing the work.
	 */
	xpt_schedule(periph, /* XXX priority */1);

	return;
bad:
	bp->b_flags |= B_ERROR;

	/*
	 * Correctly set the buf to indicate a completed xfer
	 */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}
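/*
 * One-time driver initialization: allocate the unit lookup array, hook
 * an async callback for AC_FOUND_DEVICE on the wildcard path, and, if
 * that succeeds, register our character device switch entry.
 */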
static void
ptinit(void)
{
	cam_status status;
	struct cam_path *path;

	/*
	 * Create our extend array for storing the devices we attach to.
	 */
	ptperiphs = cam_extend_new();
	if (ptperiphs == NULL) {
		printf("pt: Failed to alloc extend array!\n");
		return;
	}

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);

	if (status == CAM_REQ_CMP) {
		struct ccb_setasync csa;

		xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_FOUND_DEVICE;
		csa.callback = ptasync;
		csa.callback_arg = NULL;
		xpt_action((union ccb *)&csa);
		status = csa.ccb_h.status;
		xpt_free_path(path);
	}

	if (status != CAM_REQ_CMP) {
		printf("pt: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else {
		/* If we were successful, register our devsw */
		dev_t dev;

		dev = makedev(PT_CDEV_MAJOR, 0);
		cdevsw_add(&dev, &pt_cdevsw, NULL);
	}
}
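/*
 * Peripheral constructor, invoked by cam_periph_alloc() when ptasync()
 * reports a new processor-type device.  Allocate and initialize the
 * softc, register a devstat entry, and sign up for the async events we
 * care about.
 */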
static cam_status
ptctor(struct cam_periph *periph, void *arg)
{
	int s;
	struct pt_softc *softc;
	struct ccb_setasync csa;
	struct ccb_getdev *cgd;

	cgd = (struct ccb_getdev *)arg;
	if (periph == NULL) {
		printf("ptregister: periph was NULL!!\n");
		return (CAM_REQ_CMP_ERR);
	}

	if (cgd == NULL) {
		printf("ptregister: no getdev CCB, can't register device\n");
		return (CAM_REQ_CMP_ERR);
	}

	softc = (struct pt_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);

	if (softc == NULL) {
		printf("ptregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return (CAM_REQ_CMP_ERR);
	}

	bzero(softc, sizeof(*softc));
	LIST_INIT(&softc->pending_ccbs);
	softc->state = PT_STATE_NORMAL;
	bufq_init(&softc->buf_queue);

	periph->softc = softc;

	cam_extend_set(ptperiphs, periph->unit_number, periph);

	/*
	 * Processor target devices have no notion of a blocksize,
	 * so register our devstat entry with DEVSTAT_NO_BLOCKSIZE.
	 */
	devstat_add_entry(&softc->device_stats, "pt",
			  periph->unit_number, 0,
			  DEVSTAT_NO_BLOCKSIZE,
			  cgd->pd_type | DEVSTAT_TYPE_IF_SCSI);

	/*
	 * Add async callbacks for bus reset and
	 * bus device reset calls.  I don't bother
	 * checking if this fails as, in most cases,
	 * the system will function just fine without
	 * them and the only alternative would be to
	 * not attach the device on failure.
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
	csa.callback = ptasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);

	/* Tell the user we've attached to the device */
	xpt_announce_periph(periph, NULL);

	return (CAM_REQ_CMP);
}

static void
ptdtor(struct cam_periph *periph)
{
	cam_extend_release(ptperiphs, periph->unit_number);
	xpt_print_path(periph->path);
	printf("removing device entry\n");
	free(periph->softc, M_DEVBUF);
}

static void
ptasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;

		if (cgd->pd_type != T_PROCESSOR)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(ptctor, ptdtor, ptstart,
					  "pt", CAM_PERIPH_BIO, cgd->ccb_h.path,
					  ptasync, AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("ptasync: Unable to attach to new device "
			       "due to status 0x%x\n", status);
		break;
	}
	case AC_LOST_DEVICE:
	{
		int s;
		struct pt_softc *softc;
		struct buf *q_bp;
		struct ccb_setasync csa;

		softc = (struct pt_softc *)periph->softc;

		/*
		 * Ensure that no other async callbacks that
		 * might affect this peripheral can come through.
		 */
		s = splcam();

		/*
		 * De-register any async callbacks.
		 */
		xpt_setup_ccb(&csa.ccb_h, periph->path,
			      /* priority */ 5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = 0;
		csa.callback = ptasync;
		csa.callback_arg = periph;
		xpt_action((union ccb *)&csa);

		softc->flags |= PT_FLAG_DEVICE_INVALID;

		/*
		 * Return all queued I/O with ENXIO.
		 * XXX Handle any transactions queued to the card
		 *     with XPT_ABORT_CCB.
		 */
		while ((q_bp = bufq_first(&softc->buf_queue)) != NULL) {
			bufq_remove(&softc->buf_queue, q_bp);
			q_bp->b_resid = q_bp->b_bcount;
			q_bp->b_error = ENXIO;
			q_bp->b_flags |= B_ERROR;
			biodone(q_bp);
		}
		devstat_remove_entry(&softc->device_stats);

		xpt_print_path(periph->path);
		printf("lost device\n");

		splx(s);

		cam_periph_invalidate(periph);
		break;
	}
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		struct pt_softc *softc;
		struct ccb_hdr *ccbh;
		int s;

		softc = (struct pt_softc *)periph->softc;
		s = splsoftcam();
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.
		 */
		softc->flags |= PT_FLAG_RETRY_UA;
		for (ccbh = LIST_FIRST(&softc->pending_ccbs);
		     ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le))
			ccbh->ccb_state |= PT_CCB_RETRY_UA;
		splx(s);
		break;
	}
	case AC_TRANSFER_NEG:
	case AC_SCSI_AEN:
	case AC_UNSOL_RESEL:
	default:
		break;
	}
}
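/*
 * Called by the XPT when a CCB is available for us at the priority we
 * requested via xpt_schedule().  Dequeue the next buf, translate it into
 * a SEND or RECEIVE CCB with scsi_send_receive(), track it on the pending
 * CCB list, and hand it to xpt_action().  Immediate-priority requests are
 * instead handed back to sleepers waiting on periph->ccb_list.
 */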
static void
ptstart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct pt_softc *softc;
	struct buf *bp;
	int s;

	softc = (struct pt_softc *)periph->softc;

	/*
	 * See if there is a buf with work for us to do..
	 */
	s = splbio();
	bp = bufq_first(&softc->buf_queue);
	if (periph->immediate_priority <= periph->pinfo.priority) {
		CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
				("queuing for immediate ccb\n"));
		start_ccb->ccb_h.ccb_state = PT_CCB_WAITING;
		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
				  periph_links.sle);
		periph->immediate_priority = CAM_PRIORITY_NONE;
		splx(s);
		wakeup(&periph->ccb_list);
	} else if (bp == NULL) {
		splx(s);
		xpt_release_ccb(start_ccb);
	} else {
		int oldspl;

		bufq_remove(&softc->buf_queue, bp);

		devstat_start_transaction(&softc->device_stats);

		scsi_send_receive(&start_ccb->csio,
				  /*retries*/4,
				  ptdone,
				  MSG_SIMPLE_Q_TAG,
				  bp->b_flags & B_READ,
				  /*byte2*/0,
				  bp->b_bcount,
				  bp->b_data,
				  /*sense_len*/SSD_FULL_SIZE,
				  /*timeout*/10000);

		start_ccb->ccb_h.ccb_state = PT_CCB_BUFFER_IO;

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
		 */
		oldspl = splcam();
		LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h,
				 periph_links.le);
		splx(oldspl);

		start_ccb->ccb_h.ccb_bp = bp;
		bp = bufq_first(&softc->buf_queue);
		splx(s);

		xpt_action(start_ccb);

		if (bp != NULL) {
			/* Have more work to do, so ensure we stay scheduled */
			xpt_schedule(periph, /* XXX priority */1);
		}
	}
}

static void
ptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct pt_softc *softc;
	struct ccb_scsiio *csio;

	softc = (struct pt_softc *)periph->softc;
	csio = &done_ccb->csio;
	switch (csio->ccb_h.ccb_state) {
	case PT_CCB_BUFFER_IO:
	case PT_CCB_BUFFER_IO_UA:
	{
		struct buf *bp;
		int oldspl;

		bp = (struct buf *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;
			int s;
			int sf;

			if ((csio->ccb_h.ccb_state & PT_CCB_RETRY_UA) != 0)
				sf = SF_RETRY_UA;
			else
				sf = 0;

			if ((error = pterror(done_ccb, 0, sf)) == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			}
			if (error != 0) {
				struct buf *q_bp;

				s = splbio();

				if (error == ENXIO) {
					/*
					 * Catastrophic error.  Mark our device
					 * as invalid.
					 */
					xpt_print_path(periph->path);
					printf("Invalidating device\n");
					softc->flags |= PT_FLAG_DEVICE_INVALID;
				}

				/*
				 * Return all queued I/O with EIO, so that
				 * the client can retry these I/Os in the
				 * proper order should it attempt to recover.
				 */
				while ((q_bp = bufq_first(&softc->buf_queue))
					!= NULL) {
					bufq_remove(&softc->buf_queue, q_bp);
					q_bp->b_resid = q_bp->b_bcount;
					q_bp->b_error = EIO;
					q_bp->b_flags |= B_ERROR;
					biodone(q_bp);
				}
				splx(s);
				bp->b_error = error;
				bp->b_resid = bp->b_bcount;
				bp->b_flags |= B_ERROR;
			} else {
				bp->b_resid = csio->resid;
				bp->b_error = 0;
				if (bp->b_resid != 0) {
					/* Short transfer ??? */
					bp->b_flags |= B_ERROR;
				}
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			bp->b_resid = csio->resid;
			if (bp->b_resid != 0)
				bp->b_flags |= B_ERROR;
		}

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
		 */
		oldspl = splcam();
		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
		splx(oldspl);

		devstat_end_transaction(&softc->device_stats,
					bp->b_bcount - bp->b_resid,
					done_ccb->csio.tag_action & 0xf,
					(bp->b_flags & B_READ) ? DEVSTAT_READ
							       : DEVSTAT_WRITE);

		biodone(bp);
		break;
	}
	case PT_CCB_WAITING:
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}
	xpt_release_ccb(done_ccb);
}

static int
pterror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct pt_softc *softc;
	struct cam_periph *periph;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct pt_softc *)periph->softc;

	return (cam_periph_error(ccb, cam_flags, sense_flags,
				 &softc->saved_ccb));
}
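/*
 * Fill in a CCB for a SCSI SEND or RECEIVE command to a processor device.
 * 'readop' selects RECEIVE (data in) versus SEND (data out); the transfer
 * length is encoded as a three-byte field in the CDB.
 */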
void
scsi_send_receive(struct ccb_scsiio *csio, u_int32_t retries,
		  void (*cbfcnp)(struct cam_periph *, union ccb *),
		  u_int tag_action, int readop, u_int byte2,
		  u_int32_t xfer_len, u_int8_t *data_ptr, u_int8_t sense_len,
		  u_int32_t timeout)
{
	struct scsi_send_receive *scsi_cmd;

	scsi_cmd = (struct scsi_send_receive *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = readop ? RECEIVE : SEND;
	scsi_cmd->byte2 = byte2;
	scsi_ulto3b(xfer_len, scsi_cmd->xfer_len);
	scsi_cmd->control = 0;

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/readop ? CAM_DIR_IN : CAM_DIR_OUT,
		      tag_action,
		      data_ptr,
		      xfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}
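/*
 * Usage sketch (not part of the driver): reads on the character device
 * are translated into SCSI RECEIVE commands and writes into SEND commands
 * via physio() and ptstrategy().  Assuming the first attached unit shows
 * up as /dev/pt0 (the node name is an assumption based on the "pt" cdevsw
 * name and unit-number minors above), a command block could be exchanged
 * with something like:
 *
 *	dd if=command.bin of=/dev/pt0 bs=512 count=1
 *	dd if=/dev/pt0 of=response.bin bs=512 count=1
 */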