/*
 * drivers/s390/cio/css.c
 * driver for channel subsystem
 *
 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *			IBM Corporation
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;

struct channel_subsystem *css[__MAX_CSSID + 1];

int css_characteristics_avail = 0;

int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel (sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}

	if (sch->st != SUBCHANNEL_TYPE_IO) {
		/* For now we ignore all non-io subchannels. */
		kfree(sch);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Set intparm to subchannel address.
	 * This is fine even on 64bit since the subchannel is always located
	 * under 2G.
	 */
	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
	ret = cio_modify(sch);
	if (ret) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	return sch;
}
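
/*
 * Example (illustrative sketch, not part of this driver): walking all
 * subchannels with for_each_subchannel() above. The callback is invoked
 * for every subchannel id in every known subchannel set; a non-zero
 * return ends the walk of the current subchannel set (the next set, if
 * any, is still scanned) and the last return value is handed back to
 * the caller. The name count_sch and its counter are hypothetical.
 *
 *	static int count_sch(struct subchannel_id schid, void *data)
 *	{
 *		int *count = data;
 *
 *		(*count)++;
 *		return 0;	// 0 means: keep iterating
 *	}
 *
 *	int count = 0;
 *	for_each_subchannel(count_sch, &count);
 */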

static void
css_free_subchannel(struct subchannel *sch)
{
	if (sch) {
		/* Reset intparm to zeroes. */
		sch->schib.pmcw.intparm = 0;
		cio_modify(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}

static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		kfree(sch->lock);
		kfree(sch);
	}
}

int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}

static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &css[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = subch_attr_groups;
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		printk (KERN_WARNING "%s: could not register %s\n",
			__func__, sch->dev.bus_id);
		return ret;
	}
	return ret;
}

int
css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	sch = css_alloc_subchannel(schid);
	if (IS_ERR(sch))
		return PTR_ERR(sch);
	ret = css_register_subchannel(sch);
	if (ret)
		css_free_subchannel(sch);
	return ret;
}

static int
check_subchannel(struct device * dev, void * data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}
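
/*
 * Example (illustrative sketch): looking up a subchannel by id.
 * bus_find_device() returns the embedded struct device with a reference
 * held, so every successful get_subchannel_by_schid() must be paired
 * with put_device(), as css_evaluate_subchannel() below does. The
 * subchannel number 0x0042 is made up for the example.
 *
 *	struct subchannel_id schid;
 *	struct subchannel *sch;
 *
 *	init_subchannel_id(&schid);
 *	schid.sch_no = 0x0042;
 *	sch = get_subchannel_by_schid(schid);
 *	if (sch) {
 *		// ... inspect sch ...
 *		put_device(&sch->dev);
 *	}
 */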

static int css_get_subchannel_status(struct subchannel *sch)
{
	struct schib schib;

	if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
		return CIO_GONE;
	if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
		return CIO_REVALIDATE;
	if (!sch->lpm)
		return CIO_NO_PATH;
	return CIO_OPER;
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int event, ret, disc;
	unsigned long flags;
	enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;

	spin_lock_irqsave(sch->lock, flags);
	disc = device_is_disconnected(sch);
	if (disc && slow) {
		/* Disconnected devices are evaluated directly only. */
		spin_unlock_irqrestore(sch->lock, flags);
		return 0;
	}
	/* No interrupt after machine check - kill pending timers. */
	device_kill_pending_timer(sch);
	if (!disc && !slow) {
		/* Non-disconnected devices are evaluated on the slow path. */
		spin_unlock_irqrestore(sch->lock, flags);
		return -EAGAIN;
	}
	event = css_get_subchannel_status(sch);
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
		      sch->schid.ssid, sch->schid.sch_no, event,
		      disc ? "disconnected" : "normal",
		      slow ? "slow" : "fast");
	/* Analyze subchannel status. */
	action = NONE;
	switch (event) {
	case CIO_NO_PATH:
		if (disc) {
			/* Check if paths have become available. */
			action = REPROBE;
			break;
		}
		/* fall through */
	case CIO_GONE:
		/* Prevent unwanted effects when opening lock. */
		cio_disable_subchannel(sch);
		device_set_disconnected(sch);
		/* Ask driver what to do with device. */
		action = UNREGISTER;
		if (sch->driver && sch->driver->notify) {
			spin_unlock_irqrestore(sch->lock, flags);
			ret = sch->driver->notify(&sch->dev, event);
			spin_lock_irqsave(sch->lock, flags);
			if (ret)
				action = NONE;
		}
		break;
	case CIO_REVALIDATE:
		/* Device will be removed, so no notify necessary. */
		if (disc)
			/* Reprobe because immediate unregister might block. */
			action = REPROBE;
		else
			action = UNREGISTER_PROBE;
		break;
	case CIO_OPER:
		if (disc)
			/* Get device operational again. */
			action = REPROBE;
		break;
	}
	/* Perform action. */
	ret = 0;
	switch (action) {
	case UNREGISTER:
	case UNREGISTER_PROBE:
		/* Unregister device (will use subchannel lock). */
		spin_unlock_irqrestore(sch->lock, flags);
		css_sch_device_unregister(sch);
		spin_lock_irqsave(sch->lock, flags);

		/* Reset intparm to zeroes. */
		sch->schib.pmcw.intparm = 0;
		cio_modify(sch);
		break;
	case REPROBE:
		device_trigger_reprobe(sch);
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* Probe if necessary. */
	if (action == UNREGISTER_PROBE)
		ret = css_probe_device(sch->schid);

	return ret;
}
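
/*
 * Example (hypothetical driver callback): css_evaluate_known_subchannel()
 * above consults sch->driver->notify() before unregistering a device
 * that is gone or has lost all paths. A non-zero return keeps the (now
 * disconnected) device around; 0 lets the unregistration proceed. The
 * function name is an assumption, not an existing driver.
 *
 *	static int example_notify(struct device *dev, int event)
 *	{
 *		switch (event) {
 *		case CIO_GONE:
 *		case CIO_NO_PATH:
 *			return 1;	// keep device, wait for it to return
 *		default:
 *			return 0;	// allow unregistration
 *		}
 *	}
 */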

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !schib.pmcw.dnv) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
		      "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);

	return css_probe_device(schid);
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		printk(KERN_WARNING "cio: could not allocate slow subchannel "
		       "set\n");
		return -ENOMEM;
	}
	return 0;
}

subsys_initcall(slow_subchannel_init);

static void css_slow_path_func(struct work_struct *unused)
{
	struct subchannel_id schid;

	CIO_TRACE_EVENT(4, "slowpath");
	spin_lock_irq(&slow_subchannel_lock);
	init_subchannel_id(&schid);
	while (idset_sch_get_first(slow_subchannel_set, &schid)) {
		idset_sch_del(slow_subchannel_set, schid);
		spin_unlock_irq(&slow_subchannel_lock);
		css_evaluate_subchannel(schid, 1);
		spin_lock_irq(&slow_subchannel_lock);
	}
	spin_unlock_irq(&slow_subchannel_lock);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

/* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data)
{
	struct subchannel *sch;
	int ret;

	CIO_DEBUG(KERN_INFO, 6, "cio: reprobe 0.%x.%04x\n",
		  schid.ssid, schid.sch_no);
	if (need_reprobe)
		return -EAGAIN;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		/* Already known. */
		put_device(&sch->dev);
		return 0;
	}

	ret = css_probe_device(schid);
	switch (ret) {
	case 0:
		break;
	case -ENXIO:
	case -ENOMEM:
		/* These should abort looping */
		break;
	default:
		ret = 0;
	}

	return ret;
}

/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
	int ret;

	CIO_MSG_EVENT(2, "reprobe start\n");

	need_reprobe = 0;
	/* Make sure initial subchannel scan is done. */
	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	ret = for_each_subchannel(reprobe_subchannel, NULL);

	CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
		      need_reprobe);
}

static DECLARE_WORK(css_reprobe_work, reprobe_all);
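
/*
 * Example (illustrative sketch): deferring subchannel evaluation to the
 * slow path. css_schedule_eval() above only marks the subchannel in
 * slow_subchannel_set and queues slow_path_work; the actual evaluation
 * happens later in process context via css_slow_path_func(), which makes
 * the call safe from interrupt context such as a machine check handler.
 *
 *	// somewhere in interrupt context, schid already known:
 *	css_schedule_eval(schid);
 *	// or, after an event affecting all subchannels:
 *	css_schedule_eval_all();
 */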

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	need_reprobe = 1;
	queue_work(ccw_device_work, &css_reprobe_work);
}

EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
void css_process_crw(int rsid1, int rsid2)
{
	struct subchannel_id mchk_schid;

	CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
		      rsid1, rsid2);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = rsid1;
	if (rsid2 != 0)
		mchk_schid.ssid = (rsid2 >> 8) & 3;

	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static int __init
__init_channel_subsystem(struct subchannel_id schid, void *data)
{
	struct subchannel *sch;
	int ret;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			ret = PTR_ERR(sch);
		else
			ret = 0;
		switch (ret) {
		case 0:
			break;
		case -ENOMEM:
			panic("Out of memory in init_channel_subsystem\n");
		/* -ENXIO: no more subchannels. */
		case -ENXIO:
			return ret;
		/* -EIO: this subchannel set not supported. */
		case -EIO:
			return ret;
		default:
			return 0;
		}
	}
	/*
	 * We register ALL valid subchannels in ioinfo, even those
	 * that have been present before init_channel_subsystem.
	 * These subchannels can't have been registered yet (kmalloc
	 * not working) so we do it now. This is true e.g. for the
	 * console subchannel.
	 */
	css_register_subchannel(sch);
	return 0;
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	if (css_characteristics_avail && css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
	css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
	css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (!css)
		return 0;
	return sprintf(buf, "%x\n", css->cm_enabled);
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	switch (buf[0]) {
	case '0':
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case '1':
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	return ret < 0 ? ret : count;
}
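
/*
 * The cm_enable attribute defined below is the user-space switch for
 * channel measurement: writing '1' enables it via chsc_secm(), '0'
 * disables it. Assuming the usual sysfs layout, where the css devices
 * sit at the root of the device tree, this would look like:
 *
 *	echo 1 > /sys/devices/css0/cm_enable
 */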
static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;

	memset(css[nr], 0, sizeof(struct channel_subsystem));
	css[nr]->pseudo_subchannel =
		kzalloc(sizeof(*css[nr]->pseudo_subchannel), GFP_KERNEL);
	if (!css[nr]->pseudo_subchannel)
		return -ENOMEM;
	css[nr]->pseudo_subchannel->dev.parent = &css[nr]->device;
	css[nr]->pseudo_subchannel->dev.release = css_subchannel_release;
	sprintf(css[nr]->pseudo_subchannel->dev.bus_id, "defunct");
	ret = cio_create_sch_lock(css[nr]->pseudo_subchannel);
	if (ret) {
		kfree(css[nr]->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css[nr]->mutex);
	css[nr]->valid = 1;
	css[nr]->cssid = nr;
	sprintf(css[nr]->device.bus_id, "css%x", nr);
	css[nr]->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css[nr], tod_high);
	return 0;
}

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init
init_channel_subsystem (void)
{
	int ret, i;

	if (chsc_determine_css_characteristics() == 0)
		css_characteristics_avail = 1;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	switch (ret) {
	case 0: /* Success. */
		max_ssid = __MAX_SSID;
		break;
	case -ENOMEM:
		goto out_bus;
	default:
		max_ssid = 0;
	}
	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		css[i] = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css[i]) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		ret = setup_css(i);
		if (ret)
			goto out_free;
		ret = device_register(&css[i]->device);
		if (ret)
			goto out_free_all;
		if (css_characteristics_avail &&
		    css_chsc_characteristics.secm) {
			ret = device_create_file(&css[i]->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css[i]->pseudo_subchannel->dev);
		if (ret)
			goto out_file;
	}
	css_init_done = 1;

	ctl_set_bit(6, 28);

	for_each_subchannel(__init_channel_subsystem, NULL);
	return 0;
out_file:
	device_remove_file(&css[i]->device, &dev_attr_cm_enable);
out_device:
	device_unregister(&css[i]->device);
out_free_all:
	kfree(css[i]->pseudo_subchannel->lock);
	kfree(css[i]->pseudo_subchannel);
out_free:
	kfree(css[i]);
out_unregister:
	while (i > 0) {
		i--;
		device_unregister(&css[i]->pseudo_subchannel->dev);
		if (css_characteristics_avail && css_chsc_characteristics.secm)
			device_remove_file(&css[i]->device,
					   &dev_attr_cm_enable);
		device_unregister(&css[i]->device);
	}
out_bus:
	bus_unregister(&css_bus_type);
out:
	return ret;
}

int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}
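
/*
 * Example (illustrative sketch): each channel subsystem owns a "defunct"
 * pseudo subchannel, created in setup_css() above, that only serves as a
 * parent for devices without a real subchannel. Code handed an arbitrary
 * subchannel can filter it out with sch_is_pseudo_sch():
 *
 *	if (sch_is_pseudo_sch(sch))
 *		return -ENODEV;	// not backed by real hardware
 */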

/*
 * Find a driver for a subchannel. Drivers are matched by subchannel
 * type, with the exception that the console subchannel driver has its
 * own subchannel type although the device is an i/o subchannel.
 */
static int
css_bus_match (struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = container_of (dev, struct subchannel, dev);
	struct css_driver *driver = container_of (drv, struct css_driver, drv);

	if (sch->st == driver->subchannel_type)
		return 1;

	return 0;
}

static int
css_probe (struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	sch->driver = container_of (dev->driver, struct css_driver, drv);
	return (sch->driver->probe ? sch->driver->probe(sch) : 0);
}

static int
css_remove (struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	return (sch->driver->remove ? sch->driver->remove(sch) : 0);
}

static void
css_shutdown (struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
};

subsys_initcall(init_channel_subsystem);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);
EXPORT_SYMBOL_GPL(css_characteristics_avail);
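
/*
 * Example (hypothetical driver, names are assumptions): a minimal
 * css_driver as matched by css_bus_match() above. The driver binds to
 * all subchannels whose type equals its subchannel_type; registration
 * goes through the embedded struct device_driver.
 *
 *	static int example_probe(struct subchannel *sch)
 *	{
 *		return 0;	// claim the subchannel
 *	}
 *
 *	static struct css_driver example_driver = {
 *		.subchannel_type = SUBCHANNEL_TYPE_IO,
 *		.drv = {
 *			.name = "example",
 *			.bus  = &css_bus_type,
 *		},
 *		.probe = example_probe,
 *	};
 *
 *	// in the module init function:
 *	driver_register(&example_driver.drv);
 */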