/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *			      IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"

static void *sei_page;

static int new_channel_path(int chpid);

static inline void
set_chp_logically_online(int chp, int onoff)
{
	css[0]->chps[chp]->state = onoff;
}

static int
get_chp_status(int chp)
{
	return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
}

void
chsc_validate_chpids(struct subchannel *sch)
{
	int mask, chp;

	for (chp = 0; chp <= 7; chp++) {
		mask = 0x80 >> chp;
		if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
			/* disable using this path */
			sch->opm &= ~mask;
	}
}

/*
 * If the chpid is not known (yet), trigger a rescan via the slow path;
 * otherwise warn if it is not actually online.
 */
void
chpid_is_actually_online(int chp)
{
	int state;

	state = get_chp_status(chp);
	if (state < 0) {
		need_rescan = 1;
		queue_work(slow_path_wq, &slow_path_work);
	} else
		WARN_ON(!state);
}

/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *	  process more than one at a time? */
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
	int ccode, j;

	struct {
		struct chsc_header request;
		u16 reserved1a:10;
		u16 ssid:2;
		u16 reserved1b:4;
		u16 f_sch;	  /* first subchannel */
		u16 reserved2;
		u16 l_sch;	  /* last subchannel */
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8 sch_valid : 1;
		u8 dev_valid : 1;
		u8 st	     : 3; /* subchannel type */
		u8 zeroes    : 3;
		u8 unit_addr;	  /* unit address */
		u16 devno;	  /* device number */
		u8 path_mask;
		u8 fla_valid_mask;
		u16 sch;	  /* subchannel */
		u8 chpid[8];	  /* chpids 0-7 */
		u16 fla[8];	  /* full link addresses 0-7 */
	} __attribute__ ((packed)) *ssd_area;

	ssd_area = page;

	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;

	ssd_area->ssid = sch->schid.ssid;
	ssd_area->f_sch = sch->schid.sch_no;
	ssd_area->l_sch = sch->schid.sch_no;

	ccode = chsc(ssd_area);
	if (ccode > 0) {
		pr_debug("chsc returned with ccode = %d\n", ccode);
		return (ccode == 3) ? -ENODEV : -EBUSY;
	}

	switch (ssd_area->response.code) {
	case 0x0001: /* everything ok */
		break;
	case 0x0002:
		CIO_CRW_EVENT(2, "Invalid command!\n");
		return -EINVAL;
	case 0x0003:
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		return -EINVAL;
	case 0x0004:
		CIO_CRW_EVENT(2, "Model does not provide ssd\n");
		return -EOPNOTSUPP;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      ssd_area->response.code);
		return -EIO;
	}

	/*
	 * ssd_area->st stores the type of the detected
	 * subchannel, with the following definitions:
	 *
	 * 0: I/O subchannel:	  All fields have meaning
	 * 1: CHSC subchannel:	  Only sch_val, st and sch
	 *			  have meaning
	 * 2: Message subchannel: All fields except unit_addr
	 *			  have meaning
	 * 3: ADM subchannel:	  Only sch_val, st and sch
	 *			  have meaning
	 *
	 * Other types are currently undefined.
	 */
	if (ssd_area->st > 3) { /* uhm, that looks strange... */
		CIO_CRW_EVENT(0, "Strange subchannel type %d"
			      " for sch 0.%x.%04x\n", ssd_area->st,
			      sch->schid.ssid, sch->schid.sch_no);
		/*
		 * There may have been a new subchannel type defined in the
		 * time since this code was written; since we don't know which
		 * fields have meaning and what to do with it we just jump out
		 */
		return 0;
	} else {
		const char *type[4] = {"I/O", "chsc", "message", "ADM"};
		CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
			      sch->schid.ssid, sch->schid.sch_no,
			      type[ssd_area->st]);

		sch->ssd_info.valid = 1;
		sch->ssd_info.type = ssd_area->st;
	}

	if (ssd_area->st == 0 || ssd_area->st == 2) {
		for (j = 0; j < 8; j++) {
			if (!((0x80 >> j) & ssd_area->path_mask &
			      ssd_area->fla_valid_mask))
				continue;
			sch->ssd_info.chpid[j] = ssd_area->chpid[j];
			sch->ssd_info.fla[j] = ssd_area->fla[j];
		}
	}
	return 0;
}

int
css_get_ssd_info(struct subchannel *sch)
{
	int ret;
	void *page;

	page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	spin_lock_irq(sch->lock);
	ret = chsc_get_sch_desc_irq(sch, page);
	if (ret) {
		static int cio_chsc_err_msg;

		if (!cio_chsc_err_msg) {
			printk(KERN_ERR
			       "chsc_get_sch_descriptions:"
			       " Error %d while doing chsc; "
			       "processing some machine checks may "
			       "not work\n", ret);
			cio_chsc_err_msg = 1;
		}
	}
	spin_unlock_irq(sch->lock);
	free_page((unsigned long)page);
	if (!ret) {
		int j, chpid, mask;
		/* Allocate channel path structures, if needed. */
		for (j = 0; j < 8; j++) {
			mask = 0x80 >> j;
			chpid = sch->ssd_info.chpid[j];
			if ((sch->schib.pmcw.pim & mask) &&
			    (get_chp_status(chpid) < 0))
				new_channel_path(chpid);
		}
	}
	return ret;
}

static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
	int j;
	int mask;
	struct subchannel *sch;
	struct channel_path *chpid;
	struct schib schib;

	sch = to_subchannel(dev);
	chpid = data;
	for (j = 0; j < 8; j++) {
		mask = 0x80 >> j;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[j] == chpid->id))
			break;
	}
	if (j >= 8)
		return 0;

	spin_lock_irq(sch->lock);

	stsch(sch->schid, &schib);
	if (!schib.pmcw.dnv)
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;

	if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
	    (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
	    (sch->schib.pmcw.lpum == mask)) {
		int cc;

		cc = cio_clear(sch);
		if (cc == -ENODEV)
			goto out_unreg;
		/* Request retry of internal operation. */
		device_set_intretry(sch);
		/* Call handler. */
		if (sch->driver && sch->driver->termination)
			sch->driver->termination(&sch->dev);
		goto out_unlock;
	}

	/* trigger path verification. */
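	/*
	 * If the driver cannot verify paths and the removed chpid was the
	 * last path in use, fall through to unregistering the subchannel.
	 */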
	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);
	else if (sch->lpm == mask)
		goto out_unreg;
out_unlock:
	spin_unlock_irq(sch->lock);
	return 0;
out_unreg:
	spin_unlock_irq(sch->lock);
	sch->lpm = 0;
	if (css_enqueue_subchannel_slow(sch->schid)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
	}
	return 0;
}

static void
s390_set_chpid_offline( __u8 chpid)
{
	char dbf_txt[15];
	struct device *dev;

	sprintf(dbf_txt, "chpr%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (get_chp_status(chpid) <= 0)
		return;
	dev = get_device(&css[0]->chps[chpid]->dev);
	bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
			 s390_subchannel_remove_chpid);

	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	put_device(dev);
}

struct res_acc_data {
	struct channel_path *chp;
	u32 fla_mask;
	u16 fla;
};

static int
s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
{
	int found;
	int chp;
	int ccode;

	found = 0;
	for (chp = 0; chp <= 7; chp++)
		/*
		 * check if chpid is in information updated by ssd
		 */
		if (sch->ssd_info.valid &&
		    sch->ssd_info.chpid[chp] == res_data->chp->id &&
		    (sch->ssd_info.fla[chp] & res_data->fla_mask)
		    == res_data->fla) {
			found = 1;
			break;
		}

	if (found == 0)
		return 0;

	/*
	 * Do a stsch to update our subchannel structure with the
	 * new path information and possibly check for logically
	 * offline chpids.
	 */
	ccode = stsch(sch->schid, &sch->schib);
	if (ccode > 0)
		return 0;

	return 0x80 >> chp;
}

static int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	int ret;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
	if (ret) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}

static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
	int chp_mask, old_lpm;
	struct res_acc_data *res_data;
	struct subchannel *sch;

	res_data = data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if a subchannel is newly available. */
		return s390_process_res_acc_new_sch(schid);

	spin_lock_irq(sch->lock);

	chp_mask = s390_process_res_acc_sch(res_data, sch);

	if (chp_mask == 0) {
		spin_unlock_irq(sch->lock);
		put_device(&sch->dev);
		return 0;
	}
	old_lpm = sch->lpm;
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | chp_mask) & sch->opm;
	if (!old_lpm && sch->lpm)
		device_trigger_reprobe(sch);
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(sch->lock);
	put_device(&sch->dev);
	return 0;
}


static int
s390_process_res_acc (struct res_acc_data *res_data)
{
	int rc;
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x", res_data->chp->id);
	CIO_TRACE_EVENT( 2, dbf_txt);
	if (res_data->fla != 0) {
		sprintf(dbf_txt, "fla%x", res_data->fla);
		CIO_TRACE_EVENT( 2, dbf_txt);
	}

	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * we will have to do.
	 */
	rc = for_each_subchannel(__s390_process_res_acc, res_data);
	if (css_slow_subchannels_exist())
		rc = -EAGAIN;
	else if (rc != -EAGAIN)
		rc = 0;
	return rc;
}

static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq&0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0]&0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0]&0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0]&0x000000ff);
}

struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
	int chpid;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return 0;
	chpid = __get_chpid_from_lir(sei_area->ccdf);
	if (chpid < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else
		s390_set_chpid_offline(chpid);

	return 0;
}

static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
	struct res_acc_data res_data;
	struct device *dev;
	int status;
	int rc;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return 0;
	/* allocate a new channel path structure, if needed */
	status = get_chp_status(sei_area->rsid);
	if (status < 0)
		new_channel_path(sei_area->rsid);
	else if (!status)
		return 0;
	dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
	memset(&res_data, 0, sizeof(struct res_acc_data));
	res_data.chp = to_channelpath(dev);
	if ((sei_area->vf & 0xc0) != 0) {
		res_data.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			res_data.fla_mask = 0xffff;
		else
			/* link address */
			res_data.fla_mask = 0xff00;
	}
	rc = s390_process_res_acc(&res_data);
	put_device(dev);

	return rc;
}

static int chsc_process_sei(struct chsc_sei_area *sei_area)
{
	int rc;

	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40)
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
	/* which kind of information was stored? */
	rc = 0;
	switch (sei_area->cc) {
	case 1: /* link incident */
		rc = chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		rc = chsc_process_sei_res_acc(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}

	return rc;
}

int chsc_process_crw(void)
{
	struct chsc_sei_area *sei_area;
	int ret;
	int rc;

	if (!sei_page)
		return 0;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
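	/*
	 * Drain all pending events below: store event information (SEI) is
	 * reissued as long as the response flags further events pending
	 * (flag bit 0x80).
	 */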
	sei_area = sei_page;

	CIO_TRACE_EVENT( 2, "prcss");
	ret = 0;
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			rc = chsc_process_sei(sei_area);
			if (rc)
				ret = rc;
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			ret = 0;
			break;
		}
	} while (sei_area->flags & 0x80);

	return ret;
}

static int
__chp_add_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	int ret;

	if (stsch_err(schid, &schib))
		/* We're through */
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
	if (ret) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}


static int
__chp_add(struct subchannel_id schid, void *data)
{
	int i, mask;
	struct channel_path *chp;
	struct subchannel *sch;

	chp = data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if the subchannel is now available. */
		return __chp_add_new_sch(schid);
	spin_lock_irq(sch->lock);
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[i] == chp->id)) {
			if (stsch(sch->schid, &sch->schib) != 0) {
				/* Endgame. */
				spin_unlock_irq(sch->lock);
				return -ENXIO;
			}
			break;
		}
	}
	if (i == 8) {
		spin_unlock_irq(sch->lock);
		return 0;
	}
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | mask) & sch->opm;

	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(sch->lock);
	put_device(&sch->dev);
	return 0;
}

static int
chp_add(int chpid)
{
	int rc;
	char dbf_txt[15];
	struct device *dev;

	if (!get_chp_status(chpid))
		return 0; /* no need to do the rest */

	sprintf(dbf_txt, "cadd%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	dev = get_device(&css[0]->chps[chpid]->dev);
	rc = for_each_subchannel(__chp_add, to_channelpath(dev));
	if (css_slow_subchannels_exist())
		rc = -EAGAIN;
	if (rc != -EAGAIN)
		rc = 0;
	put_device(dev);
	return rc;
}

/*
 * Handling of crw machine checks with channel path source.
 */
int
chp_process_crw(int chpid, int on)
{
	if (on == 0) {
		/* Path has gone. We use the link incident routine. */
		s390_set_chpid_offline(chpid);
		return 0; /* De-register is async anyway. */
	}
	/*
	 * Path has become available. Allocate a new channel path
	 * structure, if needed.
	 */
	if (get_chp_status(chpid) < 0)
		new_channel_path(chpid);
	/* Avoid the extra overhead in process_rec_acc. */
	return chp_add(chpid);
}

static int check_for_io_on_path(struct subchannel *sch, int index)
{
	int cc;

	cc = stsch(sch->schid, &sch->schib);
	if (cc)
		return 0;
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
		return 1;
	return 0;
}

static void terminate_internal_io(struct subchannel *sch)
{
	if (cio_clear(sch)) {
		/* Recheck device in case clear failed. */
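		/* No usable path is left for now; let path verification or
		 * the slow-path evaluation sort the device out. */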
		sch->lpm = 0;
		if (device_trigger_verify(sch) != 0) {
			if (css_enqueue_subchannel_slow(sch->schid)) {
				css_clear_subchannel_slow_list();
				need_rescan = 1;
			}
		}
		return;
	}
	/* Request retry of internal operation. */
	device_set_intretry(sch);
	/* Call handler. */
	if (sch->driver && sch->driver->termination)
		sch->driver->termination(&sch->dev);
}

static void
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
{
	int chp, old_lpm;
	unsigned long flags;

	if (!sch->ssd_info.valid)
		return;

	spin_lock_irqsave(sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		if (sch->ssd_info.chpid[chp] != chpid)
			continue;

		if (on) {
			sch->opm |= (0x80 >> chp);
			sch->lpm |= (0x80 >> chp);
			if (!old_lpm)
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
			break;
		}
		sch->opm &= ~(0x80 >> chp);
		sch->lpm &= ~(0x80 >> chp);
		if (check_for_io_on_path(sch, chp)) {
			if (device_is_online(sch))
				/* Path verification is done after killing. */
				device_kill_io(sch);
			else
				/* Kill and retry internal I/O. */
				terminate_internal_io(sch);
		} else if (!sch->lpm) {
			if (device_trigger_verify(sch) != 0) {
				if (css_enqueue_subchannel_slow(sch->schid)) {
					css_clear_subchannel_slow_list();
					need_rescan = 1;
				}
			}
		} else if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
}

static int
s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int
s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;
	struct subchannel *sch;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		put_device(&sch->dev);
		return 0;
	}
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	if (css_enqueue_subchannel_slow(schid)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}

/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
static int
s390_vary_chpid( __u8 chpid, int on)
{
	char dbf_text[15];
	int status;

	sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
	CIO_TRACE_EVENT( 2, dbf_text);

	status = get_chp_status(chpid);
	if (status < 0) {
		printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
		return -EINVAL;
	}

	if (!on && !status) {
		printk(KERN_ERR "chpid %x is already offline\n", chpid);
		return -EINVAL;
	}

	set_chp_logically_online(chpid, on);

	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */

	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
			 s390_subchannel_vary_chpid_on :
			 s390_subchannel_vary_chpid_off);
	if (on)
		/* Scan for new devices on varied on path. */
		for_each_subchannel(__s390_vary_chpid_on, NULL);
	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	return 0;
}

/*
 * Channel measurement related functions
 */
static ssize_t
chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off,
			   size_t count)
{
	struct channel_path *chp;
	unsigned int size;

	chp = to_channelpath(container_of(kobj, struct device, kobj));
	if (!chp->cmg_chars)
		return 0;

	size = sizeof(struct cmg_chars);

	if (off > size)
		return 0;
	if (off + count > size)
		count = size - off;
	memcpy(buf, chp->cmg_chars + off, count);
	return count;
}

static struct bin_attribute chp_measurement_chars_attr = {
	.attr = {
		.name = "measurement_chars",
		.mode = S_IRUSR,
		.owner = THIS_MODULE,
	},
	.size = sizeof(struct cmg_chars),
	.read = chp_measurement_chars_read,
};

static void
chp_measurement_copy_block(struct cmg_entry *buf,
			   struct channel_subsystem *css, int chpid)
{
	void *area;
	struct cmg_entry *entry, reference_buf;
	int idx;

	if (chpid < 128) {
		area = css->cub_addr1;
		idx = chpid;
	} else {
		area = css->cub_addr2;
		idx = chpid - 128;
	}
	entry = area + (idx * sizeof(struct cmg_entry));
	do {
		memcpy(buf, entry, sizeof(*entry));
		memcpy(&reference_buf, entry, sizeof(*entry));
	} while (reference_buf.values[0] != buf->values[0]);
}

static ssize_t
chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
{
	struct channel_path *chp;
	struct channel_subsystem *css;
	unsigned int size;

	chp = to_channelpath(container_of(kobj, struct device, kobj));
	css = to_css(chp->dev.parent);

	size = sizeof(struct cmg_entry);

	/* Only allow single reads. */
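	/*
	 * A partial read could hand back a torn measurement block;
	 * chp_measurement_copy_block() re-reads the entry until two copies
	 * agree, so serve only complete, single-shot reads.
	 */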
	if (off || count < size)
		return 0;
	chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id);
	count = size;
	return count;
}

static struct bin_attribute chp_measurement_attr = {
	.attr = {
		.name = "measurement",
		.mode = S_IRUSR,
		.owner = THIS_MODULE,
	},
	.size = sizeof(struct cmg_entry),
	.read = chp_measurement_read,
};

static void
chsc_remove_chp_cmg_attr(struct channel_path *chp)
{
	device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
	device_remove_bin_file(&chp->dev, &chp_measurement_attr);
}

static int
chsc_add_chp_cmg_attr(struct channel_path *chp)
{
	int ret;

	ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
	if (ret)
		return ret;
	ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
	if (ret)
		device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
	return ret;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chsc_remove_chp_cmg_attr(css->chps[i]);
	}
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chsc_add_chp_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chsc_remove_chp_cmg_attr(css->chps[i]);
	}
	return ret;
}


static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	secm_area = page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
	case 0x0001: /* Success. */
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide secm\n");
		ret = -EOPNOTSUPP;
		break;
	case 0x0102: /* cub addresses incorrect */
		CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
		ret = -EINVAL;
		break;
	case 0x0103: /* key error */
		CIO_CRW_EVENT(2, "Access key error in secm\n");
		ret = -EINVAL;
		break;
	case 0x0105: /* error while starting */
		CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
		ret = -EIO;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      secm_area->response.code);
		ret = -EIO;
	}
	return ret;
}

int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	mutex_lock(&css->mutex);
	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			mutex_unlock(&css->mutex);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (enable && !css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	mutex_unlock(&css->mutex);
	free_page((unsigned long)secm_area);
	return ret;
}

/*
 * Files for the channel path entries.
 */
static ssize_t
chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
		sprintf(buf, "offline\n"));
}

static ssize_t
chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct channel_path *cp = container_of(dev, struct channel_path, dev);
	char cmd[10];
	int num_args;
	int error;

	num_args = sscanf(buf, "%5s", cmd);
	if (!num_args)
		return count;

	if (!strnicmp(cmd, "on", 2))
		error = s390_vary_chpid(cp->id, 1);
	else if (!strnicmp(cmd, "off", 3))
		error = s390_vary_chpid(cp->id, 0);
	else
		error = -EINVAL;

	return error < 0 ? error : count;

}

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);

static ssize_t
chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return sprintf(buf, "%x\n", chp->desc.desc);
}

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

static ssize_t
chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->cmg == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->cmg);
}

static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);

static ssize_t
chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->shared == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->shared);
}

static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);

static struct attribute * chp_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_type.attr,
	&dev_attr_cmg.attr,
	&dev_attr_shared.attr,
	NULL,
};

static struct attribute_group chp_attr_group = {
	.attrs = chp_attrs,
};

static void
chp_release(struct device *dev)
{
	struct channel_path *cp;

	cp = container_of(dev, struct channel_path, dev);
	kfree(cp);
}

static int
chsc_determine_channel_path_description(int chpid,
					struct channel_path_desc *desc)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} __attribute__ ((packed)) *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	scpd_area->first_chpid = chpid;
	scpd_area->last_chpid = chpid;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scpd_area->response.code) {
	case 0x0001: /* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scpd_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scpd_area);
	return ret;
}

static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	switch (chp->cmg) {
	case 2:
	case 3:
		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
					 GFP_KERNEL);
		if (chp->cmg_chars) {
			int i, mask;
			struct cmg_chars *cmg_chars;

			cmg_chars = chp->cmg_chars;
			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
				mask = 0x80 >> (i + 3);
				if (cmcv & mask)
					cmg_chars->values[i] = chars->values[i];
				else
					cmg_chars->values[i] = 0;
			}
		}
		break;
	default:
		/* No cmg-dependent data. */
		break;
	}
}

static int
chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	scmc_area->first_chpid = chp->id;
	scmc_area->last_chpid = chp->id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scmc_area->response.code) {
	case 0x0001: /* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			chp->cmg = -1;
			chp->shared = -1;
		}
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Invalid bit combination. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided. */
		CIO_CRW_EVENT(2, "Model does not provide scmc\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scmc_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}

/*
 * Entries for chpids on the system bus.
 * This replaces /proc/chpids.
 */
static int
new_channel_path(int chpid)
{
	struct channel_path *chp;
	int ret;

	chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
	if (!chp)
		return -ENOMEM;

	/* fill in status, etc. */
	chp->id = chpid;
	chp->state = 1;
	chp->dev.parent = &css[0]->device;
	chp->dev.release = chp_release;
	snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);

	/* Obtain channel path description and fill it in. */
	ret = chsc_determine_channel_path_description(chpid, &chp->desc);
	if (ret)
		goto out_free;
	/* Get channel-measurement characteristics. */
	if (css_characteristics_avail && css_chsc_characteristics.scmc
	    && css_chsc_characteristics.secm) {
		ret = chsc_get_channel_measurement_chars(chp);
		if (ret)
			goto out_free;
	} else {
		static int msg_done;

		if (!msg_done) {
			printk(KERN_WARNING "cio: Channel measurements not "
			       "available, continuing.\n");
			msg_done = 1;
		}
		chp->cmg = -1;
	}

	/* make it known to the system */
	ret = device_register(&chp->dev);
	if (ret) {
		printk(KERN_WARNING "%s: could not register %02x\n",
		       __func__, chpid);
		goto out_free;
	}
	ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
	if (ret) {
		device_unregister(&chp->dev);
		goto out_free;
	}
	mutex_lock(&css[0]->mutex);
	if (css[0]->cm_enabled) {
		ret = chsc_add_chp_cmg_attr(chp);
		if (ret) {
			sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
			device_unregister(&chp->dev);
			mutex_unlock(&css[0]->mutex);
			goto out_free;
		}
	}
	css[0]->chps[chpid] = chp;
	mutex_unlock(&css[0]->mutex);
	return ret;
out_free:
	kfree(chp);
	return ret;
}

void *
chsc_get_chp_desc(struct subchannel *sch, int chp_no)
{
	struct channel_path *chp;
	struct channel_path_desc *desc;

	chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
	if (!chp)
		return NULL;
	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
	if (!desc)
		return NULL;
	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
	return desc;
}

static int __init
chsc_alloc_sei_area(void)
{
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page)
		printk(KERN_WARNING"Can't allocate page for processing of " \
		       "chsc machine checks!\n");
	return (sei_page ? 0 : -ENOMEM);
}

int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	switch (sda_area->response.code) {
	case 0x0001: /* everything ok */
		ret = 0;
		break;
	case 0x0003: /* invalid request block */
	case 0x0007:
		ret = -EINVAL;
		break;
	case 0x0004: /* command not provided */
	case 0x0101: /* facility not provided */
		ret = -EOPNOTSUPP;
		break;
	default: /* something went wrong */
		ret = -EIO;
	}
out:
	free_page((unsigned long)sda_area);
	return ret;
}

subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} __attribute__ ((packed)) *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area) {
		printk(KERN_WARNING"cio: Was not able to determine available " \
		       "CHSCs due to no memory.\n");
		return -ENOMEM;
	}

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		printk(KERN_WARNING"cio: Was not able to determine " \
		       "available CHSCs, cc=%i.\n", result);
		result = -EIO;
		goto exit;
	}

	if (scsc_area->response.code != 1) {
		printk(KERN_WARNING"cio: Was not able to determine " \
		       "available CHSCs.\n");
		result = -EIO;
		goto exit;
	}
	memcpy(&css_general_characteristics, scsc_area->general_char,
	       sizeof(css_general_characteristics));
	memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
	       sizeof(css_chsc_characteristics));
exit:
	free_page ((unsigned long) scsc_area);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);