/*
 * drivers/s390/cio/chsc.c
 * S/390 common I/O routines -- channel subsystem call
 *
 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *			   IBM Corporation
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"

static void *sei_page;

static int new_channel_path(int chpid);

static inline void
set_chp_logically_online(int chp, int onoff)
{
	css[0]->chps[chp]->state = onoff;
}

static int
get_chp_status(int chp)
{
	return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
}

void
chsc_validate_chpids(struct subchannel *sch)
{
	int mask, chp;

	for (chp = 0; chp <= 7; chp++) {
		mask = 0x80 >> chp;
		if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
			/* disable using this path */
			sch->opm &= ~mask;
	}
}

void
chpid_is_actually_online(int chp)
{
	int state;

	state = get_chp_status(chp);
	if (state < 0) {
		need_rescan = 1;
		queue_work(slow_path_wq, &slow_path_work);
	} else
		WARN_ON(!state);
}

/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *	  process more than one at a time? */
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
	int ccode, j;

	struct {
		struct chsc_header request;
		u16 reserved1a:10;
		u16 ssid:2;
		u16 reserved1b:4;
		u16 f_sch;	  /* first subchannel */
		u16 reserved2;
		u16 l_sch;	  /* last subchannel */
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8 sch_valid : 1;
		u8 dev_valid : 1;
		u8 st	     : 3; /* subchannel type */
		u8 zeroes    : 3;
		u8 unit_addr;	  /* unit address */
		u16 devno;	  /* device number */
		u8 path_mask;
		u8 fla_valid_mask;
		u16 sch;	  /* subchannel */
		u8 chpid[8];	  /* chpids 0-7 */
		u16 fla[8];	  /* full link addresses 0-7 */
	} *ssd_area;

	ssd_area = page;

	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;

	ssd_area->ssid = sch->schid.ssid;
	ssd_area->f_sch = sch->schid.sch_no;
	ssd_area->l_sch = sch->schid.sch_no;

	ccode = chsc(ssd_area);
	if (ccode > 0) {
		pr_debug("chsc returned with ccode = %d\n", ccode);
		return (ccode == 3) ? -ENODEV : -EBUSY;
	}

	switch (ssd_area->response.code) {
	case 0x0001: /* everything ok */
		break;
	case 0x0002:
		CIO_CRW_EVENT(2, "Invalid command!\n");
		return -EINVAL;
	case 0x0003:
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		return -EINVAL;
	case 0x0004:
		CIO_CRW_EVENT(2, "Model does not provide ssd\n");
		return -EOPNOTSUPP;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      ssd_area->response.code);
		return -EIO;
	}

	/*
	 * ssd_area->st stores the type of the detected
	 * subchannel, with the following definitions:
	 *
	 * 0: I/O subchannel:	  All fields have meaning
	 * 1: CHSC subchannel:	  Only sch_val, st and sch
	 *			  have meaning
	 * 2: Message subchannel: All fields except unit_addr
	 *			  have meaning
	 * 3: ADM subchannel:	  Only sch_val, st and sch
	 *			  have meaning
	 *
	 * Other types are currently undefined.
	 */
	if (ssd_area->st > 3) { /* uhm, that looks strange... */
		CIO_CRW_EVENT(0, "Strange subchannel type %d"
			      " for sch 0.%x.%04x\n", ssd_area->st,
			      sch->schid.ssid, sch->schid.sch_no);
		/*
		 * There may have been a new subchannel type defined in the
		 * time since this code was written; since we don't know which
		 * fields have meaning and what to do with them we just jump
		 * out.
		 */
		return 0;
	} else {
		const char *type[4] = {"I/O", "chsc", "message", "ADM"};
		CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
			      sch->schid.ssid, sch->schid.sch_no,
			      type[ssd_area->st]);

		sch->ssd_info.valid = 1;
		sch->ssd_info.type = ssd_area->st;
	}

	if (ssd_area->st == 0 || ssd_area->st == 2) {
		for (j = 0; j < 8; j++) {
			if (!((0x80 >> j) & ssd_area->path_mask &
			      ssd_area->fla_valid_mask))
				continue;
			sch->ssd_info.chpid[j] = ssd_area->chpid[j];
			sch->ssd_info.fla[j] = ssd_area->fla[j];
		}
	}
	return 0;
}
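
/*
 * Note on the CHSC calling convention used throughout this file (a
 * reader's summary of the pattern above, not an additional interface):
 * each caller fills a zeroed 4K control block that starts with a
 * struct chsc_header {length, code}, issues chsc(), and first checks
 * the condition code returned by the instruction (0: command
 * performed; 3: not operational, mapped to -ENODEV; other non-zero
 * values mapped to -EBUSY). Only then is response.code examined,
 * where 0x0001 means success and the remaining codes are
 * command-specific error indications.
 */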

int
css_get_ssd_info(struct subchannel *sch)
{
	int ret;
	void *page;

	page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	spin_lock_irq(&sch->lock);
	ret = chsc_get_sch_desc_irq(sch, page);
	if (ret) {
		static int cio_chsc_err_msg;

		if (!cio_chsc_err_msg) {
			printk(KERN_ERR
			       "chsc_get_sch_descriptions:"
			       " Error %d while doing chsc; "
			       "processing some machine checks may "
			       "not work\n", ret);
			cio_chsc_err_msg = 1;
		}
	}
	spin_unlock_irq(&sch->lock);
	free_page((unsigned long)page);
	if (!ret) {
		int j, chpid;
		/* Allocate channel path structures, if needed. */
		for (j = 0; j < 8; j++) {
			chpid = sch->ssd_info.chpid[j];
			if (chpid && (get_chp_status(chpid) < 0))
				new_channel_path(chpid);
		}
	}
	return ret;
}

static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
	int j;
	int mask;
	struct subchannel *sch;
	struct channel_path *chpid;
	struct schib schib;

	sch = to_subchannel(dev);
	chpid = data;
	for (j = 0; j < 8; j++)
		if (sch->schib.pmcw.chpid[j] == chpid->id)
			break;
	if (j >= 8)
		return 0;

	mask = 0x80 >> j;
	spin_lock_irq(&sch->lock);

	stsch(sch->schid, &schib);
	if (!schib.pmcw.dnv)
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;

	if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
	    (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
	    (sch->schib.pmcw.lpum == mask)) {
		int cc;

		cc = cio_clear(sch);
		if (cc == -ENODEV)
			goto out_unreg;
		/* Call handler. */
		if (sch->driver && sch->driver->termination)
			sch->driver->termination(&sch->dev);
		goto out_unlock;
	}

	/* trigger path verification. */
	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);
	else if (sch->lpm == mask)
		goto out_unreg;
out_unlock:
	spin_unlock_irq(&sch->lock);
	return 0;
out_unreg:
	spin_unlock_irq(&sch->lock);
	sch->lpm = 0;
	if (css_enqueue_subchannel_slow(sch->schid)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
	}
	return 0;
}
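
/*
 * Rough call flow when a channel path disappears, as implemented
 * below (a reader's aid, not a new interface):
 *
 *   machine check handler
 *     -> chp_process_crw(chpid, 0)
 *          -> s390_set_chpid_offline(chpid)
 *               -> bus_for_each_dev(..., s390_subchannel_remove_chpid)
 *
 * i.e. every subchannel using the chpid is either terminated, asked
 * to re-verify its paths, or queued for de-registration on the slow
 * path.
 */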

static inline void
s390_set_chpid_offline(__u8 chpid)
{
	char dbf_txt[15];
	struct device *dev;

	sprintf(dbf_txt, "chpr%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (get_chp_status(chpid) <= 0)
		return;
	dev = get_device(&css[0]->chps[chpid]->dev);
	bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
			 s390_subchannel_remove_chpid);

	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	put_device(dev);
}

/*
 * Data for a resource-accessibility event: the channel path it
 * concerns, plus an optional (possibly partial) link address filter.
 */
struct res_acc_data {
	struct channel_path *chp;
	u32 fla_mask;
	u16 fla;
};

static int
s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
{
	int found;
	int chp;
	int ccode;

	found = 0;
	for (chp = 0; chp <= 7; chp++)
		/*
		 * check if chpid is in information updated by ssd
		 */
		if (sch->ssd_info.valid &&
		    sch->ssd_info.chpid[chp] == res_data->chp->id &&
		    (sch->ssd_info.fla[chp] & res_data->fla_mask)
		    == res_data->fla) {
			found = 1;
			break;
		}

	if (found == 0)
		return 0;

	/*
	 * Do a stsch to update our subchannel structure with the
	 * new path information and eventually check for logically
	 * offline chpids.
	 */
	ccode = stsch(sch->schid, &sch->schib);
	if (ccode > 0)
		return 0;

	return 0x80 >> chp;
}

static inline int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	int ret;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
	if (ret) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}

static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
	int chp_mask, old_lpm;
	struct res_acc_data *res_data;
	struct subchannel *sch;

	res_data = (struct res_acc_data *)data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if a subchannel is newly available. */
		return s390_process_res_acc_new_sch(schid);

	spin_lock_irq(&sch->lock);

	chp_mask = s390_process_res_acc_sch(res_data, sch);

	if (chp_mask == 0) {
		spin_unlock_irq(&sch->lock);
		put_device(&sch->dev);
		return 0;
	}
	old_lpm = sch->lpm;
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | chp_mask) & sch->opm;
	if (!old_lpm && sch->lpm)
		device_trigger_reprobe(sch);
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(&sch->lock);
	put_device(&sch->dev);
	return 0;
}


static int
s390_process_res_acc (struct res_acc_data *res_data)
{
	int rc;
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x", res_data->chp->id);
	CIO_TRACE_EVENT( 2, dbf_txt);
	if (res_data->fla != 0) {
		sprintf(dbf_txt, "fla%x", res_data->fla);
		CIO_TRACE_EVENT( 2, dbf_txt);
	}

	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have, the less scanning
	 * we will have to do.
	 */
	rc = for_each_subchannel(__s390_process_res_acc, res_data);
	if (css_slow_subchannels_exist())
		rc = -EAGAIN;
	else if (rc != -EAGAIN)
		rc = 0;
	return rc;
}

static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} *lir;

	lir = (struct lir*) data;
	if (!(lir->iq&0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0]&0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0]&0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0]&0x000000ff);
}
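
/*
 * Worked example (illustrative values only) for the fla/fla_mask
 * matching used above and set up in chsc_process_crw() below: if the
 * store-event-information block reports vf 0xc0, the full link
 * address is valid, fla_mask is set to 0xffff and a subchannel
 * matches only if its ssd fla equals sei_area->fla exactly. If vf is
 * 0x80, only the link address part is valid, fla_mask is 0xff00 and
 * solely the upper byte takes part in the comparison, so any
 * subchannel on the same link matches. With vf 0x00, fla and
 * fla_mask stay zero and every subchannel using the chpid matches.
 */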

int
chsc_process_crw(void)
{
	int chpid, ret;
	struct res_acc_data res_data;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8  flags;
		u8  vf;		/* validity flags */
		u8  rs;		/* reporting source */
		u8  cc;		/* content code */
		u16 fla;	/* full link address */
		u16 rsid;	/* reporting source id */
		u32 reserved5;
		u32 reserved6;
		u32 ccdf[96];	/* content-code dependent field */
		/* ccdf has to be big enough for a link-incident record */
	} *sei_area;

	if (!sei_page)
		return 0;
	/*
	 * Build the chsc request block for store event information
	 * and do the call.
	 * This function is only called by the machine check handler thread,
	 * so we don't need locking for the sei_page.
	 */
	sei_area = sei_page;

	CIO_TRACE_EVENT( 2, "prcss");
	ret = 0;
	do {
		int ccode, status;
		struct device *dev;
		memset(sei_area, 0, sizeof(*sei_area));
		memset(&res_data, 0, sizeof(struct res_acc_data));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;

		ccode = chsc(sei_area);
		if (ccode > 0)
			return 0;

		switch (sei_area->response.code) {
			/* for debug purposes, check for problems */
		case 0x0001:
			CIO_CRW_EVENT(4, "chsc_process_crw: event information "
				      "successfully stored\n");
			break; /* everything ok */
		case 0x0002:
			CIO_CRW_EVENT(2,
				      "chsc_process_crw: invalid command!\n");
			return 0;
		case 0x0003:
			CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
				      "request block!\n");
			return 0;
		case 0x0005:
			CIO_CRW_EVENT(2, "chsc_process_crw: no event "
				      "information stored\n");
			return 0;
		default:
			CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
				      sei_area->response.code);
			return 0;
		}

		/* Check if we might have lost some information. */
		if (sei_area->flags & 0x40)
			CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
				      "has been lost due to overflow!\n");

		if (sei_area->rs != 4) {
			CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
				      "(%04X) isn't a chpid!\n",
				      sei_area->rsid);
			continue;
		}

		/* which kind of information was stored? */
		switch (sei_area->cc) {
		case 1: /* link incident */
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports link incident,"
				      " reporting source is chpid %x\n",
				      sei_area->rsid);
			chpid = __get_chpid_from_lir(sei_area->ccdf);
			if (chpid < 0)
				CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
					      __FUNCTION__);
			else
				s390_set_chpid_offline(chpid);
			break;

		case 2: /* i/o resource accessibility */
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports some I/O "
				      "devices may have become accessible\n");
			pr_debug("Data received after sei:\n");
			pr_debug("Validity flags: %x\n", sei_area->vf);

			/* allocate a new channel path structure, if needed */
			status = get_chp_status(sei_area->rsid);
			if (status < 0)
				new_channel_path(sei_area->rsid);
			else if (!status)
				break;
			dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
			res_data.chp = to_channelpath(dev);
			pr_debug("chpid: %x", sei_area->rsid);
			if ((sei_area->vf & 0xc0) != 0) {
				res_data.fla = sei_area->fla;
				if ((sei_area->vf & 0xc0) == 0xc0) {
					pr_debug(" full link addr: %x",
						 sei_area->fla);
					res_data.fla_mask = 0xffff;
				} else {
					pr_debug(" link addr: %x",
						 sei_area->fla);
					res_data.fla_mask = 0xff00;
				}
			}
			ret = s390_process_res_acc(&res_data);
			pr_debug("\n\n");
			put_device(dev);
			break;

		default: /* other stuff */
			CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
				      sei_area->cc);
			break;
		}
		/* flags & 0x80: more event information pending */
	} while (sei_area->flags & 0x80);
	return ret;
}

static inline int
__chp_add_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	int ret;

	if (stsch(schid, &schib))
		/* We're through */
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
	if (ret) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}


static int
__chp_add(struct subchannel_id schid, void *data)
{
	int i;
	struct channel_path *chp;
	struct subchannel *sch;

	chp = (struct channel_path *)data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if the subchannel is now available. */
		return __chp_add_new_sch(schid);
	spin_lock_irq(&sch->lock);
	for (i=0; i<8; i++)
		if (sch->schib.pmcw.chpid[i] == chp->id) {
			if (stsch(sch->schid, &sch->schib) != 0) {
				/* Endgame. */
				spin_unlock_irq(&sch->lock);
				/* Drop the get_subchannel_by_schid() ref. */
				put_device(&sch->dev);
				return -ENXIO;
			}
			break;
		}
	if (i==8) {
		spin_unlock_irq(&sch->lock);
		put_device(&sch->dev);
		return 0;
	}
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | 0x80 >> i) & sch->opm;

	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(&sch->lock);
	put_device(&sch->dev);
	return 0;
}

static int
chp_add(int chpid)
{
	int rc;
	char dbf_txt[15];
	struct device *dev;

	if (!get_chp_status(chpid))
		return 0; /* no need to do the rest */

	sprintf(dbf_txt, "cadd%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	dev = get_device(&css[0]->chps[chpid]->dev);
	rc = for_each_subchannel(__chp_add, to_channelpath(dev));
	if (css_slow_subchannels_exist())
		rc = -EAGAIN;
	if (rc != -EAGAIN)
		rc = 0;
	put_device(dev);
	return rc;
}

/*
 * Handling of crw machine checks with channel path source.
 */
int
chp_process_crw(int chpid, int on)
{
	if (on == 0) {
		/* Path has gone. We use the link incident routine.*/
		s390_set_chpid_offline(chpid);
		return 0; /* De-register is async anyway. */
	}
	/*
	 * Path has come. Allocate a new channel path structure,
	 * if needed.
	 */
	if (get_chp_status(chpid) < 0)
		new_channel_path(chpid);
	/* Avoid the extra overhead in process_rec_acc. */
	return chp_add(chpid);
}

static inline int
__check_for_io_and_kill(struct subchannel *sch, int index)
{
	int cc;

	if (!device_is_online(sch))
		/* cio could be doing I/O. */
		return 0;
	cc = stsch(sch->schid, &sch->schib);
	if (cc)
		return 0;
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
		device_set_waiting(sch);
		return 1;
	}
	return 0;
}

static inline void
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
{
	int chp, old_lpm;
	unsigned long flags;

	if (!sch->ssd_info.valid)
		return;

	spin_lock_irqsave(&sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		if (sch->ssd_info.chpid[chp] != chpid)
			continue;

		if (on) {
			sch->opm |= (0x80 >> chp);
			sch->lpm |= (0x80 >> chp);
			if (!old_lpm)
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		} else {
			sch->opm &= ~(0x80 >> chp);
			sch->lpm &= ~(0x80 >> chp);
			/*
			 * Give running I/O a grace period in which it
			 * can successfully terminate, even using the
			 * just varied off path. Then kill it.
			 */
			if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
				if (css_enqueue_subchannel_slow(sch->schid)) {
					css_clear_subchannel_slow_list();
					need_rescan = 1;
				}
			} else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		}
		break;
	}
	spin_unlock_irqrestore(&sch->lock, flags);
}

static int
s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int
s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;
	struct subchannel *sch;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		put_device(&sch->dev);
		return 0;
	}
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	if (css_enqueue_subchannel_slow(schid)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}

/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
static int
s390_vary_chpid( __u8 chpid, int on)
{
	char dbf_text[15];
	int status;

	sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
	CIO_TRACE_EVENT( 2, dbf_text);

	status = get_chp_status(chpid);
	if (status < 0) {
		printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
		return -EINVAL;
	}

	if (!on && !status) {
		printk(KERN_ERR "chpid %x is already offline\n", chpid);
		return -EINVAL;
	}

	set_chp_logically_online(chpid, on);

	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */

	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
			 s390_subchannel_vary_chpid_on :
			 s390_subchannel_vary_chpid_off);
	if (on)
		/* Scan for new devices on varied on path. */
		for_each_subchannel(__s390_vary_chpid_on, NULL);
	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	return 0;
}
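
/*
 * Channel measurement overview (summarizing the code below): when the
 * machine supports it (secm/scmc, see chsc_secm() and
 * chsc_get_channel_measurement_chars()), two read-only binary sysfs
 * files are attached to each channel path object: "measurement_chars"
 * with the static measurement characteristics, and "measurement" with
 * the current measurement block, which is re-read until a consistent
 * snapshot is obtained (see chp_measurement_copy_block()).
 */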

/*
 * Channel measurement related functions
 */
static ssize_t
chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off,
			   size_t count)
{
	struct channel_path *chp;
	unsigned int size;

	chp = to_channelpath(container_of(kobj, struct device, kobj));
	if (!chp->cmg_chars)
		return 0;

	size = sizeof(struct cmg_chars);

	if (off > size)
		return 0;
	if (off + count > size)
		count = size - off;
	memcpy(buf, chp->cmg_chars + off, count);
	return count;
}

static struct bin_attribute chp_measurement_chars_attr = {
	.attr = {
		.name = "measurement_chars",
		.mode = S_IRUSR,
		.owner = THIS_MODULE,
	},
	.size = sizeof(struct cmg_chars),
	.read = chp_measurement_chars_read,
};

static void
chp_measurement_copy_block(struct cmg_entry *buf,
			   struct channel_subsystem *css, int chpid)
{
	void *area;
	struct cmg_entry *entry, reference_buf;
	int idx;

	if (chpid < 128) {
		area = css->cub_addr1;
		idx = chpid;
	} else {
		area = css->cub_addr2;
		idx = chpid - 128;
	}
	entry = area + (idx * sizeof(struct cmg_entry));
	/*
	 * The block may be updated concurrently; copy it twice and
	 * retry until both copies agree on the first word.
	 */
	do {
		memcpy(buf, entry, sizeof(*entry));
		memcpy(&reference_buf, entry, sizeof(*entry));
	} while (reference_buf.values[0] != buf->values[0]);
}
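
/*
 * Userspace is expected to fetch the measurement block in a single
 * read() of at least sizeof(struct cmg_entry) bytes; partial reads
 * return 0 (see chp_measurement_read() below). A hypothetical
 * example, assuming chpid 0x4a and the usual css bus layout under
 * /sys/devices:
 *
 *	int fd = open("/sys/devices/css0/chp0.4a/measurement", O_RDONLY);
 *	struct cmg_entry entry;
 *	if (read(fd, &entry, sizeof(entry)) == sizeof(entry))
 *		... evaluate entry.values ...
 */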

static ssize_t
chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
{
	struct channel_path *chp;
	struct channel_subsystem *css;
	unsigned int size;

	chp = to_channelpath(container_of(kobj, struct device, kobj));
	css = to_css(chp->dev.parent);

	size = sizeof(struct cmg_entry);

	/* Only allow single reads. */
	if (off || count < size)
		return 0;
	chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id);
	count = size;
	return count;
}

static struct bin_attribute chp_measurement_attr = {
	.attr = {
		.name = "measurement",
		.mode = S_IRUSR,
		.owner = THIS_MODULE,
	},
	.size = sizeof(struct cmg_entry),
	.read = chp_measurement_read,
};

static void
chsc_remove_chp_cmg_attr(struct channel_path *chp)
{
	sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_chars_attr);
	sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_attr);
}

static int
chsc_add_chp_cmg_attr(struct channel_path *chp)
{
	int ret;

	ret = sysfs_create_bin_file(&chp->dev.kobj,
				    &chp_measurement_chars_attr);
	if (ret)
		return ret;
	ret = sysfs_create_bin_file(&chp->dev.kobj, &chp_measurement_attr);
	if (ret)
		sysfs_remove_bin_file(&chp->dev.kobj,
				      &chp_measurement_chars_attr);
	return ret;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chsc_remove_chp_cmg_attr(css->chps[i]);
	}
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chsc_add_chp_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chsc_remove_chp_cmg_attr(css->chps[i]);
	}
	return ret;
}


static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} *secm_area;
	int ret, ccode;

	secm_area = page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
	case 0x0001: /* Success. */
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide secm\n");
		ret = -EOPNOTSUPP;
		break;
	case 0x0102: /* cub addresses incorrect */
		CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
		ret = -EINVAL;
		break;
	case 0x0103: /* key error */
		CIO_CRW_EVENT(2, "Access key error in secm\n");
		ret = -EINVAL;
		break;
	case 0x0105: /* error while starting */
		CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
		ret = -EIO;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      secm_area->response.code);
		ret = -EIO;
	}
	return ret;
}

int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	mutex_lock(&css->mutex);
	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			mutex_unlock(&css->mutex);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (enable && !css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	mutex_unlock(&css->mutex);
	free_page((unsigned long)secm_area);
	return ret;
}

/*
 * Files for the channel path entries.
 */
static ssize_t
chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
		sprintf(buf, "offline\n"));
}

static ssize_t
chp_status_write(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct channel_path *cp = container_of(dev, struct channel_path, dev);
	char cmd[10];
	int num_args;
	int error;

	num_args = sscanf(buf, "%5s", cmd);
	if (!num_args)
		return count;

	if (!strnicmp(cmd, "on", 2))
		error = s390_vary_chpid(cp->id, 1);
	else if (!strnicmp(cmd, "off", 3))
		error = s390_vary_chpid(cp->id, 0);
	else
		error = -EINVAL;

	return error < 0 ? error : count;
}

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
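
/*
 * The status attribute is also writable; a hypothetical shell session
 * (chpid 0x4a, assuming the usual css bus layout under /sys/devices):
 *
 *	echo on  > /sys/devices/css0/chp0.4a/status	(vary online)
 *	echo off > /sys/devices/css0/chp0.4a/status	(vary offline)
 *
 * Anything else written to the file yields -EINVAL.
 */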

static ssize_t
chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return sprintf(buf, "%x\n", chp->desc.desc);
}

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

static ssize_t
chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->cmg == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->cmg);
}

static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);

static ssize_t
chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->shared == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->shared);
}

static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);

static struct attribute * chp_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_type.attr,
	&dev_attr_cmg.attr,
	&dev_attr_shared.attr,
	NULL,
};

static struct attribute_group chp_attr_group = {
	.attrs = chp_attrs,
};

static void
chp_release(struct device *dev)
{
	struct channel_path *cp;

	cp = container_of(dev, struct channel_path, dev);
	kfree(cp);
}

static int
chsc_determine_channel_path_description(int chpid,
					struct channel_path_desc *desc)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	scpd_area->first_chpid = chpid;
	scpd_area->last_chpid = chpid;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scpd_area->response.code) {
	case 0x0001: /* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scpd_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scpd_area);
	return ret;
}

static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	switch (chp->cmg) {
	case 2:
	case 3:
		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
					 GFP_KERNEL);
		if (chp->cmg_chars) {
			int i, mask;
			struct cmg_chars *cmg_chars;

			cmg_chars = chp->cmg_chars;
			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
				/* check the validity bit for value i */
				mask = 0x80 >> (i + 3);
				if (cmcv & mask)
					cmg_chars->values[i] = chars->values[i];
				else
					cmg_chars->values[i] = 0;
			}
		}
		break;
	default:
		/* No cmg-dependent data. */
		break;
	}
}

static int
chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	scmc_area->first_chpid = chp->id;
	scmc_area->last_chpid = chp->id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scmc_area->response.code) {
	case 0x0001: /* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			chp->cmg = -1;
			chp->shared = -1;
		}
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Invalid bit combination. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided. */
		CIO_CRW_EVENT(2, "Model does not provide scmc\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scmc_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}
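
/*
 * Summary of what new_channel_path() below does for each freshly
 * detected chpid (a reader's aid): allocate the channel_path, obtain
 * the path description (scpd) and, if the model supports it, the
 * measurement characteristics (scmc), register the device under
 * css0, and finally attach the sysfs attribute group, plus the
 * measurement files when channel measurement is already enabled.
 */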

/*
 * Entries for chpids on the system bus.
 * This replaces /proc/chpids.
 */
static int
new_channel_path(int chpid)
{
	struct channel_path *chp;
	int ret;

	chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
	if (!chp)
		return -ENOMEM;

	/* fill in status, etc. */
	chp->id = chpid;
	chp->state = 1;
	chp->dev.parent = &css[0]->device;
	chp->dev.release = chp_release;
	snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);

	/* Obtain channel path description and fill it in. */
	ret = chsc_determine_channel_path_description(chpid, &chp->desc);
	if (ret)
		goto out_free;
	/* Get channel-measurement characteristics. */
	if (css_characteristics_avail && css_chsc_characteristics.scmc
	    && css_chsc_characteristics.secm) {
		ret = chsc_get_channel_measurement_chars(chp);
		if (ret)
			goto out_free;
	} else {
		static int msg_done;

		if (!msg_done) {
			printk(KERN_WARNING "cio: Channel measurements not "
			       "available, continuing.\n");
			msg_done = 1;
		}
		chp->cmg = -1;
	}

	/* make it known to the system */
	ret = device_register(&chp->dev);
	if (ret) {
		printk(KERN_WARNING "%s: could not register %02x\n",
		       __func__, chpid);
		goto out_free;
	}
	ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
	if (ret) {
		/*
		 * Once device_register() has succeeded, dropping the last
		 * reference via device_unregister() frees chp through
		 * chp_release(); do not fall through to kfree().
		 */
		device_unregister(&chp->dev);
		return ret;
	}
	mutex_lock(&css[0]->mutex);
	if (css[0]->cm_enabled) {
		ret = chsc_add_chp_cmg_attr(chp);
		if (ret) {
			sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
			device_unregister(&chp->dev);
			mutex_unlock(&css[0]->mutex);
			return ret;
		}
	}
	css[0]->chps[chpid] = chp;
	mutex_unlock(&css[0]->mutex);
	return ret;
out_free:
	kfree(chp);
	return ret;
}

void *
chsc_get_chp_desc(struct subchannel *sch, int chp_no)
{
	struct channel_path *chp;
	struct channel_path_desc *desc;

	chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
	if (!chp)
		return NULL;
	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
	if (!desc)
		return NULL;
	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
	return desc;
}

static int reset_channel_path(struct channel_path *chp)
{
	int cc;

	cc = rchp(chp->id);
	switch (cc) {
	case 0:
		return 0;
	case 2:
		return -EBUSY;
	default:
		return -ENODEV;
	}
}

static void reset_channel_paths_css(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (css->chps[i])
			reset_channel_path(css->chps[i]);
	}
}

void cio_reset_channel_paths(void)
{
	int i;

	for (i = 0; i <= __MAX_CSSID; i++) {
		if (css[i] && css[i]->valid)
			reset_channel_paths_css(css[i]);
	}
}

static int __init
chsc_alloc_sei_area(void)
{
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page)
		printk(KERN_WARNING "Can't allocate page for processing of "
		       "chsc machine checks!\n");
	return (sei_page ? 0 : -ENOMEM);
}

int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	switch (sda_area->response.code) {
	case 0x0001: /* everything ok */
		ret = 0;
		break;
	case 0x0003: /* invalid request block */
	case 0x0007:
		ret = -EINVAL;
		break;
	case 0x0004: /* command not provided */
	case 0x0101: /* facility not provided */
		ret = -EOPNOTSUPP;
		break;
	default: /* something went wrong */
		ret = -EIO;
	}
out:
	free_page((unsigned long)sda_area);
	return ret;
}

subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs due to no memory.\n");
		return -ENOMEM;
	}

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs, cc=%i.\n", result);
		result = -EIO;
		goto exit;
	}

	if (scsc_area->response.code != 1) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs.\n");
		result = -EIO;
		goto exit;
	}
	memcpy(&css_general_characteristics, scsc_area->general_char,
	       sizeof(css_general_characteristics));
	memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
	       sizeof(css_chsc_characteristics));
exit:
	free_page((unsigned long)scsc_area);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);