/*
 * drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *			      IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"

static void *sei_page;

static int new_channel_path(int chpid);

static inline void
set_chp_logically_online(int chp, int onoff)
{
	css[0]->chps[chp]->state = onoff;
}

static int
get_chp_status(int chp)
{
	return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
}

void
chsc_validate_chpids(struct subchannel *sch)
{
	int mask, chp;

	for (chp = 0; chp <= 7; chp++) {
		mask = 0x80 >> chp;
		if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
			/* disable using this path */
			sch->opm &= ~mask;
	}
}

void
chpid_is_actually_online(int chp)
{
	int state;

	state = get_chp_status(chp);
	if (state < 0) {
		need_rescan = 1;
		queue_work(slow_path_wq, &slow_path_work);
	} else
		WARN_ON(!state);
}

/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *	  process more than one at a time? */
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
	int ccode, j;

	struct {
		struct chsc_header request;
		u16 reserved1a:10;
		u16 ssid:2;
		u16 reserved1b:4;
		u16 f_sch;	  /* first subchannel */
		u16 reserved2;
		u16 l_sch;	  /* last subchannel */
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8 sch_valid : 1;
		u8 dev_valid : 1;
		u8 st	     : 3; /* subchannel type */
		u8 zeroes    : 3;
		u8 unit_addr;	  /* unit address */
		u16 devno;	  /* device number */
		u8 path_mask;
		u8 fla_valid_mask;
		u16 sch;	  /* subchannel */
		u8 chpid[8];	  /* chpids 0-7 */
		u16 fla[8];	  /* full link addresses 0-7 */
	} *ssd_area;

	ssd_area = page;

	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;

	ssd_area->ssid = sch->schid.ssid;
	ssd_area->f_sch = sch->schid.sch_no;
	ssd_area->l_sch = sch->schid.sch_no;

	ccode = chsc(ssd_area);
	if (ccode > 0) {
		pr_debug("chsc returned with ccode = %d\n", ccode);
		return (ccode == 3) ? -ENODEV : -EBUSY;
	}

	switch (ssd_area->response.code) {
	case 0x0001: /* everything ok */
		break;
	case 0x0002:
		CIO_CRW_EVENT(2, "Invalid command!\n");
		return -EINVAL;
	case 0x0003:
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		return -EINVAL;
	case 0x0004:
		CIO_CRW_EVENT(2, "Model does not provide ssd\n");
		return -EOPNOTSUPP;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      ssd_area->response.code);
		return -EIO;
	}

	/*
	 * ssd_area->st stores the type of the detected
	 * subchannel, with the following definitions:
	 *
	 * 0: I/O subchannel:	  All fields have meaning
	 * 1: CHSC subchannel:	  Only sch_val, st and sch
	 *			  have meaning
	 * 2: Message subchannel: All fields except unit_addr
	 *			  have meaning
	 * 3: ADM subchannel:	  Only sch_val, st and sch
	 *			  have meaning
	 *
	 * Other types are currently undefined.
	 */
	if (ssd_area->st > 3) { /* uhm, that looks strange... */
		CIO_CRW_EVENT(0, "Strange subchannel type %d"
			      " for sch 0.%x.%04x\n", ssd_area->st,
			      sch->schid.ssid, sch->schid.sch_no);
		/*
		 * There may have been a new subchannel type defined in the
		 * time since this code was written; since we don't know which
		 * fields have meaning and what to do with it we just jump out
		 */
		return 0;
	} else {
		const char *type[4] = {"I/O", "chsc", "message", "ADM"};
		CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
			      sch->schid.ssid, sch->schid.sch_no,
			      type[ssd_area->st]);

		sch->ssd_info.valid = 1;
		sch->ssd_info.type = ssd_area->st;
	}

	if (ssd_area->st == 0 || ssd_area->st == 2) {
		for (j = 0; j < 8; j++) {
			if (!((0x80 >> j) & ssd_area->path_mask &
			      ssd_area->fla_valid_mask))
				continue;
			sch->ssd_info.chpid[j] = ssd_area->chpid[j];
			sch->ssd_info.fla[j] = ssd_area->fla[j];
		}
	}
	return 0;
}
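
/*
 * Determine the subchannel description for one subchannel via chsc.
 * Note that all chsc areas in this file are allocated as single zeroed
 * pages (GFP_KERNEL | GFP_DMA): a chsc command block has to be
 * addressable with 31 bits and may not cross a page boundary.
 */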
int
css_get_ssd_info(struct subchannel *sch)
{
	int ret;
	void *page;

	page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	spin_lock_irq(&sch->lock);
	ret = chsc_get_sch_desc_irq(sch, page);
	if (ret) {
		static int cio_chsc_err_msg;

		if (!cio_chsc_err_msg) {
			printk(KERN_ERR
			       "chsc_get_sch_descriptions:"
			       " Error %d while doing chsc; "
			       "processing some machine checks may "
			       "not work\n", ret);
			cio_chsc_err_msg = 1;
		}
	}
	spin_unlock_irq(&sch->lock);
	free_page((unsigned long)page);
	if (!ret) {
		int j, chpid;
		/* Allocate channel path structures, if needed. */
		for (j = 0; j < 8; j++) {
			chpid = sch->ssd_info.chpid[j];
			if (chpid && (get_chp_status(chpid) < 0))
				new_channel_path(chpid);
		}
	}
	return ret;
}

static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
	int j;
	int mask;
	struct subchannel *sch;
	struct channel_path *chpid;
	struct schib schib;

	sch = to_subchannel(dev);
	chpid = data;
	for (j = 0; j < 8; j++)
		if (sch->schib.pmcw.chpid[j] == chpid->id)
			break;
	if (j >= 8)
		return 0;

	mask = 0x80 >> j;
	spin_lock_irq(&sch->lock);

	stsch(sch->schid, &schib);
	if (!schib.pmcw.dnv)
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;
	if (sch->vpm == mask)
		goto out_unreg;

	if ((sch->schib.scsw.actl & (SCSW_ACTL_CLEAR_PEND |
				     SCSW_ACTL_HALT_PEND |
				     SCSW_ACTL_START_PEND |
				     SCSW_ACTL_RESUME_PEND)) &&
	    (sch->schib.pmcw.lpum == mask)) {
		int cc = cio_cancel(sch);

		if (cc == -ENODEV)
			goto out_unreg;

		if (cc == -EINVAL) {
			cc = cio_clear(sch);
			if (cc == -ENODEV)
				goto out_unreg;
			/* Call handler. */
			if (sch->driver && sch->driver->termination)
				sch->driver->termination(&sch->dev);
			goto out_unlock;
		}
	} else if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
		   (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
		   (sch->schib.pmcw.lpum == mask)) {
		int cc;

		cc = cio_clear(sch);
		if (cc == -ENODEV)
			goto out_unreg;
		/* Call handler. */
		if (sch->driver && sch->driver->termination)
			sch->driver->termination(&sch->dev);
		goto out_unlock;
	}

	/* trigger path verification. */
	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);
out_unlock:
	spin_unlock_irq(&sch->lock);
	return 0;
out_unreg:
	spin_unlock_irq(&sch->lock);
	sch->lpm = 0;
	if (css_enqueue_subchannel_slow(sch->schid)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
	}
	return 0;
}
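
/*
 * A channel path has gone away: walk all subchannels and remove the
 * chpid from their path masks.  Subchannels that lose their last path
 * are put on the slow queue with lpm cleared so they can be
 * re-evaluated (and, if need be, unregistered) later.
 */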
static inline void
s390_set_chpid_offline( __u8 chpid)
{
	char dbf_txt[15];
	struct device *dev;

	sprintf(dbf_txt, "chpr%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (get_chp_status(chpid) <= 0)
		return;
	dev = get_device(&css[0]->chps[chpid]->dev);
	bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
			 s390_subchannel_remove_chpid);

	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	put_device(dev);
}

struct res_acc_data {
	struct channel_path *chp;
	u32 fla_mask;
	u16 fla;
};

static int
s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
{
	int found;
	int chp;
	int ccode;

	found = 0;
	for (chp = 0; chp <= 7; chp++)
		/*
		 * check if chpid is in information updated by ssd
		 */
		if (sch->ssd_info.valid &&
		    sch->ssd_info.chpid[chp] == res_data->chp->id &&
		    (sch->ssd_info.fla[chp] & res_data->fla_mask)
		    == res_data->fla) {
			found = 1;
			break;
		}

	if (found == 0)
		return 0;

	/*
	 * Do a stsch to update our subchannel structure with the
	 * new path information and possibly check for logically
	 * offline chpids.
	 */
	ccode = stsch(sch->schid, &sch->schib);
	if (ccode > 0)
		return 0;

	return 0x80 >> chp;
}

static inline int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	int ret;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
	if (ret) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}

static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
	int chp_mask, old_lpm;
	struct res_acc_data *res_data;
	struct subchannel *sch;

	res_data = (struct res_acc_data *)data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if a subchannel is newly available. */
		return s390_process_res_acc_new_sch(schid);

	spin_lock_irq(&sch->lock);

	chp_mask = s390_process_res_acc_sch(res_data, sch);

	if (chp_mask == 0) {
		spin_unlock_irq(&sch->lock);
		return 0;
	}
	old_lpm = sch->lpm;
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | chp_mask) & sch->opm;
	if (!old_lpm && sch->lpm)
		device_trigger_reprobe(sch);
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(&sch->lock);
	put_device(&sch->dev);
	return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
}
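
/*
 * res_data->fla/fla_mask encode how precisely the reporting source
 * identified the affected link: fla_mask 0xffff means a full link
 * address was supplied, 0xff00 only a link address, 0 none at all.
 * With a full link address at most one subchannel can be affected,
 * which is why __s390_process_res_acc may stop the scan early by
 * returning -ENODEV.
 */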
static int
s390_process_res_acc(struct res_acc_data *res_data)
{
	int rc;
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x", res_data->chp->id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (res_data->fla != 0) {
		sprintf(dbf_txt, "fla%x", res_data->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}

	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * we will have to do.
	 */
	rc = for_each_subchannel(__s390_process_res_acc, res_data);
	if (css_slow_subchannels_exist())
		rc = -EAGAIN;
	else if (rc != -EAGAIN)
		rc = 0;
	return rc;
}

static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} *lir;

	lir = (struct lir *) data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0] & 0x000000ff);
}
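
/*
 * Process channel report words for the channel subsystem: issue the
 * store-event-information chsc (command 0x000e) repeatedly as long as
 * the response flags further pending events (bit 0x80) and dispatch on
 * the content code: 1 = link incident, 2 = resource accessibility.
 */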
int
chsc_process_crw(void)
{
	int chpid, ret;
	struct res_acc_data res_data;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8  flags;
		u8  vf;		/* validity flags */
		u8  rs;		/* reporting source */
		u8  cc;		/* content code */
		u16 fla;	/* full link address */
		u16 rsid;	/* reporting source id */
		u32 reserved5;
		u32 reserved6;
		u32 ccdf[96];	/* content-code dependent field */
		/* ccdf has to be big enough for a link-incident record */
	} *sei_area;

	if (!sei_page)
		return 0;
	/*
	 * build the chsc request block for store event information
	 * and do the call
	 * This function is only called by the machine check handler thread,
	 * so we don't need locking for the sei_page.
	 */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	ret = 0;
	do {
		int ccode, status;
		struct device *dev;
		memset(sei_area, 0, sizeof(*sei_area));
		memset(&res_data, 0, sizeof(struct res_acc_data));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;

		ccode = chsc(sei_area);
		if (ccode > 0)
			return 0;

		switch (sei_area->response.code) {
		/* for debug purposes, check for problems */
		case 0x0001:
			CIO_CRW_EVENT(4, "chsc_process_crw: event information "
				      "successfully stored\n");
			break; /* everything ok */
		case 0x0002:
			CIO_CRW_EVENT(2,
				      "chsc_process_crw: invalid command!\n");
			return 0;
		case 0x0003:
			CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
				      "request block!\n");
			return 0;
		case 0x0005:
			CIO_CRW_EVENT(2, "chsc_process_crw: no event "
				      "information stored\n");
			return 0;
		default:
			CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
				      sei_area->response.code);
			return 0;
		}

		/* Check if we might have lost some information. */
		if (sei_area->flags & 0x40)
			CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
				      "has been lost due to overflow!\n");

		if (sei_area->rs != 4) {
			CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
				      "(%04X) isn't a chpid!\n",
				      sei_area->rsid);
			continue;
		}

		/* which kind of information was stored? */
		switch (sei_area->cc) {
		case 1: /* link incident */
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports link incident,"
				      " reporting source is chpid %x\n",
				      sei_area->rsid);
			chpid = __get_chpid_from_lir(sei_area->ccdf);
			if (chpid < 0)
				CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
					      __FUNCTION__);
			else
				s390_set_chpid_offline(chpid);
			break;

		case 2: /* i/o resource accessibility */
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports some I/O "
				      "devices may have become accessible\n");
			pr_debug("Data received after sei:\n");
			pr_debug("Validity flags: %x\n", sei_area->vf);

			/* allocate a new channel path structure, if needed */
			status = get_chp_status(sei_area->rsid);
			if (status < 0)
				new_channel_path(sei_area->rsid);
			else if (!status)
				break;
			dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
			res_data.chp = to_channelpath(dev);
			pr_debug("chpid: %x", sei_area->rsid);
			if ((sei_area->vf & 0xc0) != 0) {
				res_data.fla = sei_area->fla;
				if ((sei_area->vf & 0xc0) == 0xc0) {
					pr_debug(" full link addr: %x",
						 sei_area->fla);
					res_data.fla_mask = 0xffff;
				} else {
					pr_debug(" link addr: %x",
						 sei_area->fla);
					res_data.fla_mask = 0xff00;
				}
			}
			ret = s390_process_res_acc(&res_data);
			pr_debug("\n\n");
			put_device(dev);
			break;

		default: /* other stuff */
			CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
				      sei_area->cc);
			break;
		}
	} while (sei_area->flags & 0x80);
	return ret;
}

static inline int
__chp_add_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	int ret;

	if (stsch(schid, &schib))
		/* We're through */
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
	if (ret) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}
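
/*
 * A channel path has become available again: recalculate the logical
 * path mask of every subchannel that uses this chpid and have the
 * driver re-verify its paths.
 */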
static int
__chp_add(struct subchannel_id schid, void *data)
{
	int i;
	struct channel_path *chp;
	struct subchannel *sch;

	chp = (struct channel_path *)data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if the subchannel is now available. */
		return __chp_add_new_sch(schid);
	spin_lock_irq(&sch->lock);
	for (i = 0; i < 8; i++)
		if (sch->schib.pmcw.chpid[i] == chp->id) {
			if (stsch(sch->schid, &sch->schib) != 0) {
				/* Endgame. */
				spin_unlock_irq(&sch->lock);
				put_device(&sch->dev);
				return -ENXIO;
			}
			break;
		}
	if (i == 8) {
		spin_unlock_irq(&sch->lock);
		put_device(&sch->dev);
		return 0;
	}
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | 0x80 >> i) & sch->opm;

	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(&sch->lock);
	put_device(&sch->dev);
	return 0;
}

static int
chp_add(int chpid)
{
	int rc;
	char dbf_txt[15];
	struct device *dev;

	if (!get_chp_status(chpid))
		return 0; /* no need to do the rest */

	sprintf(dbf_txt, "cadd%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	dev = get_device(&css[0]->chps[chpid]->dev);
	rc = for_each_subchannel(__chp_add, to_channelpath(dev));
	if (css_slow_subchannels_exist())
		rc = -EAGAIN;
	if (rc != -EAGAIN)
		rc = 0;
	put_device(dev);
	return rc;
}

/*
 * Handling of crw machine checks with channel path source.
 */
int
chp_process_crw(int chpid, int on)
{
	if (on == 0) {
		/* Path has gone. We use the link incident routine. */
		s390_set_chpid_offline(chpid);
		return 0; /* De-register is async anyway. */
	}
	/*
	 * Path has come online. Allocate a new channel path structure,
	 * if needed.
	 */
	if (get_chp_status(chpid) < 0)
		new_channel_path(chpid);
	/* Avoid the extra overhead in process_rec_acc. */
	return chp_add(chpid);
}

static inline int
__check_for_io_and_kill(struct subchannel *sch, int index)
{
	int cc;

	if (!device_is_online(sch))
		/* cio could be doing I/O. */
		return 0;
	cc = stsch(sch->schid, &sch->schib);
	if (cc)
		return 0;
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
		device_set_waiting(sch);
		return 1;
	}
	return 0;
}
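
/*
 * Apply a vary on/off to one subchannel.  Vary is a purely logical
 * operation: the physical state of the chpid does not change, so opm
 * and lpm are adjusted directly from the ssd info without consulting
 * the hardware.
 */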
static inline void
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
{
	int chp, old_lpm;
	unsigned long flags;

	if (!sch->ssd_info.valid)
		return;

	spin_lock_irqsave(&sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		if (sch->ssd_info.chpid[chp] != chpid)
			continue;

		if (on) {
			sch->opm |= (0x80 >> chp);
			sch->lpm |= (0x80 >> chp);
			if (!old_lpm)
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		} else {
			sch->opm &= ~(0x80 >> chp);
			sch->lpm &= ~(0x80 >> chp);
			/*
			 * Give running I/O a grace period in which it
			 * can successfully terminate, even using the
			 * just varied off path. Then kill it.
			 */
			if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
				if (css_enqueue_subchannel_slow(sch->schid)) {
					css_clear_subchannel_slow_list();
					need_rescan = 1;
				}
			} else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		}
		break;
	}
	spin_unlock_irqrestore(&sch->lock, flags);
}

static int
s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int
s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;
	struct subchannel *sch;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		put_device(&sch->dev);
		return 0;
	}
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	if (css_enqueue_subchannel_slow(schid)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}

/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
static int
s390_vary_chpid( __u8 chpid, int on)
{
	char dbf_text[15];
	int status;

	sprintf(dbf_text, on ? "varyon%x" : "varyoff%x", chpid);
	CIO_TRACE_EVENT(2, dbf_text);

	status = get_chp_status(chpid);
	if (status < 0) {
		printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
		return -EINVAL;
	}

	if (!on && !status) {
		printk(KERN_ERR "chpid %x is already offline\n", chpid);
		return -EINVAL;
	}

	set_chp_logically_online(chpid, on);

	/*
	 * Redo path verification on the devices the chpid connects to.
	 */
	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
			 s390_subchannel_vary_chpid_on :
			 s390_subchannel_vary_chpid_off);
	if (on)
		/* Scan for new devices on varied on path. */
		for_each_subchannel(__s390_vary_chpid_on, NULL);
	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	return 0;
}

/*
 * Channel measurement related functions
 */
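
/*
 * While channel measurement is enabled (see chsc_secm() below), the
 * channel subsystem continuously updates one measurement block per
 * chpid inside the channel-utilization blocks (cub_addr1/cub_addr2).
 * The binary sysfs attributes defined here export the static
 * measurement characteristics and the current measurement block.
 */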
static ssize_t
chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off,
			   size_t count)
{
	struct channel_path *chp;
	unsigned int size;

	chp = to_channelpath(container_of(kobj, struct device, kobj));
	if (!chp->cmg_chars)
		return 0;

	size = sizeof(struct cmg_chars);

	if (off > size)
		return 0;
	if (off + count > size)
		count = size - off;
	memcpy(buf, chp->cmg_chars + off, count);
	return count;
}

static struct bin_attribute chp_measurement_chars_attr = {
	.attr = {
		.name = "measurement_chars",
		.mode = S_IRUSR,
		.owner = THIS_MODULE,
	},
	.size = sizeof(struct cmg_chars),
	.read = chp_measurement_chars_read,
};
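
/*
 * The hardware updates the measurement block asynchronously, so copy
 * the entry twice and retry until both copies saw the same first
 * value; this yields a consistent snapshot without taking a lock
 * against the channel subsystem.
 */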
static void
chp_measurement_copy_block(struct cmg_entry *buf,
			   struct channel_subsystem *css, int chpid)
{
	void *area;
	struct cmg_entry *entry, reference_buf;
	int idx;

	if (chpid < 128) {
		area = css->cub_addr1;
		idx = chpid;
	} else {
		area = css->cub_addr2;
		idx = chpid - 128;
	}
	entry = area + (idx * sizeof(struct cmg_entry));
	do {
		memcpy(buf, entry, sizeof(*entry));
		memcpy(&reference_buf, entry, sizeof(*entry));
	} while (reference_buf.values[0] != buf->values[0]);
}

static ssize_t
chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
{
	struct channel_path *chp;
	struct channel_subsystem *css;
	unsigned int size;

	chp = to_channelpath(container_of(kobj, struct device, kobj));
	css = to_css(chp->dev.parent);

	/* The exported block is a cmg_entry, matching the attribute size. */
	size = sizeof(struct cmg_entry);

	/* Only allow single reads. */
	if (off || count < size)
		return 0;
	chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id);
	count = size;
	return count;
}

static struct bin_attribute chp_measurement_attr = {
	.attr = {
		.name = "measurement",
		.mode = S_IRUSR,
		.owner = THIS_MODULE,
	},
	.size = sizeof(struct cmg_entry),
	.read = chp_measurement_read,
};

static void
chsc_remove_chp_cmg_attr(struct channel_path *chp)
{
	sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_chars_attr);
	sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_attr);
}

static int
chsc_add_chp_cmg_attr(struct channel_path *chp)
{
	int ret;

	ret = sysfs_create_bin_file(&chp->dev.kobj,
				    &chp_measurement_chars_attr);
	if (ret)
		return ret;
	ret = sysfs_create_bin_file(&chp->dev.kobj, &chp_measurement_attr);
	if (ret)
		sysfs_remove_bin_file(&chp->dev.kobj,
				      &chp_measurement_chars_attr);
	return ret;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chsc_remove_chp_cmg_attr(css->chps[i]);
	}
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chsc_add_chp_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chsc_remove_chp_cmg_attr(css->chps[i]);
	}
	return ret;
}
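
/*
 * Issue the set-channel-monitor chsc (command 0x0016): operation code
 * 0 enables and 1 disables measurement; cub_addr1/cub_addr2 tell the
 * channel subsystem where to place the channel-utilization blocks.
 */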
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} *secm_area;
	int ret, ccode;

	secm_area = page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
	case 0x0001: /* Success. */
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide secm\n");
		ret = -EOPNOTSUPP;
		break;
	case 0x0102: /* cub addresses incorrect */
		CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
		ret = -EINVAL;
		break;
	case 0x0103: /* key error */
		CIO_CRW_EVENT(2, "Access key error in secm\n");
		ret = -EINVAL;
		break;
	case 0x0105: /* error while starting */
		CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
		ret = -EIO;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      secm_area->response.code);
		ret = -EIO;
	}
	return ret;
}

int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	mutex_lock(&css->mutex);
	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			mutex_unlock(&css->mutex);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (enable && !css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	mutex_unlock(&css->mutex);
	free_page((unsigned long)secm_area);
	return ret;
}

/*
 * Files for the channel path entries.
 */
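
/*
 * Each channel path shows up as a device below the channel subsystem
 * root, e.g. /sys/devices/css0/chp0.4a.  Writing "on" or "off" to the
 * status attribute varies the path logically:
 *	echo off > /sys/devices/css0/chp0.4a/status
 */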
static ssize_t
chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
		sprintf(buf, "offline\n"));
}

static ssize_t
chp_status_write(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct channel_path *cp = container_of(dev, struct channel_path, dev);
	char cmd[10];
	int num_args;
	int error;

	num_args = sscanf(buf, "%5s", cmd);
	if (!num_args)
		return count;

	if (!strnicmp(cmd, "on", 2))
		error = s390_vary_chpid(cp->id, 1);
	else if (!strnicmp(cmd, "off", 3))
		error = s390_vary_chpid(cp->id, 0);
	else
		error = -EINVAL;

	return error < 0 ? error : count;
}

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);

static ssize_t
chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return sprintf(buf, "%x\n", chp->desc.desc);
}

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

static ssize_t
chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->cmg == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->cmg);
}

static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);

static ssize_t
chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->shared == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->shared);
}

static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);

static struct attribute *chp_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_type.attr,
	&dev_attr_cmg.attr,
	&dev_attr_shared.attr,
	NULL,
};

static struct attribute_group chp_attr_group = {
	.attrs = chp_attrs,
};

static void
chp_release(struct device *dev)
{
	struct channel_path *cp;

	cp = container_of(dev, struct channel_path, dev);
	kfree(cp);
}
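
/*
 * Issue the store-channel-path-description chsc (command 0x0002) for
 * a single chpid and copy the returned descriptor to *desc.
 */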
static int
chsc_determine_channel_path_description(int chpid,
					struct channel_path_desc *desc)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	scpd_area->first_chpid = chpid;
	scpd_area->last_chpid = chpid;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scpd_area->response.code) {
	case 0x0001: /* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scpd_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scpd_area);
	return ret;
}

static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	switch (chp->cmg) {
	case 2:
	case 3:
		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
					 GFP_KERNEL);
		if (chp->cmg_chars) {
			int i, mask;
			struct cmg_chars *cmg_chars;

			cmg_chars = chp->cmg_chars;
			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
				mask = 0x80 >> (i + 3);
				if (cmcv & mask)
					cmg_chars->values[i] = chars->values[i];
				else
					cmg_chars->values[i] = 0;
			}
		}
		break;
	default:
		/* No cmg-dependent data. */
		break;
	}
}

static int
chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	scmc_area->first_chpid = chp->id;
	scmc_area->last_chpid = chp->id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scmc_area->response.code) {
	case 0x0001: /* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			chp->cmg = -1;
			chp->shared = -1;
		}
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Invalid bit combination. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided. */
		CIO_CRW_EVENT(2, "Model does not provide scmc\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scmc_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}

/*
 * Entries for chpids on the system bus.
 * This replaces /proc/chpids.
 */
static int
new_channel_path(int chpid)
{
	struct channel_path *chp;
	int ret;

	chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
	if (!chp)
		return -ENOMEM;

	/* fill in status, etc. */
	chp->id = chpid;
	chp->state = 1;
	chp->dev = (struct device) {
		.parent  = &css[0]->device,
		.release = chp_release,
	};
	snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);

	/* Obtain channel path description and fill it in. */
	ret = chsc_determine_channel_path_description(chpid, &chp->desc);
	if (ret)
		goto out_free;
	/* Get channel-measurement characteristics. */
	if (css_characteristics_avail && css_chsc_characteristics.scmc
	    && css_chsc_characteristics.secm) {
		ret = chsc_get_channel_measurement_chars(chp);
		if (ret)
			goto out_free;
	} else {
		static int msg_done;

		if (!msg_done) {
			printk(KERN_WARNING "cio: Channel measurements not "
			       "available, continuing.\n");
			msg_done = 1;
		}
		chp->cmg = -1;
	}

	/* make it known to the system */
	ret = device_register(&chp->dev);
	if (ret) {
		printk(KERN_WARNING "%s: could not register %02x\n",
		       __func__, chpid);
		/* chp is freed by chp_release() when the last ref is gone. */
		put_device(&chp->dev);
		return ret;
	}
	ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
	if (ret) {
		/* Don't kfree chp here; unregistering triggers chp_release. */
		device_unregister(&chp->dev);
		return ret;
	}
	mutex_lock(&css[0]->mutex);
	if (css[0]->cm_enabled) {
		ret = chsc_add_chp_cmg_attr(chp);
		if (ret) {
			sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
			device_unregister(&chp->dev);
			mutex_unlock(&css[0]->mutex);
			return ret;
		}
	}
	css[0]->chps[chpid] = chp;
	mutex_unlock(&css[0]->mutex);
	return ret;
out_free:
	kfree(chp);
	return ret;
}

void *
chsc_get_chp_desc(struct subchannel *sch, int chp_no)
{
	struct channel_path *chp;
	struct channel_path_desc *desc;

	chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
	if (!chp)
		return NULL;
	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
	if (!desc)
		return NULL;
	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
	return desc;
}

static int __init
chsc_alloc_sei_area(void)
{
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page)
		printk(KERN_WARNING "Can't allocate page for processing of "
		       "chsc machine checks!\n");
	return (sei_page ? 0 : -ENOMEM);
}
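
/*
 * Enable an optional facility via the sda chsc (command 0x0031).  The
 * caller passes the operation code of the facility to enable, for
 * instance multiple subchannel sets on machines that support them.
 */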
int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	switch (sda_area->response.code) {
	case 0x0001: /* everything ok */
		ret = 0;
		break;
	case 0x0003: /* invalid request block */
	case 0x0007:
		ret = -EINVAL;
		break;
	case 0x0004: /* command not provided */
	case 0x0101: /* facility not provided */
		ret = -EOPNOTSUPP;
		break;
	default: /* something went wrong */
		ret = -EIO;
	}
out:
	free_page((unsigned long)sda_area);
	return ret;
}

subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area) {
		printk(KERN_WARNING "cio: Was not able to determine available "
		       "CHSCs due to no memory.\n");
		return -ENOMEM;
	}

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs, cc=%i.\n", result);
		result = -EIO;
		goto exit;
	}

	if (scsc_area->response.code != 1) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs.\n");
		result = -EIO;
		goto exit;
	}
	memcpy(&css_general_characteristics, scsc_area->general_char,
	       sizeof(css_general_characteristics));
	memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
	       sizeof(css_chsc_characteristics));
exit:
	free_page((unsigned long)scsc_area);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);