/*
 * drivers/s390/cio/chsc.c
 * S/390 common I/O routines -- channel subsystem call
 *
 * Copyright IBM Corp. 1999,2008
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 *            Arnd Bergmann (arndb@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;
static DEFINE_SPINLOCK(siosl_lock);
static DEFINE_SPINLOCK(sda_lock);

/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
        switch (response) {
        case 0x0001:
                return 0;
        case 0x0002:
        case 0x0003:
        case 0x0006:
        case 0x0007:
        case 0x0008:
        case 0x000a:
        case 0x0104:
                return -EINVAL;
        case 0x0004:
                return -EOPNOTSUPP;
        default:
                return -EIO;
        }
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);

struct chsc_ssd_area {
        struct chsc_header request;
        u16 :10;
        u16 ssid:2;
        u16 :4;
        u16 f_sch;        /* first subchannel */
        u16 :16;
        u16 l_sch;        /* last subchannel */
        u32 :32;
        struct chsc_header response;
        u32 :32;
        u8 sch_valid : 1;
        u8 dev_valid : 1;
        u8 st        : 3; /* subchannel type */
        u8 zeroes    : 3;
        u8 unit_addr;     /* unit address */
        u16 devno;        /* device number */
        u8 path_mask;
        u8 fla_valid_mask;
        u16 sch;          /* subchannel */
        u8 chpid[8];      /* chpids 0-7 */
        u16 fla[8];       /* full link addresses 0-7 */
} __attribute__ ((packed));

int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
        unsigned long page;
        struct chsc_ssd_area *ssd_area;
        int ccode;
        int ret;
        int i;
        int mask;

        page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!page)
                return -ENOMEM;
        ssd_area = (struct chsc_ssd_area *) page;
        ssd_area->request.length = 0x0010;
        ssd_area->request.code = 0x0004;
        ssd_area->ssid = schid.ssid;
        ssd_area->f_sch = schid.sch_no;
        ssd_area->l_sch = schid.sch_no;

        ccode = chsc(ssd_area);
        /* Check response. */
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out_free;
        }
        ret = chsc_error_from_response(ssd_area->response.code);
        if (ret != 0) {
                CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
                              schid.ssid, schid.sch_no,
                              ssd_area->response.code);
                goto out_free;
        }
        if (!ssd_area->sch_valid) {
                ret = -ENODEV;
                goto out_free;
        }
        /* Copy data */
        ret = 0;
        memset(ssd, 0, sizeof(struct chsc_ssd_info));
        if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
            (ssd_area->st != SUBCHANNEL_TYPE_MSG))
                goto out_free;
        ssd->path_mask = ssd_area->path_mask;
        ssd->fla_valid_mask = ssd_area->fla_valid_mask;
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd_area->path_mask & mask) {
                        chp_id_init(&ssd->chpid[i]);
                        ssd->chpid[i].id = ssd_area->chpid[i];
                }
                if (ssd_area->fla_valid_mask & mask)
                        ssd->fla[i] = ssd_area->fla[i];
        }
out_free:
        free_page(page);
        return ret;
}
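
/*
 * Notify the subchannel's driver that a channel path has gone offline.
 * If the driver indicates that the subchannel can no longer be operated,
 * clear the logical path mask and schedule the subchannel for evaluation
 * on the slow path.
 */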
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
        spin_lock_irq(sch->lock);
        if (sch->driver && sch->driver->chp_event)
                if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
                        goto out_unreg;
        spin_unlock_irq(sch->lock);
        return 0;

out_unreg:
        sch->lpm = 0;
        spin_unlock_irq(sch->lock);
        css_schedule_eval(sch->schid);
        return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
        char dbf_txt[15];
        struct chp_link link;

        sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) <= 0)
                return;
        memset(&link, 0, sizeof(struct chp_link));
        link.chpid = chpid;
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}

static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
        struct schib schib;
        /*
         * We don't know the device yet, but since a path
         * may be available now to the device we'll have
         * to do recognition again.
         * Since we don't have any idea about which chpid
         * that beast may be on we'll have to do a stsch
         * on all devices, grr...
         */
        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;

        /* Put it on the slow path. */
        css_schedule_eval(schid);
        return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
        spin_lock_irq(sch->lock);
        if (sch->driver && sch->driver->chp_event)
                sch->driver->chp_event(sch, data, CHP_ONLINE);
        spin_unlock_irq(sch->lock);

        return 0;
}

static void s390_process_res_acc(struct chp_link *link)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
                link->chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
        if (link->fla != 0) {
                sprintf(dbf_txt, "fla%x", link->fla);
                CIO_TRACE_EVENT(2, dbf_txt);
        }
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
         * do a validation on those.
         * The more information we have, the less scanning
         * we will have to do.
         */
        for_each_subchannel_staged(__s390_process_res_acc,
                                   s390_process_res_acc_new_sch, link);
}
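
/*
 * Extract the channel-path ID from a link-incident record (LIR).
 * Returns the chpid taken from the incident-node descriptor, or
 * -EINVAL if the record or its node descriptor is not valid.
 */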
static int
__get_chpid_from_lir(void *data)
{
        struct lir {
                u8  iq;
                u8  ic;
                u16 sci;
                /* incident-node descriptor */
                u32 indesc[28];
                /* attached-node descriptor */
                u32 andesc[28];
                /* incident-specific information */
                u32 isinfo[28];
        } __attribute__ ((packed)) *lir;

        lir = data;
        if (!(lir->iq & 0x80))
                /* NULL link incident record */
                return -EINVAL;
        if (!(lir->indesc[0] & 0xc0000000))
                /* node descriptor not valid */
                return -EINVAL;
        if (!(lir->indesc[0] & 0x10000000))
                /* don't handle device-type nodes - FIXME */
                return -EINVAL;
        /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

        return (u16) (lir->indesc[0] & 0x000000ff);
}

struct chsc_sei_area {
        struct chsc_header request;
        u32 reserved1;
        u32 reserved2;
        u32 reserved3;
        struct chsc_header response;
        u32 reserved4;
        u8  flags;
        u8  vf;         /* validity flags */
        u8  rs;         /* reporting source */
        u8  cc;         /* content code */
        u16 fla;        /* full link address */
        u16 rsid;       /* reporting source id */
        u32 reserved5;
        u32 reserved6;
        u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
        /* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
        struct chp_id chpid;
        int id;

        CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
                      sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return;
        id = __get_chpid_from_lir(sei_area->ccdf);
        if (id < 0)
                CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
        else {
                chp_id_init(&chpid);
                chpid.id = id;
                chsc_chp_offline(chpid);
        }
}

static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
        struct chp_link link;
        struct chp_id chpid;
        int status;

        CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
                      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return;
        chp_id_init(&chpid);
        chpid.id = sei_area->rsid;
        /* allocate a new channel path structure, if needed */
        status = chp_get_status(chpid);
        if (status < 0)
                chp_new(chpid);
        else if (!status)
                return;
        memset(&link, 0, sizeof(struct chp_link));
        link.chpid = chpid;
        if ((sei_area->vf & 0xc0) != 0) {
                link.fla = sei_area->fla;
                if ((sei_area->vf & 0xc0) == 0xc0)
                        /* full link address */
                        link.fla_mask = 0xffff;
                else
                        /* link address */
                        link.fla_mask = 0xff00;
        }
        s390_process_res_acc(&link);
}

struct chp_config_data {
        u8 map[32];
        u8 op;
        u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
        struct chp_config_data *data;
        struct chp_id chpid;
        int num;
        char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

        CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
        if (sei_area->rs != 0)
                return;
        data = (struct chp_config_data *) &(sei_area->ccdf);
        chp_id_init(&chpid);
        for (num = 0; num <= __MAX_CHPID; num++) {
                if (!chp_test_bit(data->map, num))
                        continue;
                chpid.id = num;
                pr_notice("Processing %s for channel path %x.%02x\n",
                          events[data->op], chpid.cssid, chpid.id);
                switch (data->op) {
                case 0:
                        chp_cfg_schedule(chpid, 1);
                        break;
                case 1:
                        chp_cfg_schedule(chpid, 0);
                        break;
                case 2:
                        chp_cfg_cancel_deconfigure(chpid);
                        break;
                }
        }
}
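
/*
 * Dispatch a store-event-information result to the handler matching
 * its content code. An overflow indication means that events may have
 * been lost, so all subchannels are scheduled for evaluation first.
 */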
static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
        /* Check if we might have lost some information. */
        if (sei_area->flags & 0x40) {
                CIO_CRW_EVENT(2, "chsc: event overflow\n");
                css_schedule_eval_all();
        }
        /* which kind of information was stored? */
        switch (sei_area->cc) {
        case 1: /* link incident */
                chsc_process_sei_link_incident(sei_area);
                break;
        case 2: /* i/o resource accessibility */
                chsc_process_sei_res_acc(sei_area);
                break;
        case 8: /* channel-path-configuration notification */
                chsc_process_sei_chp_config(sei_area);
                break;
        default: /* other stuff */
                CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
                              sei_area->cc);
                break;
        }
}

static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
        struct chsc_sei_area *sei_area;

        if (overflow) {
                css_schedule_eval_all();
                return;
        }
        CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
                      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
                      crw0->erc, crw0->rsid);
        if (!sei_page)
                return;
        /* Access to sei_page is serialized through machine check handler
         * thread, so no need for locking. */
        sei_area = sei_page;

        CIO_TRACE_EVENT(2, "prcss");
        do {
                memset(sei_area, 0, sizeof(*sei_area));
                sei_area->request.length = 0x0010;
                sei_area->request.code = 0x000e;
                if (chsc(sei_area))
                        break;

                if (sei_area->response.code == 0x0001) {
                        CIO_CRW_EVENT(4, "chsc: sei successful\n");
                        chsc_process_sei(sei_area);
                } else {
                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
                                      sei_area->response.code);
                        break;
                }
        } while (sei_area->flags & 0x80);
}

void chsc_chp_online(struct chp_id chpid)
{
        char dbf_txt[15];
        struct chp_link link;

        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) != 0) {
                memset(&link, 0, sizeof(struct chp_link));
                link.chpid = chpid;
                /* Wait until previous actions have settled. */
                css_wait_for_slow_path();
                for_each_subchannel_staged(__s390_process_res_acc, NULL,
                                           &link);
        }
}

static void __s390_subchannel_vary_chpid(struct subchannel *sch,
                                         struct chp_id chpid, int on)
{
        unsigned long flags;
        struct chp_link link;

        memset(&link, 0, sizeof(struct chp_link));
        link.chpid = chpid;
        spin_lock_irqsave(sch->lock, flags);
        if (sch->driver && sch->driver->chp_event)
                sch->driver->chp_event(sch, &link,
                                       on ? CHP_VARY_ON : CHP_VARY_OFF);
        spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
        struct chp_id *chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 0);
        return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
        struct chp_id *chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 1);
        return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
        struct schib schib;

        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;
        /* Put it on the slow path. */
        css_schedule_eval(schid);
        return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
        struct chp_link link;

        memset(&link, 0, sizeof(struct chp_link));
        link.chpid = chpid;
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        /*
         * Redo PathVerification on the devices the chpid connects to.
         */
        if (on)
                for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
                                           __s390_vary_chpid_on, &link);
        else
                for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
                                           NULL, &link);

        return 0;
}
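
/* Remove the channel-measurement attributes from all known channel paths. */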
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
        int i;

        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
        int i, ret;

        ret = 0;
        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                ret = chp_add_cmg_attr(css->chps[i]);
                if (ret)
                        goto cleanup;
        }
        return ret;
cleanup:
        for (--i; i >= 0; i--) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
        return ret;
}

int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
        struct {
                struct chsc_header request;
                u32 operation_code : 2;
                u32 : 30;
                u32 key : 4;
                u32 : 28;
                u32 zeroes1;
                u32 cub_addr1;
                u32 zeroes2;
                u32 cub_addr2;
                u32 reserved[13];
                struct chsc_header response;
                u32 status : 8;
                u32 : 4;
                u32 fmt : 4;
                u32 : 16;
        } __attribute__ ((packed)) *secm_area;
        int ret, ccode;

        secm_area = page;
        secm_area->request.length = 0x0050;
        secm_area->request.code = 0x0016;

        secm_area->key = PAGE_DEFAULT_KEY >> 4;
        secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
        secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

        secm_area->operation_code = enable ? 0 : 1;

        ccode = chsc(secm_area);
        if (ccode > 0)
                return (ccode == 3) ? -ENODEV : -EBUSY;

        switch (secm_area->response.code) {
        case 0x0102:
        case 0x0103:
                ret = -EINVAL;
                break;
        default:
                ret = chsc_error_from_response(secm_area->response.code);
        }
        if (ret != 0)
                CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
                              secm_area->response.code);
        return ret;
}
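
/*
 * Enable or disable channel measurement for a channel subsystem.
 * On enable, the channel-utilization blocks are allocated and the
 * measurement-related attributes are created for all channel paths;
 * on disable (or on failure to create the attributes), measurement is
 * turned off again and the blocks are freed.
 */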
int
chsc_secm(struct channel_subsystem *css, int enable)
{
        void *secm_area;
        int ret;

        secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!secm_area)
                return -ENOMEM;

        if (enable && !css->cm_enabled) {
                css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                if (!css->cub_addr1 || !css->cub_addr2) {
                        free_page((unsigned long)css->cub_addr1);
                        free_page((unsigned long)css->cub_addr2);
                        free_page((unsigned long)secm_area);
                        return -ENOMEM;
                }
        }
        ret = __chsc_do_secm(css, enable, secm_area);
        if (!ret) {
                css->cm_enabled = enable;
                if (css->cm_enabled) {
                        ret = chsc_add_cmg_attr(css);
                        if (ret) {
                                memset(secm_area, 0, PAGE_SIZE);
                                __chsc_do_secm(css, 0, secm_area);
                                css->cm_enabled = 0;
                        }
                } else
                        chsc_remove_cmg_attr(css);
        }
        if (!css->cm_enabled) {
                free_page((unsigned long)css->cub_addr1);
                free_page((unsigned long)css->cub_addr2);
        }
        free_page((unsigned long)secm_area);
        return ret;
}

int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
                                     int c, int m,
                                     struct chsc_response_struct *resp)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 2;
                u32 m : 1;
                u32 c : 1;
                u32 fmt : 4;
                u32 cssid : 8;
                u32 : 4;
                u32 rfmt : 4;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u8 data[PAGE_SIZE - 20];
        } __attribute__ ((packed)) *scpd_area;

        if ((rfmt == 1) && !css_general_characteristics.fcs)
                return -EINVAL;
        if ((rfmt == 2) && !css_general_characteristics.cib)
                return -EINVAL;
        scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scpd_area)
                return -ENOMEM;

        scpd_area->request.length = 0x0010;
        scpd_area->request.code = 0x0002;

        scpd_area->cssid = chpid.cssid;
        scpd_area->first_chpid = chpid.id;
        scpd_area->last_chpid = chpid.id;
        scpd_area->m = m;
        scpd_area->c = c;
        scpd_area->fmt = fmt;
        scpd_area->rfmt = rfmt;

        ccode = chsc(scpd_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        ret = chsc_error_from_response(scpd_area->response.code);
        if (ret == 0)
                /* Success. */
                memcpy(resp, &scpd_area->response, scpd_area->response.length);
        else
                CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
                              scpd_area->response.code);
out:
        free_page((unsigned long)scpd_area);
        return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
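
/*
 * Retrieve the format-0 channel-path description for @chpid and copy
 * it to @desc.
 */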
int chsc_determine_base_channel_path_desc(struct chp_id chpid,
                                          struct channel_path_desc *desc)
{
        struct chsc_response_struct *chsc_resp;
        int ret;

        chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
        if (!chsc_resp)
                return -ENOMEM;
        ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
        if (ret)
                goto out_free;
        memcpy(desc, &chsc_resp->data, sizeof(*desc));
out_free:
        kfree(chsc_resp);
        return ret;
}

static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
                          struct cmg_chars *chars)
{
        switch (chp->cmg) {
        case 2:
        case 3:
                chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
                                         GFP_KERNEL);
                if (chp->cmg_chars) {
                        int i, mask;
                        struct cmg_chars *cmg_chars;

                        cmg_chars = chp->cmg_chars;
                        for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
                                mask = 0x80 >> (i + 3);
                                if (cmcv & mask)
                                        cmg_chars->values[i] = chars->values[i];
                                else
                                        cmg_chars->values[i] = 0;
                        }
                }
                break;
        default:
                /* No cmg-dependent data. */
                break;
        }
}

int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                u32 not_valid : 1;
                u32 shared : 1;
                u32 : 22;
                u32 chpid : 8;
                u32 cmcv : 5;
                u32 : 11;
                u32 cmgq : 8;
                u32 cmg : 8;
                u32 zeroes3;
                u32 data[NR_MEASUREMENT_CHARS];
        } __attribute__ ((packed)) *scmc_area;

        scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scmc_area)
                return -ENOMEM;

        scmc_area->request.length = 0x0010;
        scmc_area->request.code = 0x0022;

        scmc_area->first_chpid = chp->chpid.id;
        scmc_area->last_chpid = chp->chpid.id;

        ccode = chsc(scmc_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        ret = chsc_error_from_response(scmc_area->response.code);
        if (ret == 0) {
                /* Success. */
                if (!scmc_area->not_valid) {
                        chp->cmg = scmc_area->cmg;
                        chp->shared = scmc_area->shared;
                        chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
                                                  (struct cmg_chars *)
                                                  &scmc_area->data);
                } else {
                        chp->cmg = -1;
                        chp->shared = -1;
                }
        } else {
                CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
                              scmc_area->response.code);
        }
out:
        free_page((unsigned long)scmc_area);
        return ret;
}
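
/*
 * Allocate the static page used for store-event-information (SEI)
 * requests and register the crw handler for channel-subsystem
 * reported machine checks.
 */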
int __init chsc_alloc_sei_area(void)
{
        int ret;

        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sei_page) {
                CIO_MSG_EVENT(0, "Can't allocate page for processing of "
                              "chsc machine checks!\n");
                return -ENOMEM;
        }
        ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
        if (ret)
                free_page((unsigned long)sei_page);
        return ret;
}

void __init chsc_free_sei_area(void)
{
        crw_unregister_handler(CRW_RSC_CSS);
        free_page((unsigned long)sei_page);
}

int chsc_enable_facility(int operation_code)
{
        int ret;
        static struct {
                struct chsc_header request;
                u8 reserved1:4;
                u8 format:4;
                u8 reserved2;
                u16 operation_code;
                u32 reserved3;
                u32 reserved4;
                u32 operation_data_area[252];
                struct chsc_header response;
                u32 reserved5:4;
                u32 format2:4;
                u32 reserved6:24;
        } __attribute__ ((packed, aligned(4096))) sda_area;

        spin_lock(&sda_lock);
        memset(&sda_area, 0, sizeof(sda_area));
        sda_area.request.length = 0x0400;
        sda_area.request.code = 0x0031;
        sda_area.operation_code = operation_code;

        ret = chsc(&sda_area);
        if (ret > 0) {
                ret = (ret == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (sda_area.response.code) {
        case 0x0101:
                ret = -EOPNOTSUPP;
                break;
        default:
                ret = chsc_error_from_response(sda_area.response.code);
        }
        if (ret != 0)
                CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
                              operation_code, sda_area.response.code);
out:
        spin_unlock(&sda_lock);
        return ret;
}

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
        int result;
        struct {
                struct chsc_header request;
                u32 reserved1;
                u32 reserved2;
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u32 general_char[510];
                u32 chsc_char[518];
        } __attribute__ ((packed)) *scsc_area;

        scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scsc_area)
                return -ENOMEM;

        scsc_area->request.length = 0x0010;
        scsc_area->request.code = 0x0010;

        result = chsc(scsc_area);
        if (result) {
                result = (result == 3) ? -ENODEV : -EBUSY;
                goto exit;
        }

        result = chsc_error_from_response(scsc_area->response.code);
        if (result == 0) {
                memcpy(&css_general_characteristics, scsc_area->general_char,
                       sizeof(css_general_characteristics));
                memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
                       sizeof(css_chsc_characteristics));
        } else
                CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
                              scsc_area->response.code);
exit:
        free_page((unsigned long)scsc_area);
        return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
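
/*
 * Perform a server-time-protocol (STP) control operation. @page is
 * used as the chsc request area; @op and @ctrl select the operation
 * to be performed and its control parameters.
 */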
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
        struct {
                struct chsc_header request;
                unsigned int rsvd0;
                unsigned int op : 8;
                unsigned int rsvd1 : 8;
                unsigned int ctrl : 16;
                unsigned int rsvd2[5];
                struct chsc_header response;
                unsigned int rsvd3[7];
        } __attribute__ ((packed)) *rr;
        int rc;

        memset(page, 0, PAGE_SIZE);
        rr = page;
        rr->request.length = 0x0020;
        rr->request.code = 0x0033;
        rr->op = op;
        rr->ctrl = ctrl;
        rc = chsc(rr);
        if (rc)
                return -EIO;
        rc = (rr->response.code == 0x0001) ? 0 : -EIO;
        return rc;
}

/* Store STP information: copy @size bytes of response data to @result. */
int chsc_sstpi(void *page, void *result, size_t size)
{
        struct {
                struct chsc_header request;
                unsigned int rsvd0[3];
                struct chsc_header response;
                char data[size];
        } __attribute__ ((packed)) *rr;
        int rc;

        memset(page, 0, PAGE_SIZE);
        rr = page;
        rr->request.length = 0x0010;
        rr->request.code = 0x0038;
        rc = chsc(rr);
        if (rc)
                return -EIO;
        memcpy(result, &rr->data, size);
        return (rr->response.code == 0x0001) ? 0 : -EIO;
}

static struct {
        struct chsc_header request;
        u32 word1;
        struct subchannel_id sid;
        u32 word3;
        struct chsc_header response;
        u32 word[11];
} __attribute__ ((packed)) siosl_area __attribute__ ((__aligned__(PAGE_SIZE)));

/*
 * Request that the channel subsystem write logging data for the
 * subchannel identified by @schid (siosl).
 */
int chsc_siosl(struct subchannel_id schid)
{
        unsigned long flags;
        int ccode;
        int rc;

        spin_lock_irqsave(&siosl_lock, flags);
        memset(&siosl_area, 0, sizeof(siosl_area));
        siosl_area.request.length = 0x0010;
        siosl_area.request.code = 0x0046;
        siosl_area.word1 = 0x80000000;
        siosl_area.sid = schid;

        ccode = chsc(&siosl_area);
        if (ccode > 0) {
                if (ccode == 3)
                        rc = -ENODEV;
                else
                        rc = -EBUSY;
                CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
                              schid.ssid, schid.sch_no, ccode);
                goto out;
        }
        rc = chsc_error_from_response(siosl_area.response.code);
        if (rc)
                CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
                              schid.ssid, schid.sch_no,
                              siosl_area.response.code);
        else
                CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
                              schid.ssid, schid.sch_no);
out:
        spin_unlock_irqrestore(&siosl_lock, flags);

        return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);