/*
 * S/390 common I/O routines -- channel subsystem call
 *
 * Copyright IBM Corp. 1999, 2010
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Arnd Bergmann (arndb@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);

/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);

struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8 unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));

int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
			goto out_unreg;
	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}

static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

	return 0;
}

static void s390_process_res_acc(struct chp_link *link)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * we will have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, link);
}

static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq&0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0]&0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0]&0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0]&0x000000ff);
}

struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}

static void chsc_process_sei_chp_avail(struct chsc_sei_area *sei_area)
{
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *data;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
		return;
	data = sei_area->ccdf;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
			continue;
		chpid.id = num;

		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		if (!chp) {
			chp_new(chpid);
			continue;
		}
		mutex_lock(&chp->lock);
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		mutex_unlock(&chp->lock);
	}
}

struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}

static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei_area *sei_area;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (!sei_page)
		return;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}

void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);
	}
}

static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	struct channel_path *chp = chpid_to_chp(chpid);

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
	if (on) {
		/* Try to update the channel path descriptor. */
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &chpid);
	} else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

	return 0;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}

int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

int
chsc_secm(struct channel_subsystem *css, int enable)
{
	int ret;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				__chsc_do_secm(css, 0);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	return ret;
}

int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m, void *page)
{
	struct chsc_scpd *scpd_area;
	int ccode, ret;

	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;

	memset(page, 0, PAGE_SIZE);
	scpd_area = page;
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret)
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	int ret;

	spin_lock_irq(&chsc_page_lock);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	struct cmg_chars *cmg_chars;
	int i, mask;

	cmg_chars = chp->cmg_chars;
	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		mask = 0x80 >> (i + 3);
		if (cmcv & mask)
			cmg_chars->values[i] = chars->values[i];
		else
			cmg_chars->values[i] = 0;
	}
}

int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	struct cmg_chars *cmg_chars;
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	chp->cmg_chars = NULL;
	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
	if (!cmg_chars)
		return -ENOMEM;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret) {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
		goto out;
	}
	if (scmc_area->not_valid) {
		chp->cmg = -1;
		chp->shared = -1;
		goto out;
	}
	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
	}
	chp->cmg_chars = cmg_chars;
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
out:
	spin_unlock_irq(&chsc_page_lock);
	if (!chp->cmg_chars)
		kfree(cmg_chars);

	return ret;
}

int __init chsc_init(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
		ret = -ENOMEM;
		goto out_err;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		goto out_err;
	return ret;
out_err:
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
	return ret;
}

void __init chsc_init_cleanup(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
}

int chsc_enable_facility(int operation_code)
{
	unsigned long flags;
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[508];
	} __attribute__ ((packed)) *scsc_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	spin_unlock_irq(&chsc_page_lock);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);

int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	return rc;
}

int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}

int chsc_siosl(struct subchannel_id schid)
{
	struct {
		struct chsc_header request;
		u32 word1;
		struct subchannel_id sid;
		u32 word3;
		struct chsc_header response;
		u32 word[11];
	} __attribute__ ((packed)) *siosl_area;
	unsigned long flags;
	int ccode;
	int rc;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	siosl_area = chsc_page;
	siosl_area->request.length = 0x0010;
	siosl_area->request.code = 0x0046;
	siosl_area->word1 = 0x80000000;
	siosl_area->sid = schid;

	ccode = chsc(siosl_area);
	if (ccode > 0) {
		if (ccode == 3)
			rc = -ENODEV;
		else
			rc = -EBUSY;
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);
		goto out;
	}
	rc = chsc_error_from_response(siosl_area->response.code);
	if (rc)
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      siosl_area->response.code);
	else
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);