/*
 * S/390 common I/O routines -- channel subsystem call
 *
 * Copyright IBM Corp. 1999, 2010
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Arnd Bergmann (arndb@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);

/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	case 0x000b:
		return -EBUSY;
	case 0x0100:
	case 0x0102:
		return -ENOMEM;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);
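
/*
 * Illustrative sketch, not part of the driver: every CHSC helper in this
 * file follows the same pattern of issuing the command and translating
 * the response code, e.g.
 *
 *	ccode = chsc(area);
 *	if (ccode > 0)
 *		return (ccode == 3) ? -ENODEV : -EBUSY;
 *	ret = chsc_error_from_response(area->response.code);
 *	if (ret)
 *		return ret;
 *
 * where "area" stands for any request/response block that starts with a
 * struct chsc_header.
 */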

struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8 unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));

int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}
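
/*
 * path_mask and fla_valid_mask in the code above keep entry 0 in the
 * most significant bit, so entry i is tested with 0x80 >> i. For
 * example, a path_mask of 0xa0 marks chpid[0] and chpid[2] as valid.
 */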

static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
			goto out_unreg;
	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}

static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

	return 0;
}

static void s390_process_res_acc(struct chp_link *link)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have, the less scanning
	 * we will have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, link);
}
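
/*
 * As used above, for_each_subchannel_staged() runs its first callback
 * for subchannels already known to the common I/O layer and its second
 * callback for subchannel IDs that have no device yet; passing NULL as
 * the second callback, as in chsc_chp_offline(), skips the latter.
 */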

static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0] & 0x000000ff);
}

struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}

static void chsc_process_sei_chp_avail(struct chsc_sei_area *sei_area)
{
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *data;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
		return;
	data = sei_area->ccdf;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
			continue;
		chpid.id = num;

		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		if (!chp) {
			chp_new(chpid);
			continue;
		}
		mutex_lock(&chp->lock);
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		mutex_unlock(&chp->lock);
	}
}
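
/*
 * On the vf decoding in chsc_process_sei_res_acc() above: if either of
 * the two most significant bits of vf is set, fla carries a link
 * address. With both bits set it is a full link address and all 16 bits
 * are significant (mask 0xffff); otherwise only the high byte is
 * (mask 0xff00).
 */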

struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

static void chsc_process_sei_scm_change(struct chsc_sei_area *sei_area)
{
	int ret;

	CIO_CRW_EVENT(4, "chsc: scm change notification\n");
	if (sei_area->rs != 7)
		return;

	ret = scm_update_information();
	if (ret)
		CIO_CRW_EVENT(0, "chsc: updating change notification"
			      " failed (rc=%d).\n", ret);
}

static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	case 12: /* scm change notification */
		chsc_process_sei_scm_change(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}
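
/*
 * Two bits of sei_area->flags are interpreted by this code: 0x40 means
 * event information was lost, which chsc_process_sei() handles by
 * re-evaluating all subchannels, and 0x80 means more event information
 * is pending, which keeps the loop in chsc_process_crw() below issuing
 * further store-event-information requests.
 */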

static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei_area *sei_area;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (!sei_page)
		return;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}

void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);
	}
}
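
/*
 * chsc_process_crw() is not called directly: chsc_init() below
 * registers it as the CRW handler for the channel subsystem via
 * crw_register_handler(CRW_RSC_CSS, chsc_process_crw).
 */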

static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	struct channel_path *chp = chpid_to_chp(chpid);

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
	if (on) {
		/* Try to update the channel path descriptor. */
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &chpid);
	} else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

	return 0;
}
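
/*
 * Illustrative sketch (hypothetical caller): code handling a vary
 * request for a channel path would typically do
 *
 *	ret = chsc_chp_vary(chpid, 1);	// vary online
 *	ret = chsc_chp_vary(chpid, 0);	// vary offline
 *
 * and then rely on the scheduled subchannel evaluation to (re)validate
 * the affected devices.
 */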

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}

int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

int
chsc_secm(struct channel_subsystem *css, int enable)
{
	int ret;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				__chsc_do_secm(css, 0);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	return ret;
}
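
/*
 * In __chsc_do_secm() above, operation_code 0 enables and 1 disables
 * channel measurement. chsc_secm() allocates the two channel-utilization
 * blocks (cub_addr1/cub_addr2) only on the enable path and frees them
 * whenever measurement ends up disabled, including the rollback taken
 * when chsc_add_cmg_attr() fails.
 */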

int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m, void *page)
{
	struct chsc_scpd *scpd_area;
	int ccode, ret;

	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;

	memset(page, 0, PAGE_SIZE);
	scpd_area = page;
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret)
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	int ret;

	spin_lock_irq(&chsc_page_lock);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}
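
/*
 * Illustrative sketch (hypothetical external caller) of the exported
 * chsc_determine_channel_path_desc(), which needs a 4K scratch page as
 * the wrappers above demonstrate:
 *
 *	void *page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, page);
 *	if (!ret) {
 *		// evaluate the response block embedded in the page
 *	}
 *	free_page((unsigned long)page);
 */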

static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	struct cmg_chars *cmg_chars;
	int i, mask;

	cmg_chars = chp->cmg_chars;
	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		mask = 0x80 >> (i + 3);
		if (cmcv & mask)
			cmg_chars->values[i] = chars->values[i];
		else
			cmg_chars->values[i] = 0;
	}
}

int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	struct cmg_chars *cmg_chars;
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	chp->cmg_chars = NULL;
	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
	if (!cmg_chars)
		return -ENOMEM;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret) {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
		goto out;
	}
	if (scmc_area->not_valid) {
		chp->cmg = -1;
		chp->shared = -1;
		goto out;
	}
	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
	}
	chp->cmg_chars = cmg_chars;
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
out:
	spin_unlock_irq(&chsc_page_lock);
	if (!chp->cmg_chars)
		kfree(cmg_chars);

	return ret;
}

int __init chsc_init(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
		ret = -ENOMEM;
		goto out_err;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		goto out_err;
	return ret;
out_err:
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
	return ret;
}

void __init chsc_init_cleanup(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
}

int chsc_enable_facility(int operation_code)
{
	unsigned long flags;
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[508];
	} __attribute__ ((packed)) *scsc_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	spin_unlock_irq(&chsc_page_lock);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
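
/*
 * chsc_determine_css_characteristics() fills the two exported
 * characteristics structures above; feature bits such as
 * css_general_characteristics.fcs and .cib are consulted earlier in
 * this file by chsc_determine_channel_path_desc().
 */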
1079 */ 1080 int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token) 1081 { 1082 int ccode, ret; 1083 1084 memset(scm_area, 0, sizeof(*scm_area)); 1085 scm_area->request.length = 0x0020; 1086 scm_area->request.code = 0x004C; 1087 scm_area->reqtok = token; 1088 1089 ccode = chsc(scm_area); 1090 if (ccode > 0) { 1091 ret = (ccode == 3) ? -ENODEV : -EBUSY; 1092 goto out; 1093 } 1094 ret = chsc_error_from_response(scm_area->response.code); 1095 if (ret != 0) 1096 CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n", 1097 scm_area->response.code); 1098 out: 1099 return ret; 1100 } 1101 EXPORT_SYMBOL_GPL(chsc_scm_info); 1102