/*
 * S/390 common I/O routines -- channel subsystem call
 *
 * Copyright IBM Corp. 1999,2012
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Arnd Bergmann (arndb@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);

/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	case 0x000b:
		return -EBUSY;
	case 0x0100:
	case 0x0102:
		return -ENOMEM;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);
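
/*
 * A minimal sketch of the usage pattern for chsc_error_from_response():
 * issue the channel-subsystem call, then translate the response code, as
 * the helpers in this file do.  (Illustrative only; "area" stands for any
 * of the request/response blocks defined below.)
 *
 *	ccode = chsc(area);
 *	if (ccode > 0)
 *		return (ccode == 3) ? -ENODEV : -EBUSY;
 *	ret = chsc_error_from_response(area->response.code);
 */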
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	/* first subchannel */
	u16 :16;
	u16 l_sch;	/* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3;	/* subchannel type */
	u8 zeroes    : 3;
	u8 unit_addr;		/* unit address */
	u16 devno;		/* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;		/* subchannel */
	u8 chpid[8];		/* chpids 0-7 */
	u16 fla[8];		/* full link addresses 0-7 */
} __attribute__ ((packed));

int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
			goto out_unreg;
	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}
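
/*
 * Note on the traversal helper used above and below: as used in this file,
 * for_each_subchannel_staged(fn_known, fn_new, data) presumably invokes
 * fn_known on subchannels that are already registered and fn_new on the
 * remaining subchannel ids, so callers can treat known and not-yet-known
 * devices differently (see css.c for the details).
 */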
static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

	return 0;
}

static void s390_process_res_acc(struct chp_link *link)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have, the less scanning
	 * we will have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, link);
}

static int __get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0] & 0x000000ff);
}

struct chsc_sei_nt0_area {
	u8  flags;
	u8  vf;				/* validity flags */
	u8  rs;				/* reporting source */
	u8  cc;				/* content code */
	u16 fla;			/* full link address */
	u16 rsid;			/* reporting source id */
	u32 reserved1;
	u32 reserved2;
	/* ccdf has to be big enough for a link-incident record */
	u8  ccdf[PAGE_SIZE - 24 - 16];	/* content-code dependent field */
} __packed;

struct chsc_sei_nt2_area {
	u8  flags;			/* p and v bit */
	u8  reserved1;
	u8  reserved2;
	u8  cc;				/* content code */
	u32 reserved3[13];
	u8  ccdf[PAGE_SIZE - 24 - 56];	/* content-code dependent field */
} __packed;

#define CHSC_SEI_NT0	0ULL
#define CHSC_SEI_NT2	(1ULL << 61)

struct chsc_sei {
	struct chsc_header request;
	u32 reserved1;
	u64 ntsm;			/* notification type mask */
	struct chsc_header response;
	u32 reserved2;
	union {
		struct chsc_sei_nt0_area nt0_area;
		struct chsc_sei_nt2_area nt2_area;
		u8 nt_area[PAGE_SIZE - 24];
	} u;
} __packed;
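
/*
 * Note on the notification-type mask used with the SEI block above: in the
 * machine's big-endian bit numbering, NT2 presumably corresponds to bit 2
 * of the 64-bit ntsm (hence 1ULL << 61), while NT0 is selected by an
 * all-zero mask.  A minimal sketch of requesting NT2 events only:
 *
 *	sei->ntsm = CHSC_SEI_NT2;
 *	if (chsc(sei) == 0 && sei->response.code == 0x0001)
 *		... process sei->u.nt2_area ...
 */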
static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}

static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
{
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *data;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
		return;
	data = sei_area->ccdf;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
			continue;
		chpid.id = num;

		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		if (!chp) {
			chp_new(chpid);
			continue;
		}
		mutex_lock(&chp->lock);
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		mutex_unlock(&chp->lock);
	}
}

struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	if (data->op > 2)
		/* Unknown operation code - don't index events[] out of bounds. */
		return;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
{
	int ret;

	CIO_CRW_EVENT(4, "chsc: scm change notification\n");
	if (sei_area->rs != 7)
		return;

	ret = scm_update_information();
	if (ret)
		CIO_CRW_EVENT(0, "chsc: updating change notification"
			      " failed (rc=%d).\n", ret);
}

static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
#ifdef CONFIG_PCI
	switch (sei_area->cc) {
	case 1:
		zpci_event_error(sei_area->ccdf);
		break;
	case 2:
		zpci_event_availability(sei_area->ccdf);
		break;
	default:
		CIO_CRW_EVENT(2, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
#endif
}

static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
{
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	case 12: /* scm change notification */
		chsc_process_sei_scm_change(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}
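
/*
 * Issue "store event information" and dispatch the stored events until the
 * pending-event flag (bit 0x80 in the nt0 flags byte) is no longer set.
 * If the overflow flag (0x40) is set, events may have been lost, so a full
 * evaluation of all subchannels is scheduled.
 */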
static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
{
	do {
		memset(sei, 0, sizeof(*sei));
		sei->request.length = 0x0010;
		sei->request.code = 0x000e;
		sei->ntsm = ntsm;

		if (chsc(sei))
			break;

		if (sei->response.code == 0x0001) {
			CIO_CRW_EVENT(2, "chsc: sei successful\n");

			/* Check if we might have lost some information. */
			if (sei->u.nt0_area.flags & 0x40) {
				CIO_CRW_EVENT(2, "chsc: event overflow\n");
				css_schedule_eval_all();
			}

			switch (sei->ntsm) {
			case CHSC_SEI_NT0:
				chsc_process_sei_nt0(&sei->u.nt0_area);
				return 1;
			case CHSC_SEI_NT2:
				chsc_process_sei_nt2(&sei->u.nt2_area);
				return 1;
			default:
				CIO_CRW_EVENT(2, "chsc: unhandled nt (nt=%08Lx)\n",
					      sei->ntsm);
				return 0;
			}
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei->response.code);
			break;
		}
	} while (sei->u.nt0_area.flags & 0x80);

	return 0;
}

static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei *sei;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (!sei_page)
		return;
	/*
	 * Access to sei_page is serialized through the machine check
	 * handler thread, so no need for locking.
	 */
	sei = sei_page;

	CIO_TRACE_EVENT(2, "prcss");

	/*
	 * The ntsm does not allow us to select NT0 and NT2 together. We
	 * need to first check for NT2, then additionally for NT0...
	 */
#ifdef CONFIG_PCI
	if (!__chsc_process_crw(sei, CHSC_SEI_NT2))
#endif
		__chsc_process_crw(sei, CHSC_SEI_NT0);
}
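
/*
 * chsc_process_crw() is registered as the CRW handler for the channel
 * subsystem (CRW_RSC_CSS) in chsc_init() below, via crw_register_handler().
 */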
void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);
	}
}

static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int __s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	struct channel_path *chp = chpid_to_chp(chpid);

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to.
	 */
	if (on) {
		/* Try to update the channel path descriptor. */
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &chpid);
	} else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

	return 0;
}
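
/*
 * A minimal sketch of a hypothetical chsc_chp_vary() caller (the sysfs
 * vary handling in chp.c presumably follows this pattern):
 *
 *	struct chp_id chpid;
 *
 *	chp_id_init(&chpid);
 *	chpid.id = 0x40;		// example channel-path id
 *	chsc_chp_vary(chpid, 0);	// vary offline
 *	chsc_chp_vary(chpid, 1);	// vary back online
 */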
static void chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}

int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

int chsc_secm(struct channel_subsystem *css, int enable)
{
	int ret;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				__chsc_do_secm(css, 0);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	return ret;
}
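
/*
 * Note on chsc_secm(): the two cub pages handed to the hardware in
 * __chsc_do_secm() must remain allocated for as long as measurement is
 * enabled, which is why chsc_secm() only frees them once cm_enabled is
 * cleared again.
 */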
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m, void *page)
{
	struct chsc_scpd *scpd_area;
	int ccode, ret;

	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;

	memset(page, 0, PAGE_SIZE);
	scpd_area = page;
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret)
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	int ret;

	spin_lock_irq(&chsc_page_lock);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}
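
/*
 * The two wrappers above illustrate the intended use of
 * chsc_determine_channel_path_desc(): both reuse the static chsc_page
 * under chsc_page_lock as the request block, with the fmt1 variant
 * additionally setting the c flag in the request.
 */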
static void chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
				      struct cmg_chars *chars)
{
	struct cmg_chars *cmg_chars;
	int i, mask;

	cmg_chars = chp->cmg_chars;
	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		mask = 0x80 >> (i + 3);
		if (cmcv & mask)
			cmg_chars->values[i] = chars->values[i];
		else
			cmg_chars->values[i] = 0;
	}
}

int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	struct cmg_chars *cmg_chars;
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	chp->cmg_chars = NULL;
	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
	if (!cmg_chars)
		return -ENOMEM;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret) {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
		goto out;
	}
	if (scmc_area->not_valid) {
		chp->cmg = -1;
		chp->shared = -1;
		goto out;
	}
	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
	}
	chp->cmg_chars = cmg_chars;
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
out:
	spin_unlock_irq(&chsc_page_lock);
	if (!chp->cmg_chars)
		kfree(cmg_chars);

	return ret;
}

int __init chsc_init(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
		ret = -ENOMEM;
		goto out_err;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		goto out_err;
	return ret;
out_err:
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
	return ret;
}

void __init chsc_init_cleanup(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
}

int chsc_enable_facility(int operation_code)
{
	unsigned long flags;
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}
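
/*
 * A minimal sketch of a hypothetical chsc_enable_facility() call, enabling
 * an optional facility by its sda operation code (css.c, for instance,
 * presumably enables multiple subchannel sets this way):
 *
 *	if (chsc_enable_facility(CHSC_SDA_OC_MSS))
 *		... facility not available, fall back ...
 */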
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[508];
	} __attribute__ ((packed)) *scsc_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	spin_unlock_irq(&chsc_page_lock);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);

int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	return rc;
}

int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}

int chsc_siosl(struct subchannel_id schid)
{
	struct {
		struct chsc_header request;
		u32 word1;
		struct subchannel_id sid;
		u32 word3;
		struct chsc_header response;
		u32 word[11];
	} __attribute__ ((packed)) *siosl_area;
	unsigned long flags;
	int ccode;
	int rc;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	siosl_area = chsc_page;
	siosl_area->request.length = 0x0010;
	siosl_area->request.code = 0x0046;
	siosl_area->word1 = 0x80000000;
	siosl_area->sid = schid;

	ccode = chsc(siosl_area);
	if (ccode > 0) {
		if (ccode == 3)
			rc = -ENODEV;
		else
			rc = -EBUSY;
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);
		goto out;
	}
	rc = chsc_error_from_response(siosl_area->response.code);
	if (rc)
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      siosl_area->response.code);
	else
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);
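
/*
 * A hypothetical chsc_siosl() caller, asking the channel subsystem to log
 * state for a single subchannel for diagnostic purposes:
 *
 *	if (chsc_siosl(sch->schid))
 *		dev_warn(&sch->dev, "logging failed\n");
 */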
/**
 * chsc_scm_info() - store SCM information (SSI)
 * @scm_area: request and response block for SSI
 * @token: continuation token
 *
 * Returns 0 on success.
 */
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
{
	int ccode, ret;

	memset(scm_area, 0, sizeof(*scm_area));
	scm_area->request.length = 0x0020;
	scm_area->request.code = 0x004C;
	scm_area->reqtok = token;

	ccode = chsc(scm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(scm_area->response.code);
	if (ret != 0)
		CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
			      scm_area->response.code);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_scm_info);