1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * S/390 common I/O routines -- channel subsystem call 4 * 5 * Copyright IBM Corp. 1999,2012 6 * Author(s): Ingo Adlung (adlung@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com) 8 * Arnd Bergmann (arndb@de.ibm.com) 9 */ 10 11 #define KMSG_COMPONENT "cio" 12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 14 #include <linux/export.h> 15 #include <linux/module.h> 16 #include <linux/slab.h> 17 #include <linux/init.h> 18 #include <linux/device.h> 19 #include <linux/mutex.h> 20 #include <linux/pci.h> 21 22 #include <asm/cio.h> 23 #include <asm/chpid.h> 24 #include <asm/chsc.h> 25 #include <asm/crw.h> 26 #include <asm/isc.h> 27 #include <asm/ebcdic.h> 28 29 #include "css.h" 30 #include "cio.h" 31 #include "cio_debug.h" 32 #include "ioasm.h" 33 #include "chp.h" 34 #include "chsc.h" 35 36 static void *sei_page; 37 static void *chsc_page; 38 static DEFINE_SPINLOCK(chsc_page_lock); 39 40 #define SEI_VF_FLA 0xc0 /* VF flag for Full Link Address */ 41 #define SEI_RS_CHPID 0x4 /* 4 in RS field indicates CHPID */ 42 43 static BLOCKING_NOTIFIER_HEAD(chsc_notifiers); 44 45 int chsc_notifier_register(struct notifier_block *nb) 46 { 47 return blocking_notifier_chain_register(&chsc_notifiers, nb); 48 } 49 EXPORT_SYMBOL(chsc_notifier_register); 50 51 int chsc_notifier_unregister(struct notifier_block *nb) 52 { 53 return blocking_notifier_chain_unregister(&chsc_notifiers, nb); 54 } 55 EXPORT_SYMBOL(chsc_notifier_unregister); 56 57 /** 58 * chsc_error_from_response() - convert a chsc response to an error 59 * @response: chsc response code 60 * 61 * Returns an appropriate Linux error code for @response. 62 */ 63 int chsc_error_from_response(int response) 64 { 65 switch (response) { 66 case 0x0001: 67 return 0; 68 case 0x0002: 69 case 0x0003: 70 case 0x0006: 71 case 0x0007: 72 case 0x0008: 73 case 0x000a: 74 case 0x0104: 75 return -EINVAL; 76 case 0x0004: 77 case 0x0106: /* "Wrong Channel Parm" for the op 0x003d */ 78 return -EOPNOTSUPP; 79 case 0x000b: 80 case 0x0107: /* "Channel busy" for the op 0x003d */ 81 return -EBUSY; 82 case 0x0100: 83 case 0x0102: 84 return -ENOMEM; 85 case 0x0108: /* "HW limit exceeded" for the op 0x003d */ 86 return -EUSERS; 87 default: 88 return -EIO; 89 } 90 } 91 EXPORT_SYMBOL_GPL(chsc_error_from_response); 92 93 struct chsc_ssd_area { 94 struct chsc_header request; 95 u16 :10; 96 u16 ssid:2; 97 u16 :4; 98 u16 f_sch; /* first subchannel */ 99 u16 :16; 100 u16 l_sch; /* last subchannel */ 101 u32 :32; 102 struct chsc_header response; 103 u32 :32; 104 u8 sch_valid : 1; 105 u8 dev_valid : 1; 106 u8 st : 3; /* subchannel type */ 107 u8 zeroes : 3; 108 u8 unit_addr; /* unit address */ 109 u16 devno; /* device number */ 110 u8 path_mask; 111 u8 fla_valid_mask; 112 u16 sch; /* subchannel */ 113 u8 chpid[8]; /* chpids 0-7 */ 114 u16 fla[8]; /* full link addresses 0-7 */ 115 } __packed __aligned(PAGE_SIZE); 116 117 int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) 118 { 119 struct chsc_ssd_area *ssd_area; 120 unsigned long flags; 121 int ccode; 122 int ret; 123 int i; 124 int mask; 125 126 spin_lock_irqsave(&chsc_page_lock, flags); 127 memset(chsc_page, 0, PAGE_SIZE); 128 ssd_area = chsc_page; 129 ssd_area->request.length = 0x0010; 130 ssd_area->request.code = 0x0004; 131 ssd_area->ssid = schid.ssid; 132 ssd_area->f_sch = schid.sch_no; 133 ssd_area->l_sch = schid.sch_no; 134 135 ccode = chsc(ssd_area); 136 /* Check response. */ 137 if (ccode > 0) { 138 ret = (ccode == 3) ? -ENODEV : -EBUSY; 139 goto out; 140 } 141 ret = chsc_error_from_response(ssd_area->response.code); 142 if (ret != 0) { 143 CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n", 144 schid.ssid, schid.sch_no, 145 ssd_area->response.code); 146 goto out; 147 } 148 if (!ssd_area->sch_valid) { 149 ret = -ENODEV; 150 goto out; 151 } 152 /* Copy data */ 153 ret = 0; 154 memset(ssd, 0, sizeof(struct chsc_ssd_info)); 155 if ((ssd_area->st != SUBCHANNEL_TYPE_IO) && 156 (ssd_area->st != SUBCHANNEL_TYPE_MSG)) 157 goto out; 158 ssd->path_mask = ssd_area->path_mask; 159 ssd->fla_valid_mask = ssd_area->fla_valid_mask; 160 for (i = 0; i < 8; i++) { 161 mask = 0x80 >> i; 162 if (ssd_area->path_mask & mask) { 163 chp_id_init(&ssd->chpid[i]); 164 ssd->chpid[i].id = ssd_area->chpid[i]; 165 } 166 if (ssd_area->fla_valid_mask & mask) 167 ssd->fla[i] = ssd_area->fla[i]; 168 } 169 out: 170 spin_unlock_irqrestore(&chsc_page_lock, flags); 171 return ret; 172 } 173 174 /** 175 * chsc_ssqd() - store subchannel QDIO data (SSQD) 176 * @schid: id of the subchannel on which SSQD is performed 177 * @ssqd: request and response block for SSQD 178 * 179 * Returns 0 on success. 180 */ 181 int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd) 182 { 183 memset(ssqd, 0, sizeof(*ssqd)); 184 ssqd->request.length = 0x0010; 185 ssqd->request.code = 0x0024; 186 ssqd->first_sch = schid.sch_no; 187 ssqd->last_sch = schid.sch_no; 188 ssqd->ssid = schid.ssid; 189 190 if (chsc(ssqd)) 191 return -EIO; 192 193 return chsc_error_from_response(ssqd->response.code); 194 } 195 EXPORT_SYMBOL_GPL(chsc_ssqd); 196 197 /** 198 * chsc_sadc() - set adapter device controls (SADC) 199 * @schid: id of the subchannel on which SADC is performed 200 * @scssc: request and response block for SADC 201 * @summary_indicator_addr: summary indicator address 202 * @subchannel_indicator_addr: subchannel indicator address 203 * @isc: Interruption Subclass for this subchannel 204 * 205 * Returns 0 on success. 206 */ 207 int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc, 208 dma64_t summary_indicator_addr, dma64_t subchannel_indicator_addr, u8 isc) 209 { 210 memset(scssc, 0, sizeof(*scssc)); 211 scssc->request.length = 0x0fe0; 212 scssc->request.code = 0x0021; 213 scssc->operation_code = 0; 214 215 scssc->summary_indicator_addr = summary_indicator_addr; 216 scssc->subchannel_indicator_addr = subchannel_indicator_addr; 217 218 scssc->ks = PAGE_DEFAULT_KEY >> 4; 219 scssc->kc = PAGE_DEFAULT_KEY >> 4; 220 scssc->isc = isc; 221 scssc->schid = schid; 222 223 /* enable the time delay disablement facility */ 224 if (css_general_characteristics.aif_tdd) 225 scssc->word_with_d_bit = 0x10000000; 226 227 if (chsc(scssc)) 228 return -EIO; 229 230 return chsc_error_from_response(scssc->response.code); 231 } 232 EXPORT_SYMBOL_GPL(chsc_sadc); 233 234 static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data) 235 { 236 spin_lock_irq(&sch->lock); 237 if (sch->driver && sch->driver->chp_event) 238 if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0) 239 goto out_unreg; 240 spin_unlock_irq(&sch->lock); 241 return 0; 242 243 out_unreg: 244 sch->lpm = 0; 245 spin_unlock_irq(&sch->lock); 246 css_schedule_eval(sch->schid); 247 return 0; 248 } 249 250 void chsc_chp_offline(struct chp_id chpid) 251 { 252 struct channel_path *chp = chpid_to_chp(chpid); 253 struct chp_link link; 254 char dbf_txt[15]; 255 256 sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); 257 CIO_TRACE_EVENT(2, dbf_txt); 258 259 if (chp_get_status(chpid) <= 0) 260 return; 261 memset(&link, 0, sizeof(struct chp_link)); 262 link.chpid = chpid; 263 /* Wait until previous actions have settled. */ 264 css_wait_for_slow_path(); 265 266 mutex_lock(&chp->lock); 267 chp_update_desc(chp); 268 mutex_unlock(&chp->lock); 269 270 for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link); 271 } 272 273 static int __s390_process_res_acc(struct subchannel *sch, void *data) 274 { 275 spin_lock_irq(&sch->lock); 276 if (sch->driver && sch->driver->chp_event) 277 sch->driver->chp_event(sch, data, CHP_ONLINE); 278 spin_unlock_irq(&sch->lock); 279 280 return 0; 281 } 282 283 static void s390_process_res_acc(struct chp_link *link) 284 { 285 char dbf_txt[15]; 286 287 sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid, 288 link->chpid.id); 289 CIO_TRACE_EVENT( 2, dbf_txt); 290 if (link->fla != 0) { 291 sprintf(dbf_txt, "fla%x", link->fla); 292 CIO_TRACE_EVENT( 2, dbf_txt); 293 } 294 /* Wait until previous actions have settled. */ 295 css_wait_for_slow_path(); 296 /* 297 * I/O resources may have become accessible. 298 * Scan through all subchannels that may be concerned and 299 * do a validation on those. 300 * The more information we have (info), the less scanning 301 * will we have to do. 302 */ 303 for_each_subchannel_staged(__s390_process_res_acc, NULL, link); 304 css_schedule_reprobe(); 305 } 306 307 static int process_fces_event(struct subchannel *sch, void *data) 308 { 309 spin_lock_irq(&sch->lock); 310 if (sch->driver && sch->driver->chp_event) 311 sch->driver->chp_event(sch, data, CHP_FCES_EVENT); 312 spin_unlock_irq(&sch->lock); 313 return 0; 314 } 315 316 struct chsc_sei_nt0_area { 317 u8 flags; 318 u8 vf; /* validity flags */ 319 u8 rs; /* reporting source */ 320 u8 cc; /* content code */ 321 u16 fla; /* full link address */ 322 u16 rsid; /* reporting source id */ 323 u32 reserved1; 324 u32 reserved2; 325 /* ccdf has to be big enough for a link-incident record */ 326 u8 ccdf[PAGE_SIZE - 24 - 16]; /* content-code dependent field */ 327 } __packed; 328 329 struct chsc_sei_nt2_area { 330 u8 flags; /* p and v bit */ 331 u8 reserved1; 332 u8 reserved2; 333 u8 cc; /* content code */ 334 u32 reserved3[13]; 335 u8 ccdf[PAGE_SIZE - 24 - 56]; /* content-code dependent field */ 336 } __packed; 337 338 #define CHSC_SEI_NT0 (1ULL << 63) 339 #define CHSC_SEI_NT2 (1ULL << 61) 340 341 struct chsc_sei { 342 struct chsc_header request; 343 u32 reserved1; 344 u64 ntsm; /* notification type mask */ 345 struct chsc_header response; 346 u32 :24; 347 u8 nt; 348 union { 349 struct chsc_sei_nt0_area nt0_area; 350 struct chsc_sei_nt2_area nt2_area; 351 u8 nt_area[PAGE_SIZE - 24]; 352 } u; 353 } __packed __aligned(PAGE_SIZE); 354 355 /* 356 * Link Incident Record as defined in SA22-7202, "ESCON I/O Interface" 357 */ 358 359 #define LIR_IQ_CLASS_INFO 0 360 #define LIR_IQ_CLASS_DEGRADED 1 361 #define LIR_IQ_CLASS_NOT_OPERATIONAL 2 362 363 struct lir { 364 struct { 365 u32 null:1; 366 u32 reserved:3; 367 u32 class:2; 368 u32 reserved2:2; 369 } __packed iq; 370 u32 ic:8; 371 u32 reserved:16; 372 struct node_descriptor incident_node; 373 struct node_descriptor attached_node; 374 u8 reserved2[32]; 375 } __packed; 376 377 #define PARAMS_LEN 10 /* PARAMS=xx,xxxxxx */ 378 #define NODEID_LEN 35 /* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */ 379 380 /* Copy EBCDIC text, convert to ASCII and optionally add delimiter. */ 381 static char *store_ebcdic(char *dest, const char *src, unsigned long len, 382 char delim) 383 { 384 memcpy(dest, src, len); 385 EBCASC(dest, len); 386 387 if (delim) 388 dest[len++] = delim; 389 390 return dest + len; 391 } 392 393 static void chsc_link_from_sei(struct chp_link *link, 394 struct chsc_sei_nt0_area *sei_area) 395 { 396 if ((sei_area->vf & SEI_VF_FLA) != 0) { 397 link->fla = sei_area->fla; 398 link->fla_mask = ((sei_area->vf & SEI_VF_FLA) == SEI_VF_FLA) ? 399 0xffff : 0xff00; 400 } 401 } 402 403 /* Format node ID and parameters for output in LIR log message. */ 404 static void format_node_data(char *params, char *id, struct node_descriptor *nd) 405 { 406 memset(params, 0, PARAMS_LEN); 407 memset(id, 0, NODEID_LEN); 408 409 if (nd->validity != ND_VALIDITY_VALID) { 410 strscpy(params, "n/a", PARAMS_LEN); 411 strscpy(id, "n/a", NODEID_LEN); 412 return; 413 } 414 415 /* PARAMS=xx,xxxxxx */ 416 snprintf(params, PARAMS_LEN, "%02x,%06x", nd->byte0, nd->params); 417 /* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */ 418 id = store_ebcdic(id, nd->type, sizeof(nd->type), '/'); 419 id = store_ebcdic(id, nd->model, sizeof(nd->model), ','); 420 id = store_ebcdic(id, nd->manufacturer, sizeof(nd->manufacturer), '.'); 421 id = store_ebcdic(id, nd->plant, sizeof(nd->plant), 0); 422 id = store_ebcdic(id, nd->seq, sizeof(nd->seq), ','); 423 sprintf(id, "%04X", nd->tag); 424 } 425 426 static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area) 427 { 428 struct lir *lir = (struct lir *) &sei_area->ccdf; 429 char iuparams[PARAMS_LEN], iunodeid[NODEID_LEN], auparams[PARAMS_LEN], 430 aunodeid[NODEID_LEN]; 431 432 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x, iq=%02x)\n", 433 sei_area->rs, sei_area->rsid, sei_area->ccdf[0]); 434 435 /* Ignore NULL Link Incident Records. */ 436 if (lir->iq.null) 437 return; 438 439 /* Inform user that a link requires maintenance actions because it has 440 * become degraded or not operational. Note that this log message is 441 * the primary intention behind a Link Incident Record. */ 442 443 format_node_data(iuparams, iunodeid, &lir->incident_node); 444 format_node_data(auparams, aunodeid, &lir->attached_node); 445 446 switch (lir->iq.class) { 447 case LIR_IQ_CLASS_DEGRADED: 448 pr_warn("Link degraded: RS=%02x RSID=%04x IC=%02x " 449 "IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n", 450 sei_area->rs, sei_area->rsid, lir->ic, iuparams, 451 iunodeid, auparams, aunodeid); 452 break; 453 case LIR_IQ_CLASS_NOT_OPERATIONAL: 454 pr_err("Link stopped: RS=%02x RSID=%04x IC=%02x " 455 "IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n", 456 sei_area->rs, sei_area->rsid, lir->ic, iuparams, 457 iunodeid, auparams, aunodeid); 458 break; 459 default: 460 break; 461 } 462 } 463 464 static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area) 465 { 466 struct channel_path *chp; 467 struct chp_link link; 468 struct chp_id chpid; 469 int status; 470 471 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " 472 "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); 473 if (sei_area->rs != 4) 474 return; 475 chp_id_init(&chpid); 476 chpid.id = sei_area->rsid; 477 /* allocate a new channel path structure, if needed */ 478 status = chp_get_status(chpid); 479 if (!status) 480 return; 481 482 if (status < 0) { 483 chp_new(chpid); 484 } else { 485 chp = chpid_to_chp(chpid); 486 mutex_lock(&chp->lock); 487 chp_update_desc(chp); 488 mutex_unlock(&chp->lock); 489 } 490 memset(&link, 0, sizeof(struct chp_link)); 491 link.chpid = chpid; 492 chsc_link_from_sei(&link, sei_area); 493 s390_process_res_acc(&link); 494 } 495 496 static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area) 497 { 498 struct channel_path *chp; 499 struct chp_id chpid; 500 u8 *data; 501 int num; 502 503 CIO_CRW_EVENT(4, "chsc: channel path availability information\n"); 504 if (sei_area->rs != 0) 505 return; 506 data = sei_area->ccdf; 507 chp_id_init(&chpid); 508 for (num = 0; num <= __MAX_CHPID; num++) { 509 if (!chp_test_bit(data, num)) 510 continue; 511 chpid.id = num; 512 513 CIO_CRW_EVENT(4, "Update information for channel path " 514 "%x.%02x\n", chpid.cssid, chpid.id); 515 chp = chpid_to_chp(chpid); 516 if (!chp) { 517 chp_new(chpid); 518 continue; 519 } 520 mutex_lock(&chp->lock); 521 chp_update_desc(chp); 522 mutex_unlock(&chp->lock); 523 } 524 } 525 526 struct chp_config_data { 527 u8 map[32]; 528 u8 op; 529 u8 pc; 530 }; 531 532 static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area) 533 { 534 struct chp_config_data *data; 535 struct chp_id chpid; 536 int num; 537 char *events[3] = {"configure", "deconfigure", "cancel deconfigure"}; 538 539 CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n"); 540 if (sei_area->rs != 0) 541 return; 542 data = (struct chp_config_data *) &(sei_area->ccdf); 543 chp_id_init(&chpid); 544 for (num = 0; num <= __MAX_CHPID; num++) { 545 if (!chp_test_bit(data->map, num)) 546 continue; 547 chpid.id = num; 548 pr_notice("Processing %s for channel path %x.%02x\n", 549 events[data->op], chpid.cssid, chpid.id); 550 switch (data->op) { 551 case 0: 552 chp_cfg_schedule(chpid, 1); 553 break; 554 case 1: 555 chp_cfg_schedule(chpid, 0); 556 break; 557 case 2: 558 chp_cfg_cancel_deconfigure(chpid); 559 break; 560 } 561 } 562 } 563 564 static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area) 565 { 566 int ret; 567 568 CIO_CRW_EVENT(4, "chsc: scm change notification\n"); 569 if (sei_area->rs != 7) 570 return; 571 572 ret = scm_update_information(); 573 if (ret) 574 CIO_CRW_EVENT(0, "chsc: updating change notification" 575 " failed (rc=%d).\n", ret); 576 } 577 578 static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area) 579 { 580 int ret; 581 582 CIO_CRW_EVENT(4, "chsc: scm available information\n"); 583 if (sei_area->rs != 7) 584 return; 585 586 ret = scm_process_availability_information(); 587 if (ret) 588 CIO_CRW_EVENT(0, "chsc: process availability information" 589 " failed (rc=%d).\n", ret); 590 } 591 592 static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area) 593 { 594 CIO_CRW_EVENT(3, "chsc: ap config changed\n"); 595 if (sei_area->rs != 5) 596 return; 597 598 blocking_notifier_call_chain(&chsc_notifiers, 599 CHSC_NOTIFY_AP_CFG, NULL); 600 } 601 602 static void chsc_process_sei_fces_event(struct chsc_sei_nt0_area *sei_area) 603 { 604 struct chp_link link; 605 struct chp_id chpid; 606 struct channel_path *chp; 607 608 CIO_CRW_EVENT(4, 609 "chsc: FCES status notification (rs=%02x, rs_id=%04x, FCES-status=%x)\n", 610 sei_area->rs, sei_area->rsid, sei_area->ccdf[0]); 611 612 if (sei_area->rs != SEI_RS_CHPID) 613 return; 614 chp_id_init(&chpid); 615 chpid.id = sei_area->rsid; 616 617 /* Ignore the event on unknown/invalid chp */ 618 chp = chpid_to_chp(chpid); 619 if (!chp) 620 return; 621 622 memset(&link, 0, sizeof(struct chp_link)); 623 link.chpid = chpid; 624 chsc_link_from_sei(&link, sei_area); 625 626 for_each_subchannel_staged(process_fces_event, NULL, &link); 627 } 628 629 static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) 630 { 631 switch (sei_area->cc) { 632 case 1: 633 zpci_event_error(sei_area->ccdf); 634 break; 635 case 2: 636 zpci_event_availability(sei_area->ccdf); 637 break; 638 default: 639 CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n", 640 sei_area->cc); 641 break; 642 } 643 } 644 645 static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) 646 { 647 /* which kind of information was stored? */ 648 switch (sei_area->cc) { 649 case 1: /* link incident*/ 650 chsc_process_sei_link_incident(sei_area); 651 break; 652 case 2: /* i/o resource accessibility */ 653 chsc_process_sei_res_acc(sei_area); 654 break; 655 case 3: /* ap config changed */ 656 chsc_process_sei_ap_cfg_chg(sei_area); 657 break; 658 case 7: /* channel-path-availability information */ 659 chsc_process_sei_chp_avail(sei_area); 660 break; 661 case 8: /* channel-path-configuration notification */ 662 chsc_process_sei_chp_config(sei_area); 663 break; 664 case 12: /* scm change notification */ 665 chsc_process_sei_scm_change(sei_area); 666 break; 667 case 14: /* scm available notification */ 668 chsc_process_sei_scm_avail(sei_area); 669 break; 670 case 15: /* FCES event notification */ 671 chsc_process_sei_fces_event(sei_area); 672 break; 673 default: /* other stuff */ 674 CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n", 675 sei_area->cc); 676 break; 677 } 678 679 /* Check if we might have lost some information. */ 680 if (sei_area->flags & 0x40) { 681 CIO_CRW_EVENT(2, "chsc: event overflow\n"); 682 css_schedule_eval_all(); 683 } 684 } 685 686 static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm) 687 { 688 static int ntsm_unsupported; 689 690 while (true) { 691 memset(sei, 0, sizeof(*sei)); 692 sei->request.length = 0x0010; 693 sei->request.code = 0x000e; 694 if (!ntsm_unsupported) 695 sei->ntsm = ntsm; 696 697 if (chsc(sei)) 698 break; 699 700 if (sei->response.code != 0x0001) { 701 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n", 702 sei->response.code, sei->ntsm); 703 704 if (sei->response.code == 3 && sei->ntsm) { 705 /* Fallback for old firmware. */ 706 ntsm_unsupported = 1; 707 continue; 708 } 709 break; 710 } 711 712 CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt); 713 switch (sei->nt) { 714 case 0: 715 chsc_process_sei_nt0(&sei->u.nt0_area); 716 break; 717 case 2: 718 chsc_process_sei_nt2(&sei->u.nt2_area); 719 break; 720 default: 721 CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt); 722 break; 723 } 724 725 if (!(sei->u.nt0_area.flags & 0x80)) 726 break; 727 } 728 } 729 730 /* 731 * Handle channel subsystem related CRWs. 732 * Use store event information to find out what's going on. 733 * 734 * Note: Access to sei_page is serialized through machine check handler 735 * thread, so no need for locking. 736 */ 737 static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) 738 { 739 struct chsc_sei *sei = sei_page; 740 741 if (overflow) { 742 css_schedule_eval_all(); 743 return; 744 } 745 CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, " 746 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", 747 crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, 748 crw0->erc, crw0->rsid); 749 750 CIO_TRACE_EVENT(2, "prcss"); 751 chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2); 752 } 753 754 void chsc_chp_online(struct chp_id chpid) 755 { 756 struct channel_path *chp = chpid_to_chp(chpid); 757 struct chp_link link; 758 char dbf_txt[15]; 759 760 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); 761 CIO_TRACE_EVENT(2, dbf_txt); 762 763 if (chp_get_status(chpid) != 0) { 764 memset(&link, 0, sizeof(struct chp_link)); 765 link.chpid = chpid; 766 /* Wait until previous actions have settled. */ 767 css_wait_for_slow_path(); 768 769 mutex_lock(&chp->lock); 770 chp_update_desc(chp); 771 mutex_unlock(&chp->lock); 772 773 for_each_subchannel_staged(__s390_process_res_acc, NULL, 774 &link); 775 css_schedule_reprobe(); 776 } 777 } 778 779 static void __s390_subchannel_vary_chpid(struct subchannel *sch, 780 struct chp_id chpid, int on) 781 { 782 unsigned long flags; 783 struct chp_link link; 784 785 memset(&link, 0, sizeof(struct chp_link)); 786 link.chpid = chpid; 787 spin_lock_irqsave(&sch->lock, flags); 788 if (sch->driver && sch->driver->chp_event) 789 sch->driver->chp_event(sch, &link, 790 on ? CHP_VARY_ON : CHP_VARY_OFF); 791 spin_unlock_irqrestore(&sch->lock, flags); 792 } 793 794 static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data) 795 { 796 struct chp_id *chpid = data; 797 798 __s390_subchannel_vary_chpid(sch, *chpid, 0); 799 return 0; 800 } 801 802 static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data) 803 { 804 struct chp_id *chpid = data; 805 806 __s390_subchannel_vary_chpid(sch, *chpid, 1); 807 return 0; 808 } 809 810 /** 811 * chsc_chp_vary - propagate channel-path vary operation to subchannels 812 * @chpid: channl-path ID 813 * @on: non-zero for vary online, zero for vary offline 814 */ 815 int chsc_chp_vary(struct chp_id chpid, int on) 816 { 817 struct channel_path *chp = chpid_to_chp(chpid); 818 819 /* 820 * Redo PathVerification on the devices the chpid connects to 821 */ 822 if (on) { 823 /* Try to update the channel path description. */ 824 chp_update_desc(chp); 825 for_each_subchannel_staged(s390_subchannel_vary_chpid_on, 826 NULL, &chpid); 827 css_schedule_reprobe(); 828 } else 829 for_each_subchannel_staged(s390_subchannel_vary_chpid_off, 830 NULL, &chpid); 831 832 return 0; 833 } 834 835 static void 836 chsc_remove_cmg_attr(struct channel_subsystem *css) 837 { 838 int i; 839 840 for (i = 0; i <= __MAX_CHPID; i++) { 841 if (!css->chps[i]) 842 continue; 843 chp_remove_cmg_attr(css->chps[i]); 844 } 845 } 846 847 static int 848 chsc_add_cmg_attr(struct channel_subsystem *css) 849 { 850 int i, ret; 851 852 ret = 0; 853 for (i = 0; i <= __MAX_CHPID; i++) { 854 if (!css->chps[i]) 855 continue; 856 ret = chp_add_cmg_attr(css->chps[i]); 857 if (ret) 858 goto cleanup; 859 } 860 return ret; 861 cleanup: 862 while (i--) { 863 if (!css->chps[i]) 864 continue; 865 chp_remove_cmg_attr(css->chps[i]); 866 } 867 return ret; 868 } 869 870 int __chsc_do_secm(struct channel_subsystem *css, int enable) 871 { 872 struct { 873 struct chsc_header request; 874 u32 operation_code : 2; 875 u32 : 1; 876 u32 e : 1; 877 u32 : 28; 878 u32 key : 4; 879 u32 : 28; 880 dma64_t cub[CSS_NUM_CUB_PAGES]; 881 dma64_t ecub[CSS_NUM_ECUB_PAGES]; 882 u32 reserved[5]; 883 struct chsc_header response; 884 u32 status : 8; 885 u32 : 4; 886 u32 fmt : 4; 887 u32 : 16; 888 } __packed *secm_area; 889 unsigned long flags; 890 int ret, ccode, i; 891 892 spin_lock_irqsave(&chsc_page_lock, flags); 893 memset(chsc_page, 0, PAGE_SIZE); 894 secm_area = chsc_page; 895 secm_area->request.length = 0x0050; 896 secm_area->request.code = 0x0016; 897 898 secm_area->key = PAGE_DEFAULT_KEY >> 4; 899 secm_area->e = 1; 900 901 for (i = 0; i < CSS_NUM_CUB_PAGES; i++) 902 secm_area->cub[i] = (__force dma64_t)virt_to_dma32(css->cub[i]); 903 for (i = 0; i < CSS_NUM_ECUB_PAGES; i++) 904 secm_area->ecub[i] = virt_to_dma64(css->ecub[i]); 905 906 secm_area->operation_code = enable ? 0 : 1; 907 908 ccode = chsc(secm_area); 909 if (ccode > 0) { 910 ret = (ccode == 3) ? -ENODEV : -EBUSY; 911 goto out; 912 } 913 914 switch (secm_area->response.code) { 915 case 0x0102: 916 case 0x0103: 917 ret = -EINVAL; 918 break; 919 default: 920 ret = chsc_error_from_response(secm_area->response.code); 921 } 922 if (ret != 0) 923 CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n", 924 secm_area->response.code); 925 out: 926 spin_unlock_irqrestore(&chsc_page_lock, flags); 927 return ret; 928 } 929 930 static int cub_alloc(struct channel_subsystem *css) 931 { 932 int i; 933 934 for (i = 0; i < CSS_NUM_CUB_PAGES; i++) { 935 css->cub[i] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 936 if (!css->cub[i]) 937 return -ENOMEM; 938 } 939 for (i = 0; i < CSS_NUM_ECUB_PAGES; i++) { 940 css->ecub[i] = (void *)get_zeroed_page(GFP_KERNEL); 941 if (!css->ecub[i]) 942 return -ENOMEM; 943 } 944 945 return 0; 946 } 947 948 static void cub_free(struct channel_subsystem *css) 949 { 950 int i; 951 952 for (i = 0; i < CSS_NUM_CUB_PAGES; i++) { 953 free_page((unsigned long)css->cub[i]); 954 css->cub[i] = NULL; 955 } 956 for (i = 0; i < CSS_NUM_ECUB_PAGES; i++) { 957 free_page((unsigned long)css->ecub[i]); 958 css->ecub[i] = NULL; 959 } 960 } 961 962 int 963 chsc_secm(struct channel_subsystem *css, int enable) 964 { 965 int ret; 966 967 if (enable && !css->cm_enabled) { 968 ret = cub_alloc(css); 969 if (ret) 970 goto out; 971 } 972 ret = __chsc_do_secm(css, enable); 973 if (!ret) { 974 css->cm_enabled = enable; 975 if (css->cm_enabled) { 976 ret = chsc_add_cmg_attr(css); 977 if (ret) { 978 __chsc_do_secm(css, 0); 979 css->cm_enabled = 0; 980 } 981 } else 982 chsc_remove_cmg_attr(css); 983 } 984 985 out: 986 if (!css->cm_enabled) 987 cub_free(css); 988 989 return ret; 990 } 991 992 int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, 993 int c, int m, void *page) 994 { 995 struct chsc_scpd *scpd_area; 996 int ccode, ret; 997 998 if ((rfmt == 1 || rfmt == 0) && c == 1 && 999 !css_general_characteristics.fcs) 1000 return -EINVAL; 1001 if ((rfmt == 2) && !css_general_characteristics.cib) 1002 return -EINVAL; 1003 if ((rfmt == 3) && !css_general_characteristics.util_str) 1004 return -EINVAL; 1005 1006 memset(page, 0, PAGE_SIZE); 1007 scpd_area = page; 1008 scpd_area->request.length = 0x0010; 1009 scpd_area->request.code = 0x0002; 1010 scpd_area->cssid = chpid.cssid; 1011 scpd_area->first_chpid = chpid.id; 1012 scpd_area->last_chpid = chpid.id; 1013 scpd_area->m = m; 1014 scpd_area->c = c; 1015 scpd_area->fmt = fmt; 1016 scpd_area->rfmt = rfmt; 1017 1018 ccode = chsc(scpd_area); 1019 if (ccode > 0) 1020 return (ccode == 3) ? -ENODEV : -EBUSY; 1021 1022 ret = chsc_error_from_response(scpd_area->response.code); 1023 if (ret) 1024 CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", 1025 scpd_area->response.code); 1026 return ret; 1027 } 1028 EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc); 1029 1030 #define chsc_det_chp_desc(FMT, c) \ 1031 int chsc_determine_fmt##FMT##_channel_path_desc( \ 1032 struct chp_id chpid, struct channel_path_desc_fmt##FMT *desc) \ 1033 { \ 1034 struct chsc_scpd *scpd_area; \ 1035 unsigned long flags; \ 1036 int ret; \ 1037 \ 1038 spin_lock_irqsave(&chsc_page_lock, flags); \ 1039 scpd_area = chsc_page; \ 1040 ret = chsc_determine_channel_path_desc(chpid, 0, FMT, c, 0, \ 1041 scpd_area); \ 1042 if (ret) \ 1043 goto out; \ 1044 \ 1045 memcpy(desc, scpd_area->data, sizeof(*desc)); \ 1046 out: \ 1047 spin_unlock_irqrestore(&chsc_page_lock, flags); \ 1048 return ret; \ 1049 } 1050 1051 chsc_det_chp_desc(0, 0) 1052 chsc_det_chp_desc(1, 1) 1053 chsc_det_chp_desc(3, 0) 1054 1055 static void 1056 chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, 1057 struct cmg_chars *chars) 1058 { 1059 int i, mask; 1060 1061 for (i = 0; i < NR_MEASUREMENT_CHARS; i++) { 1062 mask = 0x80 >> (i + 3); 1063 if (cmcv & mask) 1064 chp->cmg_chars.values[i] = chars->values[i]; 1065 else 1066 chp->cmg_chars.values[i] = 0; 1067 } 1068 } 1069 1070 static unsigned long scmc_get_speed(u32 s, u32 p) 1071 { 1072 unsigned long speed = s; 1073 1074 if (!p) 1075 p = 8; 1076 while (p--) 1077 speed *= 10; 1078 1079 return speed; 1080 } 1081 1082 int chsc_get_channel_measurement_chars(struct channel_path *chp) 1083 { 1084 unsigned long flags; 1085 int ccode, ret; 1086 1087 struct { 1088 struct chsc_header request; 1089 u32 : 24; 1090 u32 first_chpid : 8; 1091 u32 : 24; 1092 u32 last_chpid : 8; 1093 u32 zeroes1; 1094 struct chsc_header response; 1095 u32 zeroes2; 1096 struct cmg_cmcb cmcb; 1097 } *scmc_area; 1098 1099 chp->shared = -1; 1100 chp->cmg = -1; 1101 chp->extended = 0; 1102 chp->speed = 0; 1103 1104 if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm) 1105 return -EINVAL; 1106 1107 spin_lock_irqsave(&chsc_page_lock, flags); 1108 memset(chsc_page, 0, PAGE_SIZE); 1109 scmc_area = chsc_page; 1110 scmc_area->request.length = 0x0010; 1111 scmc_area->request.code = 0x0022; 1112 scmc_area->first_chpid = chp->chpid.id; 1113 scmc_area->last_chpid = chp->chpid.id; 1114 1115 ccode = chsc(scmc_area); 1116 if (ccode > 0) { 1117 ret = (ccode == 3) ? -ENODEV : -EBUSY; 1118 goto out; 1119 } 1120 1121 ret = chsc_error_from_response(scmc_area->response.code); 1122 if (ret) { 1123 CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n", 1124 scmc_area->response.code); 1125 goto out; 1126 } 1127 chp->cmcb = scmc_area->cmcb; 1128 if (scmc_area->cmcb.not_valid) 1129 goto out; 1130 1131 chp->cmg = scmc_area->cmcb.cmg; 1132 chp->shared = scmc_area->cmcb.shared; 1133 chp->extended = scmc_area->cmcb.extended; 1134 chp->speed = scmc_get_speed(scmc_area->cmcb.cmgs, scmc_area->cmcb.cmgp); 1135 chsc_initialize_cmg_chars(chp, scmc_area->cmcb.cmcv, 1136 (struct cmg_chars *)&scmc_area->cmcb.data); 1137 out: 1138 spin_unlock_irqrestore(&chsc_page_lock, flags); 1139 return ret; 1140 } 1141 1142 int __init chsc_init(void) 1143 { 1144 int ret; 1145 1146 sei_page = (void *)get_zeroed_page(GFP_KERNEL); 1147 chsc_page = (void *)get_zeroed_page(GFP_KERNEL); 1148 if (!sei_page || !chsc_page) { 1149 ret = -ENOMEM; 1150 goto out_err; 1151 } 1152 ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw); 1153 if (ret) 1154 goto out_err; 1155 return ret; 1156 out_err: 1157 free_page((unsigned long)chsc_page); 1158 free_page((unsigned long)sei_page); 1159 return ret; 1160 } 1161 1162 void __init chsc_init_cleanup(void) 1163 { 1164 crw_unregister_handler(CRW_RSC_CSS); 1165 free_page((unsigned long)chsc_page); 1166 free_page((unsigned long)sei_page); 1167 } 1168 1169 int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code) 1170 { 1171 int ret; 1172 1173 sda_area->request.length = 0x0400; 1174 sda_area->request.code = 0x0031; 1175 sda_area->operation_code = operation_code; 1176 1177 ret = chsc(sda_area); 1178 if (ret > 0) { 1179 ret = (ret == 3) ? -ENODEV : -EBUSY; 1180 goto out; 1181 } 1182 1183 switch (sda_area->response.code) { 1184 case 0x0101: 1185 ret = -EOPNOTSUPP; 1186 break; 1187 default: 1188 ret = chsc_error_from_response(sda_area->response.code); 1189 } 1190 out: 1191 return ret; 1192 } 1193 1194 int chsc_enable_facility(int operation_code) 1195 { 1196 struct chsc_sda_area *sda_area; 1197 unsigned long flags; 1198 int ret; 1199 1200 spin_lock_irqsave(&chsc_page_lock, flags); 1201 memset(chsc_page, 0, PAGE_SIZE); 1202 sda_area = chsc_page; 1203 1204 ret = __chsc_enable_facility(sda_area, operation_code); 1205 if (ret != 0) 1206 CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", 1207 operation_code, sda_area->response.code); 1208 1209 spin_unlock_irqrestore(&chsc_page_lock, flags); 1210 return ret; 1211 } 1212 1213 int __init chsc_get_cssid_iid(int idx, u8 *cssid, u8 *iid) 1214 { 1215 struct { 1216 struct chsc_header request; 1217 u8 atype; 1218 u32 : 24; 1219 u32 reserved1[6]; 1220 struct chsc_header response; 1221 u32 reserved2[3]; 1222 struct { 1223 u8 cssid; 1224 u8 iid; 1225 u32 : 16; 1226 } list[]; 1227 } *sdcal_area; 1228 int ret; 1229 1230 spin_lock_irq(&chsc_page_lock); 1231 memset(chsc_page, 0, PAGE_SIZE); 1232 sdcal_area = chsc_page; 1233 sdcal_area->request.length = 0x0020; 1234 sdcal_area->request.code = 0x0034; 1235 sdcal_area->atype = 4; 1236 1237 ret = chsc(sdcal_area); 1238 if (ret) { 1239 ret = (ret == 3) ? -ENODEV : -EBUSY; 1240 goto exit; 1241 } 1242 1243 ret = chsc_error_from_response(sdcal_area->response.code); 1244 if (ret) { 1245 CIO_CRW_EVENT(2, "chsc: sdcal failed (rc=%04x)\n", 1246 sdcal_area->response.code); 1247 goto exit; 1248 } 1249 1250 if ((addr_t) &sdcal_area->list[idx] < 1251 (addr_t) &sdcal_area->response + sdcal_area->response.length) { 1252 *cssid = sdcal_area->list[idx].cssid; 1253 *iid = sdcal_area->list[idx].iid; 1254 } 1255 else 1256 ret = -ENODEV; 1257 exit: 1258 spin_unlock_irq(&chsc_page_lock); 1259 return ret; 1260 } 1261 1262 struct css_general_char css_general_characteristics; 1263 struct css_chsc_char css_chsc_characteristics; 1264 1265 int __init 1266 chsc_determine_css_characteristics(void) 1267 { 1268 unsigned long flags; 1269 int result; 1270 struct { 1271 struct chsc_header request; 1272 u32 reserved1; 1273 u32 reserved2; 1274 u32 reserved3; 1275 struct chsc_header response; 1276 u32 reserved4; 1277 u32 general_char[510]; 1278 u32 chsc_char[508]; 1279 } *scsc_area; 1280 1281 spin_lock_irqsave(&chsc_page_lock, flags); 1282 memset(chsc_page, 0, PAGE_SIZE); 1283 scsc_area = chsc_page; 1284 scsc_area->request.length = 0x0010; 1285 scsc_area->request.code = 0x0010; 1286 1287 result = chsc(scsc_area); 1288 if (result) { 1289 result = (result == 3) ? -ENODEV : -EBUSY; 1290 goto exit; 1291 } 1292 1293 result = chsc_error_from_response(scsc_area->response.code); 1294 if (result == 0) { 1295 memcpy(&css_general_characteristics, scsc_area->general_char, 1296 sizeof(css_general_characteristics)); 1297 memcpy(&css_chsc_characteristics, scsc_area->chsc_char, 1298 sizeof(css_chsc_characteristics)); 1299 } else 1300 CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n", 1301 scsc_area->response.code); 1302 exit: 1303 spin_unlock_irqrestore(&chsc_page_lock, flags); 1304 return result; 1305 } 1306 1307 EXPORT_SYMBOL_GPL(css_general_characteristics); 1308 EXPORT_SYMBOL_GPL(css_chsc_characteristics); 1309 1310 int chsc_sstpc(void *page, unsigned int op, u16 ctrl, long *clock_delta) 1311 { 1312 struct { 1313 struct chsc_header request; 1314 unsigned int rsvd0; 1315 unsigned int op : 8; 1316 unsigned int rsvd1 : 8; 1317 unsigned int ctrl : 16; 1318 unsigned int rsvd2[5]; 1319 struct chsc_header response; 1320 unsigned int rsvd3[3]; 1321 s64 clock_delta; 1322 unsigned int rsvd4[2]; 1323 } *rr; 1324 int rc; 1325 1326 memset(page, 0, PAGE_SIZE); 1327 rr = page; 1328 rr->request.length = 0x0020; 1329 rr->request.code = 0x0033; 1330 rr->op = op; 1331 rr->ctrl = ctrl; 1332 rc = chsc(rr); 1333 if (rc) 1334 return -EIO; 1335 rc = (rr->response.code == 0x0001) ? 0 : -EIO; 1336 if (clock_delta) 1337 *clock_delta = rr->clock_delta; 1338 return rc; 1339 } 1340 1341 int chsc_sstpi(void *page, void *result, size_t size) 1342 { 1343 struct { 1344 struct chsc_header request; 1345 unsigned int rsvd0[3]; 1346 struct chsc_header response; 1347 char data[]; 1348 } *rr; 1349 int rc; 1350 1351 memset(page, 0, PAGE_SIZE); 1352 rr = page; 1353 rr->request.length = 0x0010; 1354 rr->request.code = 0x0038; 1355 rc = chsc(rr); 1356 if (rc) 1357 return -EIO; 1358 memcpy(result, &rr->data, size); 1359 return (rr->response.code == 0x0001) ? 0 : -EIO; 1360 } 1361 1362 int chsc_stzi(void *page, void *result, size_t size) 1363 { 1364 struct { 1365 struct chsc_header request; 1366 unsigned int rsvd0[3]; 1367 struct chsc_header response; 1368 char data[]; 1369 } *rr; 1370 int rc; 1371 1372 memset(page, 0, PAGE_SIZE); 1373 rr = page; 1374 rr->request.length = 0x0010; 1375 rr->request.code = 0x003e; 1376 rc = chsc(rr); 1377 if (rc) 1378 return -EIO; 1379 memcpy(result, &rr->data, size); 1380 return (rr->response.code == 0x0001) ? 0 : -EIO; 1381 } 1382 1383 int chsc_siosl(struct subchannel_id schid) 1384 { 1385 struct { 1386 struct chsc_header request; 1387 u32 word1; 1388 struct subchannel_id sid; 1389 u32 word3; 1390 struct chsc_header response; 1391 u32 word[11]; 1392 } *siosl_area; 1393 unsigned long flags; 1394 int ccode; 1395 int rc; 1396 1397 spin_lock_irqsave(&chsc_page_lock, flags); 1398 memset(chsc_page, 0, PAGE_SIZE); 1399 siosl_area = chsc_page; 1400 siosl_area->request.length = 0x0010; 1401 siosl_area->request.code = 0x0046; 1402 siosl_area->word1 = 0x80000000; 1403 siosl_area->sid = schid; 1404 1405 ccode = chsc(siosl_area); 1406 if (ccode > 0) { 1407 if (ccode == 3) 1408 rc = -ENODEV; 1409 else 1410 rc = -EBUSY; 1411 CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n", 1412 schid.ssid, schid.sch_no, ccode); 1413 goto out; 1414 } 1415 rc = chsc_error_from_response(siosl_area->response.code); 1416 if (rc) 1417 CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n", 1418 schid.ssid, schid.sch_no, 1419 siosl_area->response.code); 1420 else 1421 CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n", 1422 schid.ssid, schid.sch_no); 1423 out: 1424 spin_unlock_irqrestore(&chsc_page_lock, flags); 1425 return rc; 1426 } 1427 EXPORT_SYMBOL_GPL(chsc_siosl); 1428 1429 /** 1430 * chsc_scm_info() - store SCM information (SSI) 1431 * @scm_area: request and response block for SSI 1432 * @token: continuation token 1433 * 1434 * Returns 0 on success. 1435 */ 1436 int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token) 1437 { 1438 int ccode, ret; 1439 1440 memset(scm_area, 0, sizeof(*scm_area)); 1441 scm_area->request.length = 0x0020; 1442 scm_area->request.code = 0x004C; 1443 scm_area->reqtok = token; 1444 1445 ccode = chsc(scm_area); 1446 if (ccode > 0) { 1447 ret = (ccode == 3) ? -ENODEV : -EBUSY; 1448 goto out; 1449 } 1450 ret = chsc_error_from_response(scm_area->response.code); 1451 if (ret != 0) 1452 CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n", 1453 scm_area->response.code); 1454 out: 1455 return ret; 1456 } 1457 EXPORT_SYMBOL_GPL(chsc_scm_info); 1458 1459 /** 1460 * chsc_pnso() - Perform Network-Subchannel Operation 1461 * @schid: id of the subchannel on which PNSO is performed 1462 * @pnso_area: request and response block for the operation 1463 * @oc: Operation Code 1464 * @resume_token: resume token for multiblock response 1465 * @cnc: Boolean change-notification control 1466 * 1467 * pnso_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL) 1468 * 1469 * Returns 0 on success. 1470 */ 1471 int chsc_pnso(struct subchannel_id schid, struct chsc_pnso_area *pnso_area, 1472 u8 oc, struct chsc_pnso_resume_token resume_token, int cnc) 1473 { 1474 memset(pnso_area, 0, sizeof(*pnso_area)); 1475 pnso_area->request.length = 0x0030; 1476 pnso_area->request.code = 0x003d; /* network-subchannel operation */ 1477 pnso_area->m = schid.m; 1478 pnso_area->ssid = schid.ssid; 1479 pnso_area->sch = schid.sch_no; 1480 pnso_area->cssid = schid.cssid; 1481 pnso_area->oc = oc; 1482 pnso_area->resume_token = resume_token; 1483 pnso_area->n = (cnc != 0); 1484 if (chsc(pnso_area)) 1485 return -EIO; 1486 return chsc_error_from_response(pnso_area->response.code); 1487 } 1488 1489 int chsc_sgib(u32 origin) 1490 { 1491 struct { 1492 struct chsc_header request; 1493 u16 op; 1494 u8 reserved01[2]; 1495 u8 reserved02:4; 1496 u8 fmt:4; 1497 u8 reserved03[7]; 1498 /* operation data area begin */ 1499 u8 reserved04[4]; 1500 u32 gib_origin; 1501 u8 reserved05[10]; 1502 u8 aix; 1503 u8 reserved06[4029]; 1504 struct chsc_header response; 1505 u8 reserved07[4]; 1506 } *sgib_area; 1507 int ret; 1508 1509 spin_lock_irq(&chsc_page_lock); 1510 memset(chsc_page, 0, PAGE_SIZE); 1511 sgib_area = chsc_page; 1512 sgib_area->request.length = 0x0fe0; 1513 sgib_area->request.code = 0x0021; 1514 sgib_area->op = 0x1; 1515 sgib_area->gib_origin = origin; 1516 1517 ret = chsc(sgib_area); 1518 if (ret == 0) 1519 ret = chsc_error_from_response(sgib_area->response.code); 1520 spin_unlock_irq(&chsc_page_lock); 1521 1522 return ret; 1523 } 1524 EXPORT_SYMBOL_GPL(chsc_sgib); 1525 1526 #define SCUD_REQ_LEN 0x10 /* SCUD request block length */ 1527 #define SCUD_REQ_CMD 0x4b /* SCUD Command Code */ 1528 1529 struct chse_cudb { 1530 u16 flags:8; 1531 u16 chp_valid:8; 1532 u16 cu; 1533 u32 esm_valid:8; 1534 u32:24; 1535 u8 chpid[8]; 1536 u32:32; 1537 u32:32; 1538 u8 esm[8]; 1539 u32 efla[8]; 1540 } __packed; 1541 1542 struct chsc_scud { 1543 struct chsc_header request; 1544 u16:4; 1545 u16 fmt:4; 1546 u16 cssid:8; 1547 u16 first_cu; 1548 u16:16; 1549 u16 last_cu; 1550 u32:32; 1551 struct chsc_header response; 1552 u16:4; 1553 u16 fmt_resp:4; 1554 u32:24; 1555 struct chse_cudb cudb[]; 1556 } __packed; 1557 1558 /** 1559 * chsc_scud() - Store control-unit description. 1560 * @cu: number of the control-unit 1561 * @esm: 8 1-byte endpoint security mode values 1562 * @esm_valid: validity mask for @esm 1563 * 1564 * Interface to retrieve information about the endpoint security 1565 * modes for up to 8 paths of a control unit. 1566 * 1567 * Returns 0 on success. 1568 */ 1569 int chsc_scud(u16 cu, u64 *esm, u8 *esm_valid) 1570 { 1571 struct chsc_scud *scud = chsc_page; 1572 int ret; 1573 1574 spin_lock_irq(&chsc_page_lock); 1575 memset(chsc_page, 0, PAGE_SIZE); 1576 scud->request.length = SCUD_REQ_LEN; 1577 scud->request.code = SCUD_REQ_CMD; 1578 scud->fmt = 0; 1579 scud->cssid = 0; 1580 scud->first_cu = cu; 1581 scud->last_cu = cu; 1582 1583 ret = chsc(scud); 1584 if (!ret) 1585 ret = chsc_error_from_response(scud->response.code); 1586 1587 if (!ret && (scud->response.length <= 8 || scud->fmt_resp != 0 1588 || !(scud->cudb[0].flags & 0x80) 1589 || scud->cudb[0].cu != cu)) { 1590 1591 CIO_MSG_EVENT(2, "chsc: scud failed rc=%04x, L2=%04x " 1592 "FMT=%04x, cudb.flags=%02x, cudb.cu=%04x", 1593 scud->response.code, scud->response.length, 1594 scud->fmt_resp, scud->cudb[0].flags, scud->cudb[0].cu); 1595 ret = -EINVAL; 1596 } 1597 1598 if (ret) 1599 goto out; 1600 1601 memcpy(esm, scud->cudb[0].esm, sizeof(*esm)); 1602 *esm_valid = scud->cudb[0].esm_valid; 1603 out: 1604 spin_unlock_irq(&chsc_page_lock); 1605 return ret; 1606 } 1607 EXPORT_SYMBOL_GPL(chsc_scud); 1608