// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/io.h>

#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>
#include <asm/schid.h>
#include <asm/chpid.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

/*
 * raw track access always maps to 64k in memory
 * so it maps to 16 blocks of 4k per track
 */
#define DASD_RAW_BLOCK_PER_TRACK 16
#define DASD_RAW_BLOCKSIZE 4096
/* 64k are 128 x 512 byte sectors */
#define DASD_RAW_SECTORS_PER_TRACK 128

MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_eckd_discipline;

/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);

static struct ccw_driver dasd_eckd_driver; /* see below */

static void *rawpadpage;

#define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2

/* emergency request for reserve/release */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	char data[32];
} *dasd_reserve_req;
static DEFINE_MUTEX(dasd_reserve_mutex);

static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw[2];
	char data[40];
} *dasd_vol_info_req;
static DEFINE_MUTEX(dasd_vol_info_mutex);

struct ext_pool_exhaust_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_device *base;
};

/* definitions for the path verification worker */
struct pe_handler_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
	int isglobal;
	__u8 tbvpm;
	__u8 fcsecpm;
};
static struct pe_handler_work_data *pe_handler_worker;
static DEFINE_MUTEX(dasd_pe_handler_mutex);

struct check_attention_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	__u8 lpum;
};
static int dasd_eckd_ext_pool_id(struct dasd_device *);
static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
			struct dasd_device *, struct dasd_device *,
			unsigned int, int, unsigned int, unsigned int,
			unsigned int, unsigned int);
static int dasd_eckd_query_pprc_status(struct dasd_device *,
				       struct dasd_pprc_data_sc4 *);

/* initial attempt at a probe function. this can be simplified once
 * the other detection code is gone */
static int
dasd_eckd_probe (struct ccw_device *cdev)
{
	int ret;

	/* set ECKD specific ccw-device options */
	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_eckd_probe: could not set "
				"ccw-device options");
		return ret;
	}
	ret = dasd_generic_probe(cdev);
	return ret;
}

static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}

static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140

/* head and record addresses of count_area read in analysis ccw */
static const int count_area_head[] = { 0, 0, 0, 0, 1 };
static const int count_area_rec[] = { 1, 2, 3, 4, 1 };

static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return (d1 + (d2 - 1)) / d2;
}

static unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}
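/*
 * Illustrative worked example: for a 3390 with no key field (kl == 0)
 * and 4096 byte data blocks, dn = ceil_quot(4096 + 6, 232) + 1 = 19,
 * so recs_per_track() returns
 * 1729 / (10 + 9 + ceil_quot(4096 + 6 * 19, 34)) = 1729 / 143 = 12,
 * i.e. the familiar twelve 4KiB records per 3390 track.
 */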
static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
	geo->cyl = (__u16) cyl;
	geo->head = cyl >> 16;
	geo->head <<= 4;
	geo->head |= head;
}
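/*
 * Illustrative example: set_ch_t() called with cyl = 0x12345 and
 * head = 7 stores geo->cyl = 0x2345 and geo->head = 0x17; cylinder
 * bits above the first 16 end up in the upper twelve bits of the head
 * field. This packed cylinder/head format is what extended address
 * volumes (EAV) use.
 */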
/*
 * calculate failing track from sense data depending on whether
 * it is an EAV device or not
 */
static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
				    sector_t *track)
{
	struct dasd_eckd_private *private = device->private;
	u8 *sense = NULL;
	u32 cyl;
	u8 head;

	sense = dasd_get_sense(irb);
	if (!sense) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no sense data\n");
		return -EINVAL;
	}
	if (!(sense[27] & DASD_SENSE_BIT_2)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no valid track data\n");
		return -EINVAL;
	}

	if (sense[27] & DASD_SENSE_BIT_3) {
		/* enhanced addressing */
		cyl = sense[30] << 20;
		cyl |= (sense[31] & 0xF0) << 12;
		cyl |= sense[28] << 8;
		cyl |= sense[29];
	} else {
		cyl = sense[29] << 8;
		cyl |= sense[30];
	}
	head = sense[31] & 0x0F;
	*track = cyl * private->rdc_data.trk_per_cyl + head;
	return 0;
}

static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
			 struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = get_phys_clock(&data->ep_sys_time);
	/*
	 * Ignore return code if XRC is not supported or
	 * sync clock is switched off
	 */
	if ((rc && !private->rdc_data.facilities.XRC_supported) ||
	    rc == -EOPNOTSUPP || rc == -EACCES)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

	if (ccw) {
		ccw->count = sizeof(struct DE_eckd_data);
		ccw->flags |= CCW_FLAG_SLI;
	}

	return rc;
}

static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device,
	      int blksize)
{
	struct dasd_eckd_private *private = device->private;
	u16 heads, beghead, endhead;
	u32 begcyl, endcyl;
	int rc = 0;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
		ccw->flags = 0;
		ccw->count = 16;
		ccw->cda = virt_to_dma32(data);
	}

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->mask.perm = 0x03;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = blksize;
		rc = set_timestamp(ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
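/*
 * A classic ECKD channel program chains a Define Extent CCW, a Locate
 * Record CCW and the data transfer CCWs, in that order. On newer
 * controllers the PFX command (see prefix_LRE() below) carries the
 * define extent and locate record payloads in a single CCW instead.
 */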
static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
			      unsigned int trk, unsigned int rec_on_trk,
			      int count, int cmd, struct dasd_device *device,
			      unsigned int reclen, unsigned int tlf)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
		ccw->flags = 0;
		if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
			ccw->count = 22;
		else
			ccw->count = 20;
		ccw->cda = virt_to_dma32(data);
	}

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/* note: meaning of count depends on the operation
	 * for record based I/O it's the number of records, but for
	 * track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->operation.orientation = 0x0;
		data->operation.operation = 0x3F;
		data->extended_operation = 0x11;
		data->length = 0;
		data->extended_parameter_length = 0x02;
		if (data->count > 8) {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[1] = 0xFF;
			data->extended_parameter[1] <<= (16 - count);
		} else {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[0] <<= (8 - count);
			data->extended_parameter[1] = 0x00;
		}
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x0C;
		data->extended_parameter_length = 0;
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			      "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}

static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned int format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct LRE_eckd_data *lredata;
	struct DE_eckd_data *dedata;
	int rc = 0;

	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
		ccw->count = sizeof(*pfxdata) + 2;
		ccw->cda = virt_to_dma32(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
	} else {
		ccw->count = sizeof(*pfxdata);
		ccw->cda = virt_to_dma32(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata));
	}

	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->conf.ned->unit_addr;
	pfxdata->base_lss = basepriv->conf.ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata->validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata->validity.verify_base = 1;
		pfxdata->validity.hyper_pav = 1;
	}

	rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);

	/*
	 * For some commands the System Time Stamp is set in the define extent
	 * data when XRC is supported. The validity of the time stamp must be
	 * reflected in the prefix data as well.
	 */
	if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
		pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */

	if (format == 1) {
		locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
				  basedev, blksize, tlf);
	}

	return rc;
}
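/*
 * prefix() below is the format 0 variant of prefix_LRE(): only the
 * define extent part of the prefix data is filled in, while format 1
 * additionally carries the locate record extended payload.
 */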
static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}

static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = virt_to_dma32(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
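/*
 * Illustrative example for the sector calculation above: on a 3390
 * with reclen = 4096 and rec_on_trk = 2, dn = ceil_quot(4102, 232) = 18,
 * d = 9 + ceil_quot(4096 + 6 * 19, 34) = 133, and therefore
 * sector = (49 + 1 * (10 + 133)) / 8 = 24.
 */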
/*
 * Returns 1 if the block is one of the special blocks that needs
 * to get read/written with the KD variant of the command.
 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant. So don't wonder about code like:
 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *         ccw->cmd_code |= 0x8;
 */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	if (recid < 3)
		return 1;
	if (recid < blk_per_trk)
		return 0;
	if (recid < 2 * blk_per_trk)
		return 1;
	return 0;
}

/*
 * Returns the record size for the special blocks of the cdl format.
 * Only returns something useful if dasd_eckd_cdl_special is true
 * for the recid.
 */
static inline int
dasd_eckd_cdl_reclen(int recid)
{
	if (recid < 3)
		return sizes_trk0[recid];
	return LABEL_SIZE;
}

/* create unique id from private structure. */
static void create_uid(struct dasd_conf *conf, struct dasd_uid *uid)
{
	int count;

	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, conf->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, &conf->ned->serial,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = conf->gneq->subsystemID;
	uid->real_unit_addr = conf->ned->unit_addr;
	if (conf->sneq) {
		uid->type = conf->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = conf->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (conf->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				conf->vdsneq->uit[count]);
		}
	}
}
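/*
 * With illustrative values, a UID built by create_uid() is later
 * rendered by dasd_eckd_get_uid_string() as something like
 * "IBM.750000000ABC01.1234.0f", i.e. vendor.serial.ssid.unit_addr,
 * optionally followed by ".<vduit>" for z/VM virtual devices.
 */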
/*
 * Generate device unique id that specifies the physical device.
 */
static int dasd_eckd_generate_uid(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private)
		return -ENODEV;
	if (!private->conf.ned || !private->conf.gneq)
		return -ENODEV;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	create_uid(&private->conf, &private->uid);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return 0;
}

static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (private) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		*uid = private->uid;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		return 0;
	}
	return -EINVAL;
}

/*
 * compare device UID with data of a given dasd_eckd_private structure
 * return 0 for match
 */
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
				      struct dasd_conf *path_conf)
{
	struct dasd_uid device_uid;
	struct dasd_uid path_uid;

	create_uid(path_conf, &path_uid);
	dasd_eckd_get_uid(device, &device_uid);

	return memcmp(&device_uid, &path_uid, sizeof(struct dasd_uid));
}
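/*
 * Note: the whole-struct memcmp() above is only reliable because
 * create_uid() zeroes the dasd_uid structure before filling it, so
 * padding and unused bytes always compare equal.
 */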
static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   __u8 *rcd_buffer,
				   __u8 lpm)
{
	struct ccw1 *ccw;
	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buffer[0] = 0xE5;
	rcd_buffer[1] = 0xF1;
	rcd_buffer[2] = 0x4B;
	rcd_buffer[3] = 0xF0;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RCD;
	ccw->flags = 0;
	ccw->cda = virt_to_dma32(rcd_buffer);
	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
	cqr->magic = DASD_ECKD_MAGIC;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}

/*
 * Wakeup helper for read_conf
 * if the cqr is not done and needs some error recovery
 * the buffer has to be re-initialized with the EBCDIC "V1.0"
 * to show support for virtual device SNEQ
 */
static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct ccw1 *ccw;
	__u8 *rcd_buffer;

	if (cqr->status != DASD_CQR_DONE) {
		ccw = cqr->cpaddr;
		rcd_buffer = dma32_to_virt(ccw->cda);
		memset(rcd_buffer, 0, sizeof(*rcd_buffer));

		rcd_buffer[0] = 0xE5;
		rcd_buffer[1] = 0xF1;
		rcd_buffer[2] = 0x4B;
		rcd_buffer[3] = 0xF0;
	}
	dasd_wakeup_cb(cqr, data);
}

static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
					   struct dasd_ccw_req *cqr,
					   __u8 *rcd_buffer,
					   __u8 lpm)
{
	struct ciw *ciw;
	int rc;
	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
		return -EOPNOTSUPP;

	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->callback = read_conf_cb;
	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}

static int
dasd_eckd_read_conf_lpm(struct dasd_device *device,
			void **rcd_buffer,
			int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
				   0, /* use rcd_buf as data area */
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		ret = -ENOMEM;
		goto out_error;
	}
	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
	cqr->callback = read_conf_cb;
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}

static int dasd_eckd_identify_conf_parts(struct dasd_conf *conf)
{
	struct dasd_sneq *sneq;
	int i, count;

	conf->ned = NULL;
	conf->sneq = NULL;
	conf->vdsneq = NULL;
	conf->gneq = NULL;
	count = conf->len / sizeof(struct dasd_sneq);
	sneq = (struct dasd_sneq *)conf->data;
	for (i = 0; i < count; ++i) {
		if (sneq->flags.identifier == 1 && sneq->format == 1)
			conf->sneq = sneq;
		else if (sneq->flags.identifier == 1 && sneq->format == 4)
			conf->vdsneq = (struct vd_sneq *)sneq;
		else if (sneq->flags.identifier == 2)
			conf->gneq = (struct dasd_gneq *)sneq;
		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
			conf->ned = (struct dasd_ned *)sneq;
		sneq++;
	}
	if (!conf->ned || !conf->gneq) {
		conf->ned = NULL;
		conf->sneq = NULL;
		conf->vdsneq = NULL;
		conf->gneq = NULL;
		return -EINVAL;
	}
	return 0;
}

static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
{
	struct dasd_gneq *gneq;
	int i, count, found;

	count = conf_len / sizeof(*gneq);
	gneq = (struct dasd_gneq *)conf_data;
	found = 0;
	for (i = 0; i < count; ++i) {
		if (gneq->flags.identifier == 2) {
			found = 1;
			break;
		}
		gneq++;
	}
	if (found)
		return ((char *)gneq)[18] & 0x07;
	else
		return 0;
}
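/*
 * The return value of dasd_eckd_path_access() is interpreted by its
 * callers as 0x02 -> non-preferred path (nppm) and 0x03 -> preferred
 * path (ppm); see the switch statements in dasd_eckd_read_conf() and
 * dasd_eckd_path_available_action().
 */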
static void dasd_eckd_store_conf_data(struct dasd_device *device,
				      struct dasd_conf_data *conf_data, int chp)
{
	struct dasd_eckd_private *private = device->private;
	struct channel_path_desc_fmt0 *chp_desc;
	struct subchannel_id sch_id;
	void *cdp;

	/*
	 * path handling and read_conf allocate data
	 * free it before replacing the pointer
	 * also replace the old private->conf_data pointer
	 * with the new one if this points to the same data
	 */
	cdp = device->path[chp].conf_data;
	if (private->conf.data == cdp) {
		private->conf.data = (void *)conf_data;
		dasd_eckd_identify_conf_parts(&private->conf);
	}
	ccw_device_get_schid(device->cdev, &sch_id);
	device->path[chp].conf_data = conf_data;
	device->path[chp].cssid = sch_id.cssid;
	device->path[chp].ssid = sch_id.ssid;
	chp_desc = ccw_device_get_chp_desc(device->cdev, chp);
	if (chp_desc)
		device->path[chp].chpid = chp_desc->chpid;
	kfree(chp_desc);
	kfree(cdp);
}

static void dasd_eckd_clear_conf_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int i;

	private->conf.data = NULL;
	private->conf.len = 0;
	for (i = 0; i < 8; i++) {
		kfree(device->path[i].conf_data);
		device->path[i].conf_data = NULL;
		device->path[i].cssid = 0;
		device->path[i].ssid = 0;
		device->path[i].chpid = 0;
		dasd_path_notoper(device, i);
	}
}

static void dasd_eckd_read_fc_security(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	u8 esm_valid;
	u8 esm[8];
	int chp;
	int rc;

	rc = chsc_scud(private->uid.ssid, (u64 *)esm, &esm_valid);
	if (rc) {
		for (chp = 0; chp < 8; chp++)
			device->path[chp].fc_security = 0;
		return;
	}

	for (chp = 0; chp < 8; chp++) {
		if (esm_valid & (0x80 >> chp))
			device->path[chp].fc_security = esm[chp];
		else
			device->path[chp].fc_security = 0;
	}
}

static void dasd_eckd_get_uid_string(struct dasd_conf *conf, char *print_uid)
{
	struct dasd_uid uid;

	create_uid(conf, &uid);
	snprintf(print_uid, DASD_UID_STRLEN, "%s.%s.%04x.%02x%s%s",
		 uid.vendor, uid.serial, uid.ssid, uid.real_unit_addr,
		 uid.vduit[0] ? "." : "", uid.vduit);
}

static int dasd_eckd_check_cabling(struct dasd_device *device,
				   void *conf_data, __u8 lpm)
{
	char print_path_uid[DASD_UID_STRLEN], print_device_uid[DASD_UID_STRLEN];
	struct dasd_eckd_private *private = device->private;
	struct dasd_conf path_conf;

	path_conf.data = conf_data;
	path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
	if (dasd_eckd_identify_conf_parts(&path_conf))
		return 1;

	if (dasd_eckd_compare_path_uid(device, &path_conf)) {
		dasd_eckd_get_uid_string(&path_conf, print_path_uid);
		dasd_eckd_get_uid_string(&private->conf, print_device_uid);
		dev_err(&device->cdev->dev,
			"Not all channel paths lead to the same device, path %02X leads to device %s instead of %s\n",
			lpm, print_path_uid, print_device_uid);
		return 1;
	}

	return 0;
}
static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc, path_err, pos;
	__u8 lpm, opm;
	struct dasd_eckd_private *private;

	private = device->private;
	opm = ccw_device_get_path_mask(device->cdev);
	conf_data_saved = 0;
	path_err = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
					     &conf_len, lpm);
		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data returned "
					"error %d", rc);
			return rc;
		}
		if (conf_data == NULL) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"No configuration data "
					"retrieved");
			/* no further analysis possible */
			dasd_path_add_opm(device, opm);
			continue;	/* no error */
		}
		/* save first valid configuration data */
		if (!conf_data_saved) {
			/* initially clear previously stored conf_data */
			dasd_eckd_clear_conf_data(device);
			private->conf.data = conf_data;
			private->conf.len = conf_len;
			if (dasd_eckd_identify_conf_parts(&private->conf)) {
				private->conf.data = NULL;
				private->conf.len = 0;
				kfree(conf_data);
				continue;
			}
			/*
			 * build device UID so that other path data
			 * can be compared to it
			 */
			dasd_eckd_generate_uid(device);
			conf_data_saved++;
		} else if (dasd_eckd_check_cabling(device, conf_data, lpm)) {
			dasd_path_add_cablepm(device, lpm);
			path_err = -EINVAL;
			kfree(conf_data);
			continue;
		}

		pos = pathmask_to_pos(lpm);
		dasd_eckd_store_conf_data(device, conf_data, pos);

		switch (dasd_eckd_path_access(conf_data, conf_len)) {
		case 0x02:
			dasd_path_add_nppm(device, lpm);
			break;
		case 0x03:
			dasd_path_add_ppm(device, lpm);
			break;
		}
		if (!dasd_path_get_opm(device)) {
			dasd_path_set_opm(device, lpm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, lpm);
		}
	}

	return path_err;
}
static u32 get_fcx_max_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int fcx_in_css, fcx_in_gneq, fcx_in_features;
	unsigned int mdc;
	int tpm;

	if (dasd_nofcx)
		return 0;
	/* is transport mode supported? */
	fcx_in_css = css_general_characteristics.fcx;
	fcx_in_gneq = private->conf.gneq->reserved2[7] & 0x04;
	fcx_in_features = private->features.feature[40] & 0x80;
	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;

	if (!tpm)
		return 0;

	mdc = ccw_device_get_mdc(device->cdev, 0);
	if (mdc == 0) {
		dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
		return 0;
	} else {
		return (u32)mdc * FCX_MAX_DATA_FACTOR;
	}
}

static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned int mdc;
	u32 fcx_max_data;

	if (private->fcx_max_data) {
		mdc = ccw_device_get_mdc(device->cdev, lpm);
		if (mdc == 0) {
			dev_warn(&device->cdev->dev,
				 "Detecting the maximum data size for zHPF "
				 "requests failed (rc=%d) for a new path %x\n",
				 mdc, lpm);
			return mdc;
		}
		fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
		if (fcx_max_data < private->fcx_max_data) {
			dev_warn(&device->cdev->dev,
				 "The maximum data size for zHPF requests %u "
				 "on a new path %x is below the active maximum "
				 "%u\n", fcx_max_data, lpm,
				 private->fcx_max_data);
			return -EACCES;
		}
	}
	return 0;
}

static int rebuild_device_uid(struct dasd_device *device,
			      struct pe_handler_work_data *data)
{
	struct dasd_eckd_private *private = device->private;
	__u8 lpm, opm = dasd_path_get_opm(device);
	int rc = -ENODEV;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);

		if (rc) {
			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
				continue;
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data "
					"returned error %d", rc);
			break;
		}
		memcpy(private->conf.data, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		if (dasd_eckd_identify_conf_parts(&private->conf)) {
			rc = -ENODEV;
		} else /* first valid path is enough */
			break;
	}

	if (!rc)
		rc = dasd_eckd_generate_uid(device);

	return rc;
}

static void dasd_eckd_path_available_action(struct dasd_device *device,
					    struct pe_handler_work_data *data)
{
	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
	struct dasd_conf_data *conf_data;
	char print_uid[DASD_UID_STRLEN];
	struct dasd_conf path_conf;
	unsigned long flags;
	int rc, pos;

	opm = 0;
	npm = 0;
	ppm = 0;
	epm = 0;
	hpfpm = 0;
	cablepm = 0;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & data->tbvpm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);
		if (!rc) {
			switch (dasd_eckd_path_access(data->rcd_buffer,
						      DASD_ECKD_RCD_DATA_SIZE)) {
			case 0x02:
				npm |= lpm;
				break;
			case 0x03:
				ppm |= lpm;
				break;
			}
			opm |= lpm;
		} else if (rc == -EOPNOTSUPP) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: No configuration "
					"data retrieved");
			opm |= lpm;
		} else if (rc == -EAGAIN) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: device is stopped,"
					" try again later");
			epm |= lpm;
		} else {
			dev_warn(&device->cdev->dev,
				 "Reading device feature codes failed "
				 "(rc=%d) for new path %x\n", rc, lpm);
			continue;
		}
		if (verify_fcx_max_data(device, lpm)) {
			opm &= ~lpm;
			npm &= ~lpm;
			ppm &= ~lpm;
			hpfpm |= lpm;
			continue;
		}

		/*
		 * save conf_data for comparison after
		 * rebuild_device_uid may have changed
		 * the original data
		 */
		memcpy(&path_rcd_buf, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		path_conf.data = (void *)&path_rcd_buf;
		path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
		if (dasd_eckd_identify_conf_parts(&path_conf)) {
			path_conf.data = NULL;
			path_conf.len = 0;
			continue;
		}

		/*
		 * compare path UID with device UID only if at least
		 * one valid path is left
		 * in other case the device UID may have changed and
		 * the first working path UID will be used as device UID
		 */
		if (dasd_path_get_opm(device) &&
		    dasd_eckd_compare_path_uid(device, &path_conf)) {
			/*
			 * the comparison was not successful
			 * rebuild the device UID with at least one
			 * known path in case a z/VM hyperswap command
			 * has changed the device
			 *
			 * after this compare again
			 *
			 * if either the rebuild or the recompare fails
			 * the path can not be used
			 */
			if (rebuild_device_uid(device, data) ||
			    dasd_eckd_compare_path_uid(
				    device, &path_conf)) {
				dasd_eckd_get_uid_string(&path_conf, print_uid);
				dev_err(&device->cdev->dev,
					"The newly added channel path %02X "
					"will not be used because it leads "
					"to a different device %s\n",
					lpm, print_uid);
				opm &= ~lpm;
				npm &= ~lpm;
				ppm &= ~lpm;
				cablepm |= lpm;
				continue;
			}
		}

		conf_data = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL);
		if (conf_data) {
			memcpy(conf_data, data->rcd_buffer,
			       DASD_ECKD_RCD_DATA_SIZE);
		} else {
			/*
			 * path is operational but path config data could not
			 * be stored due to low mem condition
			 * add it to the error path mask and schedule a path
			 * verification later so that it can be added again
			 */
			epm |= lpm;
		}
		pos = pathmask_to_pos(lpm);
		dasd_eckd_store_conf_data(device, conf_data, pos);

		/*
		 * There is a small chance that a path is lost again between
		 * above path verification and the following modification of
		 * the device opm mask. We could avoid that race here by using
		 * yet another path mask, but we rather deal with this unlikely
		 * situation in dasd_start_IO.
		 */
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (!dasd_path_get_opm(device) && opm) {
			dasd_path_set_opm(device, opm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, opm);
		}
		dasd_path_add_nppm(device, npm);
		dasd_path_add_ppm(device, ppm);
		if (epm) {
			dasd_path_add_tbvpm(device, epm);
			dasd_device_set_timer(device, 50);
		}
		dasd_path_add_cablepm(device, cablepm);
		dasd_path_add_nohpfpm(device, hpfpm);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

		dasd_path_create_kobj(device, pos);
	}
}
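/*
 * The worker below may run on the statically allocated
 * pe_handler_worker data (serialized by dasd_pe_handler_mutex) when
 * dasd_eckd_pe_handler() cannot allocate memory, hence the isglobal
 * handling on completion.
 */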
static void do_pe_handler_work(struct work_struct *work)
{
	struct pe_handler_work_data *data;
	struct dasd_device *device;

	data = container_of(work, struct pe_handler_work_data, worker);
	device = data->device;

	/* delay path verification until device was resumed */
	if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
		schedule_work(work);
		return;
	}
	/* check if path verification already running and delay if so */
	if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
		schedule_work(work);
		return;
	}

	if (data->tbvpm)
		dasd_eckd_path_available_action(device, data);
	if (data->fcsecpm)
		dasd_eckd_read_fc_security(device);

	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
	dasd_put_device(device);
	if (data->isglobal)
		mutex_unlock(&dasd_pe_handler_mutex);
	else
		kfree(data);
}

static int dasd_eckd_pe_handler(struct dasd_device *device,
				__u8 tbvpm, __u8 fcsecpm)
{
	struct pe_handler_work_data *data;

	data = kzalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
	if (!data) {
		if (mutex_trylock(&dasd_pe_handler_mutex)) {
			data = pe_handler_worker;
			data->isglobal = 1;
		} else {
			return -ENOMEM;
		}
	}
	INIT_WORK(&data->worker, do_pe_handler_work);
	dasd_get_device(device);
	data->device = device;
	data->tbvpm = tbvpm;
	data->fcsecpm = fcsecpm;
	schedule_work(&data->worker);
	return 0;
}

static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private->fcx_max_data)
		private->fcx_max_data = get_fcx_max_data(device);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
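/*
 * The PSF/RSSD pattern used by dasd_eckd_read_features() below, and by
 * the volume and extent pool queries further down, is always the same:
 * the first CCW issues a Perform Subsystem Function with a "Prepare
 * for Read Subsystem Data" order and is command chained (CCW_FLAG_CC)
 * to a second CCW that reads the requested subsystem data.
 */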
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = virt_to_dma32(prssdp);

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = virt_to_dma32(features);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/* Read Volume Information - Volume Storage Query */
static int dasd_eckd_read_vol_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_vsq *vsq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int useglobal;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*vsq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		mutex_lock(&dasd_vol_info_mutex);
		useglobal = 1;
		cqr = &dasd_vol_info_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
		cqr->cpaddr = &dasd_vol_info_req->ccw;
		cqr->data = &dasd_vol_info_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_VSQ;	/* Volume Storage Query */
	prssdp->lss = private->conf.ned->ID;
	prssdp->volume = private->conf.ned->unit_addr;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = virt_to_dma32(prssdp);

	/* Read Subsystem Data - Volume Storage Query */
	vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
	memset(vsq, 0, sizeof(*vsq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*vsq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = virt_to_dma32(vsq);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		memcpy(&private->vsq, vsq, sizeof(*vsq));
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the volume storage information failed with rc=%d", rc);
	}

	if (useglobal)
		mutex_unlock(&dasd_vol_info_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
static int dasd_eckd_is_ese(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.vol_info.ese;
}

static int dasd_eckd_ext_pool_id(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.extent_pool_id;
}

/*
 * This value represents the total amount of available space. As more space is
 * allocated by ESE volumes, this value will decrease.
 * The data for this value is therefore updated on any call.
 */
static int dasd_eckd_space_configured(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = dasd_eckd_read_vol_info(device);

	return rc ? : private->vsq.space_configured;
}

/*
 * The value of space allocated by an ESE volume may have changed and is
 * therefore updated on any call.
 */
static int dasd_eckd_space_allocated(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = dasd_eckd_read_vol_info(device);

	return rc ? : private->vsq.space_allocated;
}

static int dasd_eckd_logical_capacity(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.logical_capacity;
}

static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
{
	struct ext_pool_exhaust_work_data *data;
	struct dasd_device *device;
	struct dasd_device *base;

	data = container_of(work, struct ext_pool_exhaust_work_data, worker);
	device = data->device;
	base = data->base;

	if (!base)
		base = device;
	if (dasd_eckd_space_configured(base) != 0) {
		dasd_generic_space_avail(device);
	} else {
		dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
		DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
	}

	dasd_put_device(device);
	kfree(data);
}
static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
				      struct dasd_ccw_req *cqr)
{
	struct ext_pool_exhaust_work_data *data;

	data = kzalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return -ENOMEM;
	INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
	dasd_get_device(device);
	data->device = device;

	if (cqr->block)
		data->base = cqr->block->base;
	else if (cqr->basedev)
		data->base = cqr->basedev;
	else
		data->base = NULL;

	schedule_work(&data->worker);

	return 0;
}

static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
					struct dasd_rssd_lcq *lcq)
{
	struct dasd_eckd_private *private = device->private;
	int pool_id = dasd_eckd_ext_pool_id(device);
	struct dasd_ext_pool_sum eps;
	int i;

	for (i = 0; i < lcq->pool_count; i++) {
		eps = lcq->ext_pool_sum[i];
		if (eps.pool_id == pool_id) {
			memcpy(&private->eps, &eps,
			       sizeof(struct dasd_ext_pool_sum));
		}
	}
}

/* Read Extent Pool Information - Logical Configuration Query */
static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_lcq *lcq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*lcq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		return PTR_ERR(cqr);
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	memset(prssdp, 0, sizeof(*prssdp));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_LCQ;	/* Logical Configuration Query */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = virt_to_dma32(prssdp);

	lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
	memset(lcq, 0, sizeof(*lcq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*lcq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = virt_to_dma32(lcq);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		dasd_eckd_cpy_ext_pool_data(device, lcq);
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the logical configuration failed with rc=%d", rc);
	}

	dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}

/*
 * Depending on the device type, the extent size is specified either as
 * cylinders per extent (CKD) or size per extent (FBA).
 * A 1 GB size corresponds to 1113 cyl, and 16 MB to 21 cyl.
 */
static int dasd_eckd_ext_size(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_ext_pool_sum eps = private->eps;

	if (!eps.flags.extent_size_valid)
		return 0;
	if (eps.extent_size.size_1G)
		return 1113;
	if (eps.extent_size.size_16M)
		return 21;

	return 0;
}

static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.warn_thrshld;
}

static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.flags.capacity_at_warnlevel;
}

/*
 * Extent Pool out of space
 */
static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.flags.pool_oos;
}

/*
 * Build CP for Perform Subsystem Function - SSC.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0xc0;
	if (enable_pav) {
		psf_ssc_data->suborder |= 0x08;
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = virt_to_dma32(psf_ssc_data);
	ccw->count = 66;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

/*
 * Perform Subsystem Function.
 * It is necessary to trigger CIO for channel revalidation since this
 * call might change behaviour of DASD devices.
 */
static int
dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
		  unsigned long flags)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	/*
	 * set flags, e.g. turn on failfast to prevent blocking;
	 * the calling function should handle failed requests
	 */
	cqr->flags |= flags;

	rc = dasd_sleep_on(cqr);
	if (!rc)
		/* trigger CIO to reprobe devices */
		css_schedule_reprobe();
	else if (cqr->intrc == -EAGAIN)
		rc = -EAGAIN;

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
turn on failfast to prevent blocking; 1928 * the calling function should handle failed requests 1929 */ 1930 cqr->flags |= flags; 1931 1932 rc = dasd_sleep_on(cqr); 1933 if (!rc) 1934 /* trigger CIO to reprobe devices */ 1935 css_schedule_reprobe(); 1936 else if (cqr->intrc == -EAGAIN) 1937 rc = -EAGAIN; 1938 1939 dasd_sfree_request(cqr, cqr->memdev); 1940 return rc; 1941 } 1942 1943 /* 1944 * Validate the storage server of the current device. 1945 */ 1946 static int dasd_eckd_validate_server(struct dasd_device *device, 1947 unsigned long flags) 1948 { 1949 struct dasd_eckd_private *private = device->private; 1950 int enable_pav, rc; 1951 1952 if (private->uid.type == UA_BASE_PAV_ALIAS || 1953 private->uid.type == UA_HYPER_PAV_ALIAS) 1954 return 0; 1955 if (dasd_nopav || MACHINE_IS_VM) 1956 enable_pav = 0; 1957 else 1958 enable_pav = 1; 1959 rc = dasd_eckd_psf_ssc(device, enable_pav, flags); 1960 1961 /* the requested feature may not be available on the server, 1962 * therefore just report the error and continue */ 1963 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x " 1964 "returned rc=%d", private->uid.ssid, rc); 1965 return rc; 1966 } 1967 1968 /* 1969 * worker to do a validate server in case of a lost pathgroup 1970 */ 1971 static void dasd_eckd_do_validate_server(struct work_struct *work) 1972 { 1973 struct dasd_device *device = container_of(work, struct dasd_device, 1974 kick_validate); 1975 unsigned long flags = 0; 1976 1977 set_bit(DASD_CQR_FLAGS_FAILFAST, &flags); 1978 if (dasd_eckd_validate_server(device, flags) 1979 == -EAGAIN) { 1980 /* schedule worker again if failed */ 1981 schedule_work(&device->kick_validate); 1982 return; 1983 } 1984 1985 dasd_put_device(device); 1986 } 1987 1988 static void dasd_eckd_kick_validate_server(struct dasd_device *device) 1989 { 1990 dasd_get_device(device); 1991 /* exit if device not online or in offline processing */ 1992 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || 1993 device->state < DASD_STATE_ONLINE) { 1994 dasd_put_device(device); 1995 return; 1996 } 1997 /* queue call to do_validate_server to the kernel event daemon. */ 1998 if (!schedule_work(&device->kick_validate)) 1999 dasd_put_device(device); 2000 } 2001 2002 /* 2003 * return whether the device is the copy relation primary, if a copy relation is active 2004 */ 2005 static int dasd_device_is_primary(struct dasd_device *device) 2006 { 2007 if (!device->copy) 2008 return 1; 2009 2010 if (device->copy->active->device == device) 2011 return 1; 2012 2013 return 0; 2014 } 2015 2016 static int dasd_eckd_alloc_block(struct dasd_device *device) 2017 { 2018 struct dasd_block *block; 2019 struct dasd_uid temp_uid; 2020 2021 if (!dasd_device_is_primary(device)) 2022 return 0; 2023 2024 dasd_eckd_get_uid(device, &temp_uid); 2025 if (temp_uid.type == UA_BASE_DEVICE) { 2026 block = dasd_alloc_block(); 2027 if (IS_ERR(block)) { 2028 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 2029 "could not allocate dasd block structure"); 2030 return PTR_ERR(block); 2031 } 2032 device->block = block; 2033 block->base = device; 2034 } 2035 return 0; 2036 } 2037 2038 static bool dasd_eckd_pprc_enabled(struct dasd_device *device) 2039 { 2040 struct dasd_eckd_private *private = device->private; 2041 2042 return private->rdc_data.facilities.PPRC_enabled; 2043 } 2044 2045 /* 2046 * Check device characteristics. 2047 * If the device is accessible using ECKD discipline, the device is enabled.
2048 */ 2049 static int 2050 dasd_eckd_check_characteristics(struct dasd_device *device) 2051 { 2052 struct dasd_eckd_private *private = device->private; 2053 int rc, i; 2054 int readonly; 2055 unsigned long value; 2056 2057 /* setup work queue for validate server*/ 2058 INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server); 2059 /* setup work queue for summary unit check */ 2060 INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check); 2061 2062 if (!ccw_device_is_pathgroup(device->cdev)) { 2063 dev_warn(&device->cdev->dev, 2064 "A channel path group could not be established\n"); 2065 return -EIO; 2066 } 2067 if (!ccw_device_is_multipath(device->cdev)) { 2068 dev_info(&device->cdev->dev, 2069 "The DASD is not operating in multipath mode\n"); 2070 } 2071 if (!private) { 2072 private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA); 2073 if (!private) { 2074 dev_warn(&device->cdev->dev, 2075 "Allocating memory for private DASD data " 2076 "failed\n"); 2077 return -ENOMEM; 2078 } 2079 device->private = private; 2080 } else { 2081 memset(private, 0, sizeof(*private)); 2082 } 2083 /* Invalidate status of initial analysis. */ 2084 private->init_cqr_status = -1; 2085 /* Set default cache operations. */ 2086 private->attrib.operation = DASD_NORMAL_CACHE; 2087 private->attrib.nr_cyl = 0; 2088 2089 /* Read Configuration Data */ 2090 rc = dasd_eckd_read_conf(device); 2091 if (rc) 2092 goto out_err1; 2093 2094 /* set some default values */ 2095 device->default_expires = DASD_EXPIRES; 2096 device->default_retries = DASD_RETRIES; 2097 device->path_thrhld = DASD_ECKD_PATH_THRHLD; 2098 device->path_interval = DASD_ECKD_PATH_INTERVAL; 2099 device->aq_timeouts = DASD_RETRIES_MAX; 2100 2101 if (private->conf.gneq) { 2102 value = 1; 2103 for (i = 0; i < private->conf.gneq->timeout.value; i++) 2104 value = 10 * value; 2105 value = value * private->conf.gneq->timeout.number; 2106 /* do not accept useless values */ 2107 if (value != 0 && value <= DASD_EXPIRES_MAX) 2108 device->default_expires = value; 2109 } 2110 2111 /* Read Device Characteristics */ 2112 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, 2113 &private->rdc_data, 64); 2114 if (rc) { 2115 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 2116 "Read device characteristic failed, rc=%d", rc); 2117 goto out_err1; 2118 } 2119 2120 /* setup PPRC for device from devmap */ 2121 rc = dasd_devmap_set_device_copy_relation(device->cdev, 2122 dasd_eckd_pprc_enabled(device)); 2123 if (rc) { 2124 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 2125 "copy relation setup failed, rc=%d", rc); 2126 goto out_err1; 2127 } 2128 2129 /* check if block device is needed and allocate in case */ 2130 rc = dasd_eckd_alloc_block(device); 2131 if (rc) 2132 goto out_err1; 2133 2134 /* register lcu with alias handling, enable PAV */ 2135 rc = dasd_alias_make_device_known_to_lcu(device); 2136 if (rc) 2137 goto out_err2; 2138 2139 dasd_eckd_validate_server(device, 0); 2140 2141 /* device may report different configuration data after LCU setup */ 2142 rc = dasd_eckd_read_conf(device); 2143 if (rc) 2144 goto out_err3; 2145 2146 dasd_eckd_read_fc_security(device); 2147 dasd_path_create_kobjects(device); 2148 2149 /* Read Feature Codes */ 2150 dasd_eckd_read_features(device); 2151 2152 /* Read Volume Information */ 2153 dasd_eckd_read_vol_info(device); 2154 2155 /* Read Extent Pool Information */ 2156 dasd_eckd_read_ext_pool_info(device); 2157 2158 if ((device->features & DASD_FEATURE_USERAW) && 2159 !(private->rdc_data.facilities.RT_in_LR)) { 2160 
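		/*
		 * Raw-track access transfers complete tracks and is only
		 * possible if the storage server reports the required read
		 * track facility (RT_in_LR) in its device characteristics.
		 */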
dev_err(&device->cdev->dev, "The storage server does not " 2161 "support raw-track access\n"); 2162 rc = -EINVAL; 2163 goto out_err3; 2164 } 2165 2166 /* find the valid cylinder size */ 2167 if (private->rdc_data.no_cyl == LV_COMPAT_CYL && 2168 private->rdc_data.long_no_cyl) 2169 private->real_cyl = private->rdc_data.long_no_cyl; 2170 else 2171 private->real_cyl = private->rdc_data.no_cyl; 2172 2173 private->fcx_max_data = get_fcx_max_data(device); 2174 2175 readonly = dasd_device_is_ro(device); 2176 if (readonly) 2177 set_bit(DASD_FLAG_DEVICE_RO, &device->flags); 2178 2179 dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) " 2180 "with %d cylinders, %d heads, %d sectors%s\n", 2181 private->rdc_data.dev_type, 2182 private->rdc_data.dev_model, 2183 private->rdc_data.cu_type, 2184 private->rdc_data.cu_model.model, 2185 private->real_cyl, 2186 private->rdc_data.trk_per_cyl, 2187 private->rdc_data.sec_per_trk, 2188 readonly ? ", read-only device" : ""); 2189 return 0; 2190 2191 out_err3: 2192 dasd_alias_disconnect_device_from_lcu(device); 2193 out_err2: 2194 dasd_free_block(device->block); 2195 device->block = NULL; 2196 out_err1: 2197 dasd_eckd_clear_conf_data(device); 2198 dasd_path_remove_kobjects(device); 2199 kfree(device->private); 2200 device->private = NULL; 2201 return rc; 2202 } 2203 2204 static void dasd_eckd_uncheck_device(struct dasd_device *device) 2205 { 2206 struct dasd_eckd_private *private = device->private; 2207 2208 if (!private) 2209 return; 2210 2211 dasd_alias_disconnect_device_from_lcu(device); 2212 private->conf.ned = NULL; 2213 private->conf.sneq = NULL; 2214 private->conf.vdsneq = NULL; 2215 private->conf.gneq = NULL; 2216 dasd_eckd_clear_conf_data(device); 2217 dasd_path_remove_kobjects(device); 2218 } 2219 2220 static struct dasd_ccw_req * 2221 dasd_eckd_analysis_ccw(struct dasd_device *device) 2222 { 2223 struct dasd_eckd_private *private = device->private; 2224 struct eckd_count *count_data; 2225 struct LO_eckd_data *LO_data; 2226 struct dasd_ccw_req *cqr; 2227 struct ccw1 *ccw; 2228 int cplength, datasize; 2229 int i; 2230 2231 cplength = 8; 2232 datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data); 2233 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device, 2234 NULL); 2235 if (IS_ERR(cqr)) 2236 return cqr; 2237 ccw = cqr->cpaddr; 2238 /* Define extent for the first 2 tracks. */ 2239 define_extent(ccw++, cqr->data, 0, 1, 2240 DASD_ECKD_CCW_READ_COUNT, device, 0); 2241 LO_data = cqr->data + sizeof(struct DE_eckd_data); 2242 /* Locate record for the first 4 records on track 0. */ 2243 ccw[-1].flags |= CCW_FLAG_CC; 2244 locate_record(ccw++, LO_data++, 0, 0, 4, 2245 DASD_ECKD_CCW_READ_COUNT, device, 0); 2246 2247 count_data = private->count_area; 2248 for (i = 0; i < 4; i++) { 2249 ccw[-1].flags |= CCW_FLAG_CC; 2250 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; 2251 ccw->flags = 0; 2252 ccw->count = 8; 2253 ccw->cda = virt_to_dma32(count_data); 2254 ccw++; 2255 count_data++; 2256 } 2257 2258 /* Locate record for the first record on track 1. */ 2259 ccw[-1].flags |= CCW_FLAG_CC; 2260 locate_record(ccw++, LO_data++, 1, 0, 1, 2261 DASD_ECKD_CCW_READ_COUNT, device, 0); 2262 /* Read count ccw. 
*/ 2263 ccw[-1].flags |= CCW_FLAG_CC; 2264 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; 2265 ccw->flags = 0; 2266 ccw->count = 8; 2267 ccw->cda = virt_to_dma32(count_data); 2268 2269 cqr->block = NULL; 2270 cqr->startdev = device; 2271 cqr->memdev = device; 2272 cqr->retries = 255; 2273 cqr->buildclk = get_tod_clock(); 2274 cqr->status = DASD_CQR_FILLED; 2275 /* Set flags to suppress output for expected errors */ 2276 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 2277 2278 return cqr; 2279 } 2280 2281 /* differentiate between 'no record found' and any other error */ 2282 static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr) 2283 { 2284 char *sense; 2285 if (init_cqr->status == DASD_CQR_DONE) 2286 return INIT_CQR_OK; 2287 else if (init_cqr->status == DASD_CQR_NEED_ERP || 2288 init_cqr->status == DASD_CQR_FAILED) { 2289 sense = dasd_get_sense(&init_cqr->irb); 2290 if (sense && (sense[1] & SNS1_NO_REC_FOUND)) 2291 return INIT_CQR_UNFORMATTED; 2292 else 2293 return INIT_CQR_ERROR; 2294 } else 2295 return INIT_CQR_ERROR; 2296 } 2297 2298 /* 2299 * This is the callback function for the init_analysis cqr. It saves 2300 * the status of the initial analysis ccw before it frees it and kicks 2301 * the device to continue the startup sequence. This will call 2302 * dasd_eckd_do_analysis again (if the device has not been marked 2303 * for deletion in the meantime). 2304 */ 2305 static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, 2306 void *data) 2307 { 2308 struct dasd_device *device = init_cqr->startdev; 2309 struct dasd_eckd_private *private = device->private; 2310 2311 private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr); 2312 dasd_sfree_request(init_cqr, device); 2313 dasd_kick_device(device); 2314 } 2315 2316 static int dasd_eckd_start_analysis(struct dasd_block *block) 2317 { 2318 struct dasd_ccw_req *init_cqr; 2319 2320 init_cqr = dasd_eckd_analysis_ccw(block->base); 2321 if (IS_ERR(init_cqr)) 2322 return PTR_ERR(init_cqr); 2323 init_cqr->callback = dasd_eckd_analysis_callback; 2324 init_cqr->callback_data = NULL; 2325 init_cqr->expires = 5*HZ; 2326 /* first try without ERP, so we can later handle unformatted 2327 * devices as a special case 2328 */ 2329 clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags); 2330 init_cqr->retries = 0; 2331 dasd_add_request_head(init_cqr); 2332 return -EAGAIN; 2333 } 2334 2335 static int dasd_eckd_end_analysis(struct dasd_block *block) 2336 { 2337 struct dasd_device *device = block->base; 2338 struct dasd_eckd_private *private = device->private; 2339 struct eckd_count *count_area; 2340 unsigned int sb, blk_per_trk; 2341 int status, i; 2342 struct dasd_ccw_req *init_cqr; 2343 2344 status = private->init_cqr_status; 2345 private->init_cqr_status = -1; 2346 if (status == INIT_CQR_ERROR) { 2347 /* try again, this time with full ERP */ 2348 init_cqr = dasd_eckd_analysis_ccw(device); 2349 dasd_sleep_on(init_cqr); 2350 status = dasd_eckd_analysis_evaluation(init_cqr); 2351 dasd_sfree_request(init_cqr, device); 2352 } 2353 2354 if (device->features & DASD_FEATURE_USERAW) { 2355 block->bp_block = DASD_RAW_BLOCKSIZE; 2356 blk_per_trk = DASD_RAW_BLOCK_PER_TRACK; 2357 block->s2b_shift = 3; 2358 goto raw; 2359 } 2360 2361 if (status == INIT_CQR_UNFORMATTED) { 2362 dev_warn(&device->cdev->dev, "The DASD is not formatted\n"); 2363 return -EMEDIUMTYPE; 2364 } else if (status == INIT_CQR_ERROR) { 2365 dev_err(&device->cdev->dev, 2366 "Detecting the DASD disk layout failed because " 2367 "of an I/O error\n"); 2368 return -EIO; 2369 }
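	/*
	 * The count areas read during analysis determine the disk layout.
	 * For the compatible disk layout (CDL) the first records on track 0
	 * carry a 4 byte key; with sizes_trk0[] = { 28, 148, 84 } the loop
	 * below therefore expects (illustrative values derived from the
	 * checks, not read from a device):
	 *
	 *	record 1: kl == 4, dl == 28 - 4
	 *	record 2: kl == 4, dl == 148 - 4
	 *	record 3: kl == 4, dl == 84 - 4
	 *
	 * The Linux disk layout (LDL) instead expects keyless records of
	 * one uniform data length.
	 */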
2370 2371 private->uses_cdl = 1; 2372 /* Check Track 0 for Compatible Disk Layout */ 2373 count_area = NULL; 2374 for (i = 0; i < 3; i++) { 2375 if (private->count_area[i].kl != 4 || 2376 private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 || 2377 private->count_area[i].cyl != 0 || 2378 private->count_area[i].head != count_area_head[i] || 2379 private->count_area[i].record != count_area_rec[i]) { 2380 private->uses_cdl = 0; 2381 break; 2382 } 2383 } 2384 if (i == 3) 2385 count_area = &private->count_area[3]; 2386 2387 if (private->uses_cdl == 0) { 2388 for (i = 0; i < 5; i++) { 2389 if ((private->count_area[i].kl != 0) || 2390 (private->count_area[i].dl != 2391 private->count_area[0].dl) || 2392 private->count_area[i].cyl != 0 || 2393 private->count_area[i].head != count_area_head[i] || 2394 private->count_area[i].record != count_area_rec[i]) 2395 break; 2396 } 2397 if (i == 5) 2398 count_area = &private->count_area[0]; 2399 } else { 2400 if (private->count_area[3].record == 1) 2401 dev_warn(&device->cdev->dev, 2402 "Track 0 has no records following the VTOC\n"); 2403 } 2404 2405 if (count_area != NULL && count_area->kl == 0) { 2406 /* we found nothing violating our disk layout */ 2407 if (dasd_check_blocksize(count_area->dl) == 0) 2408 block->bp_block = count_area->dl; 2409 } 2410 if (block->bp_block == 0) { 2411 dev_warn(&device->cdev->dev, 2412 "The disk layout of the DASD is not supported\n"); 2413 return -EMEDIUMTYPE; 2414 } 2415 block->s2b_shift = 0; /* bits to shift 512 to get a block */ 2416 for (sb = 512; sb < block->bp_block; sb = sb << 1) 2417 block->s2b_shift++; 2418 2419 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block); 2420 2421 raw: 2422 block->blocks = ((unsigned long) private->real_cyl * 2423 private->rdc_data.trk_per_cyl * 2424 blk_per_trk); 2425 2426 dev_info(&device->cdev->dev, 2427 "DASD with %u KB/block, %lu KB total size, %u KB/track, " 2428 "%s\n", (block->bp_block >> 10), 2429 (((unsigned long) private->real_cyl * 2430 private->rdc_data.trk_per_cyl * 2431 blk_per_trk * (block->bp_block >> 9)) >> 1), 2432 ((blk_per_trk * block->bp_block) >> 10), 2433 private->uses_cdl ? 
2434 "compatible disk layout" : "linux disk layout"); 2435 2436 return 0; 2437 } 2438 2439 static int dasd_eckd_do_analysis(struct dasd_block *block) 2440 { 2441 struct dasd_eckd_private *private = block->base->private; 2442 2443 if (private->init_cqr_status < 0) 2444 return dasd_eckd_start_analysis(block); 2445 else 2446 return dasd_eckd_end_analysis(block); 2447 } 2448 2449 static int dasd_eckd_basic_to_ready(struct dasd_device *device) 2450 { 2451 return dasd_alias_add_device(device); 2452 }; 2453 2454 static int dasd_eckd_online_to_ready(struct dasd_device *device) 2455 { 2456 if (cancel_work_sync(&device->reload_device)) 2457 dasd_put_device(device); 2458 if (cancel_work_sync(&device->kick_validate)) 2459 dasd_put_device(device); 2460 2461 return 0; 2462 }; 2463 2464 static int dasd_eckd_basic_to_known(struct dasd_device *device) 2465 { 2466 return dasd_alias_remove_device(device); 2467 }; 2468 2469 static int 2470 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo) 2471 { 2472 struct dasd_eckd_private *private = block->base->private; 2473 2474 if (dasd_check_blocksize(block->bp_block) == 0) { 2475 geo->sectors = recs_per_track(&private->rdc_data, 2476 0, block->bp_block); 2477 } 2478 geo->cylinders = private->rdc_data.no_cyl; 2479 geo->heads = private->rdc_data.trk_per_cyl; 2480 return 0; 2481 } 2482 2483 /* 2484 * Build the TCW request for the format check 2485 */ 2486 static struct dasd_ccw_req * 2487 dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata, 2488 int enable_pav, struct eckd_count *fmt_buffer, 2489 int rpt) 2490 { 2491 struct dasd_eckd_private *start_priv; 2492 struct dasd_device *startdev = NULL; 2493 struct tidaw *last_tidaw = NULL; 2494 struct dasd_ccw_req *cqr; 2495 struct itcw *itcw; 2496 int itcw_size; 2497 int count; 2498 int rc; 2499 int i; 2500 2501 if (enable_pav) 2502 startdev = dasd_alias_get_start_dev(base); 2503 2504 if (!startdev) 2505 startdev = base; 2506 2507 start_priv = startdev->private; 2508 2509 count = rpt * (fdata->stop_unit - fdata->start_unit + 1); 2510 2511 /* 2512 * we're adding 'count' amount of tidaw to the itcw. 
2513 * calculate the corresponding itcw_size 2514 */ 2515 itcw_size = itcw_calc_size(0, count, 0); 2516 2517 cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev); 2518 if (IS_ERR(cqr)) 2519 return cqr; 2520 2521 start_priv->count++; 2522 2523 itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0); 2524 if (IS_ERR(itcw)) { 2525 rc = -EINVAL; 2526 goto out_err; 2527 } 2528 2529 cqr->cpaddr = itcw_get_tcw(itcw); 2530 rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit, 2531 DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count, 2532 sizeof(struct eckd_count), 2533 count * sizeof(struct eckd_count), 0, rpt); 2534 if (rc) 2535 goto out_err; 2536 2537 for (i = 0; i < count; i++) { 2538 last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++, 2539 sizeof(struct eckd_count)); 2540 if (IS_ERR(last_tidaw)) { 2541 rc = -EINVAL; 2542 goto out_err; 2543 } 2544 } 2545 2546 last_tidaw->flags |= TIDAW_FLAGS_LAST; 2547 itcw_finalize(itcw); 2548 2549 cqr->cpmode = 1; 2550 cqr->startdev = startdev; 2551 cqr->memdev = startdev; 2552 cqr->basedev = base; 2553 cqr->retries = startdev->default_retries; 2554 cqr->expires = startdev->default_expires * HZ; 2555 cqr->buildclk = get_tod_clock(); 2556 cqr->status = DASD_CQR_FILLED; 2557 /* Set flags to suppress output for expected errors */ 2558 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); 2559 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); 2560 2561 return cqr; 2562 2563 out_err: 2564 dasd_sfree_request(cqr, startdev); 2565 2566 return ERR_PTR(rc); 2567 } 2568 2569 /* 2570 * Build the CCW request for the format check 2571 */ 2572 static struct dasd_ccw_req * 2573 dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata, 2574 int enable_pav, struct eckd_count *fmt_buffer, int rpt) 2575 { 2576 struct dasd_eckd_private *start_priv; 2577 struct dasd_eckd_private *base_priv; 2578 struct dasd_device *startdev = NULL; 2579 struct dasd_ccw_req *cqr; 2580 struct ccw1 *ccw; 2581 void *data; 2582 int cplength, datasize; 2583 int use_prefix; 2584 int count; 2585 int i; 2586 2587 if (enable_pav) 2588 startdev = dasd_alias_get_start_dev(base); 2589 2590 if (!startdev) 2591 startdev = base; 2592 2593 start_priv = startdev->private; 2594 base_priv = base->private; 2595 2596 count = rpt * (fdata->stop_unit - fdata->start_unit + 1); 2597 2598 use_prefix = base_priv->features.feature[8] & 0x01; 2599 2600 if (use_prefix) { 2601 cplength = 1; 2602 datasize = sizeof(struct PFX_eckd_data); 2603 } else { 2604 cplength = 2; 2605 datasize = sizeof(struct DE_eckd_data) + 2606 sizeof(struct LO_eckd_data); 2607 } 2608 cplength += count; 2609 2610 cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev); 2611 if (IS_ERR(cqr)) 2612 return cqr; 2613 2614 start_priv->count++; 2615 data = cqr->data; 2616 ccw = cqr->cpaddr; 2617 2618 if (use_prefix) { 2619 prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit, 2620 DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0, 2621 count, 0, 0); 2622 } else { 2623 define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit, 2624 DASD_ECKD_CCW_READ_COUNT, startdev, 0); 2625 2626 data += sizeof(struct DE_eckd_data); 2627 ccw[-1].flags |= CCW_FLAG_CC; 2628 2629 locate_record(ccw++, data, fdata->start_unit, 0, count, 2630 DASD_ECKD_CCW_READ_COUNT, base, 0); 2631 } 2632 2633 for (i = 0; i < count; i++) { 2634 ccw[-1].flags |= CCW_FLAG_CC; 2635 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; 2636 ccw->flags = CCW_FLAG_SLI; 2637 ccw->count = 8; 2638 ccw->cda = virt_to_dma32(fmt_buffer); 2639 ccw++; 2640 
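		/* advance to the count area entry for the next record */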
fmt_buffer++; 2641 } 2642 2643 cqr->startdev = startdev; 2644 cqr->memdev = startdev; 2645 cqr->basedev = base; 2646 cqr->retries = DASD_RETRIES; 2647 cqr->expires = startdev->default_expires * HZ; 2648 cqr->buildclk = get_tod_clock(); 2649 cqr->status = DASD_CQR_FILLED; 2650 /* Set flags to suppress output for expected errors */ 2651 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 2652 2653 return cqr; 2654 } 2655 2656 static struct dasd_ccw_req * 2657 dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev, 2658 struct format_data_t *fdata, int enable_pav) 2659 { 2660 struct dasd_eckd_private *base_priv; 2661 struct dasd_eckd_private *start_priv; 2662 struct dasd_ccw_req *fcp; 2663 struct eckd_count *ect; 2664 struct ch_t address; 2665 struct ccw1 *ccw; 2666 void *data; 2667 int rpt; 2668 int cplength, datasize; 2669 int i, j; 2670 int intensity = 0; 2671 int r0_perm; 2672 int nr_tracks; 2673 int use_prefix; 2674 2675 if (enable_pav) 2676 startdev = dasd_alias_get_start_dev(base); 2677 2678 if (!startdev) 2679 startdev = base; 2680 2681 start_priv = startdev->private; 2682 base_priv = base->private; 2683 2684 rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize); 2685 2686 nr_tracks = fdata->stop_unit - fdata->start_unit + 1; 2687 2688 /* 2689 * fdata->intensity is a bit string that tells us what to do: 2690 * Bit 0: write record zero 2691 * Bit 1: write home address, currently not supported 2692 * Bit 2: invalidate tracks 2693 * Bit 3: use OS/390 compatible disk layout (cdl) 2694 * Bit 4: do not allow storage subsystem to modify record zero 2695 * Only some bit combinations do make sense. 2696 */ 2697 if (fdata->intensity & 0x10) { 2698 r0_perm = 0; 2699 intensity = fdata->intensity & ~0x10; 2700 } else { 2701 r0_perm = 1; 2702 intensity = fdata->intensity; 2703 } 2704 2705 use_prefix = base_priv->features.feature[8] & 0x01; 2706 2707 switch (intensity) { 2708 case 0x00: /* Normal format */ 2709 case 0x08: /* Normal format, use cdl. */ 2710 cplength = 2 + (rpt*nr_tracks); 2711 if (use_prefix) 2712 datasize = sizeof(struct PFX_eckd_data) + 2713 sizeof(struct LO_eckd_data) + 2714 rpt * nr_tracks * sizeof(struct eckd_count); 2715 else 2716 datasize = sizeof(struct DE_eckd_data) + 2717 sizeof(struct LO_eckd_data) + 2718 rpt * nr_tracks * sizeof(struct eckd_count); 2719 break; 2720 case 0x01: /* Write record zero and format track. */ 2721 case 0x09: /* Write record zero and format track, use cdl. */ 2722 cplength = 2 + rpt * nr_tracks; 2723 if (use_prefix) 2724 datasize = sizeof(struct PFX_eckd_data) + 2725 sizeof(struct LO_eckd_data) + 2726 sizeof(struct eckd_count) + 2727 rpt * nr_tracks * sizeof(struct eckd_count); 2728 else 2729 datasize = sizeof(struct DE_eckd_data) + 2730 sizeof(struct LO_eckd_data) + 2731 sizeof(struct eckd_count) + 2732 rpt * nr_tracks * sizeof(struct eckd_count); 2733 break; 2734 case 0x04: /* Invalidate track. */ 2735 case 0x0c: /* Invalidate track, use cdl. 
*/ 2736 cplength = 3; 2737 if (use_prefix) 2738 datasize = sizeof(struct PFX_eckd_data) + 2739 sizeof(struct LO_eckd_data) + 2740 sizeof(struct eckd_count); 2741 else 2742 datasize = sizeof(struct DE_eckd_data) + 2743 sizeof(struct LO_eckd_data) + 2744 sizeof(struct eckd_count); 2745 break; 2746 default: 2747 dev_warn(&startdev->cdev->dev, 2748 "An I/O control call used incorrect flags 0x%x\n", 2749 fdata->intensity); 2750 return ERR_PTR(-EINVAL); 2751 } 2752 2753 fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev); 2754 if (IS_ERR(fcp)) 2755 return fcp; 2756 2757 start_priv->count++; 2758 data = fcp->data; 2759 ccw = fcp->cpaddr; 2760 2761 switch (intensity & ~0x08) { 2762 case 0x00: /* Normal format. */ 2763 if (use_prefix) { 2764 prefix(ccw++, (struct PFX_eckd_data *) data, 2765 fdata->start_unit, fdata->stop_unit, 2766 DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2767 /* grant subsystem permission to format R0 */ 2768 if (r0_perm) 2769 ((struct PFX_eckd_data *)data) 2770 ->define_extent.ga_extended |= 0x04; 2771 data += sizeof(struct PFX_eckd_data); 2772 } else { 2773 define_extent(ccw++, (struct DE_eckd_data *) data, 2774 fdata->start_unit, fdata->stop_unit, 2775 DASD_ECKD_CCW_WRITE_CKD, startdev, 0); 2776 /* grant subsystem permission to format R0 */ 2777 if (r0_perm) 2778 ((struct DE_eckd_data *) data) 2779 ->ga_extended |= 0x04; 2780 data += sizeof(struct DE_eckd_data); 2781 } 2782 ccw[-1].flags |= CCW_FLAG_CC; 2783 locate_record(ccw++, (struct LO_eckd_data *) data, 2784 fdata->start_unit, 0, rpt*nr_tracks, 2785 DASD_ECKD_CCW_WRITE_CKD, base, 2786 fdata->blksize); 2787 data += sizeof(struct LO_eckd_data); 2788 break; 2789 case 0x01: /* Write record zero + format track. */ 2790 if (use_prefix) { 2791 prefix(ccw++, (struct PFX_eckd_data *) data, 2792 fdata->start_unit, fdata->stop_unit, 2793 DASD_ECKD_CCW_WRITE_RECORD_ZERO, 2794 base, startdev); 2795 data += sizeof(struct PFX_eckd_data); 2796 } else { 2797 define_extent(ccw++, (struct DE_eckd_data *) data, 2798 fdata->start_unit, fdata->stop_unit, 2799 DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0); 2800 data += sizeof(struct DE_eckd_data); 2801 } 2802 ccw[-1].flags |= CCW_FLAG_CC; 2803 locate_record(ccw++, (struct LO_eckd_data *) data, 2804 fdata->start_unit, 0, rpt * nr_tracks + 1, 2805 DASD_ECKD_CCW_WRITE_RECORD_ZERO, base, 2806 base->block->bp_block); 2807 data += sizeof(struct LO_eckd_data); 2808 break; 2809 case 0x04: /* Invalidate track. 
*/ 2810 if (use_prefix) { 2811 prefix(ccw++, (struct PFX_eckd_data *) data, 2812 fdata->start_unit, fdata->stop_unit, 2813 DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2814 data += sizeof(struct PFX_eckd_data); 2815 } else { 2816 define_extent(ccw++, (struct DE_eckd_data *) data, 2817 fdata->start_unit, fdata->stop_unit, 2818 DASD_ECKD_CCW_WRITE_CKD, startdev, 0); 2819 data += sizeof(struct DE_eckd_data); 2820 } 2821 ccw[-1].flags |= CCW_FLAG_CC; 2822 locate_record(ccw++, (struct LO_eckd_data *) data, 2823 fdata->start_unit, 0, 1, 2824 DASD_ECKD_CCW_WRITE_CKD, base, 8); 2825 data += sizeof(struct LO_eckd_data); 2826 break; 2827 } 2828 2829 for (j = 0; j < nr_tracks; j++) { 2830 /* calculate cylinder and head for the current track */ 2831 set_ch_t(&address, 2832 (fdata->start_unit + j) / 2833 base_priv->rdc_data.trk_per_cyl, 2834 (fdata->start_unit + j) % 2835 base_priv->rdc_data.trk_per_cyl); 2836 if (intensity & 0x01) { /* write record zero */ 2837 ect = (struct eckd_count *) data; 2838 data += sizeof(struct eckd_count); 2839 ect->cyl = address.cyl; 2840 ect->head = address.head; 2841 ect->record = 0; 2842 ect->kl = 0; 2843 ect->dl = 8; 2844 ccw[-1].flags |= CCW_FLAG_CC; 2845 ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO; 2846 ccw->flags = CCW_FLAG_SLI; 2847 ccw->count = 8; 2848 ccw->cda = virt_to_dma32(ect); 2849 ccw++; 2850 } 2851 if ((intensity & ~0x08) & 0x04) { /* erase track */ 2852 ect = (struct eckd_count *) data; 2853 data += sizeof(struct eckd_count); 2854 ect->cyl = address.cyl; 2855 ect->head = address.head; 2856 ect->record = 1; 2857 ect->kl = 0; 2858 ect->dl = 0; 2859 ccw[-1].flags |= CCW_FLAG_CC; 2860 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD; 2861 ccw->flags = CCW_FLAG_SLI; 2862 ccw->count = 8; 2863 ccw->cda = virt_to_dma32(ect); 2864 } else { /* write remaining records */ 2865 for (i = 0; i < rpt; i++) { 2866 ect = (struct eckd_count *) data; 2867 data += sizeof(struct eckd_count); 2868 ect->cyl = address.cyl; 2869 ect->head = address.head; 2870 ect->record = i + 1; 2871 ect->kl = 0; 2872 ect->dl = fdata->blksize; 2873 /* 2874 * Check for special tracks 0-1 2875 * when formatting CDL 2876 */ 2877 if ((intensity & 0x08) && 2878 address.cyl == 0 && address.head == 0) { 2879 if (i < 3) { 2880 ect->kl = 4; 2881 ect->dl = sizes_trk0[i] - 4; 2882 } 2883 } 2884 if ((intensity & 0x08) && 2885 address.cyl == 0 && address.head == 1) { 2886 ect->kl = 44; 2887 ect->dl = LABEL_SIZE - 44; 2888 } 2889 ccw[-1].flags |= CCW_FLAG_CC; 2890 if (i != 0 || j == 0) 2891 ccw->cmd_code = 2892 DASD_ECKD_CCW_WRITE_CKD; 2893 else 2894 ccw->cmd_code = 2895 DASD_ECKD_CCW_WRITE_CKD_MT; 2896 ccw->flags = CCW_FLAG_SLI; 2897 ccw->count = 8; 2898 ccw->cda = virt_to_dma32(ect); 2899 ccw++; 2900 } 2901 } 2902 } 2903 2904 fcp->startdev = startdev; 2905 fcp->memdev = startdev; 2906 fcp->basedev = base; 2907 fcp->retries = 256; 2908 fcp->expires = startdev->default_expires * HZ; 2909 fcp->buildclk = get_tod_clock(); 2910 fcp->status = DASD_CQR_FILLED; 2911 2912 return fcp; 2913 } 2914 2915 /* 2916 * Wrapper function to build a CCW request depending on input data 2917 */ 2918 static struct dasd_ccw_req * 2919 dasd_eckd_format_build_ccw_req(struct dasd_device *base, 2920 struct format_data_t *fdata, int enable_pav, 2921 int tpm, struct eckd_count *fmt_buffer, int rpt) 2922 { 2923 struct dasd_ccw_req *ccw_req; 2924 2925 if (!fmt_buffer) { 2926 ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav); 2927 } else { 2928 if (tpm) 2929 ccw_req = dasd_eckd_build_check_tcw(base, fdata, 2930 enable_pav, 2931 
fmt_buffer, rpt); 2932 else 2933 ccw_req = dasd_eckd_build_check(base, fdata, enable_pav, 2934 fmt_buffer, rpt); 2935 } 2936 2937 return ccw_req; 2938 } 2939 2940 /* 2941 * Sanity checks on format_data 2942 */ 2943 static int dasd_eckd_format_sanity_checks(struct dasd_device *base, 2944 struct format_data_t *fdata) 2945 { 2946 struct dasd_eckd_private *private = base->private; 2947 2948 if (fdata->start_unit >= 2949 (private->real_cyl * private->rdc_data.trk_per_cyl)) { 2950 dev_warn(&base->cdev->dev, 2951 "Start track number %u used in formatting is too big\n", 2952 fdata->start_unit); 2953 return -EINVAL; 2954 } 2955 if (fdata->stop_unit >= 2956 (private->real_cyl * private->rdc_data.trk_per_cyl)) { 2957 dev_warn(&base->cdev->dev, 2958 "Stop track number %u used in formatting is too big\n", 2959 fdata->stop_unit); 2960 return -EINVAL; 2961 } 2962 if (fdata->start_unit > fdata->stop_unit) { 2963 dev_warn(&base->cdev->dev, 2964 "Start track %u used in formatting exceeds end track\n", 2965 fdata->start_unit); 2966 return -EINVAL; 2967 } 2968 if (dasd_check_blocksize(fdata->blksize) != 0) { 2969 dev_warn(&base->cdev->dev, 2970 "The DASD cannot be formatted with block size %u\n", 2971 fdata->blksize); 2972 return -EINVAL; 2973 } 2974 return 0; 2975 } 2976 2977 /* 2978 * This function will process format_data originally coming from an IOCTL 2979 */ 2980 static int dasd_eckd_format_process_data(struct dasd_device *base, 2981 struct format_data_t *fdata, 2982 int enable_pav, int tpm, 2983 struct eckd_count *fmt_buffer, int rpt, 2984 struct irb *irb) 2985 { 2986 struct dasd_eckd_private *private = base->private; 2987 struct dasd_ccw_req *cqr, *n; 2988 struct list_head format_queue; 2989 struct dasd_device *device; 2990 char *sense = NULL; 2991 int old_start, old_stop, format_step; 2992 int step, retry; 2993 int rc; 2994 2995 rc = dasd_eckd_format_sanity_checks(base, fdata); 2996 if (rc) 2997 return rc; 2998 2999 INIT_LIST_HEAD(&format_queue); 3000 3001 old_start = fdata->start_unit; 3002 old_stop = fdata->stop_unit; 3003 3004 if (!tpm && fmt_buffer != NULL) { 3005 /* Command Mode / Format Check */ 3006 format_step = 1; 3007 } else if (tpm && fmt_buffer != NULL) { 3008 /* Transport Mode / Format Check */ 3009 format_step = DASD_CQR_MAX_CCW / rpt; 3010 } else { 3011 /* Normal Formatting */ 3012 format_step = DASD_CQR_MAX_CCW / 3013 recs_per_track(&private->rdc_data, 0, fdata->blksize); 3014 } 3015 3016 do { 3017 retry = 0; 3018 while (fdata->start_unit <= old_stop) { 3019 step = fdata->stop_unit - fdata->start_unit + 1; 3020 if (step > format_step) { 3021 fdata->stop_unit = 3022 fdata->start_unit + format_step - 1; 3023 } 3024 3025 cqr = dasd_eckd_format_build_ccw_req(base, fdata, 3026 enable_pav, tpm, 3027 fmt_buffer, rpt); 3028 if (IS_ERR(cqr)) { 3029 rc = PTR_ERR(cqr); 3030 if (rc == -ENOMEM) { 3031 if (list_empty(&format_queue)) 3032 goto out; 3033 /* 3034 * not enough memory available, start 3035 * requests retry after first requests 3036 * were finished 3037 */ 3038 retry = 1; 3039 break; 3040 } 3041 goto out_err; 3042 } 3043 list_add_tail(&cqr->blocklist, &format_queue); 3044 3045 if (fmt_buffer) { 3046 step = fdata->stop_unit - fdata->start_unit + 1; 3047 fmt_buffer += rpt * step; 3048 } 3049 fdata->start_unit = fdata->stop_unit + 1; 3050 fdata->stop_unit = old_stop; 3051 } 3052 3053 rc = dasd_sleep_on_queue(&format_queue); 3054 3055 out_err: 3056 list_for_each_entry_safe(cqr, n, &format_queue, blocklist) { 3057 device = cqr->startdev; 3058 private = device->private; 3059 3060 if 
(cqr->status == DASD_CQR_FAILED) { 3061 /* 3062 * Only get sense data if called by format 3063 * check 3064 */ 3065 if (fmt_buffer && irb) { 3066 sense = dasd_get_sense(&cqr->irb); 3067 memcpy(irb, &cqr->irb, sizeof(*irb)); 3068 } 3069 rc = -EIO; 3070 } 3071 list_del_init(&cqr->blocklist); 3072 dasd_ffree_request(cqr, device); 3073 private->count--; 3074 } 3075 3076 if (rc && rc != -EIO) 3077 goto out; 3078 if (rc == -EIO) { 3079 /* 3080 * In case fewer than the expected records are on the 3081 * track, we will most likely get a 'No Record Found' 3082 * error (in command mode) or a 'File Protected' error 3083 * (in transport mode). Those particular cases shouldn't 3084 * pass the -EIO to the IOCTL, therefore reset the rc 3085 * and continue. 3086 */ 3087 if (sense && 3088 (sense[1] & SNS1_NO_REC_FOUND || 3089 sense[1] & SNS1_FILE_PROTECTED)) 3090 retry = 1; 3091 else 3092 goto out; 3093 } 3094 3095 } while (retry); 3096 3097 out: 3098 fdata->start_unit = old_start; 3099 fdata->stop_unit = old_stop; 3100 3101 return rc; 3102 } 3103 3104 static int dasd_eckd_format_device(struct dasd_device *base, 3105 struct format_data_t *fdata, int enable_pav) 3106 { 3107 return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL, 3108 0, NULL); 3109 } 3110 3111 static bool test_and_set_format_track(struct dasd_format_entry *to_format, 3112 struct dasd_ccw_req *cqr) 3113 { 3114 struct dasd_block *block = cqr->block; 3115 struct dasd_format_entry *format; 3116 unsigned long flags; 3117 bool rc = false; 3118 3119 spin_lock_irqsave(&block->format_lock, flags); 3120 if (cqr->trkcount != atomic_read(&block->trkcount)) { 3121 /* 3122 * The number of formatted tracks has changed after request 3123 * start and we can not tell if the current track was involved. 3124 * To avoid data corruption treat it as if the current track is 3125 * involved 3126 */ 3127 rc = true; 3128 goto out; 3129 } 3130 list_for_each_entry(format, &block->format_list, list) { 3131 if (format->track == to_format->track) { 3132 rc = true; 3133 goto out; 3134 } 3135 } 3136 list_add_tail(&to_format->list, &block->format_list); 3137 3138 out: 3139 spin_unlock_irqrestore(&block->format_lock, flags); 3140 return rc; 3141 } 3142 3143 static void clear_format_track(struct dasd_format_entry *format, 3144 struct dasd_block *block) 3145 { 3146 unsigned long flags; 3147 3148 spin_lock_irqsave(&block->format_lock, flags); 3149 atomic_inc(&block->trkcount); 3150 list_del_init(&format->list); 3151 spin_unlock_irqrestore(&block->format_lock, flags); 3152 } 3153 3154 /* 3155 * Callback function to free ESE format requests. 
3156 */ 3157 static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data) 3158 { 3159 struct dasd_device *device = cqr->startdev; 3160 struct dasd_eckd_private *private = device->private; 3161 struct dasd_format_entry *format = data; 3162 3163 clear_format_track(format, cqr->basedev->block); 3164 private->count--; 3165 dasd_ffree_request(cqr, device); 3166 } 3167 3168 static struct dasd_ccw_req * 3169 dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr, 3170 struct irb *irb) 3171 { 3172 struct dasd_eckd_private *private; 3173 struct dasd_format_entry *format; 3174 struct format_data_t fdata; 3175 unsigned int recs_per_trk; 3176 struct dasd_ccw_req *fcqr; 3177 struct dasd_device *base; 3178 struct dasd_block *block; 3179 unsigned int blksize; 3180 struct request *req; 3181 sector_t first_trk; 3182 sector_t last_trk; 3183 sector_t curr_trk; 3184 int rc; 3185 3186 req = dasd_get_callback_data(cqr); 3187 block = cqr->block; 3188 base = block->base; 3189 private = base->private; 3190 blksize = block->bp_block; 3191 recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 3192 format = &startdev->format_entry; 3193 3194 first_trk = blk_rq_pos(req) >> block->s2b_shift; 3195 sector_div(first_trk, recs_per_trk); 3196 last_trk = 3197 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; 3198 sector_div(last_trk, recs_per_trk); 3199 rc = dasd_eckd_track_from_irb(irb, base, &curr_trk); 3200 if (rc) 3201 return ERR_PTR(rc); 3202 3203 if (curr_trk < first_trk || curr_trk > last_trk) { 3204 DBF_DEV_EVENT(DBF_WARNING, startdev, 3205 "ESE error track %llu not within range %llu - %llu\n", 3206 curr_trk, first_trk, last_trk); 3207 return ERR_PTR(-EINVAL); 3208 } 3209 format->track = curr_trk; 3210 /* test if track is already in formatting by another thread */ 3211 if (test_and_set_format_track(format, cqr)) { 3212 /* this is no real error so do not count down retries */ 3213 cqr->retries++; 3214 return ERR_PTR(-EEXIST); 3215 } 3216 3217 fdata.start_unit = curr_trk; 3218 fdata.stop_unit = curr_trk; 3219 fdata.blksize = blksize; 3220 fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0; 3221 3222 rc = dasd_eckd_format_sanity_checks(base, &fdata); 3223 if (rc) 3224 return ERR_PTR(-EINVAL); 3225 3226 /* 3227 * We're building the request with PAV disabled as we're reusing 3228 * the former startdev. 3229 */ 3230 fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0); 3231 if (IS_ERR(fcqr)) 3232 return fcqr; 3233 3234 fcqr->callback = dasd_eckd_ese_format_cb; 3235 fcqr->callback_data = (void *) format; 3236 3237 return fcqr; 3238 } 3239 3240 /* 3241 * When data is read from an unformatted area of an ESE volume, this function 3242 * returns zeroed data and thereby mimics a read of zero data. 3243 * 3244 * The first unformatted track is the one that got the NRF error, the address is 3245 * encoded in the sense data. 3246 * 3247 * All tracks before have returned valid data and should not be touched. 3248 * All tracks after the unformatted track might be formatted or not. This is 3249 * currently not known, remember the processed data and return the remainder of 3250 * the request to the blocklayer in __dasd_cleanup_cqr(). 
3251 */ 3252 static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb) 3253 { 3254 struct dasd_eckd_private *private; 3255 sector_t first_trk, last_trk; 3256 sector_t first_blk, last_blk; 3257 unsigned int blksize, off; 3258 unsigned int recs_per_trk; 3259 struct dasd_device *base; 3260 struct req_iterator iter; 3261 struct dasd_block *block; 3262 unsigned int skip_block; 3263 unsigned int blk_count; 3264 struct request *req; 3265 struct bio_vec bv; 3266 sector_t curr_trk; 3267 sector_t end_blk; 3268 char *dst; 3269 int rc; 3270 3271 req = (struct request *) cqr->callback_data; 3272 base = cqr->block->base; 3273 blksize = base->block->bp_block; 3274 block = cqr->block; 3275 private = base->private; 3276 skip_block = 0; 3277 blk_count = 0; 3278 3279 recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 3280 first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift; 3281 sector_div(first_trk, recs_per_trk); 3282 last_trk = last_blk = 3283 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; 3284 sector_div(last_trk, recs_per_trk); 3285 rc = dasd_eckd_track_from_irb(irb, base, &curr_trk); 3286 if (rc) 3287 return rc; 3288 3289 /* sanity check if the current track from sense data is valid */ 3290 if (curr_trk < first_trk || curr_trk > last_trk) { 3291 DBF_DEV_EVENT(DBF_WARNING, base, 3292 "ESE error track %llu not within range %llu - %llu\n", 3293 curr_trk, first_trk, last_trk); 3294 return -EINVAL; 3295 } 3296 3297 /* 3298 * if not the first track got the NRF error we have to skip over valid 3299 * blocks 3300 */ 3301 if (curr_trk != first_trk) 3302 skip_block = curr_trk * recs_per_trk - first_blk; 3303 3304 /* we have no information beyond the current track */ 3305 end_blk = (curr_trk + 1) * recs_per_trk; 3306 3307 rq_for_each_segment(bv, req, iter) { 3308 dst = bvec_virt(&bv); 3309 for (off = 0; off < bv.bv_len; off += blksize) { 3310 if (first_blk + blk_count >= end_blk) { 3311 cqr->proc_bytes = blk_count * blksize; 3312 return 0; 3313 } 3314 if (dst && !skip_block) 3315 memset(dst, 0, blksize); 3316 else 3317 skip_block--; 3318 dst += blksize; 3319 blk_count++; 3320 } 3321 } 3322 return 0; 3323 } 3324 3325 /* 3326 * Helper function to count consecutive records of a single track. 3327 */ 3328 static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start, 3329 int max) 3330 { 3331 int head; 3332 int i; 3333 3334 head = fmt_buffer[start].head; 3335 3336 /* 3337 * There are 3 conditions where we stop counting: 3338 * - if data reoccurs (same head and record may reoccur), which may 3339 * happen due to the way DASD_ECKD_CCW_READ_COUNT works 3340 * - when the head changes, because we're iterating over several tracks 3341 * then (DASD_ECKD_CCW_READ_COUNT_MT) 3342 * - when we've reached the end of sensible data in the buffer (the 3343 * record will be 0 then) 3344 */ 3345 for (i = start; i < max; i++) { 3346 if (i > start) { 3347 if ((fmt_buffer[i].head == head && 3348 fmt_buffer[i].record == 1) || 3349 fmt_buffer[i].head != head || 3350 fmt_buffer[i].record == 0) 3351 break; 3352 } 3353 } 3354 3355 return i - start; 3356 } 3357 3358 /* 3359 * Evaluate a given range of tracks. Data like number of records, blocksize, 3360 * record ids, and key length are compared with expected data. 3361 * 3362 * If a mismatch occurs, the corresponding error bit is set, as well as 3363 * additional information, depending on the error. 
3364 */ 3365 static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer, 3366 struct format_check_t *cdata, 3367 int rpt_max, int rpt_exp, 3368 int trk_per_cyl, int tpm) 3369 { 3370 struct ch_t geo; 3371 int max_entries; 3372 int count = 0; 3373 int trkcount; 3374 int blksize; 3375 int pos = 0; 3376 int i, j; 3377 int kl; 3378 3379 trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1; 3380 max_entries = trkcount * rpt_max; 3381 3382 for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) { 3383 /* Calculate the correct next starting position in the buffer */ 3384 if (tpm) { 3385 while (fmt_buffer[pos].record == 0 && 3386 fmt_buffer[pos].dl == 0) { 3387 if (pos++ > max_entries) 3388 break; 3389 } 3390 } else { 3391 if (i != cdata->expect.start_unit) 3392 pos += rpt_max - count; 3393 } 3394 3395 /* Calculate the expected geo values for the current track */ 3396 set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl); 3397 3398 /* Count and check number of records */ 3399 count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max); 3400 3401 if (count < rpt_exp) { 3402 cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS; 3403 break; 3404 } 3405 if (count > rpt_exp) { 3406 cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS; 3407 break; 3408 } 3409 3410 for (j = 0; j < count; j++, pos++) { 3411 blksize = cdata->expect.blksize; 3412 kl = 0; 3413 3414 /* 3415 * Set special values when checking CDL formatted 3416 * devices. 3417 */ 3418 if ((cdata->expect.intensity & 0x08) && 3419 geo.cyl == 0 && geo.head == 0) { 3420 if (j < 3) { 3421 blksize = sizes_trk0[j] - 4; 3422 kl = 4; 3423 } 3424 } 3425 if ((cdata->expect.intensity & 0x08) && 3426 geo.cyl == 0 && geo.head == 1) { 3427 blksize = LABEL_SIZE - 44; 3428 kl = 44; 3429 } 3430 3431 /* Check blocksize */ 3432 if (fmt_buffer[pos].dl != blksize) { 3433 cdata->result = DASD_FMT_ERR_BLKSIZE; 3434 goto out; 3435 } 3436 /* Check if key length is 0 */ 3437 if (fmt_buffer[pos].kl != kl) { 3438 cdata->result = DASD_FMT_ERR_KEY_LENGTH; 3439 goto out; 3440 } 3441 /* Check if record_id is correct */ 3442 if (fmt_buffer[pos].cyl != geo.cyl || 3443 fmt_buffer[pos].head != geo.head || 3444 fmt_buffer[pos].record != (j + 1)) { 3445 cdata->result = DASD_FMT_ERR_RECORD_ID; 3446 goto out; 3447 } 3448 } 3449 } 3450 3451 out: 3452 /* 3453 * In case of no errors, we need to decrease by one 3454 * to get the correct positions. 3455 */ 3456 if (!cdata->result) { 3457 i--; 3458 pos--; 3459 } 3460 3461 cdata->unit = i; 3462 cdata->num_records = count; 3463 cdata->rec = fmt_buffer[pos].record; 3464 cdata->blksize = fmt_buffer[pos].dl; 3465 cdata->key_length = fmt_buffer[pos].kl; 3466 } 3467 3468 /* 3469 * Check the format of a range of tracks of a DASD. 
3470 */ 3471 static int dasd_eckd_check_device_format(struct dasd_device *base, 3472 struct format_check_t *cdata, 3473 int enable_pav) 3474 { 3475 struct dasd_eckd_private *private = base->private; 3476 struct eckd_count *fmt_buffer; 3477 struct irb irb; 3478 int rpt_max, rpt_exp; 3479 int fmt_buffer_size; 3480 int trk_per_cyl; 3481 int trkcount; 3482 int tpm = 0; 3483 int rc; 3484 3485 trk_per_cyl = private->rdc_data.trk_per_cyl; 3486 3487 /* Get maximum and expected amount of records per track */ 3488 rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1; 3489 rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize); 3490 3491 trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1; 3492 fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count); 3493 3494 fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA); 3495 if (!fmt_buffer) 3496 return -ENOMEM; 3497 3498 /* 3499 * A certain FICON feature subset is needed to operate in transport 3500 * mode. Additionally, the support for transport mode is implicitly 3501 * checked by comparing the buffer size with fcx_max_data. As long as 3502 * the buffer size is smaller we can operate in transport mode and 3503 * process multiple tracks. If not, only one track at once is being 3504 * processed using command mode. 3505 */ 3506 if ((private->features.feature[40] & 0x04) && 3507 fmt_buffer_size <= private->fcx_max_data) 3508 tpm = 1; 3509 3510 rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav, 3511 tpm, fmt_buffer, rpt_max, &irb); 3512 if (rc && rc != -EIO) 3513 goto out; 3514 if (rc == -EIO) { 3515 /* 3516 * If our first attempt with transport mode enabled comes back 3517 * with an incorrect length error, we're going to retry the 3518 * check with command mode. 
3519 */ 3520 if (tpm && scsw_cstat(&irb.scsw) == 0x40) { 3521 tpm = 0; 3522 rc = dasd_eckd_format_process_data(base, &cdata->expect, 3523 enable_pav, tpm, 3524 fmt_buffer, rpt_max, 3525 &irb); 3526 if (rc) 3527 goto out; 3528 } else { 3529 goto out; 3530 } 3531 } 3532 3533 dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp, 3534 trk_per_cyl, tpm); 3535 3536 out: 3537 kfree(fmt_buffer); 3538 3539 return rc; 3540 } 3541 3542 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr) 3543 { 3544 if (cqr->retries < 0) { 3545 cqr->status = DASD_CQR_FAILED; 3546 return; 3547 } 3548 cqr->status = DASD_CQR_FILLED; 3549 if (cqr->block && (cqr->startdev != cqr->block->base)) { 3550 dasd_eckd_reset_ccw_to_base_io(cqr); 3551 cqr->startdev = cqr->block->base; 3552 cqr->lpm = dasd_path_get_opm(cqr->block->base); 3553 } 3554 }; 3555 3556 static dasd_erp_fn_t 3557 dasd_eckd_erp_action(struct dasd_ccw_req * cqr) 3558 { 3559 struct dasd_device *device = (struct dasd_device *) cqr->startdev; 3560 struct ccw_device *cdev = device->cdev; 3561 3562 switch (cdev->id.cu_type) { 3563 case 0x3990: 3564 case 0x2105: 3565 case 0x2107: 3566 case 0x1750: 3567 return dasd_3990_erp_action; 3568 case 0x9343: 3569 case 0x3880: 3570 default: 3571 return dasd_default_erp_action; 3572 } 3573 } 3574 3575 static dasd_erp_fn_t 3576 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr) 3577 { 3578 return dasd_default_erp_postaction; 3579 } 3580 3581 static void dasd_eckd_check_for_device_change(struct dasd_device *device, 3582 struct dasd_ccw_req *cqr, 3583 struct irb *irb) 3584 { 3585 char mask; 3586 char *sense = NULL; 3587 struct dasd_eckd_private *private = device->private; 3588 3589 /* first of all check for state change pending interrupt */ 3590 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 3591 if ((scsw_dstat(&irb->scsw) & mask) == mask) { 3592 /* 3593 * for alias only, not in offline processing 3594 * and only if not suspended 3595 */ 3596 if (!device->block && private->lcu && 3597 device->state == DASD_STATE_ONLINE && 3598 !test_bit(DASD_FLAG_OFFLINE, &device->flags) && 3599 !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) { 3600 /* schedule worker to reload device */ 3601 dasd_reload_device(device); 3602 } 3603 dasd_generic_handle_state_change(device); 3604 return; 3605 } 3606 3607 sense = dasd_get_sense(irb); 3608 if (!sense) 3609 return; 3610 3611 /* summary unit check */ 3612 if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) && 3613 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) { 3614 if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) { 3615 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3616 "eckd suc: device already notified"); 3617 return; 3618 } 3619 sense = dasd_get_sense(irb); 3620 if (!sense) { 3621 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3622 "eckd suc: no reason code available"); 3623 clear_bit(DASD_FLAG_SUC, &device->flags); 3624 return; 3625 3626 } 3627 private->suc_reason = sense[8]; 3628 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x", 3629 "eckd handle summary unit check: reason", 3630 private->suc_reason); 3631 dasd_get_device(device); 3632 if (!schedule_work(&device->suc_work)) 3633 dasd_put_device(device); 3634 3635 return; 3636 } 3637 3638 /* service information message SIM */ 3639 if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) && 3640 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { 3641 dasd_3990_erp_handle_sim(device, sense); 3642 return; 3643 } 3644 3645 /* loss of device reservation is handled via base devices only 3646 * as alias devices may 
be used with several bases 3647 */ 3648 if (device->block && (sense[27] & DASD_SENSE_BIT_0) && 3649 (sense[7] == 0x3F) && 3650 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) && 3651 test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) { 3652 if (device->features & DASD_FEATURE_FAILONSLCK) 3653 set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags); 3654 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags); 3655 dev_err(&device->cdev->dev, 3656 "The device reservation was lost\n"); 3657 } 3658 } 3659 3660 static int dasd_eckd_ras_sanity_checks(struct dasd_device *device, 3661 unsigned int first_trk, 3662 unsigned int last_trk) 3663 { 3664 struct dasd_eckd_private *private = device->private; 3665 unsigned int trks_per_vol; 3666 int rc = 0; 3667 3668 trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl; 3669 3670 if (first_trk >= trks_per_vol) { 3671 dev_warn(&device->cdev->dev, 3672 "Start track number %u used in the space release command is too big\n", 3673 first_trk); 3674 rc = -EINVAL; 3675 } else if (last_trk >= trks_per_vol) { 3676 dev_warn(&device->cdev->dev, 3677 "Stop track number %u used in the space release command is too big\n", 3678 last_trk); 3679 rc = -EINVAL; 3680 } else if (first_trk > last_trk) { 3681 dev_warn(&device->cdev->dev, 3682 "Start track %u used in the space release command exceeds the end track\n", 3683 first_trk); 3684 rc = -EINVAL; 3685 } 3686 return rc; 3687 } 3688 3689 /* 3690 * Helper function to count the amount of involved extents within a given range 3691 * with extent alignment in mind. 3692 */ 3693 static int count_exts(unsigned int from, unsigned int to, int trks_per_ext) 3694 { 3695 int cur_pos = 0; 3696 int count = 0; 3697 int tmp; 3698 3699 if (from == to) 3700 return 1; 3701 3702 /* Count first partial extent */ 3703 if (from % trks_per_ext != 0) { 3704 tmp = from + trks_per_ext - (from % trks_per_ext) - 1; 3705 if (tmp > to) 3706 tmp = to; 3707 cur_pos = tmp - from + 1; 3708 count++; 3709 } 3710 /* Count full extents */ 3711 if (to - (from + cur_pos) + 1 >= trks_per_ext) { 3712 tmp = to - ((to - trks_per_ext + 1) % trks_per_ext); 3713 count += (tmp - (from + cur_pos) + 1) / trks_per_ext; 3714 cur_pos = tmp; 3715 } 3716 /* Count last partial extent */ 3717 if (cur_pos < to) 3718 count++; 3719 3720 return count; 3721 } 3722 3723 static int dasd_in_copy_relation(struct dasd_device *device) 3724 { 3725 struct dasd_pprc_data_sc4 *temp; 3726 int rc; 3727 3728 if (!dasd_eckd_pprc_enabled(device)) 3729 return 0; 3730 3731 temp = kzalloc(sizeof(*temp), GFP_KERNEL); 3732 if (!temp) 3733 return -ENOMEM; 3734 3735 rc = dasd_eckd_query_pprc_status(device, temp); 3736 if (!rc) 3737 rc = temp->dev_info[0].state; 3738 3739 kfree(temp); 3740 return rc; 3741 } 3742 3743 /* 3744 * Release allocated space for a given range or an entire volume. 
3745 */ 3746 static struct dasd_ccw_req * 3747 dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block, 3748 struct request *req, unsigned int first_trk, 3749 unsigned int last_trk, int by_extent) 3750 { 3751 struct dasd_eckd_private *private = device->private; 3752 struct dasd_dso_ras_ext_range *ras_range; 3753 struct dasd_rssd_features *features; 3754 struct dasd_dso_ras_data *ras_data; 3755 u16 heads, beg_head, end_head; 3756 int cur_to_trk, cur_from_trk; 3757 struct dasd_ccw_req *cqr; 3758 u32 beg_cyl, end_cyl; 3759 int copy_relation; 3760 struct ccw1 *ccw; 3761 int trks_per_ext; 3762 size_t ras_size; 3763 size_t size; 3764 int nr_exts; 3765 void *rq; 3766 int i; 3767 3768 if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk)) 3769 return ERR_PTR(-EINVAL); 3770 3771 copy_relation = dasd_in_copy_relation(device); 3772 if (copy_relation < 0) 3773 return ERR_PTR(copy_relation); 3774 3775 rq = req ? blk_mq_rq_to_pdu(req) : NULL; 3776 3777 features = &private->features; 3778 3779 trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl; 3780 nr_exts = 0; 3781 if (by_extent) 3782 nr_exts = count_exts(first_trk, last_trk, trks_per_ext); 3783 ras_size = sizeof(*ras_data); 3784 size = ras_size + (nr_exts * sizeof(*ras_range)); 3785 3786 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq); 3787 if (IS_ERR(cqr)) { 3788 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 3789 "Could not allocate RAS request"); 3790 return cqr; 3791 } 3792 3793 ras_data = cqr->data; 3794 memset(ras_data, 0, size); 3795 3796 ras_data->order = DSO_ORDER_RAS; 3797 ras_data->flags.vol_type = 0; /* CKD volume */ 3798 /* Release specified extents or entire volume */ 3799 ras_data->op_flags.by_extent = by_extent; 3800 /* 3801 * This bit guarantees initialisation of tracks within an extent that is 3802 * not fully specified, but is only supported with a certain feature 3803 * subset and for devices not in a copy relation. 
3804 */ 3805 if (features->feature[56] & 0x01 && !copy_relation) 3806 ras_data->op_flags.guarantee_init = 1; 3807 3808 ras_data->lss = private->conf.ned->ID; 3809 ras_data->dev_addr = private->conf.ned->unit_addr; 3810 ras_data->nr_exts = nr_exts; 3811 3812 if (by_extent) { 3813 heads = private->rdc_data.trk_per_cyl; 3814 cur_from_trk = first_trk; 3815 cur_to_trk = first_trk + trks_per_ext - 3816 (first_trk % trks_per_ext) - 1; 3817 if (cur_to_trk > last_trk) 3818 cur_to_trk = last_trk; 3819 ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size); 3820 3821 for (i = 0; i < nr_exts; i++) { 3822 beg_cyl = cur_from_trk / heads; 3823 beg_head = cur_from_trk % heads; 3824 end_cyl = cur_to_trk / heads; 3825 end_head = cur_to_trk % heads; 3826 3827 set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head); 3828 set_ch_t(&ras_range->end_ext, end_cyl, end_head); 3829 3830 cur_from_trk = cur_to_trk + 1; 3831 cur_to_trk = cur_from_trk + trks_per_ext - 1; 3832 if (cur_to_trk > last_trk) 3833 cur_to_trk = last_trk; 3834 ras_range++; 3835 } 3836 } 3837 3838 ccw = cqr->cpaddr; 3839 ccw->cda = virt_to_dma32(cqr->data); 3840 ccw->cmd_code = DASD_ECKD_CCW_DSO; 3841 ccw->count = size; 3842 3843 cqr->startdev = device; 3844 cqr->memdev = device; 3845 cqr->block = block; 3846 cqr->retries = 256; 3847 cqr->expires = device->default_expires * HZ; 3848 cqr->buildclk = get_tod_clock(); 3849 cqr->status = DASD_CQR_FILLED; 3850 3851 return cqr; 3852 } 3853 3854 static int dasd_eckd_release_space_full(struct dasd_device *device) 3855 { 3856 struct dasd_ccw_req *cqr; 3857 int rc; 3858 3859 cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0); 3860 if (IS_ERR(cqr)) 3861 return PTR_ERR(cqr); 3862 3863 rc = dasd_sleep_on_interruptible(cqr); 3864 3865 dasd_sfree_request(cqr, cqr->memdev); 3866 3867 return rc; 3868 } 3869 3870 static int dasd_eckd_release_space_trks(struct dasd_device *device, 3871 unsigned int from, unsigned int to) 3872 { 3873 struct dasd_eckd_private *private = device->private; 3874 struct dasd_block *block = device->block; 3875 struct dasd_ccw_req *cqr, *n; 3876 struct list_head ras_queue; 3877 unsigned int device_exts; 3878 int trks_per_ext; 3879 int stop, step; 3880 int cur_pos; 3881 int rc = 0; 3882 int retry; 3883 3884 INIT_LIST_HEAD(&ras_queue); 3885 3886 device_exts = private->real_cyl / dasd_eckd_ext_size(device); 3887 trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl; 3888 3889 /* Make sure device limits are not exceeded */ 3890 step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX); 3891 cur_pos = from; 3892 3893 do { 3894 retry = 0; 3895 while (cur_pos < to) { 3896 stop = cur_pos + step - 3897 ((cur_pos + step) % trks_per_ext) - 1; 3898 if (stop > to) 3899 stop = to; 3900 3901 cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1); 3902 if (IS_ERR(cqr)) { 3903 rc = PTR_ERR(cqr); 3904 if (rc == -ENOMEM) { 3905 if (list_empty(&ras_queue)) 3906 goto out; 3907 retry = 1; 3908 break; 3909 } 3910 goto err_out; 3911 } 3912 3913 spin_lock_irq(&block->queue_lock); 3914 list_add_tail(&cqr->blocklist, &ras_queue); 3915 spin_unlock_irq(&block->queue_lock); 3916 cur_pos = stop + 1; 3917 } 3918 3919 rc = dasd_sleep_on_queue_interruptible(&ras_queue); 3920 3921 err_out: 3922 list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) { 3923 device = cqr->startdev; 3924 private = device->private; 3925 3926 spin_lock_irq(&block->queue_lock); 3927 list_del_init(&cqr->blocklist); 3928 spin_unlock_irq(&block->queue_lock); 3929 dasd_sfree_request(cqr, device); 3930 
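			/* one request less outstanding on this device */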
private->count--; 3931 } 3932 } while (retry); 3933 3934 out: 3935 return rc; 3936 } 3937 3938 static int dasd_eckd_release_space(struct dasd_device *device, 3939 struct format_data_t *rdata) 3940 { 3941 if (rdata->intensity & DASD_FMT_INT_ESE_FULL) 3942 return dasd_eckd_release_space_full(device); 3943 else if (rdata->intensity == 0) 3944 return dasd_eckd_release_space_trks(device, rdata->start_unit, 3945 rdata->stop_unit); 3946 else 3947 return -EINVAL; 3948 } 3949 3950 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( 3951 struct dasd_device *startdev, 3952 struct dasd_block *block, 3953 struct request *req, 3954 sector_t first_rec, 3955 sector_t last_rec, 3956 sector_t first_trk, 3957 sector_t last_trk, 3958 unsigned int first_offs, 3959 unsigned int last_offs, 3960 unsigned int blk_per_trk, 3961 unsigned int blksize) 3962 { 3963 struct dasd_eckd_private *private; 3964 dma64_t *idaws; 3965 struct LO_eckd_data *LO_data; 3966 struct dasd_ccw_req *cqr; 3967 struct ccw1 *ccw; 3968 struct req_iterator iter; 3969 struct bio_vec bv; 3970 char *dst; 3971 unsigned int off; 3972 int count, cidaw, cplength, datasize; 3973 sector_t recid; 3974 unsigned char cmd, rcmd; 3975 int use_prefix; 3976 struct dasd_device *basedev; 3977 3978 basedev = block->base; 3979 private = basedev->private; 3980 if (rq_data_dir(req) == READ) 3981 cmd = DASD_ECKD_CCW_READ_MT; 3982 else if (rq_data_dir(req) == WRITE) 3983 cmd = DASD_ECKD_CCW_WRITE_MT; 3984 else 3985 return ERR_PTR(-EINVAL); 3986 3987 /* Check struct bio and count the number of blocks for the request. */ 3988 count = 0; 3989 cidaw = 0; 3990 rq_for_each_segment(bv, req, iter) { 3991 if (bv.bv_len & (blksize - 1)) 3992 /* Eckd can only do full blocks. */ 3993 return ERR_PTR(-EINVAL); 3994 count += bv.bv_len >> (block->s2b_shift + 9); 3995 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) 3996 cidaw += bv.bv_len >> (block->s2b_shift + 9); 3997 } 3998 /* Paranoia. */ 3999 if (count != last_rec - first_rec + 1) 4000 return ERR_PTR(-EINVAL); 4001 4002 /* use the prefix command if available */ 4003 use_prefix = private->features.feature[8] & 0x01; 4004 if (use_prefix) { 4005 /* 1x prefix + number of blocks */ 4006 cplength = 2 + count; 4007 /* 1x prefix + cidaws*sizeof(long) */ 4008 datasize = sizeof(struct PFX_eckd_data) + 4009 sizeof(struct LO_eckd_data) + 4010 cidaw * sizeof(unsigned long); 4011 } else { 4012 /* 1x define extent + 1x locate record + number of blocks */ 4013 cplength = 2 + count; 4014 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */ 4015 datasize = sizeof(struct DE_eckd_data) + 4016 sizeof(struct LO_eckd_data) + 4017 cidaw * sizeof(unsigned long); 4018 } 4019 /* Find out the number of additional locate record ccws for cdl. */ 4020 if (private->uses_cdl && first_rec < 2*blk_per_trk) { 4021 if (last_rec >= 2*blk_per_trk) 4022 count = 2*blk_per_trk - first_rec; 4023 cplength += count; 4024 datasize += count*sizeof(struct LO_eckd_data); 4025 } 4026 /* Allocate the ccw request. */ 4027 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, 4028 startdev, blk_mq_rq_to_pdu(req)); 4029 if (IS_ERR(cqr)) 4030 return cqr; 4031 ccw = cqr->cpaddr; 4032 /* First ccw is define extent or prefix. */ 4033 if (use_prefix) { 4034 if (prefix(ccw++, cqr->data, first_trk, 4035 last_trk, cmd, basedev, startdev) == -EAGAIN) { 4036 /* Clock not in sync and XRC is enabled. 4037 * Try again later. 
4038 */ 4039 dasd_sfree_request(cqr, startdev); 4040 return ERR_PTR(-EAGAIN); 4041 } 4042 idaws = (dma64_t *)(cqr->data + sizeof(struct PFX_eckd_data)); 4043 } else { 4044 if (define_extent(ccw++, cqr->data, first_trk, 4045 last_trk, cmd, basedev, 0) == -EAGAIN) { 4046 /* Clock not in sync and XRC is enabled. 4047 * Try again later. 4048 */ 4049 dasd_sfree_request(cqr, startdev); 4050 return ERR_PTR(-EAGAIN); 4051 } 4052 idaws = (dma64_t *)(cqr->data + sizeof(struct DE_eckd_data)); 4053 } 4054 /* Build locate_record+read/write/ccws. */ 4055 LO_data = (struct LO_eckd_data *) (idaws + cidaw); 4056 recid = first_rec; 4057 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) { 4058 /* Only standard blocks so there is just one locate record. */ 4059 ccw[-1].flags |= CCW_FLAG_CC; 4060 locate_record(ccw++, LO_data++, first_trk, first_offs + 1, 4061 last_rec - recid + 1, cmd, basedev, blksize); 4062 } 4063 rq_for_each_segment(bv, req, iter) { 4064 dst = bvec_virt(&bv); 4065 if (dasd_page_cache) { 4066 char *copy = kmem_cache_alloc(dasd_page_cache, 4067 GFP_DMA | __GFP_NOWARN); 4068 if (copy && rq_data_dir(req) == WRITE) 4069 memcpy(copy + bv.bv_offset, dst, bv.bv_len); 4070 if (copy) 4071 dst = copy + bv.bv_offset; 4072 } 4073 for (off = 0; off < bv.bv_len; off += blksize) { 4074 sector_t trkid = recid; 4075 unsigned int recoffs = sector_div(trkid, blk_per_trk); 4076 rcmd = cmd; 4077 count = blksize; 4078 /* Locate record for cdl special block ? */ 4079 if (private->uses_cdl && recid < 2*blk_per_trk) { 4080 if (dasd_eckd_cdl_special(blk_per_trk, recid)){ 4081 rcmd |= 0x8; 4082 count = dasd_eckd_cdl_reclen(recid); 4083 if (count < blksize && 4084 rq_data_dir(req) == READ) 4085 memset(dst + count, 0xe5, 4086 blksize - count); 4087 } 4088 ccw[-1].flags |= CCW_FLAG_CC; 4089 locate_record(ccw++, LO_data++, 4090 trkid, recoffs + 1, 4091 1, rcmd, basedev, count); 4092 } 4093 /* Locate record for standard blocks ? */ 4094 if (private->uses_cdl && recid == 2*blk_per_trk) { 4095 ccw[-1].flags |= CCW_FLAG_CC; 4096 locate_record(ccw++, LO_data++, 4097 trkid, recoffs + 1, 4098 last_rec - recid + 1, 4099 cmd, basedev, count); 4100 } 4101 /* Read/write ccw. 
*/ 4102 ccw[-1].flags |= CCW_FLAG_CC; 4103 ccw->cmd_code = rcmd; 4104 ccw->count = count; 4105 if (idal_is_needed(dst, blksize)) { 4106 ccw->cda = virt_to_dma32(idaws); 4107 ccw->flags = CCW_FLAG_IDA; 4108 idaws = idal_create_words(idaws, dst, blksize); 4109 } else { 4110 ccw->cda = virt_to_dma32(dst); 4111 ccw->flags = 0; 4112 } 4113 ccw++; 4114 dst += blksize; 4115 recid++; 4116 } 4117 } 4118 if (blk_noretry_request(req) || 4119 block->base->features & DASD_FEATURE_FAILFAST) 4120 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 4121 cqr->startdev = startdev; 4122 cqr->memdev = startdev; 4123 cqr->block = block; 4124 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 4125 cqr->lpm = dasd_path_get_ppm(startdev); 4126 cqr->retries = startdev->default_retries; 4127 cqr->buildclk = get_tod_clock(); 4128 cqr->status = DASD_CQR_FILLED; 4129 4130 /* Set flags to suppress output for expected errors */ 4131 if (dasd_eckd_is_ese(basedev)) { 4132 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); 4133 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); 4134 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 4135 } 4136 4137 return cqr; 4138 } 4139 4140 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( 4141 struct dasd_device *startdev, 4142 struct dasd_block *block, 4143 struct request *req, 4144 sector_t first_rec, 4145 sector_t last_rec, 4146 sector_t first_trk, 4147 sector_t last_trk, 4148 unsigned int first_offs, 4149 unsigned int last_offs, 4150 unsigned int blk_per_trk, 4151 unsigned int blksize) 4152 { 4153 dma64_t *idaws; 4154 struct dasd_ccw_req *cqr; 4155 struct ccw1 *ccw; 4156 struct req_iterator iter; 4157 struct bio_vec bv; 4158 char *dst, *idaw_dst; 4159 unsigned int cidaw, cplength, datasize; 4160 unsigned int tlf; 4161 sector_t recid; 4162 unsigned char cmd; 4163 struct dasd_device *basedev; 4164 unsigned int trkcount, count, count_to_trk_end; 4165 unsigned int idaw_len, seg_len, part_len, len_to_track_end; 4166 unsigned char new_track, end_idaw; 4167 sector_t trkid; 4168 unsigned int recoffs; 4169 4170 basedev = block->base; 4171 if (rq_data_dir(req) == READ) 4172 cmd = DASD_ECKD_CCW_READ_TRACK_DATA; 4173 else if (rq_data_dir(req) == WRITE) 4174 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA; 4175 else 4176 return ERR_PTR(-EINVAL); 4177 4178 /* Track based I/O needs IDAWs for each page, and not just for 4179 * 64 bit addresses. We need additional idals for pages 4180 * that get filled from two tracks, so we use the number 4181 * of records as upper limit. 4182 */ 4183 cidaw = last_rec - first_rec + 1; 4184 trkcount = last_trk - first_trk + 1; 4185 4186 /* 1x prefix + one read/write ccw per track */ 4187 cplength = 1 + trkcount; 4188 4189 datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long); 4190 4191 /* Allocate the ccw request. */ 4192 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, 4193 startdev, blk_mq_rq_to_pdu(req)); 4194 if (IS_ERR(cqr)) 4195 return cqr; 4196 ccw = cqr->cpaddr; 4197 /* transfer length factor: how many bytes to read from the last track */ 4198 if (first_trk == last_trk) 4199 tlf = last_offs - first_offs + 1; 4200 else 4201 tlf = last_offs + 1; 4202 tlf *= blksize; 4203 4204 if (prefix_LRE(ccw++, cqr->data, first_trk, 4205 last_trk, cmd, basedev, startdev, 4206 1 /* format */, first_offs + 1, 4207 trkcount, blksize, 4208 tlf) == -EAGAIN) { 4209 /* Clock not in sync and XRC is enabled. 4210 * Try again later. 
4211 */ 4212 dasd_sfree_request(cqr, startdev); 4213 return ERR_PTR(-EAGAIN); 4214 } 4215 4216 /* 4217 * The translation of request into ccw programs must meet the 4218 * following conditions: 4219 * - all idaws but the first and the last must address full pages 4220 * (or 2K blocks on 31-bit) 4221 * - the scope of a ccw and it's idal ends with the track boundaries 4222 */ 4223 idaws = (dma64_t *)(cqr->data + sizeof(struct PFX_eckd_data)); 4224 recid = first_rec; 4225 new_track = 1; 4226 end_idaw = 0; 4227 len_to_track_end = 0; 4228 idaw_dst = NULL; 4229 idaw_len = 0; 4230 rq_for_each_segment(bv, req, iter) { 4231 dst = bvec_virt(&bv); 4232 seg_len = bv.bv_len; 4233 while (seg_len) { 4234 if (new_track) { 4235 trkid = recid; 4236 recoffs = sector_div(trkid, blk_per_trk); 4237 count_to_trk_end = blk_per_trk - recoffs; 4238 count = min((last_rec - recid + 1), 4239 (sector_t)count_to_trk_end); 4240 len_to_track_end = count * blksize; 4241 ccw[-1].flags |= CCW_FLAG_CC; 4242 ccw->cmd_code = cmd; 4243 ccw->count = len_to_track_end; 4244 ccw->cda = virt_to_dma32(idaws); 4245 ccw->flags = CCW_FLAG_IDA; 4246 ccw++; 4247 recid += count; 4248 new_track = 0; 4249 /* first idaw for a ccw may start anywhere */ 4250 if (!idaw_dst) 4251 idaw_dst = dst; 4252 } 4253 /* If we start a new idaw, we must make sure that it 4254 * starts on an IDA_BLOCK_SIZE boundary. 4255 * If we continue an idaw, we must make sure that the 4256 * current segment begins where the so far accumulated 4257 * idaw ends 4258 */ 4259 if (!idaw_dst) { 4260 if ((unsigned long)(dst) & (IDA_BLOCK_SIZE - 1)) { 4261 dasd_sfree_request(cqr, startdev); 4262 return ERR_PTR(-ERANGE); 4263 } else 4264 idaw_dst = dst; 4265 } 4266 if ((idaw_dst + idaw_len) != dst) { 4267 dasd_sfree_request(cqr, startdev); 4268 return ERR_PTR(-ERANGE); 4269 } 4270 part_len = min(seg_len, len_to_track_end); 4271 seg_len -= part_len; 4272 dst += part_len; 4273 idaw_len += part_len; 4274 len_to_track_end -= part_len; 4275 /* collected memory area ends on an IDA_BLOCK border, 4276 * -> create an idaw 4277 * idal_create_words will handle cases where idaw_len 4278 * is larger then IDA_BLOCK_SIZE 4279 */ 4280 if (!((unsigned long)(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE - 1))) 4281 end_idaw = 1; 4282 /* We also need to end the idaw at track end */ 4283 if (!len_to_track_end) { 4284 new_track = 1; 4285 end_idaw = 1; 4286 } 4287 if (end_idaw) { 4288 idaws = idal_create_words(idaws, idaw_dst, 4289 idaw_len); 4290 idaw_dst = NULL; 4291 idaw_len = 0; 4292 end_idaw = 0; 4293 } 4294 } 4295 } 4296 4297 if (blk_noretry_request(req) || 4298 block->base->features & DASD_FEATURE_FAILFAST) 4299 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 4300 cqr->startdev = startdev; 4301 cqr->memdev = startdev; 4302 cqr->block = block; 4303 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 4304 cqr->lpm = dasd_path_get_ppm(startdev); 4305 cqr->retries = startdev->default_retries; 4306 cqr->buildclk = get_tod_clock(); 4307 cqr->status = DASD_CQR_FILLED; 4308 4309 /* Set flags to suppress output for expected errors */ 4310 if (dasd_eckd_is_ese(basedev)) 4311 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 4312 4313 return cqr; 4314 } 4315 4316 static int prepare_itcw(struct itcw *itcw, 4317 unsigned int trk, unsigned int totrk, int cmd, 4318 struct dasd_device *basedev, 4319 struct dasd_device *startdev, 4320 unsigned int rec_on_trk, int count, 4321 unsigned int blksize, 4322 unsigned int total_data_size, 4323 unsigned int tlf, 4324 unsigned int blk_per_trk) 4325 { 4326 struct 
PFX_eckd_data pfxdata; 4327 struct dasd_eckd_private *basepriv, *startpriv; 4328 struct DE_eckd_data *dedata; 4329 struct LRE_eckd_data *lredata; 4330 struct dcw *dcw; 4331 4332 u32 begcyl, endcyl; 4333 u16 heads, beghead, endhead; 4334 u8 pfx_cmd; 4335 4336 int rc = 0; 4337 int sector = 0; 4338 int dn, d; 4339 4340 4341 /* setup prefix data */ 4342 basepriv = basedev->private; 4343 startpriv = startdev->private; 4344 dedata = &pfxdata.define_extent; 4345 lredata = &pfxdata.locate_record; 4346 4347 memset(&pfxdata, 0, sizeof(pfxdata)); 4348 pfxdata.format = 1; /* PFX with LRE */ 4349 pfxdata.base_address = basepriv->conf.ned->unit_addr; 4350 pfxdata.base_lss = basepriv->conf.ned->ID; 4351 pfxdata.validity.define_extent = 1; 4352 4353 /* private uid is kept up to date, conf_data may be outdated */ 4354 if (startpriv->uid.type == UA_BASE_PAV_ALIAS) 4355 pfxdata.validity.verify_base = 1; 4356 4357 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) { 4358 pfxdata.validity.verify_base = 1; 4359 pfxdata.validity.hyper_pav = 1; 4360 } 4361 4362 switch (cmd) { 4363 case DASD_ECKD_CCW_READ_TRACK_DATA: 4364 dedata->mask.perm = 0x1; 4365 dedata->attributes.operation = basepriv->attrib.operation; 4366 dedata->blk_size = blksize; 4367 dedata->ga_extended |= 0x42; 4368 lredata->operation.orientation = 0x0; 4369 lredata->operation.operation = 0x0C; 4370 lredata->auxiliary.check_bytes = 0x01; 4371 pfx_cmd = DASD_ECKD_CCW_PFX_READ; 4372 break; 4373 case DASD_ECKD_CCW_WRITE_TRACK_DATA: 4374 dedata->mask.perm = 0x02; 4375 dedata->attributes.operation = basepriv->attrib.operation; 4376 dedata->blk_size = blksize; 4377 rc = set_timestamp(NULL, dedata, basedev); 4378 dedata->ga_extended |= 0x42; 4379 lredata->operation.orientation = 0x0; 4380 lredata->operation.operation = 0x3F; 4381 lredata->extended_operation = 0x23; 4382 lredata->auxiliary.check_bytes = 0x2; 4383 /* 4384 * If XRC is supported the System Time Stamp is set. The 4385 * validity of the time stamp must be reflected in the prefix 4386 * data as well. 
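 * (set_timestamp() above sets the relevant ga_extended bits when XRC is active; the check below keys off those bits before setting the 'Time Stamp Valid' indication.)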
4387 */ 4388 if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02) 4389 pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */ 4390 pfx_cmd = DASD_ECKD_CCW_PFX; 4391 break; 4392 case DASD_ECKD_CCW_READ_COUNT_MT: 4393 dedata->mask.perm = 0x1; 4394 dedata->attributes.operation = DASD_BYPASS_CACHE; 4395 dedata->ga_extended |= 0x42; 4396 dedata->blk_size = blksize; 4397 lredata->operation.orientation = 0x2; 4398 lredata->operation.operation = 0x16; 4399 lredata->auxiliary.check_bytes = 0x01; 4400 pfx_cmd = DASD_ECKD_CCW_PFX_READ; 4401 break; 4402 default: 4403 DBF_DEV_EVENT(DBF_ERR, basedev, 4404 "prepare itcw, unknown opcode 0x%x", cmd); 4405 BUG(); 4406 break; 4407 } 4408 if (rc) 4409 return rc; 4410 4411 dedata->attributes.mode = 0x3; /* ECKD */ 4412 4413 heads = basepriv->rdc_data.trk_per_cyl; 4414 begcyl = trk / heads; 4415 beghead = trk % heads; 4416 endcyl = totrk / heads; 4417 endhead = totrk % heads; 4418 4419 /* check for sequential prestage - enhance cylinder range */ 4420 if (dedata->attributes.operation == DASD_SEQ_PRESTAGE || 4421 dedata->attributes.operation == DASD_SEQ_ACCESS) { 4422 4423 if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl) 4424 endcyl += basepriv->attrib.nr_cyl; 4425 else 4426 endcyl = (basepriv->real_cyl - 1); 4427 } 4428 4429 set_ch_t(&dedata->beg_ext, begcyl, beghead); 4430 set_ch_t(&dedata->end_ext, endcyl, endhead); 4431 4432 dedata->ep_format = 0x20; /* records per track is valid */ 4433 dedata->ep_rec_per_track = blk_per_trk; 4434 4435 if (rec_on_trk) { 4436 switch (basepriv->rdc_data.dev_type) { 4437 case 0x3390: 4438 dn = ceil_quot(blksize + 6, 232); 4439 d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34); 4440 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8; 4441 break; 4442 case 0x3380: 4443 d = 7 + ceil_quot(blksize + 12, 32); 4444 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7; 4445 break; 4446 } 4447 } 4448 4449 if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) { 4450 lredata->auxiliary.length_valid = 0; 4451 lredata->auxiliary.length_scope = 0; 4452 lredata->sector = 0xff; 4453 } else { 4454 lredata->auxiliary.length_valid = 1; 4455 lredata->auxiliary.length_scope = 1; 4456 lredata->sector = sector; 4457 } 4458 lredata->auxiliary.imbedded_ccw_valid = 1; 4459 lredata->length = tlf; 4460 lredata->imbedded_ccw = cmd; 4461 lredata->count = count; 4462 set_ch_t(&lredata->seek_addr, begcyl, beghead); 4463 lredata->search_arg.cyl = lredata->seek_addr.cyl; 4464 lredata->search_arg.head = lredata->seek_addr.head; 4465 lredata->search_arg.record = rec_on_trk; 4466 4467 dcw = itcw_add_dcw(itcw, pfx_cmd, 0, 4468 &pfxdata, sizeof(pfxdata), total_data_size); 4469 return PTR_ERR_OR_ZERO(dcw); 4470 } 4471 4472 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( 4473 struct dasd_device *startdev, 4474 struct dasd_block *block, 4475 struct request *req, 4476 sector_t first_rec, 4477 sector_t last_rec, 4478 sector_t first_trk, 4479 sector_t last_trk, 4480 unsigned int first_offs, 4481 unsigned int last_offs, 4482 unsigned int blk_per_trk, 4483 unsigned int blksize) 4484 { 4485 struct dasd_ccw_req *cqr; 4486 struct req_iterator iter; 4487 struct bio_vec bv; 4488 char *dst; 4489 unsigned int trkcount, ctidaw; 4490 unsigned char cmd; 4491 struct dasd_device *basedev; 4492 unsigned int tlf; 4493 struct itcw *itcw; 4494 struct tidaw *last_tidaw = NULL; 4495 int itcw_op; 4496 size_t itcw_size; 4497 u8 tidaw_flags; 4498 unsigned int seg_len, part_len, len_to_track_end; 4499 unsigned char new_track; 4500 sector_t recid, trkid; 4501 unsigned int offs; 4502 unsigned 
int count, count_to_trk_end; 4503 int ret; 4504 4505 basedev = block->base; 4506 if (rq_data_dir(req) == READ) { 4507 cmd = DASD_ECKD_CCW_READ_TRACK_DATA; 4508 itcw_op = ITCW_OP_READ; 4509 } else if (rq_data_dir(req) == WRITE) { 4510 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA; 4511 itcw_op = ITCW_OP_WRITE; 4512 } else 4513 return ERR_PTR(-EINVAL); 4514 4515 /* track based I/O needs to address all memory via TIDAWs, 4516 * not just for 64 bit addresses. This allows us to map 4517 * each segment directly to one tidaw. 4518 * In the case of write requests, additional tidaws may 4519 * be needed when a segment crosses a track boundary. 4520 */ 4521 trkcount = last_trk - first_trk + 1; 4522 ctidaw = 0; 4523 rq_for_each_segment(bv, req, iter) { 4524 ++ctidaw; 4525 } 4526 if (rq_data_dir(req) == WRITE) 4527 ctidaw += (last_trk - first_trk); 4528 4529 /* Allocate the ccw request. */ 4530 itcw_size = itcw_calc_size(0, ctidaw, 0); 4531 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev, 4532 blk_mq_rq_to_pdu(req)); 4533 if (IS_ERR(cqr)) 4534 return cqr; 4535 4536 /* transfer length factor: how many bytes to read from the last track */ 4537 if (first_trk == last_trk) 4538 tlf = last_offs - first_offs + 1; 4539 else 4540 tlf = last_offs + 1; 4541 tlf *= blksize; 4542 4543 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0); 4544 if (IS_ERR(itcw)) { 4545 ret = -EINVAL; 4546 goto out_error; 4547 } 4548 cqr->cpaddr = itcw_get_tcw(itcw); 4549 if (prepare_itcw(itcw, first_trk, last_trk, 4550 cmd, basedev, startdev, 4551 first_offs + 1, 4552 trkcount, blksize, 4553 (last_rec - first_rec + 1) * blksize, 4554 tlf, blk_per_trk) == -EAGAIN) { 4555 /* Clock not in sync and XRC is enabled. 4556 * Try again later. 4557 */ 4558 ret = -EAGAIN; 4559 goto out_error; 4560 } 4561 len_to_track_end = 0; 4562 /* 4563 * A tidaw can address 4k of memory, but must not cross page boundaries. 4564 * We can let the block layer handle this by setting seg_boundary_mask 4565 * to page boundaries and max_segment_size to page size when setting up 4566 * the request queue. 4567 * For write requests, a TIDAW must not cross track boundaries, because 4568 * we have to set the CBC flag on the last tidaw for each track.
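 * For illustration (example numbers, not from a real device): with 4 KiB blocks and 12 blocks per track, an 8 KiB write segment of which only 4 KiB fit before the track end is split into two tidaws; the first gets TIDAW_FLAGS_INSERT_CBC, the second starts the next track.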
4569 */ 4570 if (rq_data_dir(req) == WRITE) { 4571 new_track = 1; 4572 recid = first_rec; 4573 rq_for_each_segment(bv, req, iter) { 4574 dst = bvec_virt(&bv); 4575 seg_len = bv.bv_len; 4576 while (seg_len) { 4577 if (new_track) { 4578 trkid = recid; 4579 offs = sector_div(trkid, blk_per_trk); 4580 count_to_trk_end = blk_per_trk - offs; 4581 count = min((last_rec - recid + 1), 4582 (sector_t)count_to_trk_end); 4583 len_to_track_end = count * blksize; 4584 recid += count; 4585 new_track = 0; 4586 } 4587 part_len = min(seg_len, len_to_track_end); 4588 seg_len -= part_len; 4589 len_to_track_end -= part_len; 4590 /* We need to end the tidaw at track end */ 4591 if (!len_to_track_end) { 4592 new_track = 1; 4593 tidaw_flags = TIDAW_FLAGS_INSERT_CBC; 4594 } else 4595 tidaw_flags = 0; 4596 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags, 4597 dst, part_len); 4598 if (IS_ERR(last_tidaw)) { 4599 ret = -EINVAL; 4600 goto out_error; 4601 } 4602 dst += part_len; 4603 } 4604 } 4605 } else { 4606 rq_for_each_segment(bv, req, iter) { 4607 dst = bvec_virt(&bv); 4608 last_tidaw = itcw_add_tidaw(itcw, 0x00, 4609 dst, bv.bv_len); 4610 if (IS_ERR(last_tidaw)) { 4611 ret = -EINVAL; 4612 goto out_error; 4613 } 4614 } 4615 } 4616 last_tidaw->flags |= TIDAW_FLAGS_LAST; 4617 last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC; 4618 itcw_finalize(itcw); 4619 4620 if (blk_noretry_request(req) || 4621 block->base->features & DASD_FEATURE_FAILFAST) 4622 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 4623 cqr->cpmode = 1; 4624 cqr->startdev = startdev; 4625 cqr->memdev = startdev; 4626 cqr->block = block; 4627 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 4628 cqr->lpm = dasd_path_get_ppm(startdev); 4629 cqr->retries = startdev->default_retries; 4630 cqr->buildclk = get_tod_clock(); 4631 cqr->status = DASD_CQR_FILLED; 4632 4633 /* Set flags to suppress output for expected errors */ 4634 if (dasd_eckd_is_ese(basedev)) { 4635 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); 4636 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); 4637 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 4638 } 4639 4640 return cqr; 4641 out_error: 4642 dasd_sfree_request(cqr, startdev); 4643 return ERR_PTR(ret); 4644 } 4645 4646 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, 4647 struct dasd_block *block, 4648 struct request *req) 4649 { 4650 int cmdrtd, cmdwtd; 4651 int use_prefix; 4652 int fcx_multitrack; 4653 struct dasd_eckd_private *private; 4654 struct dasd_device *basedev; 4655 sector_t first_rec, last_rec; 4656 sector_t first_trk, last_trk; 4657 unsigned int first_offs, last_offs; 4658 unsigned int blk_per_trk, blksize; 4659 int cdlspecial; 4660 unsigned int data_size; 4661 struct dasd_ccw_req *cqr; 4662 4663 basedev = block->base; 4664 private = basedev->private; 4665 4666 /* Calculate number of blocks/records per track. */ 4667 blksize = block->bp_block; 4668 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 4669 if (blk_per_trk == 0) 4670 return ERR_PTR(-EINVAL); 4671 /* Calculate record id of first and last block. 
*/ 4672 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift; 4673 first_offs = sector_div(first_trk, blk_per_trk); 4674 last_rec = last_trk = 4675 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; 4676 last_offs = sector_div(last_trk, blk_per_trk); 4677 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk); 4678 4679 fcx_multitrack = private->features.feature[40] & 0x20; 4680 data_size = blk_rq_bytes(req); 4681 if (data_size % blksize) 4682 return ERR_PTR(-EINVAL); 4683 /* tpm write requests add CBC data on each track boundary */ 4684 if (rq_data_dir(req) == WRITE) 4685 data_size += (last_trk - first_trk) * 4; 4686 4687 /* is read track data and write track data in command mode supported? */ 4688 cmdrtd = private->features.feature[9] & 0x20; 4689 cmdwtd = private->features.feature[12] & 0x40; 4690 use_prefix = private->features.feature[8] & 0x01; 4691 4692 cqr = NULL; 4693 if (cdlspecial || dasd_page_cache) { 4694 /* do nothing, just fall through to the cmd mode single case */ 4695 } else if ((data_size <= private->fcx_max_data) 4696 && (fcx_multitrack || (first_trk == last_trk))) { 4697 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req, 4698 first_rec, last_rec, 4699 first_trk, last_trk, 4700 first_offs, last_offs, 4701 blk_per_trk, blksize); 4702 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) && 4703 (PTR_ERR(cqr) != -ENOMEM)) 4704 cqr = NULL; 4705 } else if (use_prefix && 4706 (((rq_data_dir(req) == READ) && cmdrtd) || 4707 ((rq_data_dir(req) == WRITE) && cmdwtd))) { 4708 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req, 4709 first_rec, last_rec, 4710 first_trk, last_trk, 4711 first_offs, last_offs, 4712 blk_per_trk, blksize); 4713 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) && 4714 (PTR_ERR(cqr) != -ENOMEM)) 4715 cqr = NULL; 4716 } 4717 if (!cqr) 4718 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req, 4719 first_rec, last_rec, 4720 first_trk, last_trk, 4721 first_offs, last_offs, 4722 blk_per_trk, blksize); 4723 return cqr; 4724 } 4725 4726 static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev, 4727 struct dasd_block *block, 4728 struct request *req) 4729 { 4730 sector_t start_padding_sectors, end_sector_offset, end_padding_sectors; 4731 unsigned int seg_len, len_to_track_end; 4732 unsigned int cidaw, cplength, datasize; 4733 sector_t first_trk, last_trk, sectors; 4734 struct dasd_eckd_private *base_priv; 4735 struct dasd_device *basedev; 4736 struct req_iterator iter; 4737 struct dasd_ccw_req *cqr; 4738 unsigned int trkcount; 4739 unsigned int size; 4740 unsigned char cmd; 4741 struct bio_vec bv; 4742 struct ccw1 *ccw; 4743 dma64_t *idaws; 4744 int use_prefix; 4745 void *data; 4746 char *dst; 4747 4748 /* 4749 * raw track access needs to be a multiple of 64k and on a 64k boundary. 4750 * For read requests we can fix an incorrect alignment by padding 4751 * the request with dummy pages.
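 * For illustration (example numbers): a 128 sector read starting at sector 64 covers the second half of one track and the first half of the next; start_padding_sectors and end_padding_sectors are both 64, and each gap is filled with eight 4 KiB mappings of rawpadpage.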
4752 */ 4753 start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK; 4754 end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) % 4755 DASD_RAW_SECTORS_PER_TRACK; 4756 end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) % 4757 DASD_RAW_SECTORS_PER_TRACK; 4758 basedev = block->base; 4759 if ((start_padding_sectors || end_padding_sectors) && 4760 (rq_data_dir(req) == WRITE)) { 4761 DBF_DEV_EVENT(DBF_ERR, basedev, 4762 "raw write not track aligned (%llu,%llu) req %p", 4763 start_padding_sectors, end_padding_sectors, req); 4764 return ERR_PTR(-EINVAL); 4765 } 4766 4767 first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK; 4768 last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) / 4769 DASD_RAW_SECTORS_PER_TRACK; 4770 trkcount = last_trk - first_trk + 1; 4771 4772 if (rq_data_dir(req) == READ) 4773 cmd = DASD_ECKD_CCW_READ_TRACK; 4774 else if (rq_data_dir(req) == WRITE) 4775 cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK; 4776 else 4777 return ERR_PTR(-EINVAL); 4778 4779 /* 4780 * Raw track based I/O needs IDAWs for each page, 4781 * and not just for 64 bit addresses. 4782 */ 4783 cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK; 4784 4785 /* 4786 * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes 4787 * of extended parameter. This is needed for write full track. 4788 */ 4789 base_priv = basedev->private; 4790 use_prefix = base_priv->features.feature[8] & 0x01; 4791 if (use_prefix) { 4792 cplength = 1 + trkcount; 4793 size = sizeof(struct PFX_eckd_data) + 2; 4794 } else { 4795 cplength = 2 + trkcount; 4796 size = sizeof(struct DE_eckd_data) + 4797 sizeof(struct LRE_eckd_data) + 2; 4798 } 4799 size = ALIGN(size, 8); 4800 4801 datasize = size + cidaw * sizeof(unsigned long); 4802 4803 /* Allocate the ccw request. 
*/ 4804 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, 4805 datasize, startdev, blk_mq_rq_to_pdu(req)); 4806 if (IS_ERR(cqr)) 4807 return cqr; 4808 4809 ccw = cqr->cpaddr; 4810 data = cqr->data; 4811 4812 if (use_prefix) { 4813 prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev, 4814 startdev, 1, 0, trkcount, 0, 0); 4815 } else { 4816 define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0); 4817 ccw[-1].flags |= CCW_FLAG_CC; 4818 4819 data += sizeof(struct DE_eckd_data); 4820 locate_record_ext(ccw++, data, first_trk, 0, 4821 trkcount, cmd, basedev, 0, 0); 4822 } 4823 4824 idaws = (dma64_t *)(cqr->data + size); 4825 len_to_track_end = 0; 4826 if (start_padding_sectors) { 4827 ccw[-1].flags |= CCW_FLAG_CC; 4828 ccw->cmd_code = cmd; 4829 /* maximum 3390 track size */ 4830 ccw->count = 57326; 4831 /* 64k map to one track */ 4832 len_to_track_end = 65536 - start_padding_sectors * 512; 4833 ccw->cda = virt_to_dma32(idaws); 4834 ccw->flags |= CCW_FLAG_IDA; 4835 ccw->flags |= CCW_FLAG_SLI; 4836 ccw++; 4837 for (sectors = 0; sectors < start_padding_sectors; sectors += 8) 4838 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE); 4839 } 4840 rq_for_each_segment(bv, req, iter) { 4841 dst = bvec_virt(&bv); 4842 seg_len = bv.bv_len; 4843 if (cmd == DASD_ECKD_CCW_READ_TRACK) 4844 memset(dst, 0, seg_len); 4845 if (!len_to_track_end) { 4846 ccw[-1].flags |= CCW_FLAG_CC; 4847 ccw->cmd_code = cmd; 4848 /* maximum 3390 track size */ 4849 ccw->count = 57326; 4850 /* 64k map to one track */ 4851 len_to_track_end = 65536; 4852 ccw->cda = virt_to_dma32(idaws); 4853 ccw->flags |= CCW_FLAG_IDA; 4854 ccw->flags |= CCW_FLAG_SLI; 4855 ccw++; 4856 } 4857 len_to_track_end -= seg_len; 4858 idaws = idal_create_words(idaws, dst, seg_len); 4859 } 4860 for (sectors = 0; sectors < end_padding_sectors; sectors += 8) 4861 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE); 4862 if (blk_noretry_request(req) || 4863 block->base->features & DASD_FEATURE_FAILFAST) 4864 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 4865 cqr->startdev = startdev; 4866 cqr->memdev = startdev; 4867 cqr->block = block; 4868 cqr->expires = startdev->default_expires * HZ; 4869 cqr->lpm = dasd_path_get_ppm(startdev); 4870 cqr->retries = startdev->default_retries; 4871 cqr->buildclk = get_tod_clock(); 4872 cqr->status = DASD_CQR_FILLED; 4873 4874 return cqr; 4875 } 4876 4877 4878 static int 4879 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) 4880 { 4881 struct dasd_eckd_private *private; 4882 struct ccw1 *ccw; 4883 struct req_iterator iter; 4884 struct bio_vec bv; 4885 char *dst, *cda; 4886 unsigned int blksize, blk_per_trk, off; 4887 sector_t recid; 4888 int status; 4889 4890 if (!dasd_page_cache) 4891 goto out; 4892 private = cqr->block->base->private; 4893 blksize = cqr->block->bp_block; 4894 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 4895 recid = blk_rq_pos(req) >> cqr->block->s2b_shift; 4896 ccw = cqr->cpaddr; 4897 /* Skip over define extent & locate record. */ 4898 ccw++; 4899 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) 4900 ccw++; 4901 rq_for_each_segment(bv, req, iter) { 4902 dst = bvec_virt(&bv); 4903 for (off = 0; off < bv.bv_len; off += blksize) { 4904 /* Skip locate record. 
*/ 4905 if (private->uses_cdl && recid <= 2*blk_per_trk) 4906 ccw++; 4907 if (dst) { 4908 if (ccw->flags & CCW_FLAG_IDA) 4909 cda = *((char **)dma32_to_virt(ccw->cda)); 4910 else 4911 cda = dma32_to_virt(ccw->cda); 4912 if (dst != cda) { 4913 if (rq_data_dir(req) == READ) 4914 memcpy(dst, cda, bv.bv_len); 4915 kmem_cache_free(dasd_page_cache, 4916 (void *)((addr_t)cda & PAGE_MASK)); 4917 } 4918 dst = NULL; 4919 } 4920 ccw++; 4921 recid++; 4922 } 4923 } 4924 out: 4925 status = cqr->status == DASD_CQR_DONE; 4926 dasd_sfree_request(cqr, cqr->memdev); 4927 return status; 4928 } 4929 4930 /* 4931 * Modify ccw/tcw in cqr so it can be started on a base device. 4932 * 4933 * Note that this is not enough to restart the cqr! 4934 * Either reset cqr->startdev as well (summary unit check handling) 4935 * or restart via separate cqr (as in ERP handling). 4936 */ 4937 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr) 4938 { 4939 struct ccw1 *ccw; 4940 struct PFX_eckd_data *pfxdata; 4941 struct tcw *tcw; 4942 struct tccb *tccb; 4943 struct dcw *dcw; 4944 4945 if (cqr->cpmode == 1) { 4946 tcw = cqr->cpaddr; 4947 tccb = tcw_get_tccb(tcw); 4948 dcw = (struct dcw *)&tccb->tca[0]; 4949 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0]; 4950 pfxdata->validity.verify_base = 0; 4951 pfxdata->validity.hyper_pav = 0; 4952 } else { 4953 ccw = cqr->cpaddr; 4954 pfxdata = cqr->data; 4955 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) { 4956 pfxdata->validity.verify_base = 0; 4957 pfxdata->validity.hyper_pav = 0; 4958 } 4959 } 4960 } 4961 4962 #define DASD_ECKD_CHANQ_MAX_SIZE 4 4963 4964 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base, 4965 struct dasd_block *block, 4966 struct request *req) 4967 { 4968 struct dasd_eckd_private *private; 4969 struct dasd_device *startdev; 4970 unsigned long flags; 4971 struct dasd_ccw_req *cqr; 4972 4973 startdev = dasd_alias_get_start_dev(base); 4974 if (!startdev) 4975 startdev = base; 4976 private = startdev->private; 4977 if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE) 4978 return ERR_PTR(-EBUSY); 4979 4980 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags); 4981 private->count++; 4982 if ((base->features & DASD_FEATURE_USERAW)) 4983 cqr = dasd_eckd_build_cp_raw(startdev, block, req); 4984 else 4985 cqr = dasd_eckd_build_cp(startdev, block, req); 4986 if (IS_ERR(cqr)) 4987 private->count--; 4988 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags); 4989 return cqr; 4990 } 4991 4992 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr, 4993 struct request *req) 4994 { 4995 struct dasd_eckd_private *private; 4996 unsigned long flags; 4997 4998 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags); 4999 private = cqr->memdev->private; 5000 private->count--; 5001 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags); 5002 return dasd_eckd_free_cp(cqr, req); 5003 } 5004 5005 static int 5006 dasd_eckd_fill_info(struct dasd_device * device, 5007 struct dasd_information2_t * info) 5008 { 5009 struct dasd_eckd_private *private = device->private; 5010 5011 info->label_block = 2; 5012 info->FBA_layout = private->uses_cdl ? 0 : 1; 5013 info->format = private->uses_cdl ? 
DASD_FORMAT_CDL : DASD_FORMAT_LDL; 5014 info->characteristics_size = sizeof(private->rdc_data); 5015 memcpy(info->characteristics, &private->rdc_data, 5016 sizeof(private->rdc_data)); 5017 info->confdata_size = min_t(unsigned long, private->conf.len, 5018 sizeof(info->configuration_data)); 5019 memcpy(info->configuration_data, private->conf.data, 5020 info->confdata_size); 5021 return 0; 5022 } 5023 5024 /* 5025 * SECTION: ioctl functions for eckd devices. 5026 */ 5027 5028 /* 5029 * Release device ioctl. 5030 * Builds a channel program to release a previously reserved 5031 * device (see dasd_eckd_reserve). 5032 */ 5033 static int 5034 dasd_eckd_release(struct dasd_device *device) 5035 { 5036 struct dasd_ccw_req *cqr; 5037 int rc; 5038 struct ccw1 *ccw; 5039 int useglobal; 5040 5041 if (!capable(CAP_SYS_ADMIN)) 5042 return -EACCES; 5043 5044 useglobal = 0; 5045 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL); 5046 if (IS_ERR(cqr)) { 5047 mutex_lock(&dasd_reserve_mutex); 5048 useglobal = 1; 5049 cqr = &dasd_reserve_req->cqr; 5050 memset(cqr, 0, sizeof(*cqr)); 5051 memset(&dasd_reserve_req->ccw, 0, 5052 sizeof(dasd_reserve_req->ccw)); 5053 cqr->cpaddr = &dasd_reserve_req->ccw; 5054 cqr->data = &dasd_reserve_req->data; 5055 cqr->magic = DASD_ECKD_MAGIC; 5056 } 5057 ccw = cqr->cpaddr; 5058 ccw->cmd_code = DASD_ECKD_CCW_RELEASE; 5059 ccw->flags |= CCW_FLAG_SLI; 5060 ccw->count = 32; 5061 ccw->cda = virt_to_dma32(cqr->data); 5062 cqr->startdev = device; 5063 cqr->memdev = device; 5064 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 5065 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 5066 cqr->retries = 2; /* set retry counter to enable basic ERP */ 5067 cqr->expires = 2 * HZ; 5068 cqr->buildclk = get_tod_clock(); 5069 cqr->status = DASD_CQR_FILLED; 5070 5071 rc = dasd_sleep_on_immediatly(cqr); 5072 if (!rc) 5073 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags); 5074 5075 if (useglobal) 5076 mutex_unlock(&dasd_reserve_mutex); 5077 else 5078 dasd_sfree_request(cqr, cqr->memdev); 5079 return rc; 5080 } 5081 5082 /* 5083 * Reserve device ioctl. 5084 * Options are set to 'synchronous wait for interrupt' and 5085 * 'timeout the request'. This leads to a terminate IO if 5086 * the interrupt is outstanding for a certain time.
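 * (Concretely, cqr->expires is set to 2 * HZ and cqr->retries to 2 below, so a reserve that never completes is terminated after a few seconds instead of blocking the ioctl indefinitely.)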
5087 */ 5088 static int 5089 dasd_eckd_reserve(struct dasd_device *device) 5090 { 5091 struct dasd_ccw_req *cqr; 5092 int rc; 5093 struct ccw1 *ccw; 5094 int useglobal; 5095 5096 if (!capable(CAP_SYS_ADMIN)) 5097 return -EACCES; 5098 5099 useglobal = 0; 5100 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL); 5101 if (IS_ERR(cqr)) { 5102 mutex_lock(&dasd_reserve_mutex); 5103 useglobal = 1; 5104 cqr = &dasd_reserve_req->cqr; 5105 memset(cqr, 0, sizeof(*cqr)); 5106 memset(&dasd_reserve_req->ccw, 0, 5107 sizeof(dasd_reserve_req->ccw)); 5108 cqr->cpaddr = &dasd_reserve_req->ccw; 5109 cqr->data = &dasd_reserve_req->data; 5110 cqr->magic = DASD_ECKD_MAGIC; 5111 } 5112 ccw = cqr->cpaddr; 5113 ccw->cmd_code = DASD_ECKD_CCW_RESERVE; 5114 ccw->flags |= CCW_FLAG_SLI; 5115 ccw->count = 32; 5116 ccw->cda = virt_to_dma32(cqr->data); 5117 cqr->startdev = device; 5118 cqr->memdev = device; 5119 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 5120 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 5121 cqr->retries = 2; /* set retry counter to enable basic ERP */ 5122 cqr->expires = 2 * HZ; 5123 cqr->buildclk = get_tod_clock(); 5124 cqr->status = DASD_CQR_FILLED; 5125 5126 rc = dasd_sleep_on_immediatly(cqr); 5127 if (!rc) 5128 set_bit(DASD_FLAG_IS_RESERVED, &device->flags); 5129 5130 if (useglobal) 5131 mutex_unlock(&dasd_reserve_mutex); 5132 else 5133 dasd_sfree_request(cqr, cqr->memdev); 5134 return rc; 5135 } 5136 5137 /* 5138 * Steal lock ioctl - unconditional reserve device. 5139 * Buils a channel programm to break a device's reservation. 5140 * (unconditional reserve) 5141 */ 5142 static int 5143 dasd_eckd_steal_lock(struct dasd_device *device) 5144 { 5145 struct dasd_ccw_req *cqr; 5146 int rc; 5147 struct ccw1 *ccw; 5148 int useglobal; 5149 5150 if (!capable(CAP_SYS_ADMIN)) 5151 return -EACCES; 5152 5153 useglobal = 0; 5154 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL); 5155 if (IS_ERR(cqr)) { 5156 mutex_lock(&dasd_reserve_mutex); 5157 useglobal = 1; 5158 cqr = &dasd_reserve_req->cqr; 5159 memset(cqr, 0, sizeof(*cqr)); 5160 memset(&dasd_reserve_req->ccw, 0, 5161 sizeof(dasd_reserve_req->ccw)); 5162 cqr->cpaddr = &dasd_reserve_req->ccw; 5163 cqr->data = &dasd_reserve_req->data; 5164 cqr->magic = DASD_ECKD_MAGIC; 5165 } 5166 ccw = cqr->cpaddr; 5167 ccw->cmd_code = DASD_ECKD_CCW_SLCK; 5168 ccw->flags |= CCW_FLAG_SLI; 5169 ccw->count = 32; 5170 ccw->cda = virt_to_dma32(cqr->data); 5171 cqr->startdev = device; 5172 cqr->memdev = device; 5173 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 5174 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 5175 cqr->retries = 2; /* set retry counter to enable basic ERP */ 5176 cqr->expires = 2 * HZ; 5177 cqr->buildclk = get_tod_clock(); 5178 cqr->status = DASD_CQR_FILLED; 5179 5180 rc = dasd_sleep_on_immediatly(cqr); 5181 if (!rc) 5182 set_bit(DASD_FLAG_IS_RESERVED, &device->flags); 5183 5184 if (useglobal) 5185 mutex_unlock(&dasd_reserve_mutex); 5186 else 5187 dasd_sfree_request(cqr, cqr->memdev); 5188 return rc; 5189 } 5190 5191 /* 5192 * SNID - Sense Path Group ID 5193 * This ioctl may be used in situations where I/O is stalled due to 5194 * a reserve, so if the normal dasd_smalloc_request fails, we use the 5195 * preallocated dasd_reserve_req. 
5196 */ 5197 static int dasd_eckd_snid(struct dasd_device *device, 5198 void __user *argp) 5199 { 5200 struct dasd_ccw_req *cqr; 5201 int rc; 5202 struct ccw1 *ccw; 5203 int useglobal; 5204 struct dasd_snid_ioctl_data usrparm; 5205 5206 if (!capable(CAP_SYS_ADMIN)) 5207 return -EACCES; 5208 5209 if (copy_from_user(&usrparm, argp, sizeof(usrparm))) 5210 return -EFAULT; 5211 5212 useglobal = 0; 5213 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 5214 sizeof(struct dasd_snid_data), device, 5215 NULL); 5216 if (IS_ERR(cqr)) { 5217 mutex_lock(&dasd_reserve_mutex); 5218 useglobal = 1; 5219 cqr = &dasd_reserve_req->cqr; 5220 memset(cqr, 0, sizeof(*cqr)); 5221 memset(&dasd_reserve_req->ccw, 0, 5222 sizeof(dasd_reserve_req->ccw)); 5223 cqr->cpaddr = &dasd_reserve_req->ccw; 5224 cqr->data = &dasd_reserve_req->data; 5225 cqr->magic = DASD_ECKD_MAGIC; 5226 } 5227 ccw = cqr->cpaddr; 5228 ccw->cmd_code = DASD_ECKD_CCW_SNID; 5229 ccw->flags |= CCW_FLAG_SLI; 5230 ccw->count = 12; 5231 ccw->cda = virt_to_dma32(cqr->data); 5232 cqr->startdev = device; 5233 cqr->memdev = device; 5234 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 5235 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 5236 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags); 5237 cqr->retries = 5; 5238 cqr->expires = 10 * HZ; 5239 cqr->buildclk = get_tod_clock(); 5240 cqr->status = DASD_CQR_FILLED; 5241 cqr->lpm = usrparm.path_mask; 5242 5243 rc = dasd_sleep_on_immediatly(cqr); 5244 /* verify that I/O processing didn't modify the path mask */ 5245 if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask)) 5246 rc = -EIO; 5247 if (!rc) { 5248 usrparm.data = *((struct dasd_snid_data *)cqr->data); 5249 if (copy_to_user(argp, &usrparm, sizeof(usrparm))) 5250 rc = -EFAULT; 5251 } 5252 5253 if (useglobal) 5254 mutex_unlock(&dasd_reserve_mutex); 5255 else 5256 dasd_sfree_request(cqr, cqr->memdev); 5257 return rc; 5258 } 5259 5260 /* 5261 * Read performance statistics 5262 */ 5263 static int 5264 dasd_eckd_performance(struct dasd_device *device, void __user *argp) 5265 { 5266 struct dasd_psf_prssd_data *prssdp; 5267 struct dasd_rssd_perf_stats_t *stats; 5268 struct dasd_ccw_req *cqr; 5269 struct ccw1 *ccw; 5270 int rc; 5271 5272 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, 5273 (sizeof(struct dasd_psf_prssd_data) + 5274 sizeof(struct dasd_rssd_perf_stats_t)), 5275 device, NULL); 5276 if (IS_ERR(cqr)) { 5277 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 5278 "Could not allocate initialization request"); 5279 return PTR_ERR(cqr); 5280 } 5281 cqr->startdev = device; 5282 cqr->memdev = device; 5283 cqr->retries = 0; 5284 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 5285 cqr->expires = 10 * HZ; 5286 5287 /* Prepare for Read Subsystem Data */ 5288 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 5289 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data)); 5290 prssdp->order = PSF_ORDER_PRSSD; 5291 prssdp->suborder = 0x01; /* Performance Statistics */ 5292 prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */ 5293 5294 ccw = cqr->cpaddr; 5295 ccw->cmd_code = DASD_ECKD_CCW_PSF; 5296 ccw->count = sizeof(struct dasd_psf_prssd_data); 5297 ccw->flags |= CCW_FLAG_CC; 5298 ccw->cda = virt_to_dma32(prssdp); 5299 5300 /* Read Subsystem Data - Performance Statistics */ 5301 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); 5302 memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t)); 5303 5304 ccw++; 5305 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 5306 ccw->count = sizeof(struct dasd_rssd_perf_stats_t); 5307 ccw->cda = 
virt_to_dma32(stats); 5308 5309 cqr->buildclk = get_tod_clock(); 5310 cqr->status = DASD_CQR_FILLED; 5311 rc = dasd_sleep_on(cqr); 5312 if (rc == 0) { 5313 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 5314 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); 5315 if (copy_to_user(argp, stats, 5316 sizeof(struct dasd_rssd_perf_stats_t))) 5317 rc = -EFAULT; 5318 } 5319 dasd_sfree_request(cqr, cqr->memdev); 5320 return rc; 5321 } 5322 5323 /* 5324 * Get attributes (cache operations) 5325 * Returns the cache attributes used in Define Extent (DE). 5326 */ 5327 static int 5328 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp) 5329 { 5330 struct dasd_eckd_private *private = device->private; 5331 struct attrib_data_t attrib = private->attrib; 5332 int rc; 5333 5334 if (!capable(CAP_SYS_ADMIN)) 5335 return -EACCES; 5336 if (!argp) 5337 return -EINVAL; 5338 5339 rc = 0; 5340 if (copy_to_user(argp, (long *) &attrib, 5341 sizeof(struct attrib_data_t))) 5342 rc = -EFAULT; 5343 5344 return rc; 5345 } 5346 5347 /* 5348 * Set attributes (cache operations) 5349 * Stores the attributes for cache operations to be used in Define Extent (DE). 5350 */ 5351 static int 5352 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp) 5353 { 5354 struct dasd_eckd_private *private = device->private; 5355 struct attrib_data_t attrib; 5356 5357 if (!capable(CAP_SYS_ADMIN)) 5358 return -EACCES; 5359 if (!argp) 5360 return -EINVAL; 5361 5362 if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t))) 5363 return -EFAULT; 5364 private->attrib = attrib; 5365 5366 dev_info(&device->cdev->dev, 5367 "The DASD cache mode was set to %x (%i cylinder prestage)\n", 5368 private->attrib.operation, private->attrib.nr_cyl); 5369 return 0; 5370 } 5371 5372 /* 5373 * Issue syscall I/O to EMC Symmetrix array. 5374 * CCWs are PSF and RSSD 5375 */ 5376 static int dasd_symm_io(struct dasd_device *device, void __user *argp) 5377 { 5378 struct dasd_symmio_parms usrparm; 5379 char *psf_data, *rssd_result; 5380 struct dasd_ccw_req *cqr; 5381 struct ccw1 *ccw; 5382 char psf0, psf1; 5383 int rc; 5384 5385 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO)) 5386 return -EACCES; 5387 psf0 = psf1 = 0; 5388 5389 /* Copy parms from caller */ 5390 rc = -EFAULT; 5391 if (copy_from_user(&usrparm, argp, sizeof(usrparm))) 5392 goto out; 5393 if (is_compat_task()) { 5394 /* Make sure pointers are sane even on 31 bit.
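 * (i.e. the upper 32 bits must be zero; bit 31 is masked off below so that both buffer addresses stay within the 31 bit address range.)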
*/ 5395 rc = -EINVAL; 5396 if ((usrparm.psf_data >> 32) != 0) 5397 goto out; 5398 if ((usrparm.rssd_result >> 32) != 0) 5399 goto out; 5400 usrparm.psf_data &= 0x7fffffffULL; 5401 usrparm.rssd_result &= 0x7fffffffULL; 5402 } 5403 /* at least 2 bytes are accessed and should be allocated */ 5404 if (usrparm.psf_data_len < 2) { 5405 DBF_DEV_EVENT(DBF_WARNING, device, 5406 "Symmetrix ioctl invalid data length %d", 5407 usrparm.psf_data_len); 5408 rc = -EINVAL; 5409 goto out; 5410 } 5411 /* alloc I/O data area */ 5412 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); 5413 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); 5414 if (!psf_data || !rssd_result) { 5415 rc = -ENOMEM; 5416 goto out_free; 5417 } 5418 5419 /* get syscall header from user space */ 5420 rc = -EFAULT; 5421 if (copy_from_user(psf_data, 5422 (void __user *)(unsigned long) usrparm.psf_data, 5423 usrparm.psf_data_len)) 5424 goto out_free; 5425 psf0 = psf_data[0]; 5426 psf1 = psf_data[1]; 5427 5428 /* setup CCWs for PSF + RSSD */ 5429 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL); 5430 if (IS_ERR(cqr)) { 5431 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 5432 "Could not allocate initialization request"); 5433 rc = PTR_ERR(cqr); 5434 goto out_free; 5435 } 5436 5437 cqr->startdev = device; 5438 cqr->memdev = device; 5439 cqr->retries = 3; 5440 cqr->expires = 10 * HZ; 5441 cqr->buildclk = get_tod_clock(); 5442 cqr->status = DASD_CQR_FILLED; 5443 5444 /* Build the ccws */ 5445 ccw = cqr->cpaddr; 5446 5447 /* PSF ccw */ 5448 ccw->cmd_code = DASD_ECKD_CCW_PSF; 5449 ccw->count = usrparm.psf_data_len; 5450 ccw->flags |= CCW_FLAG_CC; 5451 ccw->cda = virt_to_dma32(psf_data); 5452 5453 ccw++; 5454 5455 /* RSSD ccw */ 5456 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 5457 ccw->count = usrparm.rssd_result_len; 5458 ccw->flags = CCW_FLAG_SLI; 5459 ccw->cda = virt_to_dma32(rssd_result); 5460 5461 rc = dasd_sleep_on(cqr); 5462 if (rc) 5463 goto out_sfree; 5464 5465 rc = -EFAULT; 5466 if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result, 5467 rssd_result, usrparm.rssd_result_len)) 5468 goto out_sfree; 5469 rc = 0; 5470 5471 out_sfree: 5472 dasd_sfree_request(cqr, cqr->memdev); 5473 out_free: 5474 kfree(rssd_result); 5475 kfree(psf_data); 5476 out: 5477 DBF_DEV_EVENT(DBF_WARNING, device, 5478 "Symmetrix ioctl (0x%02x 0x%02x): rc=%d", 5479 (int) psf0, (int) psf1, rc); 5480 return rc; 5481 } 5482 5483 static int 5484 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp) 5485 { 5486 struct dasd_device *device = block->base; 5487 5488 switch (cmd) { 5489 case BIODASDGATTR: 5490 return dasd_eckd_get_attrib(device, argp); 5491 case BIODASDSATTR: 5492 return dasd_eckd_set_attrib(device, argp); 5493 case BIODASDPSRD: 5494 return dasd_eckd_performance(device, argp); 5495 case BIODASDRLSE: 5496 return dasd_eckd_release(device); 5497 case BIODASDRSRV: 5498 return dasd_eckd_reserve(device); 5499 case BIODASDSLCK: 5500 return dasd_eckd_steal_lock(device); 5501 case BIODASDSNID: 5502 return dasd_eckd_snid(device, argp); 5503 case BIODASDSYMMIO: 5504 return dasd_symm_io(device, argp); 5505 default: 5506 return -ENOTTY; 5507 } 5508 } 5509 5510 /* 5511 * Dump the range of CCWs into 'page' buffer 5512 * and print them to the error log.
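 * At most 128 bytes of data are dumped per CCW.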
5513 */ 5514 static void 5515 dasd_eckd_dump_ccw_range(struct dasd_device *device, struct ccw1 *from, 5516 struct ccw1 *to, char *page) 5517 { 5518 int len, count; 5519 char *datap; 5520 5521 len = 0; 5522 while (from <= to) { 5523 len += sprintf(page + len, "CCW %px: %08X %08X DAT:", 5524 from, ((int *) from)[0], ((int *) from)[1]); 5525 5526 /* get pointer to data (consider IDALs) */ 5527 if (from->flags & CCW_FLAG_IDA) 5528 datap = (char *)*((addr_t *)dma32_to_virt(from->cda)); 5529 else 5530 datap = dma32_to_virt(from->cda); 5531 5532 /* dump data (max 128 bytes) */ 5533 for (count = 0; count < from->count && count < 128; count++) { 5534 if (count % 32 == 0) 5535 len += sprintf(page + len, "\n"); 5536 if (count % 8 == 0) 5537 len += sprintf(page + len, " "); 5538 if (count % 4 == 0) 5539 len += sprintf(page + len, " "); 5540 len += sprintf(page + len, "%02x", datap[count]); 5541 } 5542 len += sprintf(page + len, "\n"); 5543 from++; 5544 } 5545 if (len > 0) 5546 dev_err(&device->cdev->dev, "%s", page); 5547 } 5548 5549 static void 5550 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb, 5551 char *reason) 5552 { 5553 u64 *sense; 5554 u64 *stat; 5555 5556 sense = (u64 *) dasd_get_sense(irb); 5557 stat = (u64 *) &irb->scsw; 5558 if (sense) { 5559 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : " 5560 "%016llx %016llx %016llx %016llx", 5561 reason, *stat, *((u32 *) (stat + 1)), 5562 sense[0], sense[1], sense[2], sense[3]); 5563 } else { 5564 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s", 5565 reason, *stat, *((u32 *) (stat + 1)), 5566 "NO VALID SENSE"); 5567 } 5568 } 5569 5570 /* 5571 * Print sense data and related channel program. 5572 * Parts are printed because printk buffer is only 1024 bytes. 5573 */ 5574 static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, 5575 struct dasd_ccw_req *req, struct irb *irb) 5576 { 5577 struct ccw1 *first, *last, *fail, *from, *to; 5578 struct device *dev; 5579 int len, sl, sct; 5580 char *page; 5581 5582 dev = &device->cdev->dev; 5583 5584 page = (char *) get_zeroed_page(GFP_ATOMIC); 5585 if (page == NULL) { 5586 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 5587 "No memory to dump sense data\n"); 5588 return; 5589 } 5590 /* dump the sense data */ 5591 len = sprintf(page, "I/O status report:\n"); 5592 len += sprintf(page + len, 5593 "in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X CS:%02X RC:%d\n", 5594 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), 5595 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), 5596 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), 5597 req ? req->intrc : 0); 5598 len += sprintf(page + len, "Failing CCW: %px\n", 5599 dma32_to_virt(irb->scsw.cmd.cpa)); 5600 if (irb->esw.esw0.erw.cons) { 5601 for (sl = 0; sl < 4; sl++) { 5602 len += sprintf(page + len, "Sense(hex) %2d-%2d:", 5603 (8 * sl), ((8 * sl) + 7)); 5604 5605 for (sct = 0; sct < 8; sct++) { 5606 len += sprintf(page + len, " %02x", 5607 irb->ecw[8 * sl + sct]); 5608 } 5609 len += sprintf(page + len, "\n"); 5610 } 5611 5612 if (irb->ecw[27] & DASD_SENSE_BIT_0) { 5613 /* 24 Byte Sense Data */ 5614 sprintf(page + len, 5615 "24 Byte: %x MSG %x, %s MSGb to SYSOP\n", 5616 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f, 5617 irb->ecw[1] & 0x10 ? 
"" : "no"); 5618 } else { 5619 /* 32 Byte Sense Data */ 5620 sprintf(page + len, 5621 "32 Byte: Format: %x Exception class %x\n", 5622 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4); 5623 } 5624 } else { 5625 sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n"); 5626 } 5627 dev_err(dev, "%s", page); 5628 5629 if (req) { 5630 /* req == NULL for unsolicited interrupts */ 5631 /* dump the Channel Program (max 140 Bytes per line) */ 5632 /* Count CCW and print first CCWs (maximum 7) */ 5633 first = req->cpaddr; 5634 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); 5635 to = min(first + 6, last); 5636 dev_err(dev, "Related CP in req: %px\n", req); 5637 dasd_eckd_dump_ccw_range(device, first, to, page); 5638 5639 /* print failing CCW area (maximum 4) */ 5640 /* scsw->cda is either valid or zero */ 5641 from = ++to; 5642 fail = dma32_to_virt(irb->scsw.cmd.cpa); /* failing CCW */ 5643 if (from < fail - 2) { 5644 from = fail - 2; /* there is a gap - print header */ 5645 dev_err(dev, "......\n"); 5646 } 5647 to = min(fail + 1, last); 5648 dasd_eckd_dump_ccw_range(device, from, to, page + len); 5649 5650 /* print last CCWs (maximum 2) */ 5651 len = 0; 5652 from = max(from, ++to); 5653 if (from < last - 1) { 5654 from = last - 1; /* there is a gap - print header */ 5655 dev_err(dev, "......\n"); 5656 } 5657 dasd_eckd_dump_ccw_range(device, from, last, page + len); 5658 } 5659 free_page((unsigned long) page); 5660 } 5661 5662 5663 /* 5664 * Print sense data from a tcw. 5665 */ 5666 static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, 5667 struct dasd_ccw_req *req, struct irb *irb) 5668 { 5669 char *page; 5670 int len, sl, sct, residual; 5671 struct tsb *tsb; 5672 u8 *sense, *rcq; 5673 5674 page = (char *) get_zeroed_page(GFP_ATOMIC); 5675 if (page == NULL) { 5676 DBF_DEV_EVENT(DBF_WARNING, device, " %s", 5677 "No memory to dump sense data"); 5678 return; 5679 } 5680 /* dump the sense data */ 5681 len = sprintf(page, "I/O status report:\n"); 5682 len += sprintf(page + len, 5683 "in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " 5684 "CS:%02X fcxs:%02X schxs:%02X RC:%d\n", 5685 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), 5686 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), 5687 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), 5688 irb->scsw.tm.fcxs, 5689 (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq, 5690 req ? 
req->intrc : 0); 5691 len += sprintf(page + len, "Failing TCW: %px\n", 5692 dma32_to_virt(irb->scsw.tm.tcw)); 5693 5694 tsb = NULL; 5695 sense = NULL; 5696 if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01)) 5697 tsb = tcw_get_tsb(dma32_to_virt(irb->scsw.tm.tcw)); 5698 5699 if (tsb) { 5700 len += sprintf(page + len, "tsb->length %d\n", tsb->length); 5701 len += sprintf(page + len, "tsb->flags %x\n", tsb->flags); 5702 len += sprintf(page + len, "tsb->dcw_offset %d\n", tsb->dcw_offset); 5703 len += sprintf(page + len, "tsb->count %d\n", tsb->count); 5704 residual = tsb->count - 28; 5705 len += sprintf(page + len, "residual %d\n", residual); 5706 5707 switch (tsb->flags & 0x07) { 5708 case 1: /* tsa_iostat */ 5709 len += sprintf(page + len, "tsb->tsa.iostat.dev_time %d\n", 5710 tsb->tsa.iostat.dev_time); 5711 len += sprintf(page + len, "tsb->tsa.iostat.def_time %d\n", 5712 tsb->tsa.iostat.def_time); 5713 len += sprintf(page + len, "tsb->tsa.iostat.queue_time %d\n", 5714 tsb->tsa.iostat.queue_time); 5715 len += sprintf(page + len, "tsb->tsa.iostat.dev_busy_time %d\n", 5716 tsb->tsa.iostat.dev_busy_time); 5717 len += sprintf(page + len, "tsb->tsa.iostat.dev_act_time %d\n", 5718 tsb->tsa.iostat.dev_act_time); 5719 sense = tsb->tsa.iostat.sense; 5720 break; 5721 case 2: /* ts_ddpc */ 5722 len += sprintf(page + len, "tsb->tsa.ddpc.rc %d\n", 5723 tsb->tsa.ddpc.rc); 5724 for (sl = 0; sl < 2; sl++) { 5725 len += sprintf(page + len, 5726 "tsb->tsa.ddpc.rcq %2d-%2d: ", 5727 (8 * sl), ((8 * sl) + 7)); 5728 rcq = tsb->tsa.ddpc.rcq; 5729 for (sct = 0; sct < 8; sct++) { 5730 len += sprintf(page + len, "%02x", 5731 rcq[8 * sl + sct]); 5732 } 5733 len += sprintf(page + len, "\n"); 5734 } 5735 sense = tsb->tsa.ddpc.sense; 5736 break; 5737 case 3: /* tsa_intrg */ 5738 len += sprintf(page + len, 5739 "tsb->tsa.intrg.: not supported yet\n"); 5740 break; 5741 } 5742 5743 if (sense) { 5744 for (sl = 0; sl < 4; sl++) { 5745 len += sprintf(page + len, 5746 "Sense(hex) %2d-%2d:", 5747 (8 * sl), ((8 * sl) + 7)); 5748 for (sct = 0; sct < 8; sct++) { 5749 len += sprintf(page + len, " %02x", 5750 sense[8 * sl + sct]); 5751 } 5752 len += sprintf(page + len, "\n"); 5753 } 5754 5755 if (sense[27] & DASD_SENSE_BIT_0) { 5756 /* 24 Byte Sense Data */ 5757 sprintf(page + len, 5758 "24 Byte: %x MSG %x, %s MSGb to SYSOP\n", 5759 sense[7] >> 4, sense[7] & 0x0f, 5760 sense[1] & 0x10 ? "" : "no"); 5761 } else { 5762 /* 32 Byte Sense Data */ 5763 sprintf(page + len, 5764 "32 Byte: Format: %x Exception class %x\n", 5765 sense[6] & 0x0f, sense[22] >> 4); 5766 } 5767 } else { 5768 sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n"); 5769 } 5770 } else { 5771 sprintf(page + len, "SORRY - NO TSB DATA AVAILABLE\n"); 5772 } 5773 dev_err(&device->cdev->dev, "%s", page); 5774 free_page((unsigned long) page); 5775 } 5776 5777 static void dasd_eckd_dump_sense(struct dasd_device *device, 5778 struct dasd_ccw_req *req, struct irb *irb) 5779 { 5780 u8 *sense = dasd_get_sense(irb); 5781 5782 if (scsw_is_tm(&irb->scsw)) { 5783 /* 5784 * In some cases the 'File Protected' or 'Incorrect Length' 5785 * error might be expected and log messages shouldn't be written 5786 * then. Check if the according suppress bit is set. 
static void dasd_eckd_dump_sense(struct dasd_device *device,
				 struct dasd_ccw_req *req, struct irb *irb)
{
	u8 *sense = dasd_get_sense(irb);

	if (scsw_is_tm(&irb->scsw)) {
		/*
		 * In some cases the 'File Protected' or 'Incorrect Length'
		 * error might be expected and log messages shouldn't be
		 * written then. Check if the according suppress bit is set.
		 */
		if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
		    test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
			return;
		if (scsw_cstat(&irb->scsw) == 0x40 &&
		    test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
			return;

		dasd_eckd_dump_sense_tcw(device, req, irb);
	} else {
		/*
		 * In some cases the 'Command Reject' or 'No Record Found'
		 * error might be expected and log messages shouldn't be
		 * written then. Check if the according suppress bit is set.
		 */
		if (sense && sense[0] & SNS0_CMD_REJECT &&
		    test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
			return;

		if (sense && sense[1] & SNS1_NO_REC_FOUND &&
		    test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
			return;

		dasd_eckd_dump_sense_ccw(device, req, irb);
	}
}

static int dasd_eckd_reload_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	char print_uid[DASD_UID_STRLEN];
	int rc, old_base;
	struct dasd_uid uid;
	unsigned long flags;

	/*
	 * remove device from alias handling to prevent new requests
	 * from being scheduled on the wrong alias device
	 */
	dasd_alias_remove_device(device);

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	old_base = private->uid.base_unit_addr;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err;

	dasd_eckd_read_fc_security(device);

	rc = dasd_eckd_generate_uid(device);
	if (rc)
		goto out_err;
	/*
	 * update unit address configuration and
	 * add device to alias management
	 */
	dasd_alias_update_add_device(device);

	dasd_eckd_get_uid(device, &uid);

	if (old_base != uid.base_unit_addr) {
		dasd_eckd_get_uid_string(&private->conf, print_uid);
		dev_info(&device->cdev->dev,
			 "An Alias device was reassigned to a new base device "
			 "with UID: %s\n", print_uid);
	}
	return 0;

out_err:
	return -1;
}

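/*
 * Read the attention message buffer of the storage server via a
 * PSF/RSSD request, preferably on the path the attention was raised
 * on (lpum); retry with an open path mask if the requested path is
 * not usable, e.g. under z/VM.
 */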
static int dasd_eckd_read_message_buffer(struct dasd_device *device,
					 struct dasd_rssd_messages *messages,
					 __u8 lpum)
{
	struct dasd_rssd_messages *message_buf;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_messages)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}

	cqr->lpm = lpum;
retry:
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10 * HZ;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
	/* dasd_sleep_on_immediatly does not do complex error
	 * recovery so clear erp flag and set retry counter to
	 * do basic erp */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 256;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x03;	/* Message Buffer */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = virt_to_dma32(prssdp);

	/* Read Subsystem Data - message buffer */
	message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
	memset(message_buf, 0, sizeof(struct dasd_rssd_messages));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_messages);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = virt_to_dma32(message_buf);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on_immediatly(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		message_buf = (struct dasd_rssd_messages *)
			(prssdp + 1);
		memcpy(messages, message_buf,
		       sizeof(struct dasd_rssd_messages));
	} else if (cqr->lpm) {
		/*
		 * on z/VM we might not be able to do I/O on the requested path
		 * but instead we get the required information on any path
		 * so retry with open path mask
		 */
		cqr->lpm = 0;
		goto retry;
	} else
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading messages failed with rc=%d\n",
				rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

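/*
 * Query the hosts that have access to the volume via a PSF/RSSD
 * request and copy the result to the caller provided buffer.
 * Not available for HyperPAV alias devices and only done if the
 * storage server advertises the facility.
 */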
static int dasd_eckd_query_host_access(struct dasd_device *device,
				       struct dasd_psf_query_host_access *data)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_query_host_access *host_access;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* not available for HYPER PAV alias devices */
	if (!device->block && private->lcu->pav == HYPER_PAV)
		return -EOPNOTSUPP;

	/* may not be supported by the storage server */
	if (!(private->features.feature[14] & 0x80))
		return -EOPNOTSUPP;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   sizeof(struct dasd_psf_prssd_data) + 1,
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}
	host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
	if (!host_access) {
		dasd_sfree_request(cqr, device);
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate host_access buffer");
		return -ENOMEM;
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_QHA;	/* query host access */
	/* LSS and Volume that will be queried */
	prssdp->lss = private->conf.ned->ID;
	prssdp->volume = private->conf.ned->unit_addr;
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = virt_to_dma32(prssdp);

	/* Read Subsystem Data - query host access */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_psf_query_host_access);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = virt_to_dma32(host_access);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* the command might not be supported, suppress error message */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		*data = *host_access;
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading host access data failed with rc=%d\n",
				rc);
		rc = -EOPNOTSUPP;
	}

	dasd_sfree_request(cqr, cqr->memdev);
	kfree(host_access);
	return rc;
}

/*
 * return number of grouped devices
 */
static int dasd_eckd_host_access_count(struct dasd_device *device)
{
	struct dasd_psf_query_host_access *access;
	struct dasd_ckd_path_group_entry *entry;
	struct dasd_ckd_host_information *info;
	int count = 0;
	int rc, i;

	access = kzalloc(sizeof(*access), GFP_NOIO);
	if (!access) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate access buffer");
		return -ENOMEM;
	}
	rc = dasd_eckd_query_host_access(device, access);
	if (rc) {
		kfree(access);
		return rc;
	}

	info = (struct dasd_ckd_host_information *)
		access->host_access_information;
	for (i = 0; i < info->entry_count; i++) {
		entry = (struct dasd_ckd_path_group_entry *)
			(info->entry + i * info->entry_size);
		if (entry->status_flags & DASD_ECKD_PG_GROUPED)
			count++;
	}

	kfree(access);
	return count;
}

/*
 * write host access information to a sequential file
 */
static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
{
	struct dasd_psf_query_host_access *access;
	struct dasd_ckd_path_group_entry *entry;
	struct dasd_ckd_host_information *info;
	char sysplex[9] = "";
	int rc, i;

	access = kzalloc(sizeof(*access), GFP_NOIO);
	if (!access) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate access buffer");
		return -ENOMEM;
	}
	rc = dasd_eckd_query_host_access(device, access);
	if (rc) {
		kfree(access);
		return rc;
	}

	info = (struct dasd_ckd_host_information *)
		access->host_access_information;
	for (i = 0; i < info->entry_count; i++) {
		entry = (struct dasd_ckd_path_group_entry *)
			(info->entry + i * info->entry_size);
		/* PGID */
		seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
		/* FLAGS */
		seq_printf(m, "status_flags %02x\n", entry->status_flags);
		/* SYSPLEX NAME */
		memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
		EBCASC(sysplex, sizeof(sysplex));
		seq_printf(m, "sysplex_name %8s\n", sysplex);
		/* SUPPORTED CYLINDER */
		seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
		/* TIMESTAMP */
		seq_printf(m, "timestamp %lu\n", (unsigned long)
			   entry->timestamp);
	}
	kfree(access);

	return 0;
}

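/*
 * Find the device entry in a copy relation that is configured with
 * the given bus ID; returns NULL if no entry matches.
 */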
static struct dasd_device
*copy_relation_find_device(struct dasd_copy_relation *copy,
			   char *busid)
{
	int i;

	for (i = 0; i < DASD_CP_ENTRIES; i++) {
		if (copy->entry[i].configured &&
		    strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0)
			return copy->entry[i].device;
	}
	return NULL;
}

/*
 * set the new active/primary device
 */
static void copy_pair_set_active(struct dasd_copy_relation *copy, char *new_busid,
				 char *old_busid)
{
	int i;

	for (i = 0; i < DASD_CP_ENTRIES; i++) {
		if (copy->entry[i].configured &&
		    strncmp(copy->entry[i].busid, new_busid,
			    DASD_BUS_ID_SIZE) == 0) {
			copy->active = &copy->entry[i];
			copy->entry[i].primary = true;
		} else if (copy->entry[i].configured &&
			   strncmp(copy->entry[i].busid, old_busid,
				   DASD_BUS_ID_SIZE) == 0) {
			copy->entry[i].primary = false;
		}
	}
}

/*
 * The function will swap the role of a given copy pair.
 * During the swap operation the relation of the blockdevice is disconnected
 * from the old primary and connected to the new.
 *
 * IO is paused on the block queue before swap and may be resumed afterwards.
 */
static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid,
				    char *sec_busid)
{
	struct dasd_device *primary, *secondary;
	struct dasd_copy_relation *copy;
	struct dasd_block *block;
	struct gendisk *gdp;

	copy = device->copy;
	if (!copy)
		return DASD_COPYPAIRSWAP_INVALID;
	primary = copy->active->device;
	if (!primary)
		return DASD_COPYPAIRSWAP_INVALID;
	/* double check if swap has correct primary */
	if (strncmp(dev_name(&primary->cdev->dev), prim_busid, DASD_BUS_ID_SIZE) != 0)
		return DASD_COPYPAIRSWAP_PRIMARY;

	secondary = copy_relation_find_device(copy, sec_busid);
	if (!secondary)
		return DASD_COPYPAIRSWAP_SECONDARY;

	/*
	 * usually the device should be quiesced for swap;
	 * out of paranoia, stop the device and requeue requests again
	 */
	dasd_device_set_stop_bits(primary, DASD_STOPPED_PPRC);
	dasd_device_set_stop_bits(secondary, DASD_STOPPED_PPRC);
	dasd_generic_requeue_all_requests(primary);

	/* swap DASD internal device <> block assignment */
	block = primary->block;
	primary->block = NULL;
	secondary->block = block;
	block->base = secondary;
	/* set new primary device in COPY relation */
	copy_pair_set_active(copy, sec_busid, prim_busid);

	/* swap blocklayer device link */
	gdp = block->gdp;
	dasd_add_link_to_gendisk(gdp, secondary);

	/* re-enable device */
	dasd_device_remove_stop_bits(primary, DASD_STOPPED_PPRC);
	dasd_device_remove_stop_bits(secondary, DASD_STOPPED_PPRC);
	dasd_schedule_device_bh(secondary);

	return DASD_COPYPAIRSWAP_SUCCESS;
}

/*
 * Perform Subsystem Function - Peer-to-Peer Remote Copy Extended Query
 */
static int dasd_eckd_query_pprc_status(struct dasd_device *device,
				       struct dasd_pprc_data_sc4 *data)
{
	struct dasd_pprc_data_sc4 *pprc_data;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   sizeof(*prssdp) + sizeof(*pprc_data) + 1,
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate query PPRC status request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *)cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_PPRCEQ;
	prssdp->varies[0] = PPRCEQ_SCOPE_4;
	pprc_data = (struct dasd_pprc_data_sc4 *)(prssdp + 1);

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = virt_to_dma32(prssdp);

	/* Read Subsystem Data - query PPRC status */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*pprc_data);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = virt_to_dma32(pprc_data);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		*data = *pprc_data;
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"PPRC Extended Query failed with rc=%d\n",
				rc);
		rc = -EOPNOTSUPP;
	}

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * ECKD NOP - no operation
 */
static int dasd_eckd_nop(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 1, device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate NOP request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 1;
	cqr->expires = 10 * HZ;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_NOP;
	ccw->flags |= CCW_FLAG_SLI;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc != 0) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"NOP failed with rc=%d\n", rc);
		rc = -EOPNOTSUPP;
	}
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

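/*
 * Use a NOP round trip to check that the device can still be
 * reached for I/O.
 */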
static int dasd_eckd_device_ping(struct dasd_device *device)
{
	return dasd_eckd_nop(device);
}

/*
 * Perform Subsystem Function - CUIR response
 */
static int
dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
			    __u32 message_id, __u8 lpum)
{
	struct dasd_psf_cuir_response *psf_cuir;
	int pos = pathmask_to_pos(lpum);
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */,
				   sizeof(struct dasd_psf_cuir_response),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-CUIR request");
		return PTR_ERR(cqr);
	}

	psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
	psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
	psf_cuir->cc = response;
	psf_cuir->chpid = device->path[pos].chpid;
	psf_cuir->message_id = message_id;
	psf_cuir->cssid = device->path[pos].cssid;
	psf_cuir->ssid = device->path[pos].ssid;
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = virt_to_dma32(psf_cuir);
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(struct dasd_psf_cuir_response);

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);

	rc = dasd_sleep_on(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Return the configuration data referenced by the record selector if
 * a record selector is specified; otherwise return the conf_data
 * pointer for the path specified by lpum.
 */
static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
						     __u8 lpum,
						     struct dasd_cuir_message *cuir)
{
	struct dasd_conf_data *conf_data;
	int path, pos;

	if (cuir->record_selector == 0)
		goto out;
	for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
		conf_data = device->path[pos].conf_data;
		if (conf_data->gneq.record_selector ==
		    cuir->record_selector)
			return conf_data;
	}
out:
	return device->path[pathmask_to_pos(lpum)].conf_data;
}

/*
 * This function determines the scope of a reconfiguration request by
 * analysing the path and device selection data provided in the CUIR request.
 * Returns a path mask containing CUIR affected paths for the given device.
 *
 * If the CUIR request does not contain the required information, return the
 * path mask of the path the attention message for the CUIR request was
 * received on.
 */
static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
				struct dasd_cuir_message *cuir)
{
	struct dasd_conf_data *ref_conf_data;
	unsigned long bitmask = 0, mask = 0;
	struct dasd_conf_data *conf_data;
	unsigned int pos, path;
	char *ref_gneq, *gneq;
	char *ref_ned, *ned;
	int tbcpm = 0;

	/* if CUIR request does not specify the scope use the path
	   the attention message was presented on */
	if (!cuir->ned_map ||
	    !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
		return lpum;

	/* get reference conf data */
	ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
	/* reference ned is determined by ned_map field */
	pos = 8 - ffs(cuir->ned_map);
	ref_ned = (char *)&ref_conf_data->neds[pos];
	ref_gneq = (char *)&ref_conf_data->gneq;
	/* transfer 24 bit neq_map to mask */
	mask = cuir->neq_map[2];
	mask |= cuir->neq_map[1] << 8;
	mask |= cuir->neq_map[0] << 16;

	for (path = 0; path < 8; path++) {
		/* initialise data per path */
		bitmask = mask;
		conf_data = device->path[path].conf_data;
		pos = 8 - ffs(cuir->ned_map);
		ned = (char *) &conf_data->neds[pos];
		/* compare reference ned and per path ned */
		if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
			continue;
		gneq = (char *)&conf_data->gneq;
		/* compare reference gneq and per_path gneq under
		   24 bit mask where mask bit 0 equals byte 7 of
		   the gneq and mask bit 24 equals byte 31 */
		while (bitmask) {
			pos = ffs(bitmask) - 1;
			if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
			    != 0)
				break;
			clear_bit(pos, &bitmask);
		}
		if (bitmask)
			continue;
		/* device and path match the reference values
		   add path to CUIR scope */
		tbcpm |= 0x80 >> path;
	}
	return tbcpm;
}

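/*
 * Inform the user about every channel path that is affected by the
 * CUIR action, either quiesced or resumed.
 */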
pr_info("Path %x.%02x is back online after service on the storage server", 6465 device->path[pos].cssid, 6466 device->path[pos].chpid); 6467 clear_bit(7 - pos, &paths); 6468 } 6469 } 6470 6471 static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum, 6472 struct dasd_cuir_message *cuir) 6473 { 6474 unsigned long tbcpm; 6475 6476 tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir); 6477 /* nothing to do if path is not in use */ 6478 if (!(dasd_path_get_opm(device) & tbcpm)) 6479 return 0; 6480 if (!(dasd_path_get_opm(device) & ~tbcpm)) { 6481 /* no path would be left if the CUIR action is taken 6482 return error */ 6483 return -EINVAL; 6484 } 6485 /* remove device from operational path mask */ 6486 dasd_path_remove_opm(device, tbcpm); 6487 dasd_path_add_cuirpm(device, tbcpm); 6488 return tbcpm; 6489 } 6490 6491 /* 6492 * walk through all devices and build a path mask to quiesce them 6493 * return an error if the last path to a device would be removed 6494 * 6495 * if only part of the devices are quiesced and an error 6496 * occurs no onlining necessary, the storage server will 6497 * notify the already set offline devices again 6498 */ 6499 static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum, 6500 struct dasd_cuir_message *cuir) 6501 { 6502 struct dasd_eckd_private *private = device->private; 6503 struct alias_pav_group *pavgroup, *tempgroup; 6504 struct dasd_device *dev, *n; 6505 unsigned long paths = 0; 6506 unsigned long flags; 6507 int tbcpm; 6508 6509 /* active devices */ 6510 list_for_each_entry_safe(dev, n, &private->lcu->active_devices, 6511 alias_list) { 6512 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags); 6513 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir); 6514 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags); 6515 if (tbcpm < 0) 6516 goto out_err; 6517 paths |= tbcpm; 6518 } 6519 /* inactive devices */ 6520 list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices, 6521 alias_list) { 6522 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags); 6523 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir); 6524 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags); 6525 if (tbcpm < 0) 6526 goto out_err; 6527 paths |= tbcpm; 6528 } 6529 /* devices in PAV groups */ 6530 list_for_each_entry_safe(pavgroup, tempgroup, 6531 &private->lcu->grouplist, group) { 6532 list_for_each_entry_safe(dev, n, &pavgroup->baselist, 6533 alias_list) { 6534 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags); 6535 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir); 6536 spin_unlock_irqrestore( 6537 get_ccwdev_lock(dev->cdev), flags); 6538 if (tbcpm < 0) 6539 goto out_err; 6540 paths |= tbcpm; 6541 } 6542 list_for_each_entry_safe(dev, n, &pavgroup->aliaslist, 6543 alias_list) { 6544 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags); 6545 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir); 6546 spin_unlock_irqrestore( 6547 get_ccwdev_lock(dev->cdev), flags); 6548 if (tbcpm < 0) 6549 goto out_err; 6550 paths |= tbcpm; 6551 } 6552 } 6553 /* notify user about all paths affected by CUIR action */ 6554 dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE); 6555 return 0; 6556 out_err: 6557 return tbcpm; 6558 } 6559 6560 static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum, 6561 struct dasd_cuir_message *cuir) 6562 { 6563 struct dasd_eckd_private *private = device->private; 6564 struct alias_pav_group *pavgroup, *tempgroup; 6565 struct dasd_device *dev, *n; 6566 unsigned long paths = 0; 6567 int tbcpm; 6568 6569 /* 6570 * the 
static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
				 struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long paths = 0;
	int tbcpm;

	/*
	 * the path may have been added through a generic path event before;
	 * only trigger path verification if the path is not already in use
	 */
	list_for_each_entry_safe(dev, n,
				 &private->lcu->active_devices,
				 alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}
	list_for_each_entry_safe(dev, n,
				 &private->lcu->inactive_devices,
				 alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist,
				 group) {
		list_for_each_entry_safe(dev, n,
					 &pavgroup->baselist,
					 alias_list) {
			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
			paths |= tbcpm;
			if (!(dasd_path_get_opm(dev) & tbcpm)) {
				dasd_path_add_tbvpm(dev, tbcpm);
				dasd_schedule_device_bh(dev);
			}
		}
		list_for_each_entry_safe(dev, n,
					 &pavgroup->aliaslist,
					 alias_list) {
			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
			paths |= tbcpm;
			if (!(dasd_path_get_opm(dev) & tbcpm)) {
				dasd_path_add_tbvpm(dev, tbcpm);
				dasd_schedule_device_bh(dev);
			}
		}
	}
	/* notify user about all paths affected by CUIR action */
	dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
	return 0;
}

static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
				  __u8 lpum)
{
	struct dasd_cuir_message *cuir = messages;
	int response;

	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR request: %016llx %016llx %016llx %08x",
		      ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
		      ((u32 *)cuir)[3]);

	if (cuir->code == CUIR_QUIESCE) {
		/* quiesce */
		if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
			response = PSF_CUIR_LAST_PATH;
		else
			response = PSF_CUIR_COMPLETED;
	} else if (cuir->code == CUIR_RESUME) {
		/* resume */
		dasd_eckd_cuir_resume(device, lpum, cuir);
		response = PSF_CUIR_COMPLETED;
	} else
		response = PSF_CUIR_NOT_SUPPORTED;

	dasd_eckd_psf_cuir_response(device, response,
				    cuir->message_id, lpum);
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR response: %d on message ID %08x", response,
		      cuir->message_id);
	/* to make sure there is no attention left schedule work again */
	device->discipline->check_attention(device, lpum);
}

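/*
 * Resume all devices of the LCU that were stopped because the
 * extent pool ran out of space.
 */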
static void dasd_eckd_oos_resume(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long flags;

	spin_lock_irqsave(&private->lcu->lock, flags);
	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
				 alias_list) {
		if (dev->stopped & DASD_STOPPED_NOSPC)
			dasd_generic_space_avail(dev);
	}
	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
				 alias_list) {
		if (dev->stopped & DASD_STOPPED_NOSPC)
			dasd_generic_space_avail(dev);
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist,
				 group) {
		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
					 alias_list) {
			if (dev->stopped & DASD_STOPPED_NOSPC)
				dasd_generic_space_avail(dev);
		}
		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
					 alias_list) {
			if (dev->stopped & DASD_STOPPED_NOSPC)
				dasd_generic_space_avail(dev);
		}
	}
	spin_unlock_irqrestore(&private->lcu->lock, flags);
}

static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
				 __u8 lpum)
{
	struct dasd_oos_message *oos = messages;

	switch (oos->code) {
	case REPO_WARN:
	case POOL_WARN:
		dev_warn(&device->cdev->dev,
			 "Extent pool usage has reached a critical value\n");
		dasd_eckd_oos_resume(device);
		break;
	case REPO_EXHAUST:
	case POOL_EXHAUST:
		dev_warn(&device->cdev->dev,
			 "Extent pool is exhausted\n");
		break;
	case REPO_RELIEVE:
	case POOL_RELIEVE:
		dev_info(&device->cdev->dev,
			 "Extent pool physical space constraint has been relieved\n");
		break;
	}

	/* In any case, update related data */
	dasd_eckd_read_ext_pool_info(device);

	/* to make sure there is no attention left schedule work again */
	device->discipline->check_attention(device, lpum);
}

static void dasd_eckd_check_attention_work(struct work_struct *work)
{
	struct check_attention_work_data *data;
	struct dasd_rssd_messages *messages;
	struct dasd_device *device;
	int rc;

	data = container_of(work, struct check_attention_work_data, worker);
	device = data->device;
	messages = kzalloc(sizeof(*messages), GFP_KERNEL);
	if (!messages) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate attention message buffer");
		goto out;
	}
	rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
	if (rc)
		goto out;

	if (messages->length == ATTENTION_LENGTH_CUIR &&
	    messages->format == ATTENTION_FORMAT_CUIR)
		dasd_eckd_handle_cuir(device, messages, data->lpum);
	if (messages->length == ATTENTION_LENGTH_OOS &&
	    messages->format == ATTENTION_FORMAT_OOS)
		dasd_eckd_handle_oos(device, messages, data->lpum);

out:
	dasd_put_device(device);
	kfree(messages);
	kfree(data);
}

static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
{
	struct check_attention_work_data *data;

	data = kzalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return -ENOMEM;
	INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
	dasd_get_device(device);
	data->device = device;
	data->lpum = lpum;
	schedule_work(&data->worker);
	return 0;
}

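/*
 * Disable a channel path that lost High Performance FICON support,
 * unless it is the only remaining operational path.
 */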
static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
{
	if (~lpum & dasd_path_get_opm(device)) {
		dasd_path_add_nohpfpm(device, lpum);
		dasd_path_remove_opm(device, lpum);
		dev_err(&device->cdev->dev,
			"Channel path %02X lost HPF functionality and is disabled\n",
			lpum);
		return 1;
	}
	return 0;
}

static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	dev_err(&device->cdev->dev,
		"High Performance FICON disabled\n");
	private->fcx_max_data = 0;
}

static int dasd_eckd_hpf_enabled(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->fcx_max_data ? 1 : 0;
}

static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
				       struct irb *irb)
{
	struct dasd_eckd_private *private = device->private;

	if (!private->fcx_max_data) {
		/* sanity check for no HPF, the error makes no sense */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Trying to disable HPF for a non HPF device");
		return;
	}
	if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
		dasd_eckd_disable_hpf_device(device);
	} else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
		if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
			return;
		dasd_eckd_disable_hpf_device(device);
		dasd_path_set_tbvpm(device,
				    dasd_path_get_hpfpm(device));
	}
	/*
	 * prevent that any new I/O is started on the device and schedule a
	 * requeue of existing requests
	 */
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
	dasd_schedule_requeue(device);
}

static unsigned int dasd_eckd_max_sectors(struct dasd_block *block)
{
	if (block->base->features & DASD_FEATURE_USERAW) {
		/*
		 * the max_blocks value for raw_track access is 256
		 * it is higher than the native ECKD value because we
		 * only need one ccw per track
		 * so the max_hw_sectors are
		 * 2048 x 512B = 1024kB = 16 tracks
		 */
		return DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
	}

	return DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
}

static struct ccw_driver dasd_eckd_driver = {
	.driver = {
		.name	= "dasd-eckd",
		.owner	= THIS_MODULE,
		.dev_groups = dasd_dev_groups,
	},
	.ids	     = dasd_eckd_ids,
	.probe	     = dasd_eckd_probe,
	.remove      = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = dasd_eckd_set_online,
	.notify      = dasd_generic_notify,
	.path_event  = dasd_generic_path_event,
	.shutdown    = dasd_generic_shutdown,
	.uc_handler  = dasd_generic_uc_handler,
	.int_class   = IRQIO_DAS,
};

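/*
 * Discipline operations the DASD core layer uses to drive
 * ECKD devices.
 */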
static struct dasd_discipline dasd_eckd_discipline = {
	.owner = THIS_MODULE,
	.name = "ECKD",
	.ebcname = "ECKD",
	.check_device = dasd_eckd_check_characteristics,
	.uncheck_device = dasd_eckd_uncheck_device,
	.do_analysis = dasd_eckd_do_analysis,
	.pe_handler = dasd_eckd_pe_handler,
	.basic_to_ready = dasd_eckd_basic_to_ready,
	.online_to_ready = dasd_eckd_online_to_ready,
	.basic_to_known = dasd_eckd_basic_to_known,
	.max_sectors = dasd_eckd_max_sectors,
	.fill_geometry = dasd_eckd_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.handle_terminated_request = dasd_eckd_handle_terminated_request,
	.format_device = dasd_eckd_format_device,
	.check_device_format = dasd_eckd_check_device_format,
	.erp_action = dasd_eckd_erp_action,
	.erp_postaction = dasd_eckd_erp_postaction,
	.check_for_device_change = dasd_eckd_check_for_device_change,
	.build_cp = dasd_eckd_build_alias_cp,
	.free_cp = dasd_eckd_free_alias_cp,
	.dump_sense = dasd_eckd_dump_sense,
	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
	.fill_info = dasd_eckd_fill_info,
	.ioctl = dasd_eckd_ioctl,
	.reload = dasd_eckd_reload_device,
	.get_uid = dasd_eckd_get_uid,
	.kick_validate = dasd_eckd_kick_validate_server,
	.check_attention = dasd_eckd_check_attention,
	.host_access_count = dasd_eckd_host_access_count,
	.hosts_print = dasd_hosts_print,
	.handle_hpf_error = dasd_eckd_handle_hpf_error,
	.disable_hpf = dasd_eckd_disable_hpf_device,
	.hpf_enabled = dasd_eckd_hpf_enabled,
	.reset_path = dasd_eckd_reset_path,
	.is_ese = dasd_eckd_is_ese,
	.space_allocated = dasd_eckd_space_allocated,
	.space_configured = dasd_eckd_space_configured,
	.logical_capacity = dasd_eckd_logical_capacity,
	.release_space = dasd_eckd_release_space,
	.ext_pool_id = dasd_eckd_ext_pool_id,
	.ext_size = dasd_eckd_ext_size,
	.ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
	.ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
	.ext_pool_oos = dasd_eckd_ext_pool_oos,
	.ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
	.ese_format = dasd_eckd_ese_format,
	.ese_read = dasd_eckd_ese_read,
	.pprc_status = dasd_eckd_query_pprc_status,
	.pprc_enabled = dasd_eckd_pprc_enabled,
	.copy_pair_swap = dasd_eckd_copy_pair_swap,
	.device_ping = dasd_eckd_device_ping,
};

static int __init
dasd_eckd_init(void)
{
	int ret;

	ASCEBC(dasd_eckd_discipline.ebcname, 4);
	dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
				   GFP_KERNEL | GFP_DMA);
	if (!dasd_reserve_req)
		return -ENOMEM;
	dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
				    GFP_KERNEL | GFP_DMA);
	if (!dasd_vol_info_req) {
		kfree(dasd_reserve_req);
		return -ENOMEM;
	}
	pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
				    GFP_KERNEL | GFP_DMA);
	if (!pe_handler_worker) {
		kfree(dasd_reserve_req);
		kfree(dasd_vol_info_req);
		return -ENOMEM;
	}
	rawpadpage = (void *)__get_free_page(GFP_KERNEL);
	if (!rawpadpage) {
		kfree(pe_handler_worker);
		kfree(dasd_reserve_req);
		kfree(dasd_vol_info_req);
		return -ENOMEM;
	}
	ret = ccw_driver_register(&dasd_eckd_driver);
	if (!ret)
		wait_for_device_probe();
	else {
		kfree(pe_handler_worker);
		kfree(dasd_reserve_req);
		kfree(dasd_vol_info_req);
		free_page((unsigned long)rawpadpage);
	}
	return ret;
}

static void __exit
dasd_eckd_cleanup(void)
{
	ccw_driver_unregister(&dasd_eckd_driver);
	kfree(pe_handler_worker);
	kfree(dasd_reserve_req);
	kfree(dasd_vol_info_req);
	free_page((unsigned long)rawpadpage);
}

module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);