// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
 */

#define KMSG_COMPONENT "dasd-eckd"

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/init.h>
#include <linux/seq_file.h>

#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>
#include <asm/schid.h>
#include <asm/chpid.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"

#define ECKD_C0(i) (i->home_bytes)
#define ECKD_F(i) (i->formula)
#define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
		    (i->factors.f_0x02.f1))
#define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
		    (i->factors.f_0x02.f2))
#define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
		    (i->factors.f_0x02.f3))
#define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
#define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
#define ECKD_F6(i) (i->factor6)
#define ECKD_F7(i) (i->factor7)
#define ECKD_F8(i) (i->factor8)

/*
 * raw track access always maps to 64k in memory
 * so it maps to 16 blocks of 4k per track
 */
#define DASD_RAW_BLOCK_PER_TRACK 16
#define DASD_RAW_BLOCKSIZE 4096
/* 64k are 128 x 512 byte sectors */
#define DASD_RAW_SECTORS_PER_TRACK 128

MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_eckd_discipline;

/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);

static struct ccw_driver dasd_eckd_driver; /* see below */

static void *rawpadpage;

#define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2

/* emergency request for reserve/release */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	char data[32];
} *dasd_reserve_req;
static DEFINE_MUTEX(dasd_reserve_mutex);
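/*
 * Note: dasd_reserve_req above is allocated once at driver
 * initialization so that a reserve/release request can still be built
 * when memory is exhausted; dasd_reserve_mutex serializes its use.
 */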
/* definitions for the path verification worker */
struct path_verification_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
	int isglobal;
	__u8 tbvpm;
};
static struct path_verification_work_data *path_verification_worker;
static DEFINE_MUTEX(dasd_path_verification_mutex);

struct check_attention_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	__u8 lpum;
};

static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
			struct dasd_device *, struct dasd_device *,
			unsigned int, int, unsigned int, unsigned int,
			unsigned int, unsigned int);

/* initial attempt at a probe function. this can be simplified once
 * the other detection code is gone */
static int
dasd_eckd_probe (struct ccw_device *cdev)
{
	int ret;

	/* set ECKD specific ccw-device options */
	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_eckd_probe: could not set "
				"ccw-device options");
		return ret;
	}
	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
	return ret;
}

static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}

static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140

/* head and record addresses of count_area read in analysis ccw */
static const int count_area_head[] = { 0, 0, 0, 0, 2 };
static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
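/*
 * Small integer helpers used throughout the geometry calculations
 * below, e.g. round_up_multiple(10, 4) == 12 and ceil_quot(10, 4) == 3
 * (an exact multiple is returned unchanged by round_up_multiple()).
 */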
static inline unsigned int
round_up_multiple(unsigned int no, unsigned int mult)
{
	int rem = no % mult;
	return (rem ? no - rem + mult : no);
}

static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return (d1 + (d2 - 1)) / d2;
}

static unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}
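/*
 * Worked example: for a 3390 with no key (kl = 0) and 4096 byte data
 * blocks, dn = ceil_quot(4102, 232) + 1 = 19, so recs_per_track()
 * returns 1729 / (10 + 9 + ceil_quot(4210, 34)) = 1729 / 143 = 12,
 * i.e. twelve 4KB records fit on one track.
 *
 * set_ch_t() below packs a track address into the cylinder/head format
 * used for large volumes: the low 16 bits of the cylinder go into cyl,
 * the remaining high bits into bits 4-15 of head. For example,
 * cylinder 0x12345, head 7 yields cyl = 0x2345, head = 0x17.
 */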
static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
	geo->cyl = (__u16) cyl;
	geo->head = cyl >> 16;
	geo->head <<= 4;
	geo->head |= head;
}

static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
			 struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = get_phys_clock(&data->ep_sys_time);
	/*
	 * Ignore return code if XRC is not supported or
	 * sync clock is switched off
	 */
	if ((rc && !private->rdc_data.facilities.XRC_supported) ||
	    rc == -EOPNOTSUPP || rc == -EACCES)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

	if (ccw) {
		ccw->count = sizeof(struct DE_eckd_data);
		ccw->flags |= CCW_FLAG_SLI;
	}

	return rc;
}

static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device,
	      int blksize)
{
	struct dasd_eckd_private *private = device->private;
	u16 heads, beghead, endhead;
	u32 begcyl, endcyl;
	int rc = 0;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
		ccw->flags = 0;
		ccw->count = 16;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->mask.perm = 0x03;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = blksize;
		rc = set_timestamp(ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
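/*
 * The sector value computed by locate_record_ext() below is an estimate
 * of the angular position on the track at which the record starts,
 * which the hardware can use for rotational positioning. For example,
 * for 4KB records on a 3390 (d = 9 + ceil_quot(4210, 34) = 133),
 * record 2 starts at sector (49 + 1 * (10 + 133)) / 8 = 24.
 */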
static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
			      unsigned int trk, unsigned int rec_on_trk,
			      int count, int cmd, struct dasd_device *device,
			      unsigned int reclen, unsigned int tlf)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
		ccw->flags = 0;
		if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
			ccw->count = 22;
		else
			ccw->count = 20;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/* note: meaning of count depends on the operation
	 * for record based I/O it's the number of records, but for
	 * track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->operation.orientation = 0x0;
		data->operation.operation = 0x3F;
		data->extended_operation = 0x11;
		data->length = 0;
		data->extended_parameter_length = 0x02;
		if (data->count > 8) {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[1] = 0xFF;
			data->extended_parameter[1] <<= (16 - count);
		} else {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[0] <<= (8 - count);
			data->extended_parameter[1] = 0x00;
		}
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x0C;
		data->extended_parameter_length = 0;
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			      "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
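/*
 * The Prefix (PFX) command combines the Define Extent and Locate Record
 * Extended payloads in a single CCW: format 0 carries only the define
 * extent part, format 1 additionally carries the locate record part.
 * The validity bits let a request issued on a PAV alias name its base
 * device (verify_base/hyper_pav).
 */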
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned int format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct LRE_eckd_data *lredata;
	struct DE_eckd_data *dedata;
	int rc = 0;

	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
		ccw->count = sizeof(*pfxdata) + 2;
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
	} else {
		ccw->count = sizeof(*pfxdata);
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata));
	}

	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata->validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata->validity.verify_base = 1;
		pfxdata->validity.hyper_pav = 1;
	}

	rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);

	/*
	 * For some commands the System Time Stamp is set in the define extent
	 * data when XRC is supported. The validity of the time stamp must be
	 * reflected in the prefix data as well.
	 */
	if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
		pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */

	if (format == 1) {
		locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
				  basedev, blksize, tlf);
	}

	return rc;
}

static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}
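/*
 * locate_record() below is the classic 16 byte Locate Record used
 * together with a separate Define Extent CCW when no Prefix command is
 * built; it mirrors the command cases of locate_record_ext() above.
 */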
static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}

/*
 * Returns 1 if the block is one of the special blocks that needs
 * to get read/written with the KD variant of the command.
 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant. So don't wonder about code like:
 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *         ccw->cmd_code |= 0x8;
 */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	if (recid < 3)
		return 1;
	if (recid < blk_per_trk)
		return 0;
	if (recid < 2 * blk_per_trk)
		return 1;
	return 0;
}

/*
 * Returns the record size for the special blocks of the cdl format.
 * Only returns something useful if dasd_eckd_cdl_special is true
 * for the recid.
 */
static inline int
dasd_eckd_cdl_reclen(int recid)
{
	if (recid < 3)
		return sizes_trk0[recid];
	return LABEL_SIZE;
}
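/*
 * Example of the compatible disk layout with blk_per_trk = 12 (4KB
 * blocks on a 3390): recids 0-2 are the 28/148/84 byte records on
 * track 0, recids 3-11 are regular blocks, recids 12-23 on track 1
 * are 140 byte label records, and everything from recid 24 onwards
 * is regular again.
 */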
/* create unique id from private structure. */
static void create_uid(struct dasd_eckd_private *private)
{
	int count;
	struct dasd_uid *uid;

	uid = &private->uid;
	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, private->ned->HDA_location,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (private->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
}

/*
 * Generate device unique id that specifies the physical device.
 */
static int dasd_eckd_generate_uid(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private)
		return -ENODEV;
	if (!private->ned || !private->gneq)
		return -ENODEV;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	create_uid(private);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return 0;
}

static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (private) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		*uid = private->uid;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		return 0;
	}
	return -EINVAL;
}

/*
 * compare device UID with data of a given dasd_eckd_private structure
 * return 0 for match
 */
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
				      struct dasd_eckd_private *private)
{
	struct dasd_uid device_uid;

	create_uid(private);
	dasd_eckd_get_uid(device, &device_uid);

	return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
}

static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   __u8 *rcd_buffer,
				   __u8 lpm)
{
	struct ccw1 *ccw;
	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buffer[0] = 0xE5;
	rcd_buffer[1] = 0xF1;
	rcd_buffer[2] = 0x4B;
	rcd_buffer[3] = 0xF0;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RCD;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
	cqr->magic = DASD_ECKD_MAGIC;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}

/*
 * Wakeup helper for read_conf
 * if the cqr is not done and needs some error recovery
 * the buffer has to be re-initialized with the EBCDIC "V1.0"
 * to show support for virtual device SNEQ
 */
static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct ccw1 *ccw;
	__u8 *rcd_buffer;

	if (cqr->status != DASD_CQR_DONE) {
		ccw = cqr->cpaddr;
		rcd_buffer = (__u8 *)((addr_t) ccw->cda);
		memset(rcd_buffer, 0, sizeof(*rcd_buffer));

		rcd_buffer[0] = 0xE5;
		rcd_buffer[1] = 0xF1;
		rcd_buffer[2] = 0x4B;
		rcd_buffer[3] = 0xF0;
	}
	dasd_wakeup_cb(cqr, data);
}

static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
					   struct dasd_ccw_req *cqr,
					   __u8 *rcd_buffer,
					   __u8 lpm)
{
	struct ciw *ciw;
	int rc;
	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
		return -EOPNOTSUPP;

	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->callback = read_conf_cb;
	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
				   0, /* use rcd_buf as data area */
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		ret = -ENOMEM;
		goto out_error;
	}
	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
	cqr->callback = read_conf_cb;
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}

static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
{

	struct dasd_sneq *sneq;
	int i, count;

	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	count = private->conf_len / sizeof(struct dasd_sneq);
	sneq = (struct dasd_sneq *)private->conf_data;
	for (i = 0; i < count; ++i) {
		if (sneq->flags.identifier == 1 && sneq->format == 1)
			private->sneq = sneq;
		else if (sneq->flags.identifier == 1 && sneq->format == 4)
			private->vdsneq = (struct vd_sneq *)sneq;
		else if (sneq->flags.identifier == 2)
			private->gneq = (struct dasd_gneq *)sneq;
		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
			private->ned = (struct dasd_ned *)sneq;
		sneq++;
	}
	if (!private->ned || !private->gneq) {
		private->ned = NULL;
		private->sneq = NULL;
		private->vdsneq = NULL;
		private->gneq = NULL;
		return -EINVAL;
	}
	return 0;

};
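/*
 * Scan raw configuration data for the general node element qualifier
 * (GNEQ, identifier 2) and return its path-access classification byte;
 * the callers record 0x02 as a non-preferred and 0x03 as a preferred
 * path (nppm/ppm).
 */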
static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
{
	struct dasd_gneq *gneq;
	int i, count, found;

	count = conf_len / sizeof(*gneq);
	gneq = (struct dasd_gneq *)conf_data;
	found = 0;
	for (i = 0; i < count; ++i) {
		if (gneq->flags.identifier == 2) {
			found = 1;
			break;
		}
		gneq++;
	}
	if (found)
		return ((char *)gneq)[18] & 0x07;
	else
		return 0;
}

static void dasd_eckd_clear_conf_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int i;

	private->conf_data = NULL;
	private->conf_len = 0;
	for (i = 0; i < 8; i++) {
		kfree(device->path[i].conf_data);
		device->path[i].conf_data = NULL;
		device->path[i].cssid = 0;
		device->path[i].ssid = 0;
		device->path[i].chpid = 0;
	}
}


static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc, path_err, pos;
	__u8 lpm, opm;
	struct dasd_eckd_private *private, path_private;
	struct dasd_uid *uid;
	char print_path_uid[60], print_device_uid[60];
	struct channel_path_desc_fmt0 *chp_desc;
	struct subchannel_id sch_id;

	private = device->private;
	opm = ccw_device_get_path_mask(device->cdev);
	ccw_device_get_schid(device->cdev, &sch_id);
	conf_data_saved = 0;
	path_err = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm>>= 1) {
		if (!(lpm & opm))
			continue;
		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
					     &conf_len, lpm);
		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data returned "
					"error %d", rc);
			return rc;
		}
		if (conf_data == NULL) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"No configuration data "
					"retrieved");
			/* no further analysis possible */
			dasd_path_add_opm(device, opm);
			continue;	/* no error */
		}
		/* save first valid configuration data */
		if (!conf_data_saved) {
			/* initially clear previously stored conf_data */
			dasd_eckd_clear_conf_data(device);
			private->conf_data = conf_data;
			private->conf_len = conf_len;
			if (dasd_eckd_identify_conf_parts(private)) {
				private->conf_data = NULL;
				private->conf_len = 0;
				kfree(conf_data);
				continue;
			}
			pos = pathmask_to_pos(lpm);
			/* store per path conf_data */
			device->path[pos].conf_data = conf_data;
			device->path[pos].cssid = sch_id.cssid;
			device->path[pos].ssid = sch_id.ssid;
			chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
			if (chp_desc)
				device->path[pos].chpid = chp_desc->chpid;
			kfree(chp_desc);
			/*
			 * build device UID so that other path data
			 * can be compared to it
			 */
			dasd_eckd_generate_uid(device);
			conf_data_saved++;
		} else {
			path_private.conf_data = conf_data;
			path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
			if (dasd_eckd_identify_conf_parts(
				    &path_private)) {
				path_private.conf_data = NULL;
				path_private.conf_len = 0;
				kfree(conf_data);
				continue;
			}
			if (dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				uid = &private->uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"Not all channel paths lead to "
					"the same device, path %02X leads to "
					"device %s instead of %s\n", lpm,
					print_path_uid, print_device_uid);
				path_err = -EINVAL;
				dasd_path_add_cablepm(device, lpm);
				continue;
			}
			pos = pathmask_to_pos(lpm);
			/* store per path conf_data */
			device->path[pos].conf_data = conf_data;
			device->path[pos].cssid = sch_id.cssid;
			device->path[pos].ssid = sch_id.ssid;
			chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
			if (chp_desc)
				device->path[pos].chpid = chp_desc->chpid;
			kfree(chp_desc);
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
		}
		switch (dasd_eckd_path_access(conf_data, conf_len)) {
		case 0x02:
			dasd_path_add_nppm(device, lpm);
			break;
		case 0x03:
			dasd_path_add_ppm(device, lpm);
			break;
		}
		if (!dasd_path_get_opm(device)) {
			dasd_path_set_opm(device, lpm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, lpm);
		}
	}

	return path_err;
}

static u32 get_fcx_max_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int fcx_in_css, fcx_in_gneq, fcx_in_features;
	int tpm, mdc;

	if (dasd_nofcx)
		return 0;
	/* is transport mode supported? */
	fcx_in_css = css_general_characteristics.fcx;
	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
	fcx_in_features = private->features.feature[40] & 0x80;
	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;

	if (!tpm)
		return 0;

	mdc = ccw_device_get_mdc(device->cdev, 0);
	if (mdc < 0) {
		dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
		return 0;
	} else {
		return (u32)mdc * FCX_MAX_DATA_FACTOR;
	}
}

static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
	struct dasd_eckd_private *private = device->private;
	int mdc;
	u32 fcx_max_data;

	if (private->fcx_max_data) {
		mdc = ccw_device_get_mdc(device->cdev, lpm);
		if (mdc < 0) {
			dev_warn(&device->cdev->dev,
				 "Detecting the maximum data size for zHPF "
				 "requests failed (rc=%d) for a new path %x\n",
				 mdc, lpm);
			return mdc;
		}
		fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
		if (fcx_max_data < private->fcx_max_data) {
			dev_warn(&device->cdev->dev,
				 "The maximum data size for zHPF requests %u "
				 "on a new path %x is below the active maximum "
				 "%u\n", fcx_max_data, lpm,
				 private->fcx_max_data);
			return -EACCES;
		}
	}
	return 0;
}
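/*
 * Re-read configuration data from the first operational path that
 * answers and rebuild the device UID from it. This is used when path
 * verification finds a UID mismatch, e.g. after a z/VM hyperswap has
 * exchanged the device underneath us.
 */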
static int rebuild_device_uid(struct dasd_device *device,
			      struct path_verification_work_data *data)
{
	struct dasd_eckd_private *private = device->private;
	__u8 lpm, opm = dasd_path_get_opm(device);
	int rc = -ENODEV;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);

		if (rc) {
			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
				continue;
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data "
					"returned error %d", rc);
			break;
		}
		memcpy(private->conf_data, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		if (dasd_eckd_identify_conf_parts(private)) {
			rc = -ENODEV;
		} else /* first valid path is enough */
			break;
	}

	if (!rc)
		rc = dasd_eckd_generate_uid(device);

	return rc;
}

static void do_path_verification_work(struct work_struct *work)
{
	struct path_verification_work_data *data;
	struct dasd_device *device;
	struct dasd_eckd_private path_private;
	struct dasd_uid *uid;
	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
	unsigned long flags;
	char print_uid[60];
	int rc;

	data = container_of(work, struct path_verification_work_data, worker);
	device = data->device;

	/* delay path verification until the device has been resumed */
	if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
		schedule_work(work);
		return;
	}
	/* check if path verification is already running and delay if so */
	if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
		schedule_work(work);
		return;
	}
	opm = 0;
	npm = 0;
	ppm = 0;
	epm = 0;
	hpfpm = 0;
	cablepm = 0;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & data->tbvpm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);
		if (!rc) {
			switch (dasd_eckd_path_access(data->rcd_buffer,
						      DASD_ECKD_RCD_DATA_SIZE)
				) {
			case 0x02:
				npm |= lpm;
				break;
			case 0x03:
				ppm |= lpm;
				break;
			}
			opm |= lpm;
		} else if (rc == -EOPNOTSUPP) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: No configuration "
					"data retrieved");
			opm |= lpm;
		} else if (rc == -EAGAIN) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: device is stopped,"
					" try again later");
			epm |= lpm;
		} else {
			dev_warn(&device->cdev->dev,
				 "Reading device feature codes failed "
				 "(rc=%d) for new path %x\n", rc, lpm);
			continue;
		}
		if (verify_fcx_max_data(device, lpm)) {
			opm &= ~lpm;
			npm &= ~lpm;
			ppm &= ~lpm;
			hpfpm |= lpm;
			continue;
		}

		/*
		 * save conf_data for comparison after
		 * rebuild_device_uid may have changed
		 * the original data
		 */
		memcpy(&path_rcd_buf, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		path_private.conf_data = (void *) &path_rcd_buf;
		path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
		if (dasd_eckd_identify_conf_parts(&path_private)) {
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
			continue;
		}

		/*
		 * compare path UID with device UID only if at least
		 * one valid path is left
		 * in other case the device UID may have changed and
		 * the first working path UID will be used as device UID
		 */
		if (dasd_path_get_opm(device) &&
		    dasd_eckd_compare_path_uid(device, &path_private)) {
			/*
			 * the comparison was not successful
			 * rebuild the device UID with at least one
			 * known path in case a z/VM hyperswap command
			 * has changed the device
			 *
			 * after this compare again
			 *
			 * if either the rebuild or the recompare fails
			 * the path can not be used
			 */
			if (rebuild_device_uid(device, data) ||
			    dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"The newly added channel path %02X "
					"will not be used because it leads "
					"to a different device %s\n",
					lpm, print_uid);
				opm &= ~lpm;
				npm &= ~lpm;
				ppm &= ~lpm;
				cablepm |= lpm;
				continue;
			}
		}

		/*
		 * There is a small chance that a path is lost again between
		 * above path verification and the following modification of
		 * the device opm mask. We could avoid that race here by using
		 * yet another path mask, but we rather deal with this unlikely
		 * situation in dasd_start_IO.
		 */
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (!dasd_path_get_opm(device) && opm) {
			dasd_path_set_opm(device, opm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, opm);
		}
		dasd_path_add_nppm(device, npm);
		dasd_path_add_ppm(device, ppm);
		dasd_path_add_tbvpm(device, epm);
		dasd_path_add_cablepm(device, cablepm);
		dasd_path_add_nohpfpm(device, hpfpm);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	}
	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
	dasd_put_device(device);
	if (data->isglobal)
		mutex_unlock(&dasd_path_verification_mutex);
	else
		kfree(data);
}
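/*
 * Path verification may be triggered from atomic context, hence the
 * GFP_ATOMIC allocation below. If it fails, the statically allocated
 * path_verification_worker is used instead, protected by
 * dasd_path_verification_mutex; if that mutex is already taken, the
 * caller gets -ENOMEM and has to retry later.
 */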
static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
{
	struct path_verification_work_data *data;

	data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
	if (!data) {
		if (mutex_trylock(&dasd_path_verification_mutex)) {
			data = path_verification_worker;
			data->isglobal = 1;
		} else
			return -ENOMEM;
	} else {
		memset(data, 0, sizeof(*data));
		data->isglobal = 0;
	}
	INIT_WORK(&data->worker, do_path_verification_work);
	dasd_get_device(device);
	data->device = device;
	data->tbvpm = lpm;
	schedule_work(&data->worker);
	return 0;
}

static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private->fcx_max_data)
		private->fcx_max_data = get_fcx_max_data(device);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}


/*
 * Build CP for Perform Subsystem Function - SSC.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0xc0;
	if (enable_pav) {
		psf_ssc_data->suborder |= 0x08;
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;
	ccw->count = 66;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
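/*
 * In the PSF-SSC payload built above, suborder 0xc0 selects the SSC
 * function; the additional 0x08 suborder bit (together with
 * reserved[0] = 0x88) asks the storage server to enable PAV.
 */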
/*
 * Perform Subsystem Function.
 * It is necessary to trigger CIO for channel revalidation since this
 * call might change behaviour of DASD devices.
 */
static int
dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
		  unsigned long flags)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	/*
	 * set flags e.g. turn on failfast, to prevent blocking
	 * the calling function should handle failed requests
	 */
	cqr->flags |= flags;

	rc = dasd_sleep_on(cqr);
	if (!rc)
		/* trigger CIO to reprobe devices */
		css_schedule_reprobe();
	else if (cqr->intrc == -EAGAIN)
		rc = -EAGAIN;

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Validate storage server of current device.
 */
static int dasd_eckd_validate_server(struct dasd_device *device,
				     unsigned long flags)
{
	struct dasd_eckd_private *private = device->private;
	int enable_pav, rc;

	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;
	if (dasd_nopav || MACHINE_IS_VM)
		enable_pav = 0;
	else
		enable_pav = 1;
	rc = dasd_eckd_psf_ssc(device, enable_pav, flags);

	/* maybe the requested feature is not available on the server,
	 * therefore just report the error and go ahead */
	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
			"returned rc=%d", private->uid.ssid, rc);
	return rc;
}

/*
 * worker to do a validate server in case of a lost pathgroup
 */
static void dasd_eckd_do_validate_server(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  kick_validate);
	unsigned long flags = 0;

	set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
	if (dasd_eckd_validate_server(device, flags)
	    == -EAGAIN) {
		/* schedule worker again if failed */
		schedule_work(&device->kick_validate);
		return;
	}

	dasd_put_device(device);
}

static void dasd_eckd_kick_validate_server(struct dasd_device *device)
{
	dasd_get_device(device);
	/* exit if device not online or in offline processing */
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state < DASD_STATE_ONLINE) {
		dasd_put_device(device);
		return;
	}
	/* queue call to do_validate_server to the kernel event daemon. */
	if (!schedule_work(&device->kick_validate))
		dasd_put_device(device);
}
/*
 * Check device characteristics.
 * If the device is accessible using ECKD discipline, the device is enabled.
 */
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_block *block;
	struct dasd_uid temp_uid;
	int rc, i;
	int readonly;
	unsigned long value;

	/* setup work queue for validate server */
	INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
	/* setup work queue for summary unit check */
	INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);

	if (!ccw_device_is_pathgroup(device->cdev)) {
		dev_warn(&device->cdev->dev,
			 "A channel path group could not be established\n");
		return -EIO;
	}
	if (!ccw_device_is_multipath(device->cdev)) {
		dev_info(&device->cdev->dev,
			 "The DASD is not operating in multipath mode\n");
	}
	if (!private) {
		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
		if (!private) {
			dev_warn(&device->cdev->dev,
				 "Allocating memory for private DASD data "
				 "failed\n");
			return -ENOMEM;
		}
		device->private = private;
	} else {
		memset(private, 0, sizeof(*private));
	}
	/* Invalidate status of initial analysis. */
	private->init_cqr_status = -1;
	/* Set default cache operations. */
	private->attrib.operation = DASD_NORMAL_CACHE;
	private->attrib.nr_cyl = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err1;

	/* set some default values */
	device->default_expires = DASD_EXPIRES;
	device->default_retries = DASD_RETRIES;
	device->path_thrhld = DASD_ECKD_PATH_THRHLD;
	device->path_interval = DASD_ECKD_PATH_INTERVAL;

	if (private->gneq) {
		value = 1;
		for (i = 0; i < private->gneq->timeout.value; i++)
			value = 10 * value;
		value = value * private->gneq->timeout.number;
		/* do not accept useless values */
		if (value != 0 && value <= DASD_EXPIRES_MAX)
			device->default_expires = value;
	}

	dasd_eckd_get_uid(device, &temp_uid);
	if (temp_uid.type == UA_BASE_DEVICE) {
		block = dasd_alloc_block();
		if (IS_ERR(block)) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"could not allocate dasd "
					"block structure");
			rc = PTR_ERR(block);
			goto out_err1;
		}
		device->block = block;
		block->base = device;
	}

	/* register lcu with alias handling, enable PAV */
	rc = dasd_alias_make_device_known_to_lcu(device);
	if (rc)
		goto out_err2;

	dasd_eckd_validate_server(device, 0);

	/* device may report different configuration data after LCU setup */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err3;

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &private->rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err3;
	}

	if ((device->features & DASD_FEATURE_USERAW) &&
	    !(private->rdc_data.facilities.RT_in_LR)) {
		dev_err(&device->cdev->dev, "The storage server does not "
			"support raw-track access\n");
		rc = -EINVAL;
		goto out_err3;
	}

	/* find the valid cylinder size */
	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
	    private->rdc_data.long_no_cyl)
		private->real_cyl = private->rdc_data.long_no_cyl;
	else
		private->real_cyl = private->rdc_data.no_cyl;

	private->fcx_max_data = get_fcx_max_data(device);

	readonly = dasd_device_is_ro(device);
	if (readonly)
		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);

	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
		 "with %d cylinders, %d heads, %d sectors%s\n",
		 private->rdc_data.dev_type,
		 private->rdc_data.dev_model,
		 private->rdc_data.cu_type,
		 private->rdc_data.cu_model.model,
		 private->real_cyl,
		 private->rdc_data.trk_per_cyl,
		 private->rdc_data.sec_per_trk,
		 readonly ? ", read-only device" : "");
	return 0;

out_err3:
	dasd_alias_disconnect_device_from_lcu(device);
out_err2:
	dasd_free_block(device->block);
	device->block = NULL;
out_err1:
	kfree(private->conf_data);
	kfree(device->private);
	device->private = NULL;
	return rc;
}

static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int i;

	if (!private)
		return;

	dasd_alias_disconnect_device_from_lcu(device);
	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	private->conf_len = 0;
	for (i = 0; i < 8; i++) {
		kfree(device->path[i].conf_data);
		if ((__u8 *)device->path[i].conf_data ==
		    private->conf_data) {
			private->conf_data = NULL;
			private->conf_len = 0;
		}
		device->path[i].conf_data = NULL;
		device->path[i].cssid = 0;
		device->path[i].ssid = 0;
		device->path[i].chpid = 0;
	}
	kfree(private->conf_data);
	private->conf_data = NULL;
}
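/*
 * The analysis channel program built below consists of eight CCWs:
 * a Define Extent covering tracks 0-2, a Locate Record for the first
 * four records on track 0, four Read Count CCWs filling
 * private->count_area[0..3], another Locate Record for the first
 * record on track 2 and a final Read Count for count_area[4].
 */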
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct eckd_count *count_data;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int cplength, datasize;
	int i;

	cplength = 8;
	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
				   NULL);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* Define extent for the first 3 tracks. */
	define_extent(ccw++, cqr->data, 0, 2,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	LO_data = cqr->data + sizeof(struct DE_eckd_data);
	/* Locate record for the first 4 records on track 0. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 0, 0, 4,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);

	count_data = private->count_area;
	for (i = 0; i < 4; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = 0;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) count_data;
		ccw++;
		count_data++;
	}

	/* Locate record for the first record on track 2. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 2, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	/* Read count ccw. */
	ccw[-1].flags |= CCW_FLAG_CC;
	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
	ccw->flags = 0;
	ccw->count = 8;
	ccw->cda = (__u32)(addr_t) count_data;

	cqr->block = NULL;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 255;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

/* differentiate between 'no record found' and any other error */
static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
{
	char *sense;
	if (init_cqr->status == DASD_CQR_DONE)
		return INIT_CQR_OK;
	else if (init_cqr->status == DASD_CQR_NEED_ERP ||
		 init_cqr->status == DASD_CQR_FAILED) {
		sense = dasd_get_sense(&init_cqr->irb);
		if (sense && (sense[1] & SNS1_NO_REC_FOUND))
			return INIT_CQR_UNFORMATTED;
		else
			return INIT_CQR_ERROR;
	} else
		return INIT_CQR_ERROR;
}

/*
 * This is the callback function for the init_analysis cqr. It saves
 * the status of the initial analysis ccw before it frees it and kicks
 * the device to continue the startup sequence. This will call
 * dasd_eckd_do_analysis again (if the device has not been marked
 * for deletion in the meantime).
 */
static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
					void *data)
{
	struct dasd_device *device = init_cqr->startdev;
	struct dasd_eckd_private *private = device->private;

	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
	dasd_sfree_request(init_cqr, device);
	dasd_kick_device(device);
}

static int dasd_eckd_start_analysis(struct dasd_block *block)
{
	struct dasd_ccw_req *init_cqr;

	init_cqr = dasd_eckd_analysis_ccw(block->base);
	if (IS_ERR(init_cqr))
		return PTR_ERR(init_cqr);
	init_cqr->callback = dasd_eckd_analysis_callback;
	init_cqr->callback_data = NULL;
	init_cqr->expires = 5*HZ;
	/* first try without ERP, so we can later handle unformatted
	 * devices as special case
	 */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
	init_cqr->retries = 0;
	dasd_add_request_head(init_cqr);
	return -EAGAIN;
}
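/*
 * dasd_eckd_end_analysis() below decides the disk layout from the count
 * areas read above: if the first three records on track 0 carry a 4 byte
 * key and the well-known CDL record sizes, the volume uses the
 * compatible disk layout; if the first five count areas are uniform and
 * keyless, it is the plain linux disk layout. The block size is then
 * taken from the data length of the reference count area.
 */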
	for (i = 0; i < 3; i++) {
		if (private->count_area[i].kl != 4 ||
		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
		    private->count_area[i].cyl != 0 ||
		    private->count_area[i].head != count_area_head[i] ||
		    private->count_area[i].record != count_area_rec[i]) {
			private->uses_cdl = 0;
			break;
		}
	}
	if (i == 3)
		count_area = &private->count_area[4];

	if (private->uses_cdl == 0) {
		for (i = 0; i < 5; i++) {
			if ((private->count_area[i].kl != 0) ||
			    (private->count_area[i].dl !=
			     private->count_area[0].dl) ||
			    private->count_area[i].cyl != 0 ||
			    private->count_area[i].head != count_area_head[i] ||
			    private->count_area[i].record != count_area_rec[i])
				break;
		}
		if (i == 5)
			count_area = &private->count_area[0];
	} else {
		if (private->count_area[3].record == 1)
			dev_warn(&device->cdev->dev,
				 "Track 0 has no records following the VTOC\n");
	}

	if (count_area != NULL && count_area->kl == 0) {
		/* we found nothing violating our disk layout */
		if (dasd_check_blocksize(count_area->dl) == 0)
			block->bp_block = count_area->dl;
	}
	if (block->bp_block == 0) {
		dev_warn(&device->cdev->dev,
			 "The disk layout of the DASD is not supported\n");
		return -EMEDIUMTYPE;
	}
	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < block->bp_block; sb = sb << 1)
		block->s2b_shift++;

	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);

raw:
	block->blocks = ((unsigned long) private->real_cyl *
			 private->rdc_data.trk_per_cyl *
			 blk_per_trk);

	dev_info(&device->cdev->dev,
		 "DASD with %u KB/block, %lu KB total size, %u KB/track, "
		 "%s\n", (block->bp_block >> 10),
		 (((unsigned long) private->real_cyl *
		   private->rdc_data.trk_per_cyl *
		   blk_per_trk * (block->bp_block >> 9)) >> 1),
		 ((blk_per_trk * block->bp_block) >> 10),
		 private->uses_cdl ?
2019 "compatible disk layout" : "linux disk layout"); 2020 2021 return 0; 2022 } 2023 2024 static int dasd_eckd_do_analysis(struct dasd_block *block) 2025 { 2026 struct dasd_eckd_private *private = block->base->private; 2027 2028 if (private->init_cqr_status < 0) 2029 return dasd_eckd_start_analysis(block); 2030 else 2031 return dasd_eckd_end_analysis(block); 2032 } 2033 2034 static int dasd_eckd_basic_to_ready(struct dasd_device *device) 2035 { 2036 return dasd_alias_add_device(device); 2037 }; 2038 2039 static int dasd_eckd_online_to_ready(struct dasd_device *device) 2040 { 2041 if (cancel_work_sync(&device->reload_device)) 2042 dasd_put_device(device); 2043 if (cancel_work_sync(&device->kick_validate)) 2044 dasd_put_device(device); 2045 2046 return 0; 2047 }; 2048 2049 static int dasd_eckd_basic_to_known(struct dasd_device *device) 2050 { 2051 return dasd_alias_remove_device(device); 2052 }; 2053 2054 static int 2055 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo) 2056 { 2057 struct dasd_eckd_private *private = block->base->private; 2058 2059 if (dasd_check_blocksize(block->bp_block) == 0) { 2060 geo->sectors = recs_per_track(&private->rdc_data, 2061 0, block->bp_block); 2062 } 2063 geo->cylinders = private->rdc_data.no_cyl; 2064 geo->heads = private->rdc_data.trk_per_cyl; 2065 return 0; 2066 } 2067 2068 /* 2069 * Build the TCW request for the format check 2070 */ 2071 static struct dasd_ccw_req * 2072 dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata, 2073 int enable_pav, struct eckd_count *fmt_buffer, 2074 int rpt) 2075 { 2076 struct dasd_eckd_private *start_priv; 2077 struct dasd_device *startdev = NULL; 2078 struct tidaw *last_tidaw = NULL; 2079 struct dasd_ccw_req *cqr; 2080 struct itcw *itcw; 2081 int itcw_size; 2082 int count; 2083 int rc; 2084 int i; 2085 2086 if (enable_pav) 2087 startdev = dasd_alias_get_start_dev(base); 2088 2089 if (!startdev) 2090 startdev = base; 2091 2092 start_priv = startdev->private; 2093 2094 count = rpt * (fdata->stop_unit - fdata->start_unit + 1); 2095 2096 /* 2097 * we're adding 'count' amount of tidaw to the itcw. 
2098 * calculate the corresponding itcw_size 2099 */ 2100 itcw_size = itcw_calc_size(0, count, 0); 2101 2102 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev, 2103 NULL); 2104 if (IS_ERR(cqr)) 2105 return cqr; 2106 2107 start_priv->count++; 2108 2109 itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0); 2110 if (IS_ERR(itcw)) { 2111 rc = -EINVAL; 2112 goto out_err; 2113 } 2114 2115 cqr->cpaddr = itcw_get_tcw(itcw); 2116 rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit, 2117 DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count, 2118 sizeof(struct eckd_count), 2119 count * sizeof(struct eckd_count), 0, rpt); 2120 if (rc) 2121 goto out_err; 2122 2123 for (i = 0; i < count; i++) { 2124 last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++, 2125 sizeof(struct eckd_count)); 2126 if (IS_ERR(last_tidaw)) { 2127 rc = -EINVAL; 2128 goto out_err; 2129 } 2130 } 2131 2132 last_tidaw->flags |= TIDAW_FLAGS_LAST; 2133 itcw_finalize(itcw); 2134 2135 cqr->cpmode = 1; 2136 cqr->startdev = startdev; 2137 cqr->memdev = startdev; 2138 cqr->basedev = base; 2139 cqr->retries = startdev->default_retries; 2140 cqr->expires = startdev->default_expires * HZ; 2141 cqr->buildclk = get_tod_clock(); 2142 cqr->status = DASD_CQR_FILLED; 2143 /* Set flags to suppress output for expected errors */ 2144 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); 2145 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); 2146 2147 return cqr; 2148 2149 out_err: 2150 dasd_sfree_request(cqr, startdev); 2151 2152 return ERR_PTR(rc); 2153 } 2154 2155 /* 2156 * Build the CCW request for the format check 2157 */ 2158 static struct dasd_ccw_req * 2159 dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata, 2160 int enable_pav, struct eckd_count *fmt_buffer, int rpt) 2161 { 2162 struct dasd_eckd_private *start_priv; 2163 struct dasd_eckd_private *base_priv; 2164 struct dasd_device *startdev = NULL; 2165 struct dasd_ccw_req *cqr; 2166 struct ccw1 *ccw; 2167 void *data; 2168 int cplength, datasize; 2169 int use_prefix; 2170 int count; 2171 int i; 2172 2173 if (enable_pav) 2174 startdev = dasd_alias_get_start_dev(base); 2175 2176 if (!startdev) 2177 startdev = base; 2178 2179 start_priv = startdev->private; 2180 base_priv = base->private; 2181 2182 count = rpt * (fdata->stop_unit - fdata->start_unit + 1); 2183 2184 use_prefix = base_priv->features.feature[8] & 0x01; 2185 2186 if (use_prefix) { 2187 cplength = 1; 2188 datasize = sizeof(struct PFX_eckd_data); 2189 } else { 2190 cplength = 2; 2191 datasize = sizeof(struct DE_eckd_data) + 2192 sizeof(struct LO_eckd_data); 2193 } 2194 cplength += count; 2195 2196 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, 2197 startdev, NULL); 2198 if (IS_ERR(cqr)) 2199 return cqr; 2200 2201 start_priv->count++; 2202 data = cqr->data; 2203 ccw = cqr->cpaddr; 2204 2205 if (use_prefix) { 2206 prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit, 2207 DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0, 2208 count, 0, 0); 2209 } else { 2210 define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit, 2211 DASD_ECKD_CCW_READ_COUNT, startdev, 0); 2212 2213 data += sizeof(struct DE_eckd_data); 2214 ccw[-1].flags |= CCW_FLAG_CC; 2215 2216 locate_record(ccw++, data, fdata->start_unit, 0, count, 2217 DASD_ECKD_CCW_READ_COUNT, base, 0); 2218 } 2219 2220 for (i = 0; i < count; i++) { 2221 ccw[-1].flags |= CCW_FLAG_CC; 2222 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; 2223 ccw->flags = CCW_FLAG_SLI; 2224 ccw->count = 8; 2225 ccw->cda = (__u32)(addr_t) fmt_buffer; 
2226 ccw++; 2227 fmt_buffer++; 2228 } 2229 2230 cqr->startdev = startdev; 2231 cqr->memdev = startdev; 2232 cqr->basedev = base; 2233 cqr->retries = DASD_RETRIES; 2234 cqr->expires = startdev->default_expires * HZ; 2235 cqr->buildclk = get_tod_clock(); 2236 cqr->status = DASD_CQR_FILLED; 2237 /* Set flags to suppress output for expected errors */ 2238 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 2239 2240 return cqr; 2241 } 2242 2243 static struct dasd_ccw_req * 2244 dasd_eckd_build_format(struct dasd_device *base, 2245 struct format_data_t *fdata, 2246 int enable_pav) 2247 { 2248 struct dasd_eckd_private *base_priv; 2249 struct dasd_eckd_private *start_priv; 2250 struct dasd_device *startdev = NULL; 2251 struct dasd_ccw_req *fcp; 2252 struct eckd_count *ect; 2253 struct ch_t address; 2254 struct ccw1 *ccw; 2255 void *data; 2256 int rpt; 2257 int cplength, datasize; 2258 int i, j; 2259 int intensity = 0; 2260 int r0_perm; 2261 int nr_tracks; 2262 int use_prefix; 2263 2264 if (enable_pav) 2265 startdev = dasd_alias_get_start_dev(base); 2266 2267 if (!startdev) 2268 startdev = base; 2269 2270 start_priv = startdev->private; 2271 base_priv = base->private; 2272 2273 rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize); 2274 2275 nr_tracks = fdata->stop_unit - fdata->start_unit + 1; 2276 2277 /* 2278 * fdata->intensity is a bit string that tells us what to do: 2279 * Bit 0: write record zero 2280 * Bit 1: write home address, currently not supported 2281 * Bit 2: invalidate tracks 2282 * Bit 3: use OS/390 compatible disk layout (cdl) 2283 * Bit 4: do not allow storage subsystem to modify record zero 2284 * Only some bit combinations do make sense. 2285 */ 2286 if (fdata->intensity & 0x10) { 2287 r0_perm = 0; 2288 intensity = fdata->intensity & ~0x10; 2289 } else { 2290 r0_perm = 1; 2291 intensity = fdata->intensity; 2292 } 2293 2294 use_prefix = base_priv->features.feature[8] & 0x01; 2295 2296 switch (intensity) { 2297 case 0x00: /* Normal format */ 2298 case 0x08: /* Normal format, use cdl. */ 2299 cplength = 2 + (rpt*nr_tracks); 2300 if (use_prefix) 2301 datasize = sizeof(struct PFX_eckd_data) + 2302 sizeof(struct LO_eckd_data) + 2303 rpt * nr_tracks * sizeof(struct eckd_count); 2304 else 2305 datasize = sizeof(struct DE_eckd_data) + 2306 sizeof(struct LO_eckd_data) + 2307 rpt * nr_tracks * sizeof(struct eckd_count); 2308 break; 2309 case 0x01: /* Write record zero and format track. */ 2310 case 0x09: /* Write record zero and format track, use cdl. */ 2311 cplength = 2 + rpt * nr_tracks; 2312 if (use_prefix) 2313 datasize = sizeof(struct PFX_eckd_data) + 2314 sizeof(struct LO_eckd_data) + 2315 sizeof(struct eckd_count) + 2316 rpt * nr_tracks * sizeof(struct eckd_count); 2317 else 2318 datasize = sizeof(struct DE_eckd_data) + 2319 sizeof(struct LO_eckd_data) + 2320 sizeof(struct eckd_count) + 2321 rpt * nr_tracks * sizeof(struct eckd_count); 2322 break; 2323 case 0x04: /* Invalidate track. */ 2324 case 0x0c: /* Invalidate track, use cdl. */ 2325 cplength = 3; 2326 if (use_prefix) 2327 datasize = sizeof(struct PFX_eckd_data) + 2328 sizeof(struct LO_eckd_data) + 2329 sizeof(struct eckd_count); 2330 else 2331 datasize = sizeof(struct DE_eckd_data) + 2332 sizeof(struct LO_eckd_data) + 2333 sizeof(struct eckd_count); 2334 break; 2335 default: 2336 dev_warn(&startdev->cdev->dev, 2337 "An I/O control call used incorrect flags 0x%x\n", 2338 fdata->intensity); 2339 return ERR_PTR(-EINVAL); 2340 } 2341 /* Allocate the format ccw request. 
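cplength and datasize depend on the intensity selected above.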
*/ 2342 fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, 2343 datasize, startdev, NULL); 2344 if (IS_ERR(fcp)) 2345 return fcp; 2346 2347 start_priv->count++; 2348 data = fcp->data; 2349 ccw = fcp->cpaddr; 2350 2351 switch (intensity & ~0x08) { 2352 case 0x00: /* Normal format. */ 2353 if (use_prefix) { 2354 prefix(ccw++, (struct PFX_eckd_data *) data, 2355 fdata->start_unit, fdata->stop_unit, 2356 DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2357 /* grant subsystem permission to format R0 */ 2358 if (r0_perm) 2359 ((struct PFX_eckd_data *)data) 2360 ->define_extent.ga_extended |= 0x04; 2361 data += sizeof(struct PFX_eckd_data); 2362 } else { 2363 define_extent(ccw++, (struct DE_eckd_data *) data, 2364 fdata->start_unit, fdata->stop_unit, 2365 DASD_ECKD_CCW_WRITE_CKD, startdev, 0); 2366 /* grant subsystem permission to format R0 */ 2367 if (r0_perm) 2368 ((struct DE_eckd_data *) data) 2369 ->ga_extended |= 0x04; 2370 data += sizeof(struct DE_eckd_data); 2371 } 2372 ccw[-1].flags |= CCW_FLAG_CC; 2373 locate_record(ccw++, (struct LO_eckd_data *) data, 2374 fdata->start_unit, 0, rpt*nr_tracks, 2375 DASD_ECKD_CCW_WRITE_CKD, base, 2376 fdata->blksize); 2377 data += sizeof(struct LO_eckd_data); 2378 break; 2379 case 0x01: /* Write record zero + format track. */ 2380 if (use_prefix) { 2381 prefix(ccw++, (struct PFX_eckd_data *) data, 2382 fdata->start_unit, fdata->stop_unit, 2383 DASD_ECKD_CCW_WRITE_RECORD_ZERO, 2384 base, startdev); 2385 data += sizeof(struct PFX_eckd_data); 2386 } else { 2387 define_extent(ccw++, (struct DE_eckd_data *) data, 2388 fdata->start_unit, fdata->stop_unit, 2389 DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0); 2390 data += sizeof(struct DE_eckd_data); 2391 } 2392 ccw[-1].flags |= CCW_FLAG_CC; 2393 locate_record(ccw++, (struct LO_eckd_data *) data, 2394 fdata->start_unit, 0, rpt * nr_tracks + 1, 2395 DASD_ECKD_CCW_WRITE_RECORD_ZERO, base, 2396 base->block->bp_block); 2397 data += sizeof(struct LO_eckd_data); 2398 break; 2399 case 0x04: /* Invalidate track. 
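The track is invalidated by writing a single record with data length zero.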
*/ 2400 if (use_prefix) { 2401 prefix(ccw++, (struct PFX_eckd_data *) data, 2402 fdata->start_unit, fdata->stop_unit, 2403 DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2404 data += sizeof(struct PFX_eckd_data); 2405 } else { 2406 define_extent(ccw++, (struct DE_eckd_data *) data, 2407 fdata->start_unit, fdata->stop_unit, 2408 DASD_ECKD_CCW_WRITE_CKD, startdev, 0); 2409 data += sizeof(struct DE_eckd_data); 2410 } 2411 ccw[-1].flags |= CCW_FLAG_CC; 2412 locate_record(ccw++, (struct LO_eckd_data *) data, 2413 fdata->start_unit, 0, 1, 2414 DASD_ECKD_CCW_WRITE_CKD, base, 8); 2415 data += sizeof(struct LO_eckd_data); 2416 break; 2417 } 2418 2419 for (j = 0; j < nr_tracks; j++) { 2420 /* calculate cylinder and head for the current track */ 2421 set_ch_t(&address, 2422 (fdata->start_unit + j) / 2423 base_priv->rdc_data.trk_per_cyl, 2424 (fdata->start_unit + j) % 2425 base_priv->rdc_data.trk_per_cyl); 2426 if (intensity & 0x01) { /* write record zero */ 2427 ect = (struct eckd_count *) data; 2428 data += sizeof(struct eckd_count); 2429 ect->cyl = address.cyl; 2430 ect->head = address.head; 2431 ect->record = 0; 2432 ect->kl = 0; 2433 ect->dl = 8; 2434 ccw[-1].flags |= CCW_FLAG_CC; 2435 ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO; 2436 ccw->flags = CCW_FLAG_SLI; 2437 ccw->count = 8; 2438 ccw->cda = (__u32)(addr_t) ect; 2439 ccw++; 2440 } 2441 if ((intensity & ~0x08) & 0x04) { /* erase track */ 2442 ect = (struct eckd_count *) data; 2443 data += sizeof(struct eckd_count); 2444 ect->cyl = address.cyl; 2445 ect->head = address.head; 2446 ect->record = 1; 2447 ect->kl = 0; 2448 ect->dl = 0; 2449 ccw[-1].flags |= CCW_FLAG_CC; 2450 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD; 2451 ccw->flags = CCW_FLAG_SLI; 2452 ccw->count = 8; 2453 ccw->cda = (__u32)(addr_t) ect; 2454 } else { /* write remaining records */ 2455 for (i = 0; i < rpt; i++) { 2456 ect = (struct eckd_count *) data; 2457 data += sizeof(struct eckd_count); 2458 ect->cyl = address.cyl; 2459 ect->head = address.head; 2460 ect->record = i + 1; 2461 ect->kl = 0; 2462 ect->dl = fdata->blksize; 2463 /* 2464 * Check for special tracks 0-1 2465 * when formatting CDL 2466 */ 2467 if ((intensity & 0x08) && 2468 address.cyl == 0 && address.head == 0) { 2469 if (i < 3) { 2470 ect->kl = 4; 2471 ect->dl = sizes_trk0[i] - 4; 2472 } 2473 } 2474 if ((intensity & 0x08) && 2475 address.cyl == 0 && address.head == 1) { 2476 ect->kl = 44; 2477 ect->dl = LABEL_SIZE - 44; 2478 } 2479 ccw[-1].flags |= CCW_FLAG_CC; 2480 if (i != 0 || j == 0) 2481 ccw->cmd_code = 2482 DASD_ECKD_CCW_WRITE_CKD; 2483 else 2484 ccw->cmd_code = 2485 DASD_ECKD_CCW_WRITE_CKD_MT; 2486 ccw->flags = CCW_FLAG_SLI; 2487 ccw->count = 8; 2488 ccw->cda = (__u32)(addr_t) ect; 2489 ccw++; 2490 } 2491 } 2492 } 2493 2494 fcp->startdev = startdev; 2495 fcp->memdev = startdev; 2496 fcp->basedev = base; 2497 fcp->retries = 256; 2498 fcp->expires = startdev->default_expires * HZ; 2499 fcp->buildclk = get_tod_clock(); 2500 fcp->status = DASD_CQR_FILLED; 2501 2502 return fcp; 2503 } 2504 2505 /* 2506 * Wrapper function to build a CCW request depending on input data 2507 */ 2508 static struct dasd_ccw_req * 2509 dasd_eckd_format_build_ccw_req(struct dasd_device *base, 2510 struct format_data_t *fdata, int enable_pav, 2511 int tpm, struct eckd_count *fmt_buffer, int rpt) 2512 { 2513 struct dasd_ccw_req *ccw_req; 2514 2515 if (!fmt_buffer) { 2516 ccw_req = dasd_eckd_build_format(base, fdata, enable_pav); 2517 } else { 2518 if (tpm) 2519 ccw_req = dasd_eckd_build_check_tcw(base, fdata, 2520 enable_pav, 2521 
							    fmt_buffer, rpt);
		else
			ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
							fmt_buffer, rpt);
	}

	return ccw_req;
}

/*
 * Sanity checks on format_data
 */
static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
					  struct format_data_t *fdata)
{
	struct dasd_eckd_private *private = base->private;

	if (fdata->start_unit >=
	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
		dev_warn(&base->cdev->dev,
			 "Start track number %u used in formatting is too big\n",
			 fdata->start_unit);
		return -EINVAL;
	}
	if (fdata->stop_unit >=
	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
		dev_warn(&base->cdev->dev,
			 "Stop track number %u used in formatting is too big\n",
			 fdata->stop_unit);
		return -EINVAL;
	}
	if (fdata->start_unit > fdata->stop_unit) {
		dev_warn(&base->cdev->dev,
			 "Start track %u used in formatting exceeds end track\n",
			 fdata->start_unit);
		return -EINVAL;
	}
	if (dasd_check_blocksize(fdata->blksize) != 0) {
		dev_warn(&base->cdev->dev,
			 "The DASD cannot be formatted with block size %u\n",
			 fdata->blksize);
		return -EINVAL;
	}
	return 0;
}

/*
 * This function will process format_data originally coming from an IOCTL
 */
static int dasd_eckd_format_process_data(struct dasd_device *base,
					 struct format_data_t *fdata,
					 int enable_pav, int tpm,
					 struct eckd_count *fmt_buffer, int rpt,
					 struct irb *irb)
{
	struct dasd_eckd_private *private = base->private;
	struct dasd_ccw_req *cqr, *n;
	struct list_head format_queue;
	struct dasd_device *device;
	char *sense = NULL;
	int old_start, old_stop, format_step;
	int step, retry;
	int rc;

	rc = dasd_eckd_format_sanity_checks(base, fdata);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&format_queue);

	old_start = fdata->start_unit;
	old_stop = fdata->stop_unit;

	if (!tpm && fmt_buffer != NULL) {
		/* Command Mode / Format Check */
		format_step = 1;
	} else if (tpm && fmt_buffer != NULL) {
		/* Transport Mode / Format Check */
		format_step = DASD_CQR_MAX_CCW / rpt;
	} else {
		/* Normal Formatting */
		format_step = DASD_CQR_MAX_CCW /
			recs_per_track(&private->rdc_data, 0, fdata->blksize);
	}

	do {
		retry = 0;
		while (fdata->start_unit <= old_stop) {
			step = fdata->stop_unit - fdata->start_unit + 1;
			if (step > format_step) {
				fdata->stop_unit =
					fdata->start_unit + format_step - 1;
			}

			cqr = dasd_eckd_format_build_ccw_req(base, fdata,
							     enable_pav, tpm,
							     fmt_buffer, rpt);
			if (IS_ERR(cqr)) {
				rc = PTR_ERR(cqr);
				if (rc == -ENOMEM) {
					if (list_empty(&format_queue))
						goto out;
					/*
					 * not enough memory available; start
					 * the queued requests and retry after
					 * they have finished
					 */
					retry = 1;
					break;
				}
				goto out_err;
			}
			list_add_tail(&cqr->blocklist, &format_queue);

			if (fmt_buffer) {
				step = fdata->stop_unit - fdata->start_unit + 1;
				fmt_buffer += rpt * step;
			}
			fdata->start_unit = fdata->stop_unit + 1;
			fdata->stop_unit = old_stop;
		}

		rc = dasd_sleep_on_queue(&format_queue);

out_err:
		list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
			device = cqr->startdev;
			private = device->private;

			if
(cqr->status == DASD_CQR_FAILED) { 2651 /* 2652 * Only get sense data if called by format 2653 * check 2654 */ 2655 if (fmt_buffer && irb) { 2656 sense = dasd_get_sense(&cqr->irb); 2657 memcpy(irb, &cqr->irb, sizeof(*irb)); 2658 } 2659 rc = -EIO; 2660 } 2661 list_del_init(&cqr->blocklist); 2662 dasd_sfree_request(cqr, device); 2663 private->count--; 2664 } 2665 2666 if (rc && rc != -EIO) 2667 goto out; 2668 if (rc == -EIO) { 2669 /* 2670 * In case fewer than the expected records are on the 2671 * track, we will most likely get a 'No Record Found' 2672 * error (in command mode) or a 'File Protected' error 2673 * (in transport mode). Those particular cases shouldn't 2674 * pass the -EIO to the IOCTL, therefore reset the rc 2675 * and continue. 2676 */ 2677 if (sense && 2678 (sense[1] & SNS1_NO_REC_FOUND || 2679 sense[1] & SNS1_FILE_PROTECTED)) 2680 retry = 1; 2681 else 2682 goto out; 2683 } 2684 2685 } while (retry); 2686 2687 out: 2688 fdata->start_unit = old_start; 2689 fdata->stop_unit = old_stop; 2690 2691 return rc; 2692 } 2693 2694 static int dasd_eckd_format_device(struct dasd_device *base, 2695 struct format_data_t *fdata, int enable_pav) 2696 { 2697 return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL, 2698 0, NULL); 2699 } 2700 2701 /* 2702 * Helper function to count consecutive records of a single track. 2703 */ 2704 static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start, 2705 int max) 2706 { 2707 int head; 2708 int i; 2709 2710 head = fmt_buffer[start].head; 2711 2712 /* 2713 * There are 3 conditions where we stop counting: 2714 * - if data reoccurs (same head and record may reoccur), which may 2715 * happen due to the way DASD_ECKD_CCW_READ_COUNT works 2716 * - when the head changes, because we're iterating over several tracks 2717 * then (DASD_ECKD_CCW_READ_COUNT_MT) 2718 * - when we've reached the end of sensible data in the buffer (the 2719 * record will be 0 then) 2720 */ 2721 for (i = start; i < max; i++) { 2722 if (i > start) { 2723 if ((fmt_buffer[i].head == head && 2724 fmt_buffer[i].record == 1) || 2725 fmt_buffer[i].head != head || 2726 fmt_buffer[i].record == 0) 2727 break; 2728 } 2729 } 2730 2731 return i - start; 2732 } 2733 2734 /* 2735 * Evaluate a given range of tracks. Data like number of records, blocksize, 2736 * record ids, and key length are compared with expected data. 2737 * 2738 * If a mismatch occurs, the corresponding error bit is set, as well as 2739 * additional information, depending on the error. 
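 * The track, record, blocksize, and key length at the reported position
 * are returned in cdata as well.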
2740 */ 2741 static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer, 2742 struct format_check_t *cdata, 2743 int rpt_max, int rpt_exp, 2744 int trk_per_cyl, int tpm) 2745 { 2746 struct ch_t geo; 2747 int max_entries; 2748 int count = 0; 2749 int trkcount; 2750 int blksize; 2751 int pos = 0; 2752 int i, j; 2753 int kl; 2754 2755 trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1; 2756 max_entries = trkcount * rpt_max; 2757 2758 for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) { 2759 /* Calculate the correct next starting position in the buffer */ 2760 if (tpm) { 2761 while (fmt_buffer[pos].record == 0 && 2762 fmt_buffer[pos].dl == 0) { 2763 if (pos++ > max_entries) 2764 break; 2765 } 2766 } else { 2767 if (i != cdata->expect.start_unit) 2768 pos += rpt_max - count; 2769 } 2770 2771 /* Calculate the expected geo values for the current track */ 2772 set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl); 2773 2774 /* Count and check number of records */ 2775 count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max); 2776 2777 if (count < rpt_exp) { 2778 cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS; 2779 break; 2780 } 2781 if (count > rpt_exp) { 2782 cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS; 2783 break; 2784 } 2785 2786 for (j = 0; j < count; j++, pos++) { 2787 blksize = cdata->expect.blksize; 2788 kl = 0; 2789 2790 /* 2791 * Set special values when checking CDL formatted 2792 * devices. 2793 */ 2794 if ((cdata->expect.intensity & 0x08) && 2795 geo.cyl == 0 && geo.head == 0) { 2796 if (j < 3) { 2797 blksize = sizes_trk0[j] - 4; 2798 kl = 4; 2799 } 2800 } 2801 if ((cdata->expect.intensity & 0x08) && 2802 geo.cyl == 0 && geo.head == 1) { 2803 blksize = LABEL_SIZE - 44; 2804 kl = 44; 2805 } 2806 2807 /* Check blocksize */ 2808 if (fmt_buffer[pos].dl != blksize) { 2809 cdata->result = DASD_FMT_ERR_BLKSIZE; 2810 goto out; 2811 } 2812 /* Check if key length is 0 */ 2813 if (fmt_buffer[pos].kl != kl) { 2814 cdata->result = DASD_FMT_ERR_KEY_LENGTH; 2815 goto out; 2816 } 2817 /* Check if record_id is correct */ 2818 if (fmt_buffer[pos].cyl != geo.cyl || 2819 fmt_buffer[pos].head != geo.head || 2820 fmt_buffer[pos].record != (j + 1)) { 2821 cdata->result = DASD_FMT_ERR_RECORD_ID; 2822 goto out; 2823 } 2824 } 2825 } 2826 2827 out: 2828 /* 2829 * In case of no errors, we need to decrease by one 2830 * to get the correct positions. 2831 */ 2832 if (!cdata->result) { 2833 i--; 2834 pos--; 2835 } 2836 2837 cdata->unit = i; 2838 cdata->num_records = count; 2839 cdata->rec = fmt_buffer[pos].record; 2840 cdata->blksize = fmt_buffer[pos].dl; 2841 cdata->key_length = fmt_buffer[pos].kl; 2842 } 2843 2844 /* 2845 * Check the format of a range of tracks of a DASD. 
2846 */ 2847 static int dasd_eckd_check_device_format(struct dasd_device *base, 2848 struct format_check_t *cdata, 2849 int enable_pav) 2850 { 2851 struct dasd_eckd_private *private = base->private; 2852 struct eckd_count *fmt_buffer; 2853 struct irb irb; 2854 int rpt_max, rpt_exp; 2855 int fmt_buffer_size; 2856 int trk_per_cyl; 2857 int trkcount; 2858 int tpm = 0; 2859 int rc; 2860 2861 trk_per_cyl = private->rdc_data.trk_per_cyl; 2862 2863 /* Get maximum and expected amount of records per track */ 2864 rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1; 2865 rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize); 2866 2867 trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1; 2868 fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count); 2869 2870 fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA); 2871 if (!fmt_buffer) 2872 return -ENOMEM; 2873 2874 /* 2875 * A certain FICON feature subset is needed to operate in transport 2876 * mode. Additionally, the support for transport mode is implicitly 2877 * checked by comparing the buffer size with fcx_max_data. As long as 2878 * the buffer size is smaller we can operate in transport mode and 2879 * process multiple tracks. If not, only one track at once is being 2880 * processed using command mode. 2881 */ 2882 if ((private->features.feature[40] & 0x04) && 2883 fmt_buffer_size <= private->fcx_max_data) 2884 tpm = 1; 2885 2886 rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav, 2887 tpm, fmt_buffer, rpt_max, &irb); 2888 if (rc && rc != -EIO) 2889 goto out; 2890 if (rc == -EIO) { 2891 /* 2892 * If our first attempt with transport mode enabled comes back 2893 * with an incorrect length error, we're going to retry the 2894 * check with command mode. 
2895 */ 2896 if (tpm && scsw_cstat(&irb.scsw) == 0x40) { 2897 tpm = 0; 2898 rc = dasd_eckd_format_process_data(base, &cdata->expect, 2899 enable_pav, tpm, 2900 fmt_buffer, rpt_max, 2901 &irb); 2902 if (rc) 2903 goto out; 2904 } else { 2905 goto out; 2906 } 2907 } 2908 2909 dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp, 2910 trk_per_cyl, tpm); 2911 2912 out: 2913 kfree(fmt_buffer); 2914 2915 return rc; 2916 } 2917 2918 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr) 2919 { 2920 if (cqr->retries < 0) { 2921 cqr->status = DASD_CQR_FAILED; 2922 return; 2923 } 2924 cqr->status = DASD_CQR_FILLED; 2925 if (cqr->block && (cqr->startdev != cqr->block->base)) { 2926 dasd_eckd_reset_ccw_to_base_io(cqr); 2927 cqr->startdev = cqr->block->base; 2928 cqr->lpm = dasd_path_get_opm(cqr->block->base); 2929 } 2930 }; 2931 2932 static dasd_erp_fn_t 2933 dasd_eckd_erp_action(struct dasd_ccw_req * cqr) 2934 { 2935 struct dasd_device *device = (struct dasd_device *) cqr->startdev; 2936 struct ccw_device *cdev = device->cdev; 2937 2938 switch (cdev->id.cu_type) { 2939 case 0x3990: 2940 case 0x2105: 2941 case 0x2107: 2942 case 0x1750: 2943 return dasd_3990_erp_action; 2944 case 0x9343: 2945 case 0x3880: 2946 default: 2947 return dasd_default_erp_action; 2948 } 2949 } 2950 2951 static dasd_erp_fn_t 2952 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr) 2953 { 2954 return dasd_default_erp_postaction; 2955 } 2956 2957 static void dasd_eckd_check_for_device_change(struct dasd_device *device, 2958 struct dasd_ccw_req *cqr, 2959 struct irb *irb) 2960 { 2961 char mask; 2962 char *sense = NULL; 2963 struct dasd_eckd_private *private = device->private; 2964 2965 /* first of all check for state change pending interrupt */ 2966 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 2967 if ((scsw_dstat(&irb->scsw) & mask) == mask) { 2968 /* 2969 * for alias only, not in offline processing 2970 * and only if not suspended 2971 */ 2972 if (!device->block && private->lcu && 2973 device->state == DASD_STATE_ONLINE && 2974 !test_bit(DASD_FLAG_OFFLINE, &device->flags) && 2975 !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) { 2976 /* schedule worker to reload device */ 2977 dasd_reload_device(device); 2978 } 2979 dasd_generic_handle_state_change(device); 2980 return; 2981 } 2982 2983 sense = dasd_get_sense(irb); 2984 if (!sense) 2985 return; 2986 2987 /* summary unit check */ 2988 if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) && 2989 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) { 2990 if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) { 2991 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2992 "eckd suc: device already notified"); 2993 return; 2994 } 2995 sense = dasd_get_sense(irb); 2996 if (!sense) { 2997 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2998 "eckd suc: no reason code available"); 2999 clear_bit(DASD_FLAG_SUC, &device->flags); 3000 return; 3001 3002 } 3003 private->suc_reason = sense[8]; 3004 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x", 3005 "eckd handle summary unit check: reason", 3006 private->suc_reason); 3007 dasd_get_device(device); 3008 if (!schedule_work(&device->suc_work)) 3009 dasd_put_device(device); 3010 3011 return; 3012 } 3013 3014 /* service information message SIM */ 3015 if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) && 3016 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { 3017 dasd_3990_erp_handle_sim(device, sense); 3018 return; 3019 } 3020 3021 /* loss of device reservation is handled via base devices only 3022 * as alias devices may 
be used with several bases 3023 */ 3024 if (device->block && (sense[27] & DASD_SENSE_BIT_0) && 3025 (sense[7] == 0x3F) && 3026 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) && 3027 test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) { 3028 if (device->features & DASD_FEATURE_FAILONSLCK) 3029 set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags); 3030 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags); 3031 dev_err(&device->cdev->dev, 3032 "The device reservation was lost\n"); 3033 } 3034 } 3035 3036 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( 3037 struct dasd_device *startdev, 3038 struct dasd_block *block, 3039 struct request *req, 3040 sector_t first_rec, 3041 sector_t last_rec, 3042 sector_t first_trk, 3043 sector_t last_trk, 3044 unsigned int first_offs, 3045 unsigned int last_offs, 3046 unsigned int blk_per_trk, 3047 unsigned int blksize) 3048 { 3049 struct dasd_eckd_private *private; 3050 unsigned long *idaws; 3051 struct LO_eckd_data *LO_data; 3052 struct dasd_ccw_req *cqr; 3053 struct ccw1 *ccw; 3054 struct req_iterator iter; 3055 struct bio_vec bv; 3056 char *dst; 3057 unsigned int off; 3058 int count, cidaw, cplength, datasize; 3059 sector_t recid; 3060 unsigned char cmd, rcmd; 3061 int use_prefix; 3062 struct dasd_device *basedev; 3063 3064 basedev = block->base; 3065 private = basedev->private; 3066 if (rq_data_dir(req) == READ) 3067 cmd = DASD_ECKD_CCW_READ_MT; 3068 else if (rq_data_dir(req) == WRITE) 3069 cmd = DASD_ECKD_CCW_WRITE_MT; 3070 else 3071 return ERR_PTR(-EINVAL); 3072 3073 /* Check struct bio and count the number of blocks for the request. */ 3074 count = 0; 3075 cidaw = 0; 3076 rq_for_each_segment(bv, req, iter) { 3077 if (bv.bv_len & (blksize - 1)) 3078 /* Eckd can only do full blocks. */ 3079 return ERR_PTR(-EINVAL); 3080 count += bv.bv_len >> (block->s2b_shift + 9); 3081 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) 3082 cidaw += bv.bv_len >> (block->s2b_shift + 9); 3083 } 3084 /* Paranoia. */ 3085 if (count != last_rec - first_rec + 1) 3086 return ERR_PTR(-EINVAL); 3087 3088 /* use the prefix command if available */ 3089 use_prefix = private->features.feature[8] & 0x01; 3090 if (use_prefix) { 3091 /* 1x prefix + number of blocks */ 3092 cplength = 2 + count; 3093 /* 1x prefix + cidaws*sizeof(long) */ 3094 datasize = sizeof(struct PFX_eckd_data) + 3095 sizeof(struct LO_eckd_data) + 3096 cidaw * sizeof(unsigned long); 3097 } else { 3098 /* 1x define extent + 1x locate record + number of blocks */ 3099 cplength = 2 + count; 3100 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */ 3101 datasize = sizeof(struct DE_eckd_data) + 3102 sizeof(struct LO_eckd_data) + 3103 cidaw * sizeof(unsigned long); 3104 } 3105 /* Find out the number of additional locate record ccws for cdl. */ 3106 if (private->uses_cdl && first_rec < 2*blk_per_trk) { 3107 if (last_rec >= 2*blk_per_trk) 3108 count = 2*blk_per_trk - first_rec; 3109 cplength += count; 3110 datasize += count*sizeof(struct LO_eckd_data); 3111 } 3112 /* Allocate the ccw request. */ 3113 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, 3114 startdev, blk_mq_rq_to_pdu(req)); 3115 if (IS_ERR(cqr)) 3116 return cqr; 3117 ccw = cqr->cpaddr; 3118 /* First ccw is define extent or prefix. */ 3119 if (use_prefix) { 3120 if (prefix(ccw++, cqr->data, first_trk, 3121 last_trk, cmd, basedev, startdev) == -EAGAIN) { 3122 /* Clock not in sync and XRC is enabled. 3123 * Try again later. 
3124 */ 3125 dasd_sfree_request(cqr, startdev); 3126 return ERR_PTR(-EAGAIN); 3127 } 3128 idaws = (unsigned long *) (cqr->data + 3129 sizeof(struct PFX_eckd_data)); 3130 } else { 3131 if (define_extent(ccw++, cqr->data, first_trk, 3132 last_trk, cmd, basedev, 0) == -EAGAIN) { 3133 /* Clock not in sync and XRC is enabled. 3134 * Try again later. 3135 */ 3136 dasd_sfree_request(cqr, startdev); 3137 return ERR_PTR(-EAGAIN); 3138 } 3139 idaws = (unsigned long *) (cqr->data + 3140 sizeof(struct DE_eckd_data)); 3141 } 3142 /* Build locate_record+read/write/ccws. */ 3143 LO_data = (struct LO_eckd_data *) (idaws + cidaw); 3144 recid = first_rec; 3145 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) { 3146 /* Only standard blocks so there is just one locate record. */ 3147 ccw[-1].flags |= CCW_FLAG_CC; 3148 locate_record(ccw++, LO_data++, first_trk, first_offs + 1, 3149 last_rec - recid + 1, cmd, basedev, blksize); 3150 } 3151 rq_for_each_segment(bv, req, iter) { 3152 dst = page_address(bv.bv_page) + bv.bv_offset; 3153 if (dasd_page_cache) { 3154 char *copy = kmem_cache_alloc(dasd_page_cache, 3155 GFP_DMA | __GFP_NOWARN); 3156 if (copy && rq_data_dir(req) == WRITE) 3157 memcpy(copy + bv.bv_offset, dst, bv.bv_len); 3158 if (copy) 3159 dst = copy + bv.bv_offset; 3160 } 3161 for (off = 0; off < bv.bv_len; off += blksize) { 3162 sector_t trkid = recid; 3163 unsigned int recoffs = sector_div(trkid, blk_per_trk); 3164 rcmd = cmd; 3165 count = blksize; 3166 /* Locate record for cdl special block ? */ 3167 if (private->uses_cdl && recid < 2*blk_per_trk) { 3168 if (dasd_eckd_cdl_special(blk_per_trk, recid)){ 3169 rcmd |= 0x8; 3170 count = dasd_eckd_cdl_reclen(recid); 3171 if (count < blksize && 3172 rq_data_dir(req) == READ) 3173 memset(dst + count, 0xe5, 3174 blksize - count); 3175 } 3176 ccw[-1].flags |= CCW_FLAG_CC; 3177 locate_record(ccw++, LO_data++, 3178 trkid, recoffs + 1, 3179 1, rcmd, basedev, count); 3180 } 3181 /* Locate record for standard blocks ? */ 3182 if (private->uses_cdl && recid == 2*blk_per_trk) { 3183 ccw[-1].flags |= CCW_FLAG_CC; 3184 locate_record(ccw++, LO_data++, 3185 trkid, recoffs + 1, 3186 last_rec - recid + 1, 3187 cmd, basedev, count); 3188 } 3189 /* Read/write ccw. 
*/ 3190 ccw[-1].flags |= CCW_FLAG_CC; 3191 ccw->cmd_code = rcmd; 3192 ccw->count = count; 3193 if (idal_is_needed(dst, blksize)) { 3194 ccw->cda = (__u32)(addr_t) idaws; 3195 ccw->flags = CCW_FLAG_IDA; 3196 idaws = idal_create_words(idaws, dst, blksize); 3197 } else { 3198 ccw->cda = (__u32)(addr_t) dst; 3199 ccw->flags = 0; 3200 } 3201 ccw++; 3202 dst += blksize; 3203 recid++; 3204 } 3205 } 3206 if (blk_noretry_request(req) || 3207 block->base->features & DASD_FEATURE_FAILFAST) 3208 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3209 cqr->startdev = startdev; 3210 cqr->memdev = startdev; 3211 cqr->block = block; 3212 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 3213 cqr->lpm = dasd_path_get_ppm(startdev); 3214 cqr->retries = startdev->default_retries; 3215 cqr->buildclk = get_tod_clock(); 3216 cqr->status = DASD_CQR_FILLED; 3217 return cqr; 3218 } 3219 3220 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( 3221 struct dasd_device *startdev, 3222 struct dasd_block *block, 3223 struct request *req, 3224 sector_t first_rec, 3225 sector_t last_rec, 3226 sector_t first_trk, 3227 sector_t last_trk, 3228 unsigned int first_offs, 3229 unsigned int last_offs, 3230 unsigned int blk_per_trk, 3231 unsigned int blksize) 3232 { 3233 unsigned long *idaws; 3234 struct dasd_ccw_req *cqr; 3235 struct ccw1 *ccw; 3236 struct req_iterator iter; 3237 struct bio_vec bv; 3238 char *dst, *idaw_dst; 3239 unsigned int cidaw, cplength, datasize; 3240 unsigned int tlf; 3241 sector_t recid; 3242 unsigned char cmd; 3243 struct dasd_device *basedev; 3244 unsigned int trkcount, count, count_to_trk_end; 3245 unsigned int idaw_len, seg_len, part_len, len_to_track_end; 3246 unsigned char new_track, end_idaw; 3247 sector_t trkid; 3248 unsigned int recoffs; 3249 3250 basedev = block->base; 3251 if (rq_data_dir(req) == READ) 3252 cmd = DASD_ECKD_CCW_READ_TRACK_DATA; 3253 else if (rq_data_dir(req) == WRITE) 3254 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA; 3255 else 3256 return ERR_PTR(-EINVAL); 3257 3258 /* Track based I/O needs IDAWs for each page, and not just for 3259 * 64 bit addresses. We need additional idals for pages 3260 * that get filled from two tracks, so we use the number 3261 * of records as upper limit. 3262 */ 3263 cidaw = last_rec - first_rec + 1; 3264 trkcount = last_trk - first_trk + 1; 3265 3266 /* 1x prefix + one read/write ccw per track */ 3267 cplength = 1 + trkcount; 3268 3269 datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long); 3270 3271 /* Allocate the ccw request. */ 3272 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, 3273 startdev, blk_mq_rq_to_pdu(req)); 3274 if (IS_ERR(cqr)) 3275 return cqr; 3276 ccw = cqr->cpaddr; 3277 /* transfer length factor: how many bytes to read from the last track */ 3278 if (first_trk == last_trk) 3279 tlf = last_offs - first_offs + 1; 3280 else 3281 tlf = last_offs + 1; 3282 tlf *= blksize; 3283 3284 if (prefix_LRE(ccw++, cqr->data, first_trk, 3285 last_trk, cmd, basedev, startdev, 3286 1 /* format */, first_offs + 1, 3287 trkcount, blksize, 3288 tlf) == -EAGAIN) { 3289 /* Clock not in sync and XRC is enabled. 3290 * Try again later. 
3291 */ 3292 dasd_sfree_request(cqr, startdev); 3293 return ERR_PTR(-EAGAIN); 3294 } 3295 3296 /* 3297 * The translation of request into ccw programs must meet the 3298 * following conditions: 3299 * - all idaws but the first and the last must address full pages 3300 * (or 2K blocks on 31-bit) 3301 * - the scope of a ccw and it's idal ends with the track boundaries 3302 */ 3303 idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data)); 3304 recid = first_rec; 3305 new_track = 1; 3306 end_idaw = 0; 3307 len_to_track_end = 0; 3308 idaw_dst = NULL; 3309 idaw_len = 0; 3310 rq_for_each_segment(bv, req, iter) { 3311 dst = page_address(bv.bv_page) + bv.bv_offset; 3312 seg_len = bv.bv_len; 3313 while (seg_len) { 3314 if (new_track) { 3315 trkid = recid; 3316 recoffs = sector_div(trkid, blk_per_trk); 3317 count_to_trk_end = blk_per_trk - recoffs; 3318 count = min((last_rec - recid + 1), 3319 (sector_t)count_to_trk_end); 3320 len_to_track_end = count * blksize; 3321 ccw[-1].flags |= CCW_FLAG_CC; 3322 ccw->cmd_code = cmd; 3323 ccw->count = len_to_track_end; 3324 ccw->cda = (__u32)(addr_t)idaws; 3325 ccw->flags = CCW_FLAG_IDA; 3326 ccw++; 3327 recid += count; 3328 new_track = 0; 3329 /* first idaw for a ccw may start anywhere */ 3330 if (!idaw_dst) 3331 idaw_dst = dst; 3332 } 3333 /* If we start a new idaw, we must make sure that it 3334 * starts on an IDA_BLOCK_SIZE boundary. 3335 * If we continue an idaw, we must make sure that the 3336 * current segment begins where the so far accumulated 3337 * idaw ends 3338 */ 3339 if (!idaw_dst) { 3340 if (__pa(dst) & (IDA_BLOCK_SIZE-1)) { 3341 dasd_sfree_request(cqr, startdev); 3342 return ERR_PTR(-ERANGE); 3343 } else 3344 idaw_dst = dst; 3345 } 3346 if ((idaw_dst + idaw_len) != dst) { 3347 dasd_sfree_request(cqr, startdev); 3348 return ERR_PTR(-ERANGE); 3349 } 3350 part_len = min(seg_len, len_to_track_end); 3351 seg_len -= part_len; 3352 dst += part_len; 3353 idaw_len += part_len; 3354 len_to_track_end -= part_len; 3355 /* collected memory area ends on an IDA_BLOCK border, 3356 * -> create an idaw 3357 * idal_create_words will handle cases where idaw_len 3358 * is larger then IDA_BLOCK_SIZE 3359 */ 3360 if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1))) 3361 end_idaw = 1; 3362 /* We also need to end the idaw at track end */ 3363 if (!len_to_track_end) { 3364 new_track = 1; 3365 end_idaw = 1; 3366 } 3367 if (end_idaw) { 3368 idaws = idal_create_words(idaws, idaw_dst, 3369 idaw_len); 3370 idaw_dst = NULL; 3371 idaw_len = 0; 3372 end_idaw = 0; 3373 } 3374 } 3375 } 3376 3377 if (blk_noretry_request(req) || 3378 block->base->features & DASD_FEATURE_FAILFAST) 3379 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3380 cqr->startdev = startdev; 3381 cqr->memdev = startdev; 3382 cqr->block = block; 3383 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 3384 cqr->lpm = dasd_path_get_ppm(startdev); 3385 cqr->retries = startdev->default_retries; 3386 cqr->buildclk = get_tod_clock(); 3387 cqr->status = DASD_CQR_FILLED; 3388 return cqr; 3389 } 3390 3391 static int prepare_itcw(struct itcw *itcw, 3392 unsigned int trk, unsigned int totrk, int cmd, 3393 struct dasd_device *basedev, 3394 struct dasd_device *startdev, 3395 unsigned int rec_on_trk, int count, 3396 unsigned int blksize, 3397 unsigned int total_data_size, 3398 unsigned int tlf, 3399 unsigned int blk_per_trk) 3400 { 3401 struct PFX_eckd_data pfxdata; 3402 struct dasd_eckd_private *basepriv, *startpriv; 3403 struct DE_eckd_data *dedata; 3404 struct LRE_eckd_data *lredata; 3405 
struct dcw *dcw; 3406 3407 u32 begcyl, endcyl; 3408 u16 heads, beghead, endhead; 3409 u8 pfx_cmd; 3410 3411 int rc = 0; 3412 int sector = 0; 3413 int dn, d; 3414 3415 3416 /* setup prefix data */ 3417 basepriv = basedev->private; 3418 startpriv = startdev->private; 3419 dedata = &pfxdata.define_extent; 3420 lredata = &pfxdata.locate_record; 3421 3422 memset(&pfxdata, 0, sizeof(pfxdata)); 3423 pfxdata.format = 1; /* PFX with LRE */ 3424 pfxdata.base_address = basepriv->ned->unit_addr; 3425 pfxdata.base_lss = basepriv->ned->ID; 3426 pfxdata.validity.define_extent = 1; 3427 3428 /* private uid is kept up to date, conf_data may be outdated */ 3429 if (startpriv->uid.type == UA_BASE_PAV_ALIAS) 3430 pfxdata.validity.verify_base = 1; 3431 3432 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) { 3433 pfxdata.validity.verify_base = 1; 3434 pfxdata.validity.hyper_pav = 1; 3435 } 3436 3437 switch (cmd) { 3438 case DASD_ECKD_CCW_READ_TRACK_DATA: 3439 dedata->mask.perm = 0x1; 3440 dedata->attributes.operation = basepriv->attrib.operation; 3441 dedata->blk_size = blksize; 3442 dedata->ga_extended |= 0x42; 3443 lredata->operation.orientation = 0x0; 3444 lredata->operation.operation = 0x0C; 3445 lredata->auxiliary.check_bytes = 0x01; 3446 pfx_cmd = DASD_ECKD_CCW_PFX_READ; 3447 break; 3448 case DASD_ECKD_CCW_WRITE_TRACK_DATA: 3449 dedata->mask.perm = 0x02; 3450 dedata->attributes.operation = basepriv->attrib.operation; 3451 dedata->blk_size = blksize; 3452 rc = set_timestamp(NULL, dedata, basedev); 3453 dedata->ga_extended |= 0x42; 3454 lredata->operation.orientation = 0x0; 3455 lredata->operation.operation = 0x3F; 3456 lredata->extended_operation = 0x23; 3457 lredata->auxiliary.check_bytes = 0x2; 3458 /* 3459 * If XRC is supported the System Time Stamp is set. The 3460 * validity of the time stamp must be reflected in the prefix 3461 * data as well. 
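	 * The time stamp is considered valid when bits 0x08 and 0x02 are
	 * set in ga_extended.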
3462 */ 3463 if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02) 3464 pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */ 3465 pfx_cmd = DASD_ECKD_CCW_PFX; 3466 break; 3467 case DASD_ECKD_CCW_READ_COUNT_MT: 3468 dedata->mask.perm = 0x1; 3469 dedata->attributes.operation = DASD_BYPASS_CACHE; 3470 dedata->ga_extended |= 0x42; 3471 dedata->blk_size = blksize; 3472 lredata->operation.orientation = 0x2; 3473 lredata->operation.operation = 0x16; 3474 lredata->auxiliary.check_bytes = 0x01; 3475 pfx_cmd = DASD_ECKD_CCW_PFX_READ; 3476 break; 3477 default: 3478 DBF_DEV_EVENT(DBF_ERR, basedev, 3479 "prepare itcw, unknown opcode 0x%x", cmd); 3480 BUG(); 3481 break; 3482 } 3483 if (rc) 3484 return rc; 3485 3486 dedata->attributes.mode = 0x3; /* ECKD */ 3487 3488 heads = basepriv->rdc_data.trk_per_cyl; 3489 begcyl = trk / heads; 3490 beghead = trk % heads; 3491 endcyl = totrk / heads; 3492 endhead = totrk % heads; 3493 3494 /* check for sequential prestage - enhance cylinder range */ 3495 if (dedata->attributes.operation == DASD_SEQ_PRESTAGE || 3496 dedata->attributes.operation == DASD_SEQ_ACCESS) { 3497 3498 if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl) 3499 endcyl += basepriv->attrib.nr_cyl; 3500 else 3501 endcyl = (basepriv->real_cyl - 1); 3502 } 3503 3504 set_ch_t(&dedata->beg_ext, begcyl, beghead); 3505 set_ch_t(&dedata->end_ext, endcyl, endhead); 3506 3507 dedata->ep_format = 0x20; /* records per track is valid */ 3508 dedata->ep_rec_per_track = blk_per_trk; 3509 3510 if (rec_on_trk) { 3511 switch (basepriv->rdc_data.dev_type) { 3512 case 0x3390: 3513 dn = ceil_quot(blksize + 6, 232); 3514 d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34); 3515 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8; 3516 break; 3517 case 0x3380: 3518 d = 7 + ceil_quot(blksize + 12, 32); 3519 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7; 3520 break; 3521 } 3522 } 3523 3524 if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) { 3525 lredata->auxiliary.length_valid = 0; 3526 lredata->auxiliary.length_scope = 0; 3527 lredata->sector = 0xff; 3528 } else { 3529 lredata->auxiliary.length_valid = 1; 3530 lredata->auxiliary.length_scope = 1; 3531 lredata->sector = sector; 3532 } 3533 lredata->auxiliary.imbedded_ccw_valid = 1; 3534 lredata->length = tlf; 3535 lredata->imbedded_ccw = cmd; 3536 lredata->count = count; 3537 set_ch_t(&lredata->seek_addr, begcyl, beghead); 3538 lredata->search_arg.cyl = lredata->seek_addr.cyl; 3539 lredata->search_arg.head = lredata->seek_addr.head; 3540 lredata->search_arg.record = rec_on_trk; 3541 3542 dcw = itcw_add_dcw(itcw, pfx_cmd, 0, 3543 &pfxdata, sizeof(pfxdata), total_data_size); 3544 return PTR_ERR_OR_ZERO(dcw); 3545 } 3546 3547 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( 3548 struct dasd_device *startdev, 3549 struct dasd_block *block, 3550 struct request *req, 3551 sector_t first_rec, 3552 sector_t last_rec, 3553 sector_t first_trk, 3554 sector_t last_trk, 3555 unsigned int first_offs, 3556 unsigned int last_offs, 3557 unsigned int blk_per_trk, 3558 unsigned int blksize) 3559 { 3560 struct dasd_ccw_req *cqr; 3561 struct req_iterator iter; 3562 struct bio_vec bv; 3563 char *dst; 3564 unsigned int trkcount, ctidaw; 3565 unsigned char cmd; 3566 struct dasd_device *basedev; 3567 unsigned int tlf; 3568 struct itcw *itcw; 3569 struct tidaw *last_tidaw = NULL; 3570 int itcw_op; 3571 size_t itcw_size; 3572 u8 tidaw_flags; 3573 unsigned int seg_len, part_len, len_to_track_end; 3574 unsigned char new_track; 3575 sector_t recid, trkid; 3576 unsigned int offs; 3577 unsigned 
int count, count_to_trk_end;
	int ret;

	basedev = block->base;
	if (rq_data_dir(req) == READ) {
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
		itcw_op = ITCW_OP_READ;
	} else if (rq_data_dir(req) == WRITE) {
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
		itcw_op = ITCW_OP_WRITE;
	} else
		return ERR_PTR(-EINVAL);

	/* track based I/O needs to address all memory via TIDAWs,
	 * not just for 64 bit addresses. This allows us to map
	 * each segment directly to one tidaw.
	 * In the case of write requests, additional tidaws may
	 * be needed when a segment crosses a track boundary.
	 */
	trkcount = last_trk - first_trk + 1;
	ctidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		++ctidaw;
	}
	if (rq_data_dir(req) == WRITE)
		ctidaw += (last_trk - first_trk);

	/* Allocate the ccw request. */
	itcw_size = itcw_calc_size(0, ctidaw, 0);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
				   blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	/* transfer length factor: how many bytes to read from the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;

	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
	if (IS_ERR(itcw)) {
		ret = -EINVAL;
		goto out_error;
	}
	cqr->cpaddr = itcw_get_tcw(itcw);
	if (prepare_itcw(itcw, first_trk, last_trk,
			 cmd, basedev, startdev,
			 first_offs + 1,
			 trkcount, blksize,
			 (last_rec - first_rec + 1) * blksize,
			 tlf, blk_per_trk) == -EAGAIN) {
		/* Clock not in sync and XRC is enabled.
		 * Try again later.
		 */
		ret = -EAGAIN;
		goto out_error;
	}
	len_to_track_end = 0;
	/*
	 * A tidaw can address 4k of memory, but must not cross page boundaries.
	 * We can let the block layer handle this by setting
	 * blk_queue_segment_boundary to page boundaries and
	 * blk_max_segment_size to page size when setting up the request queue.
	 * For write requests, a TIDAW must not cross track boundaries, because
	 * we have to set the CBC flag on the last tidaw for each track.
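	 * Read requests simply map each bio segment to one tidaw without
	 * this restriction.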
3644 */ 3645 if (rq_data_dir(req) == WRITE) { 3646 new_track = 1; 3647 recid = first_rec; 3648 rq_for_each_segment(bv, req, iter) { 3649 dst = page_address(bv.bv_page) + bv.bv_offset; 3650 seg_len = bv.bv_len; 3651 while (seg_len) { 3652 if (new_track) { 3653 trkid = recid; 3654 offs = sector_div(trkid, blk_per_trk); 3655 count_to_trk_end = blk_per_trk - offs; 3656 count = min((last_rec - recid + 1), 3657 (sector_t)count_to_trk_end); 3658 len_to_track_end = count * blksize; 3659 recid += count; 3660 new_track = 0; 3661 } 3662 part_len = min(seg_len, len_to_track_end); 3663 seg_len -= part_len; 3664 len_to_track_end -= part_len; 3665 /* We need to end the tidaw at track end */ 3666 if (!len_to_track_end) { 3667 new_track = 1; 3668 tidaw_flags = TIDAW_FLAGS_INSERT_CBC; 3669 } else 3670 tidaw_flags = 0; 3671 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags, 3672 dst, part_len); 3673 if (IS_ERR(last_tidaw)) { 3674 ret = -EINVAL; 3675 goto out_error; 3676 } 3677 dst += part_len; 3678 } 3679 } 3680 } else { 3681 rq_for_each_segment(bv, req, iter) { 3682 dst = page_address(bv.bv_page) + bv.bv_offset; 3683 last_tidaw = itcw_add_tidaw(itcw, 0x00, 3684 dst, bv.bv_len); 3685 if (IS_ERR(last_tidaw)) { 3686 ret = -EINVAL; 3687 goto out_error; 3688 } 3689 } 3690 } 3691 last_tidaw->flags |= TIDAW_FLAGS_LAST; 3692 last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC; 3693 itcw_finalize(itcw); 3694 3695 if (blk_noretry_request(req) || 3696 block->base->features & DASD_FEATURE_FAILFAST) 3697 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3698 cqr->cpmode = 1; 3699 cqr->startdev = startdev; 3700 cqr->memdev = startdev; 3701 cqr->block = block; 3702 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 3703 cqr->lpm = dasd_path_get_ppm(startdev); 3704 cqr->retries = startdev->default_retries; 3705 cqr->buildclk = get_tod_clock(); 3706 cqr->status = DASD_CQR_FILLED; 3707 return cqr; 3708 out_error: 3709 dasd_sfree_request(cqr, startdev); 3710 return ERR_PTR(ret); 3711 } 3712 3713 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, 3714 struct dasd_block *block, 3715 struct request *req) 3716 { 3717 int cmdrtd, cmdwtd; 3718 int use_prefix; 3719 int fcx_multitrack; 3720 struct dasd_eckd_private *private; 3721 struct dasd_device *basedev; 3722 sector_t first_rec, last_rec; 3723 sector_t first_trk, last_trk; 3724 unsigned int first_offs, last_offs; 3725 unsigned int blk_per_trk, blksize; 3726 int cdlspecial; 3727 unsigned int data_size; 3728 struct dasd_ccw_req *cqr; 3729 3730 basedev = block->base; 3731 private = basedev->private; 3732 3733 /* Calculate number of blocks/records per track. */ 3734 blksize = block->bp_block; 3735 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 3736 if (blk_per_trk == 0) 3737 return ERR_PTR(-EINVAL); 3738 /* Calculate record id of first and last block. 
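sector_div() splits the record id into a track number and the offset within that track.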
 */
	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
	first_offs = sector_div(first_trk, blk_per_trk);
	last_rec = last_trk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	last_offs = sector_div(last_trk, blk_per_trk);
	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);

	fcx_multitrack = private->features.feature[40] & 0x20;
	data_size = blk_rq_bytes(req);
	if (data_size % blksize)
		return ERR_PTR(-EINVAL);
	/* tpm write requests add CBC data on each track boundary */
	if (rq_data_dir(req) == WRITE)
		data_size += (last_trk - first_trk) * 4;

	/* is read track data and write track data in command mode supported? */
	cmdrtd = private->features.feature[9] & 0x20;
	cmdwtd = private->features.feature[12] & 0x40;
	use_prefix = private->features.feature[8] & 0x01;

	cqr = NULL;
	if (cdlspecial || dasd_page_cache) {
		/* do nothing, just fall through to the cmd mode single case */
	} else if ((data_size <= private->fcx_max_data)
		   && (fcx_multitrack || (first_trk == last_trk))) {
		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
						   first_rec, last_rec,
						   first_trk, last_trk,
						   first_offs, last_offs,
						   blk_per_trk, blksize);
		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
		    (PTR_ERR(cqr) != -ENOMEM))
			cqr = NULL;
	} else if (use_prefix &&
		   (((rq_data_dir(req) == READ) && cmdrtd) ||
		    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
						   first_rec, last_rec,
						   first_trk, last_trk,
						   first_offs, last_offs,
						   blk_per_trk, blksize);
		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
		    (PTR_ERR(cqr) != -ENOMEM))
			cqr = NULL;
	}
	if (!cqr)
		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
						    first_rec, last_rec,
						    first_trk, last_trk,
						    first_offs, last_offs,
						    blk_per_trk, blksize);
	return cqr;
}

static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
						   struct dasd_block *block,
						   struct request *req)
{
	sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
	unsigned int seg_len, len_to_track_end;
	unsigned int cidaw, cplength, datasize;
	sector_t first_trk, last_trk, sectors;
	struct dasd_eckd_private *base_priv;
	struct dasd_device *basedev;
	struct req_iterator iter;
	struct dasd_ccw_req *cqr;
	unsigned int first_offs;
	unsigned int trkcount;
	unsigned long *idaws;
	unsigned int size;
	unsigned char cmd;
	struct bio_vec bv;
	struct ccw1 *ccw;
	int use_prefix;
	void *data;
	char *dst;

	/*
	 * raw track access needs to be a multiple of 64k and on a 64k boundary.
	 * For read requests we can fix an incorrect alignment by padding
	 * the request with dummy pages.
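	 * The preallocated rawpadpage provides the dummy data.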
3820 */ 3821 start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK; 3822 end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) % 3823 DASD_RAW_SECTORS_PER_TRACK; 3824 end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) % 3825 DASD_RAW_SECTORS_PER_TRACK; 3826 basedev = block->base; 3827 if ((start_padding_sectors || end_padding_sectors) && 3828 (rq_data_dir(req) == WRITE)) { 3829 DBF_DEV_EVENT(DBF_ERR, basedev, 3830 "raw write not track aligned (%llu,%llu) req %p", 3831 start_padding_sectors, end_padding_sectors, req); 3832 return ERR_PTR(-EINVAL); 3833 } 3834 3835 first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK; 3836 last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) / 3837 DASD_RAW_SECTORS_PER_TRACK; 3838 trkcount = last_trk - first_trk + 1; 3839 first_offs = 0; 3840 3841 if (rq_data_dir(req) == READ) 3842 cmd = DASD_ECKD_CCW_READ_TRACK; 3843 else if (rq_data_dir(req) == WRITE) 3844 cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK; 3845 else 3846 return ERR_PTR(-EINVAL); 3847 3848 /* 3849 * Raw track based I/O needs IDAWs for each page, 3850 * and not just for 64 bit addresses. 3851 */ 3852 cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK; 3853 3854 /* 3855 * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes 3856 * of extended parameter. This is needed for write full track. 3857 */ 3858 base_priv = basedev->private; 3859 use_prefix = base_priv->features.feature[8] & 0x01; 3860 if (use_prefix) { 3861 cplength = 1 + trkcount; 3862 size = sizeof(struct PFX_eckd_data) + 2; 3863 } else { 3864 cplength = 2 + trkcount; 3865 size = sizeof(struct DE_eckd_data) + 3866 sizeof(struct LRE_eckd_data) + 2; 3867 } 3868 size = ALIGN(size, 8); 3869 3870 datasize = size + cidaw * sizeof(unsigned long); 3871 3872 /* Allocate the ccw request. 
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
				   datasize, startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	ccw = cqr->cpaddr;
	data = cqr->data;

	if (use_prefix) {
		prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
			   startdev, 1, first_offs + 1, trkcount, 0, 0);
	} else {
		define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
		ccw[-1].flags |= CCW_FLAG_CC;

		data += sizeof(struct DE_eckd_data);
		locate_record_ext(ccw++, data, first_trk, first_offs + 1,
				  trkcount, cmd, basedev, 0, 0);
	}

	idaws = (unsigned long *)(cqr->data + size);
	len_to_track_end = 0;
	if (start_padding_sectors) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = cmd;
		/* maximum 3390 track size */
		ccw->count = 57326;
		/* 64k maps to one track */
		len_to_track_end = 65536 - start_padding_sectors * 512;
		ccw->cda = (__u32)(addr_t)idaws;
		ccw->flags |= CCW_FLAG_IDA;
		ccw->flags |= CCW_FLAG_SLI;
		ccw++;
		for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
			idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		seg_len = bv.bv_len;
		if (cmd == DASD_ECKD_CCW_READ_TRACK)
			memset(dst, 0, seg_len);
		if (!len_to_track_end) {
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = cmd;
			/* maximum 3390 track size */
			ccw->count = 57326;
			/* 64k maps to one track */
			len_to_track_end = 65536;
			ccw->cda = (__u32)(addr_t)idaws;
			ccw->flags |= CCW_FLAG_IDA;
			ccw->flags |= CCW_FLAG_SLI;
			ccw++;
		}
		len_to_track_end -= seg_len;
		idaws = idal_create_words(idaws, dst, seg_len);
	}
	for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
		idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	return cqr;
}


static int
dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	struct dasd_eckd_private *private;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst, *cda;
	unsigned int blksize, blk_per_trk, off;
	sector_t recid;
	int status;

	if (!dasd_page_cache)
		goto out;
	private = cqr->block->base->private;
	blksize = cqr->block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
	ccw = cqr->cpaddr;
	/* Skip over define extent & locate record. */
	ccw++;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
		ccw++;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		for (off = 0; off < bv.bv_len; off += blksize) {
			/* Skip locate record.
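			 * With CDL, each record in the first two tracks is
			 * built with its own locate record CCW, so an extra
			 * CCW has to be skipped per block here.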
*/ 3974 if (private->uses_cdl && recid <= 2*blk_per_trk) 3975 ccw++; 3976 if (dst) { 3977 if (ccw->flags & CCW_FLAG_IDA) 3978 cda = *((char **)((addr_t) ccw->cda)); 3979 else 3980 cda = (char *)((addr_t) ccw->cda); 3981 if (dst != cda) { 3982 if (rq_data_dir(req) == READ) 3983 memcpy(dst, cda, bv.bv_len); 3984 kmem_cache_free(dasd_page_cache, 3985 (void *)((addr_t)cda & PAGE_MASK)); 3986 } 3987 dst = NULL; 3988 } 3989 ccw++; 3990 recid++; 3991 } 3992 } 3993 out: 3994 status = cqr->status == DASD_CQR_DONE; 3995 dasd_sfree_request(cqr, cqr->memdev); 3996 return status; 3997 } 3998 3999 /* 4000 * Modify ccw/tcw in cqr so it can be started on a base device. 4001 * 4002 * Note that this is not enough to restart the cqr! 4003 * Either reset cqr->startdev as well (summary unit check handling) 4004 * or restart via separate cqr (as in ERP handling). 4005 */ 4006 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr) 4007 { 4008 struct ccw1 *ccw; 4009 struct PFX_eckd_data *pfxdata; 4010 struct tcw *tcw; 4011 struct tccb *tccb; 4012 struct dcw *dcw; 4013 4014 if (cqr->cpmode == 1) { 4015 tcw = cqr->cpaddr; 4016 tccb = tcw_get_tccb(tcw); 4017 dcw = (struct dcw *)&tccb->tca[0]; 4018 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0]; 4019 pfxdata->validity.verify_base = 0; 4020 pfxdata->validity.hyper_pav = 0; 4021 } else { 4022 ccw = cqr->cpaddr; 4023 pfxdata = cqr->data; 4024 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) { 4025 pfxdata->validity.verify_base = 0; 4026 pfxdata->validity.hyper_pav = 0; 4027 } 4028 } 4029 } 4030 4031 #define DASD_ECKD_CHANQ_MAX_SIZE 4 4032 4033 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base, 4034 struct dasd_block *block, 4035 struct request *req) 4036 { 4037 struct dasd_eckd_private *private; 4038 struct dasd_device *startdev; 4039 unsigned long flags; 4040 struct dasd_ccw_req *cqr; 4041 4042 startdev = dasd_alias_get_start_dev(base); 4043 if (!startdev) 4044 startdev = base; 4045 private = startdev->private; 4046 if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE) 4047 return ERR_PTR(-EBUSY); 4048 4049 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags); 4050 private->count++; 4051 if ((base->features & DASD_FEATURE_USERAW)) 4052 cqr = dasd_eckd_build_cp_raw(startdev, block, req); 4053 else 4054 cqr = dasd_eckd_build_cp(startdev, block, req); 4055 if (IS_ERR(cqr)) 4056 private->count--; 4057 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags); 4058 return cqr; 4059 } 4060 4061 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr, 4062 struct request *req) 4063 { 4064 struct dasd_eckd_private *private; 4065 unsigned long flags; 4066 4067 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags); 4068 private = cqr->memdev->private; 4069 private->count--; 4070 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags); 4071 return dasd_eckd_free_cp(cqr, req); 4072 } 4073 4074 static int 4075 dasd_eckd_fill_info(struct dasd_device * device, 4076 struct dasd_information2_t * info) 4077 { 4078 struct dasd_eckd_private *private = device->private; 4079 4080 info->label_block = 2; 4081 info->FBA_layout = private->uses_cdl ? 0 : 1; 4082 info->format = private->uses_cdl ? 
					    DASD_FORMAT_CDL : DASD_FORMAT_LDL;
	info->characteristics_size = sizeof(private->rdc_data);
	memcpy(info->characteristics, &private->rdc_data,
	       sizeof(private->rdc_data));
	info->confdata_size = min((unsigned long)private->conf_len,
				  sizeof(info->configuration_data));
	memcpy(info->configuration_data, private->conf_data,
	       info->confdata_size);
	return 0;
}

/*
 * SECTION: ioctl functions for eckd devices.
 */

/*
 * Release device ioctl.
 * Builds a channel program to release a previously reserved
 * device (see dasd_eckd_reserve).
 */
static int
dasd_eckd_release(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Reserve device ioctl.
 * Options are set to 'synchronous wait for interrupt' and
 * 'timeout the request'. This leads to a terminate IO if
 * the interrupt is outstanding for a certain time.
 */
static int
dasd_eckd_reserve(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Steal lock ioctl - unconditional reserve device.
 * Builds a channel program to break a device's reservation
 * (unconditional reserve).
 */
static int
dasd_eckd_steal_lock(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SLCK;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * SNID - Sense Path Group ID
 * This ioctl may be used in situations where I/O is stalled due to
 * a reserve, so if the normal dasd_smalloc_request fails, we use the
 * preallocated dasd_reserve_req.
4265 */ 4266 static int dasd_eckd_snid(struct dasd_device *device, 4267 void __user *argp) 4268 { 4269 struct dasd_ccw_req *cqr; 4270 int rc; 4271 struct ccw1 *ccw; 4272 int useglobal; 4273 struct dasd_snid_ioctl_data usrparm; 4274 4275 if (!capable(CAP_SYS_ADMIN)) 4276 return -EACCES; 4277 4278 if (copy_from_user(&usrparm, argp, sizeof(usrparm))) 4279 return -EFAULT; 4280 4281 useglobal = 0; 4282 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 4283 sizeof(struct dasd_snid_data), device, 4284 NULL); 4285 if (IS_ERR(cqr)) { 4286 mutex_lock(&dasd_reserve_mutex); 4287 useglobal = 1; 4288 cqr = &dasd_reserve_req->cqr; 4289 memset(cqr, 0, sizeof(*cqr)); 4290 memset(&dasd_reserve_req->ccw, 0, 4291 sizeof(dasd_reserve_req->ccw)); 4292 cqr->cpaddr = &dasd_reserve_req->ccw; 4293 cqr->data = &dasd_reserve_req->data; 4294 cqr->magic = DASD_ECKD_MAGIC; 4295 } 4296 ccw = cqr->cpaddr; 4297 ccw->cmd_code = DASD_ECKD_CCW_SNID; 4298 ccw->flags |= CCW_FLAG_SLI; 4299 ccw->count = 12; 4300 ccw->cda = (__u32)(addr_t) cqr->data; 4301 cqr->startdev = device; 4302 cqr->memdev = device; 4303 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 4304 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 4305 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags); 4306 cqr->retries = 5; 4307 cqr->expires = 10 * HZ; 4308 cqr->buildclk = get_tod_clock(); 4309 cqr->status = DASD_CQR_FILLED; 4310 cqr->lpm = usrparm.path_mask; 4311 4312 rc = dasd_sleep_on_immediatly(cqr); 4313 /* verify that I/O processing didn't modify the path mask */ 4314 if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask)) 4315 rc = -EIO; 4316 if (!rc) { 4317 usrparm.data = *((struct dasd_snid_data *)cqr->data); 4318 if (copy_to_user(argp, &usrparm, sizeof(usrparm))) 4319 rc = -EFAULT; 4320 } 4321 4322 if (useglobal) 4323 mutex_unlock(&dasd_reserve_mutex); 4324 else 4325 dasd_sfree_request(cqr, cqr->memdev); 4326 return rc; 4327 } 4328 4329 /* 4330 * Read performance statistics 4331 */ 4332 static int 4333 dasd_eckd_performance(struct dasd_device *device, void __user *argp) 4334 { 4335 struct dasd_psf_prssd_data *prssdp; 4336 struct dasd_rssd_perf_stats_t *stats; 4337 struct dasd_ccw_req *cqr; 4338 struct ccw1 *ccw; 4339 int rc; 4340 4341 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, 4342 (sizeof(struct dasd_psf_prssd_data) + 4343 sizeof(struct dasd_rssd_perf_stats_t)), 4344 device, NULL); 4345 if (IS_ERR(cqr)) { 4346 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 4347 "Could not allocate initialization request"); 4348 return PTR_ERR(cqr); 4349 } 4350 cqr->startdev = device; 4351 cqr->memdev = device; 4352 cqr->retries = 0; 4353 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 4354 cqr->expires = 10 * HZ; 4355 4356 /* Prepare for Read Subsystem Data */ 4357 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 4358 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data)); 4359 prssdp->order = PSF_ORDER_PRSSD; 4360 prssdp->suborder = 0x01; /* Performance Statistics */ 4361 prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */ 4362 4363 ccw = cqr->cpaddr; 4364 ccw->cmd_code = DASD_ECKD_CCW_PSF; 4365 ccw->count = sizeof(struct dasd_psf_prssd_data); 4366 ccw->flags |= CCW_FLAG_CC; 4367 ccw->cda = (__u32)(addr_t) prssdp; 4368 4369 /* Read Subsystem Data - Performance Statistics */ 4370 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); 4371 memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t)); 4372 4373 ccw++; 4374 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 4375 ccw->count = sizeof(struct dasd_rssd_perf_stats_t); 4376 ccw->cda = 
		(__u32)(addr_t) stats;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
		if (copy_to_user(argp, stats,
				 sizeof(struct dasd_rssd_perf_stats_t)))
			rc = -EFAULT;
	}
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Get attributes (cache operations)
 * Returns the cache attributes used in Define Extent (DE).
 */
static int
dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
{
	struct dasd_eckd_private *private = device->private;
	struct attrib_data_t attrib = private->attrib;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;

	rc = 0;
	if (copy_to_user(argp, (long *) &attrib,
			 sizeof(struct attrib_data_t)))
		rc = -EFAULT;

	return rc;
}

/*
 * Set attributes (cache operations)
 * Stores the attributes for cache operation to be used in Define Extent (DE).
 */
static int
dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
{
	struct dasd_eckd_private *private = device->private;
	struct attrib_data_t attrib;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;

	if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
		return -EFAULT;
	private->attrib = attrib;

	dev_info(&device->cdev->dev,
		 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
		 private->attrib.operation, private->attrib.nr_cyl);
	return 0;
}

/*
 * Issue syscall I/O to EMC Symmetrix array.
 * CCWs are PSF and RSSD
 */
static int dasd_symm_io(struct dasd_device *device, void __user *argp)
{
	struct dasd_symmio_parms usrparm;
	char *psf_data, *rssd_result;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	char psf0, psf1;
	int rc;

	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
		return -EACCES;
	psf0 = psf1 = 0;

	/* Copy parms from caller */
	rc = -EFAULT;
	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		goto out;
	if (is_compat_task()) {
		/* Make sure pointers are sane even on 31 bit.
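		 * A 31-bit compat caller can only address the low 2 GiB,
		 * so pointers with any bit above bit 31 set are rejected
		 * and bit 31 itself is masked off below.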
*/ 4464 rc = -EINVAL; 4465 if ((usrparm.psf_data >> 32) != 0) 4466 goto out; 4467 if ((usrparm.rssd_result >> 32) != 0) 4468 goto out; 4469 usrparm.psf_data &= 0x7fffffffULL; 4470 usrparm.rssd_result &= 0x7fffffffULL; 4471 } 4472 /* at least 2 bytes are accessed and should be allocated */ 4473 if (usrparm.psf_data_len < 2) { 4474 DBF_DEV_EVENT(DBF_WARNING, device, 4475 "Symmetrix ioctl invalid data length %d", 4476 usrparm.psf_data_len); 4477 rc = -EINVAL; 4478 goto out; 4479 } 4480 /* alloc I/O data area */ 4481 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); 4482 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); 4483 if (!psf_data || !rssd_result) { 4484 rc = -ENOMEM; 4485 goto out_free; 4486 } 4487 4488 /* get syscall header from user space */ 4489 rc = -EFAULT; 4490 if (copy_from_user(psf_data, 4491 (void __user *)(unsigned long) usrparm.psf_data, 4492 usrparm.psf_data_len)) 4493 goto out_free; 4494 psf0 = psf_data[0]; 4495 psf1 = psf_data[1]; 4496 4497 /* setup CCWs for PSF + RSSD */ 4498 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL); 4499 if (IS_ERR(cqr)) { 4500 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 4501 "Could not allocate initialization request"); 4502 rc = PTR_ERR(cqr); 4503 goto out_free; 4504 } 4505 4506 cqr->startdev = device; 4507 cqr->memdev = device; 4508 cqr->retries = 3; 4509 cqr->expires = 10 * HZ; 4510 cqr->buildclk = get_tod_clock(); 4511 cqr->status = DASD_CQR_FILLED; 4512 4513 /* Build the ccws */ 4514 ccw = cqr->cpaddr; 4515 4516 /* PSF ccw */ 4517 ccw->cmd_code = DASD_ECKD_CCW_PSF; 4518 ccw->count = usrparm.psf_data_len; 4519 ccw->flags |= CCW_FLAG_CC; 4520 ccw->cda = (__u32)(addr_t) psf_data; 4521 4522 ccw++; 4523 4524 /* RSSD ccw */ 4525 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 4526 ccw->count = usrparm.rssd_result_len; 4527 ccw->flags = CCW_FLAG_SLI ; 4528 ccw->cda = (__u32)(addr_t) rssd_result; 4529 4530 rc = dasd_sleep_on(cqr); 4531 if (rc) 4532 goto out_sfree; 4533 4534 rc = -EFAULT; 4535 if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result, 4536 rssd_result, usrparm.rssd_result_len)) 4537 goto out_sfree; 4538 rc = 0; 4539 4540 out_sfree: 4541 dasd_sfree_request(cqr, cqr->memdev); 4542 out_free: 4543 kfree(rssd_result); 4544 kfree(psf_data); 4545 out: 4546 DBF_DEV_EVENT(DBF_WARNING, device, 4547 "Symmetrix ioctl (0x%02x 0x%02x): rc=%d", 4548 (int) psf0, (int) psf1, rc); 4549 return rc; 4550 } 4551 4552 static int 4553 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp) 4554 { 4555 struct dasd_device *device = block->base; 4556 4557 switch (cmd) { 4558 case BIODASDGATTR: 4559 return dasd_eckd_get_attrib(device, argp); 4560 case BIODASDSATTR: 4561 return dasd_eckd_set_attrib(device, argp); 4562 case BIODASDPSRD: 4563 return dasd_eckd_performance(device, argp); 4564 case BIODASDRLSE: 4565 return dasd_eckd_release(device); 4566 case BIODASDRSRV: 4567 return dasd_eckd_reserve(device); 4568 case BIODASDSLCK: 4569 return dasd_eckd_steal_lock(device); 4570 case BIODASDSNID: 4571 return dasd_eckd_snid(device, argp); 4572 case BIODASDSYMMIO: 4573 return dasd_symm_io(device, argp); 4574 default: 4575 return -ENOTTY; 4576 } 4577 } 4578 4579 /* 4580 * Dump the range of CCWs into 'page' buffer 4581 * and return number of printed chars. 
4582 */ 4583 static int 4584 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) 4585 { 4586 int len, count; 4587 char *datap; 4588 4589 len = 0; 4590 while (from <= to) { 4591 len += sprintf(page + len, PRINTK_HEADER 4592 " CCW %p: %08X %08X DAT:", 4593 from, ((int *) from)[0], ((int *) from)[1]); 4594 4595 /* get pointer to data (consider IDALs) */ 4596 if (from->flags & CCW_FLAG_IDA) 4597 datap = (char *) *((addr_t *) (addr_t) from->cda); 4598 else 4599 datap = (char *) ((addr_t) from->cda); 4600 4601 /* dump data (max 32 bytes) */ 4602 for (count = 0; count < from->count && count < 32; count++) { 4603 if (count % 8 == 0) len += sprintf(page + len, " "); 4604 if (count % 4 == 0) len += sprintf(page + len, " "); 4605 len += sprintf(page + len, "%02x", datap[count]); 4606 } 4607 len += sprintf(page + len, "\n"); 4608 from++; 4609 } 4610 return len; 4611 } 4612 4613 static void 4614 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb, 4615 char *reason) 4616 { 4617 u64 *sense; 4618 u64 *stat; 4619 4620 sense = (u64 *) dasd_get_sense(irb); 4621 stat = (u64 *) &irb->scsw; 4622 if (sense) { 4623 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : " 4624 "%016llx %016llx %016llx %016llx", 4625 reason, *stat, *((u32 *) (stat + 1)), 4626 sense[0], sense[1], sense[2], sense[3]); 4627 } else { 4628 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s", 4629 reason, *stat, *((u32 *) (stat + 1)), 4630 "NO VALID SENSE"); 4631 } 4632 } 4633 4634 /* 4635 * Print sense data and related channel program. 4636 * Parts are printed because printk buffer is only 1024 bytes. 4637 */ 4638 static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, 4639 struct dasd_ccw_req *req, struct irb *irb) 4640 { 4641 char *page; 4642 struct ccw1 *first, *last, *fail, *from, *to; 4643 int len, sl, sct; 4644 4645 page = (char *) get_zeroed_page(GFP_ATOMIC); 4646 if (page == NULL) { 4647 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 4648 "No memory to dump sense data\n"); 4649 return; 4650 } 4651 /* dump the sense data */ 4652 len = sprintf(page, PRINTK_HEADER 4653 " I/O status report for device %s:\n", 4654 dev_name(&device->cdev->dev)); 4655 len += sprintf(page + len, PRINTK_HEADER 4656 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " 4657 "CS:%02X RC:%d\n", 4658 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), 4659 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), 4660 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), 4661 req ? req->intrc : 0); 4662 len += sprintf(page + len, PRINTK_HEADER 4663 " device %s: Failing CCW: %p\n", 4664 dev_name(&device->cdev->dev), 4665 (void *) (addr_t) irb->scsw.cmd.cpa); 4666 if (irb->esw.esw0.erw.cons) { 4667 for (sl = 0; sl < 4; sl++) { 4668 len += sprintf(page + len, PRINTK_HEADER 4669 " Sense(hex) %2d-%2d:", 4670 (8 * sl), ((8 * sl) + 7)); 4671 4672 for (sct = 0; sct < 8; sct++) { 4673 len += sprintf(page + len, " %02x", 4674 irb->ecw[8 * sl + sct]); 4675 } 4676 len += sprintf(page + len, "\n"); 4677 } 4678 4679 if (irb->ecw[27] & DASD_SENSE_BIT_0) { 4680 /* 24 Byte Sense Data */ 4681 sprintf(page + len, PRINTK_HEADER 4682 " 24 Byte: %x MSG %x, " 4683 "%s MSGb to SYSOP\n", 4684 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f, 4685 irb->ecw[1] & 0x10 ? 
"" : "no"); 4686 } else { 4687 /* 32 Byte Sense Data */ 4688 sprintf(page + len, PRINTK_HEADER 4689 " 32 Byte: Format: %x " 4690 "Exception class %x\n", 4691 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4); 4692 } 4693 } else { 4694 sprintf(page + len, PRINTK_HEADER 4695 " SORRY - NO VALID SENSE AVAILABLE\n"); 4696 } 4697 printk(KERN_ERR "%s", page); 4698 4699 if (req) { 4700 /* req == NULL for unsolicited interrupts */ 4701 /* dump the Channel Program (max 140 Bytes per line) */ 4702 /* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */ 4703 first = req->cpaddr; 4704 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); 4705 to = min(first + 6, last); 4706 len = sprintf(page, PRINTK_HEADER 4707 " Related CP in req: %p\n", req); 4708 dasd_eckd_dump_ccw_range(first, to, page + len); 4709 printk(KERN_ERR "%s", page); 4710 4711 /* print failing CCW area (maximum 4) */ 4712 /* scsw->cda is either valid or zero */ 4713 len = 0; 4714 from = ++to; 4715 fail = (struct ccw1 *)(addr_t) 4716 irb->scsw.cmd.cpa; /* failing CCW */ 4717 if (from < fail - 2) { 4718 from = fail - 2; /* there is a gap - print header */ 4719 len += sprintf(page, PRINTK_HEADER "......\n"); 4720 } 4721 to = min(fail + 1, last); 4722 len += dasd_eckd_dump_ccw_range(from, to, page + len); 4723 4724 /* print last CCWs (maximum 2) */ 4725 from = max(from, ++to); 4726 if (from < last - 1) { 4727 from = last - 1; /* there is a gap - print header */ 4728 len += sprintf(page + len, PRINTK_HEADER "......\n"); 4729 } 4730 len += dasd_eckd_dump_ccw_range(from, last, page + len); 4731 if (len > 0) 4732 printk(KERN_ERR "%s", page); 4733 } 4734 free_page((unsigned long) page); 4735 } 4736 4737 4738 /* 4739 * Print sense data from a tcw. 4740 */ 4741 static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, 4742 struct dasd_ccw_req *req, struct irb *irb) 4743 { 4744 char *page; 4745 int len, sl, sct, residual; 4746 struct tsb *tsb; 4747 u8 *sense, *rcq; 4748 4749 page = (char *) get_zeroed_page(GFP_ATOMIC); 4750 if (page == NULL) { 4751 DBF_DEV_EVENT(DBF_WARNING, device, " %s", 4752 "No memory to dump sense data"); 4753 return; 4754 } 4755 /* dump the sense data */ 4756 len = sprintf(page, PRINTK_HEADER 4757 " I/O status report for device %s:\n", 4758 dev_name(&device->cdev->dev)); 4759 len += sprintf(page + len, PRINTK_HEADER 4760 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " 4761 "CS:%02X fcxs:%02X schxs:%02X RC:%d\n", 4762 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), 4763 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), 4764 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), 4765 irb->scsw.tm.fcxs, 4766 (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq, 4767 req ? 
req->intrc : 0); 4768 len += sprintf(page + len, PRINTK_HEADER 4769 " device %s: Failing TCW: %p\n", 4770 dev_name(&device->cdev->dev), 4771 (void *) (addr_t) irb->scsw.tm.tcw); 4772 4773 tsb = NULL; 4774 sense = NULL; 4775 if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01)) 4776 tsb = tcw_get_tsb( 4777 (struct tcw *)(unsigned long)irb->scsw.tm.tcw); 4778 4779 if (tsb) { 4780 len += sprintf(page + len, PRINTK_HEADER 4781 " tsb->length %d\n", tsb->length); 4782 len += sprintf(page + len, PRINTK_HEADER 4783 " tsb->flags %x\n", tsb->flags); 4784 len += sprintf(page + len, PRINTK_HEADER 4785 " tsb->dcw_offset %d\n", tsb->dcw_offset); 4786 len += sprintf(page + len, PRINTK_HEADER 4787 " tsb->count %d\n", tsb->count); 4788 residual = tsb->count - 28; 4789 len += sprintf(page + len, PRINTK_HEADER 4790 " residual %d\n", residual); 4791 4792 switch (tsb->flags & 0x07) { 4793 case 1: /* tsa_iostat */ 4794 len += sprintf(page + len, PRINTK_HEADER 4795 " tsb->tsa.iostat.dev_time %d\n", 4796 tsb->tsa.iostat.dev_time); 4797 len += sprintf(page + len, PRINTK_HEADER 4798 " tsb->tsa.iostat.def_time %d\n", 4799 tsb->tsa.iostat.def_time); 4800 len += sprintf(page + len, PRINTK_HEADER 4801 " tsb->tsa.iostat.queue_time %d\n", 4802 tsb->tsa.iostat.queue_time); 4803 len += sprintf(page + len, PRINTK_HEADER 4804 " tsb->tsa.iostat.dev_busy_time %d\n", 4805 tsb->tsa.iostat.dev_busy_time); 4806 len += sprintf(page + len, PRINTK_HEADER 4807 " tsb->tsa.iostat.dev_act_time %d\n", 4808 tsb->tsa.iostat.dev_act_time); 4809 sense = tsb->tsa.iostat.sense; 4810 break; 4811 case 2: /* ts_ddpc */ 4812 len += sprintf(page + len, PRINTK_HEADER 4813 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc); 4814 for (sl = 0; sl < 2; sl++) { 4815 len += sprintf(page + len, PRINTK_HEADER 4816 " tsb->tsa.ddpc.rcq %2d-%2d: ", 4817 (8 * sl), ((8 * sl) + 7)); 4818 rcq = tsb->tsa.ddpc.rcq; 4819 for (sct = 0; sct < 8; sct++) { 4820 len += sprintf(page + len, " %02x", 4821 rcq[8 * sl + sct]); 4822 } 4823 len += sprintf(page + len, "\n"); 4824 } 4825 sense = tsb->tsa.ddpc.sense; 4826 break; 4827 case 3: /* tsa_intrg */ 4828 len += sprintf(page + len, PRINTK_HEADER 4829 " tsb->tsa.intrg.: not supported yet\n"); 4830 break; 4831 } 4832 4833 if (sense) { 4834 for (sl = 0; sl < 4; sl++) { 4835 len += sprintf(page + len, PRINTK_HEADER 4836 " Sense(hex) %2d-%2d:", 4837 (8 * sl), ((8 * sl) + 7)); 4838 for (sct = 0; sct < 8; sct++) { 4839 len += sprintf(page + len, " %02x", 4840 sense[8 * sl + sct]); 4841 } 4842 len += sprintf(page + len, "\n"); 4843 } 4844 4845 if (sense[27] & DASD_SENSE_BIT_0) { 4846 /* 24 Byte Sense Data */ 4847 sprintf(page + len, PRINTK_HEADER 4848 " 24 Byte: %x MSG %x, " 4849 "%s MSGb to SYSOP\n", 4850 sense[7] >> 4, sense[7] & 0x0f, 4851 sense[1] & 0x10 ? 
"" : "no"); 4852 } else { 4853 /* 32 Byte Sense Data */ 4854 sprintf(page + len, PRINTK_HEADER 4855 " 32 Byte: Format: %x " 4856 "Exception class %x\n", 4857 sense[6] & 0x0f, sense[22] >> 4); 4858 } 4859 } else { 4860 sprintf(page + len, PRINTK_HEADER 4861 " SORRY - NO VALID SENSE AVAILABLE\n"); 4862 } 4863 } else { 4864 sprintf(page + len, PRINTK_HEADER 4865 " SORRY - NO TSB DATA AVAILABLE\n"); 4866 } 4867 printk(KERN_ERR "%s", page); 4868 free_page((unsigned long) page); 4869 } 4870 4871 static void dasd_eckd_dump_sense(struct dasd_device *device, 4872 struct dasd_ccw_req *req, struct irb *irb) 4873 { 4874 u8 *sense = dasd_get_sense(irb); 4875 4876 if (scsw_is_tm(&irb->scsw)) { 4877 /* 4878 * In some cases the 'File Protected' or 'Incorrect Length' 4879 * error might be expected and log messages shouldn't be written 4880 * then. Check if the according suppress bit is set. 4881 */ 4882 if (sense && (sense[1] & SNS1_FILE_PROTECTED) && 4883 test_bit(DASD_CQR_SUPPRESS_FP, &req->flags)) 4884 return; 4885 if (scsw_cstat(&irb->scsw) == 0x40 && 4886 test_bit(DASD_CQR_SUPPRESS_IL, &req->flags)) 4887 return; 4888 4889 dasd_eckd_dump_sense_tcw(device, req, irb); 4890 } else { 4891 /* 4892 * In some cases the 'Command Reject' or 'No Record Found' 4893 * error might be expected and log messages shouldn't be 4894 * written then. Check if the according suppress bit is set. 4895 */ 4896 if (sense && sense[0] & SNS0_CMD_REJECT && 4897 test_bit(DASD_CQR_SUPPRESS_CR, &req->flags)) 4898 return; 4899 4900 if (sense && sense[1] & SNS1_NO_REC_FOUND && 4901 test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags)) 4902 return; 4903 4904 dasd_eckd_dump_sense_ccw(device, req, irb); 4905 } 4906 } 4907 4908 static int dasd_eckd_pm_freeze(struct dasd_device *device) 4909 { 4910 /* 4911 * the device should be disconnected from our LCU structure 4912 * on restore we will reconnect it and reread LCU specific 4913 * information like PAV support that might have changed 4914 */ 4915 dasd_alias_remove_device(device); 4916 dasd_alias_disconnect_device_from_lcu(device); 4917 4918 return 0; 4919 } 4920 4921 static int dasd_eckd_restore_device(struct dasd_device *device) 4922 { 4923 struct dasd_eckd_private *private = device->private; 4924 struct dasd_eckd_characteristics temp_rdc_data; 4925 int rc; 4926 struct dasd_uid temp_uid; 4927 unsigned long flags; 4928 unsigned long cqr_flags = 0; 4929 4930 /* Read Configuration Data */ 4931 rc = dasd_eckd_read_conf(device); 4932 if (rc) { 4933 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 4934 "Read configuration data failed, rc=%d", rc); 4935 goto out_err; 4936 } 4937 4938 dasd_eckd_get_uid(device, &temp_uid); 4939 /* Generate device unique id */ 4940 rc = dasd_eckd_generate_uid(device); 4941 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 4942 if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0) 4943 dev_err(&device->cdev->dev, "The UID of the DASD has " 4944 "changed\n"); 4945 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 4946 if (rc) 4947 goto out_err; 4948 4949 /* register lcu with alias handling, enable PAV if this is a new lcu */ 4950 rc = dasd_alias_make_device_known_to_lcu(device); 4951 if (rc) 4952 goto out_err; 4953 4954 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr_flags); 4955 dasd_eckd_validate_server(device, cqr_flags); 4956 4957 /* RE-Read Configuration Data */ 4958 rc = dasd_eckd_read_conf(device); 4959 if (rc) { 4960 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 4961 "Read configuration data failed, rc=%d", rc); 4962 goto out_err2; 4963 } 4964 4965 /* 
Read Feature Codes */ 4966 dasd_eckd_read_features(device); 4967 4968 /* Read Device Characteristics */ 4969 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, 4970 &temp_rdc_data, 64); 4971 if (rc) { 4972 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 4973 "Read device characteristic failed, rc=%d", rc); 4974 goto out_err2; 4975 } 4976 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 4977 memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data)); 4978 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 4979 4980 /* add device to alias management */ 4981 dasd_alias_add_device(device); 4982 4983 return 0; 4984 4985 out_err2: 4986 dasd_alias_disconnect_device_from_lcu(device); 4987 out_err: 4988 return -1; 4989 } 4990 4991 static int dasd_eckd_reload_device(struct dasd_device *device) 4992 { 4993 struct dasd_eckd_private *private = device->private; 4994 int rc, old_base; 4995 char print_uid[60]; 4996 struct dasd_uid uid; 4997 unsigned long flags; 4998 4999 /* 5000 * remove device from alias handling to prevent new requests 5001 * from being scheduled on the wrong alias device 5002 */ 5003 dasd_alias_remove_device(device); 5004 5005 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 5006 old_base = private->uid.base_unit_addr; 5007 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 5008 5009 /* Read Configuration Data */ 5010 rc = dasd_eckd_read_conf(device); 5011 if (rc) 5012 goto out_err; 5013 5014 rc = dasd_eckd_generate_uid(device); 5015 if (rc) 5016 goto out_err; 5017 /* 5018 * update unit address configuration and 5019 * add device to alias management 5020 */ 5021 dasd_alias_update_add_device(device); 5022 5023 dasd_eckd_get_uid(device, &uid); 5024 5025 if (old_base != uid.base_unit_addr) { 5026 if (strlen(uid.vduit) > 0) 5027 snprintf(print_uid, sizeof(print_uid), 5028 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial, 5029 uid.ssid, uid.base_unit_addr, uid.vduit); 5030 else 5031 snprintf(print_uid, sizeof(print_uid), 5032 "%s.%s.%04x.%02x", uid.vendor, uid.serial, 5033 uid.ssid, uid.base_unit_addr); 5034 5035 dev_info(&device->cdev->dev, 5036 "An Alias device was reassigned to a new base device " 5037 "with UID: %s\n", print_uid); 5038 } 5039 return 0; 5040 5041 out_err: 5042 return -1; 5043 } 5044 5045 static int dasd_eckd_read_message_buffer(struct dasd_device *device, 5046 struct dasd_rssd_messages *messages, 5047 __u8 lpum) 5048 { 5049 struct dasd_rssd_messages *message_buf; 5050 struct dasd_psf_prssd_data *prssdp; 5051 struct dasd_ccw_req *cqr; 5052 struct ccw1 *ccw; 5053 int rc; 5054 5055 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, 5056 (sizeof(struct dasd_psf_prssd_data) + 5057 sizeof(struct dasd_rssd_messages)), 5058 device, NULL); 5059 if (IS_ERR(cqr)) { 5060 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 5061 "Could not allocate read message buffer request"); 5062 return PTR_ERR(cqr); 5063 } 5064 5065 cqr->lpm = lpum; 5066 retry: 5067 cqr->startdev = device; 5068 cqr->memdev = device; 5069 cqr->block = NULL; 5070 cqr->expires = 10 * HZ; 5071 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags); 5072 /* dasd_sleep_on_immediatly does not do complex error 5073 * recovery so clear erp flag and set retry counter to 5074 * do basic erp */ 5075 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 5076 cqr->retries = 256; 5077 5078 /* Prepare for Read Subsystem Data */ 5079 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 5080 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data)); 5081 prssdp->order = PSF_ORDER_PRSSD; 5082 
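	/*
	 * Read Subsystem Data requests are a PSF/RSSD CCW pair: the
	 * command-chained PSF CCW below carries this request block to the
	 * storage server, and the following RSSD CCW reads the answer back
	 * into message_buf.
	 */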
prssdp->suborder = 0x03; /* Message Buffer */ 5083 /* all other bytes of prssdp must be zero */ 5084 5085 ccw = cqr->cpaddr; 5086 ccw->cmd_code = DASD_ECKD_CCW_PSF; 5087 ccw->count = sizeof(struct dasd_psf_prssd_data); 5088 ccw->flags |= CCW_FLAG_CC; 5089 ccw->flags |= CCW_FLAG_SLI; 5090 ccw->cda = (__u32)(addr_t) prssdp; 5091 5092 /* Read Subsystem Data - message buffer */ 5093 message_buf = (struct dasd_rssd_messages *) (prssdp + 1); 5094 memset(message_buf, 0, sizeof(struct dasd_rssd_messages)); 5095 5096 ccw++; 5097 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 5098 ccw->count = sizeof(struct dasd_rssd_messages); 5099 ccw->flags |= CCW_FLAG_SLI; 5100 ccw->cda = (__u32)(addr_t) message_buf; 5101 5102 cqr->buildclk = get_tod_clock(); 5103 cqr->status = DASD_CQR_FILLED; 5104 rc = dasd_sleep_on_immediatly(cqr); 5105 if (rc == 0) { 5106 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 5107 message_buf = (struct dasd_rssd_messages *) 5108 (prssdp + 1); 5109 memcpy(messages, message_buf, 5110 sizeof(struct dasd_rssd_messages)); 5111 } else if (cqr->lpm) { 5112 /* 5113 * on z/VM we might not be able to do I/O on the requested path 5114 * but instead we get the required information on any path 5115 * so retry with open path mask 5116 */ 5117 cqr->lpm = 0; 5118 goto retry; 5119 } else 5120 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 5121 "Reading messages failed with rc=%d\n" 5122 , rc); 5123 dasd_sfree_request(cqr, cqr->memdev); 5124 return rc; 5125 } 5126 5127 static int dasd_eckd_query_host_access(struct dasd_device *device, 5128 struct dasd_psf_query_host_access *data) 5129 { 5130 struct dasd_eckd_private *private = device->private; 5131 struct dasd_psf_query_host_access *host_access; 5132 struct dasd_psf_prssd_data *prssdp; 5133 struct dasd_ccw_req *cqr; 5134 struct ccw1 *ccw; 5135 int rc; 5136 5137 /* not available for HYPER PAV alias devices */ 5138 if (!device->block && private->lcu->pav == HYPER_PAV) 5139 return -EOPNOTSUPP; 5140 5141 /* may not be supported by the storage server */ 5142 if (!(private->features.feature[14] & 0x80)) 5143 return -EOPNOTSUPP; 5144 5145 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, 5146 sizeof(struct dasd_psf_prssd_data) + 1, 5147 device, NULL); 5148 if (IS_ERR(cqr)) { 5149 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 5150 "Could not allocate read message buffer request"); 5151 return PTR_ERR(cqr); 5152 } 5153 host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA); 5154 if (!host_access) { 5155 dasd_sfree_request(cqr, device); 5156 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 5157 "Could not allocate host_access buffer"); 5158 return -ENOMEM; 5159 } 5160 cqr->startdev = device; 5161 cqr->memdev = device; 5162 cqr->block = NULL; 5163 cqr->retries = 256; 5164 cqr->expires = 10 * HZ; 5165 5166 /* Prepare for Read Subsystem Data */ 5167 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 5168 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data)); 5169 prssdp->order = PSF_ORDER_PRSSD; 5170 prssdp->suborder = PSF_SUBORDER_QHA; /* query host access */ 5171 /* LSS and Volume that will be queried */ 5172 prssdp->lss = private->ned->ID; 5173 prssdp->volume = private->ned->unit_addr; 5174 /* all other bytes of prssdp must be zero */ 5175 5176 ccw = cqr->cpaddr; 5177 ccw->cmd_code = DASD_ECKD_CCW_PSF; 5178 ccw->count = sizeof(struct dasd_psf_prssd_data); 5179 ccw->flags |= CCW_FLAG_CC; 5180 ccw->flags |= CCW_FLAG_SLI; 5181 ccw->cda = (__u32)(addr_t) prssdp; 5182 5183 /* Read Subsystem Data - query host access */ 5184 ccw++; 5185 
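	/*
	 * Unlike the message buffer query above, the result is not read
	 * into the cqr data area but into the separately allocated
	 * host_access buffer.
	 */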
ccw->cmd_code = DASD_ECKD_CCW_RSSD; 5186 ccw->count = sizeof(struct dasd_psf_query_host_access); 5187 ccw->flags |= CCW_FLAG_SLI; 5188 ccw->cda = (__u32)(addr_t) host_access; 5189 5190 cqr->buildclk = get_tod_clock(); 5191 cqr->status = DASD_CQR_FILLED; 5192 /* the command might not be supported, suppress error message */ 5193 __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags); 5194 rc = dasd_sleep_on_interruptible(cqr); 5195 if (rc == 0) { 5196 *data = *host_access; 5197 } else { 5198 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 5199 "Reading host access data failed with rc=%d\n", 5200 rc); 5201 rc = -EOPNOTSUPP; 5202 } 5203 5204 dasd_sfree_request(cqr, cqr->memdev); 5205 kfree(host_access); 5206 return rc; 5207 } 5208 /* 5209 * return number of grouped devices 5210 */ 5211 static int dasd_eckd_host_access_count(struct dasd_device *device) 5212 { 5213 struct dasd_psf_query_host_access *access; 5214 struct dasd_ckd_path_group_entry *entry; 5215 struct dasd_ckd_host_information *info; 5216 int count = 0; 5217 int rc, i; 5218 5219 access = kzalloc(sizeof(*access), GFP_NOIO); 5220 if (!access) { 5221 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 5222 "Could not allocate access buffer"); 5223 return -ENOMEM; 5224 } 5225 rc = dasd_eckd_query_host_access(device, access); 5226 if (rc) { 5227 kfree(access); 5228 return rc; 5229 } 5230 5231 info = (struct dasd_ckd_host_information *) 5232 access->host_access_information; 5233 for (i = 0; i < info->entry_count; i++) { 5234 entry = (struct dasd_ckd_path_group_entry *) 5235 (info->entry + i * info->entry_size); 5236 if (entry->status_flags & DASD_ECKD_PG_GROUPED) 5237 count++; 5238 } 5239 5240 kfree(access); 5241 return count; 5242 } 5243 5244 /* 5245 * write host access information to a sequential file 5246 */ 5247 static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m) 5248 { 5249 struct dasd_psf_query_host_access *access; 5250 struct dasd_ckd_path_group_entry *entry; 5251 struct dasd_ckd_host_information *info; 5252 char sysplex[9] = ""; 5253 int rc, i; 5254 5255 access = kzalloc(sizeof(*access), GFP_NOIO); 5256 if (!access) { 5257 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 5258 "Could not allocate access buffer"); 5259 return -ENOMEM; 5260 } 5261 rc = dasd_eckd_query_host_access(device, access); 5262 if (rc) { 5263 kfree(access); 5264 return rc; 5265 } 5266 5267 info = (struct dasd_ckd_host_information *) 5268 access->host_access_information; 5269 for (i = 0; i < info->entry_count; i++) { 5270 entry = (struct dasd_ckd_path_group_entry *) 5271 (info->entry + i * info->entry_size); 5272 /* PGID */ 5273 seq_printf(m, "pgid %*phN\n", 11, entry->pgid); 5274 /* FLAGS */ 5275 seq_printf(m, "status_flags %02x\n", entry->status_flags); 5276 /* SYSPLEX NAME */ 5277 memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1); 5278 EBCASC(sysplex, sizeof(sysplex)); 5279 seq_printf(m, "sysplex_name %8s\n", sysplex); 5280 /* SUPPORTED CYLINDER */ 5281 seq_printf(m, "supported_cylinder %d\n", entry->cylinder); 5282 /* TIMESTAMP */ 5283 seq_printf(m, "timestamp %lu\n", (unsigned long) 5284 entry->timestamp); 5285 } 5286 kfree(access); 5287 5288 return 0; 5289 } 5290 5291 /* 5292 * Perform Subsystem Function - CUIR response 5293 */ 5294 static int 5295 dasd_eckd_psf_cuir_response(struct dasd_device *device, int response, 5296 __u32 message_id, __u8 lpum) 5297 { 5298 struct dasd_psf_cuir_response *psf_cuir; 5299 int pos = pathmask_to_pos(lpum); 5300 struct dasd_ccw_req *cqr; 5301 struct ccw1 *ccw; 5302 int rc; 5303 5304 cqr = 
	      dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */,
				   sizeof(struct dasd_psf_cuir_response),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-CUIR request");
		return PTR_ERR(cqr);
	}

	psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
	psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
	psf_cuir->cc = response;
	psf_cuir->chpid = device->path[pos].chpid;
	psf_cuir->message_id = message_id;
	psf_cuir->cssid = device->path[pos].cssid;
	psf_cuir->ssid = device->path[pos].ssid;
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_cuir;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(struct dasd_psf_cuir_response);

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);

	rc = dasd_sleep_on(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Return the configuration data that is referenced by the record
 * selector, if a record selector is specified; per default return the
 * conf_data pointer for the path specified by lpum.
 */
static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
						     __u8 lpum,
						     struct dasd_cuir_message *cuir)
{
	struct dasd_conf_data *conf_data;
	int path, pos;

	if (cuir->record_selector == 0)
		goto out;
	for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
		conf_data = device->path[pos].conf_data;
		if (conf_data->gneq.record_selector ==
		    cuir->record_selector)
			return conf_data;
	}
out:
	return device->path[pathmask_to_pos(lpum)].conf_data;
}

/*
 * This function determines the scope of a reconfiguration request by
 * analysing the path and device selection data provided in the CUIR request.
 * Returns a path mask containing CUIR affected paths for the given device.
 *
 * If the CUIR request does not contain the required information, return the
 * path mask of the path the attention message for the CUIR request was
 * received on.
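 *
 * A path is added to the scope only if both its NED (selected by the
 * ned_map bit) and the neq_map-masked bytes of its GNEQ match the
 * reference configuration data.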
 */
static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
				struct dasd_cuir_message *cuir)
{
	struct dasd_conf_data *ref_conf_data;
	unsigned long bitmask = 0, mask = 0;
	struct dasd_conf_data *conf_data;
	unsigned int pos, path;
	char *ref_gneq, *gneq;
	char *ref_ned, *ned;
	int tbcpm = 0;

	/* if the CUIR request does not specify the scope, use the path
	   the attention message was presented on */
	if (!cuir->ned_map ||
	    !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
		return lpum;

	/* get reference conf data */
	ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
	/* reference ned is determined by ned_map field */
	pos = 8 - ffs(cuir->ned_map);
	ref_ned = (char *)&ref_conf_data->neds[pos];
	ref_gneq = (char *)&ref_conf_data->gneq;
	/* transfer 24 bit neq_map to mask */
	mask = cuir->neq_map[2];
	mask |= cuir->neq_map[1] << 8;
	mask |= cuir->neq_map[0] << 16;

	for (path = 0; path < 8; path++) {
		/* initialise data per path */
		bitmask = mask;
		conf_data = device->path[path].conf_data;
		pos = 8 - ffs(cuir->ned_map);
		ned = (char *) &conf_data->neds[pos];
		/* compare reference ned and per path ned */
		if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
			continue;
		gneq = (char *)&conf_data->gneq;
		/* compare reference gneq and per path gneq under the 24 bit
		   mask where the least significant mask bit selects byte 31
		   of the gneq and mask bit 23 selects byte 8 */
		while (bitmask) {
			pos = ffs(bitmask) - 1;
			if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
			    != 0)
				break;
			clear_bit(pos, &bitmask);
		}
		if (bitmask)
			continue;
		/* device and path match the reference values;
		   add path to CUIR scope */
		tbcpm |= 0x80 >> path;
	}
	return tbcpm;
}

static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
				       unsigned long paths, int action)
{
	int pos;

	while (paths) {
		/* get position of bit in mask */
		pos = 8 - ffs(paths);
		/* get channel path descriptor from this position */
		if (action == CUIR_QUIESCE)
			pr_warn("Service on the storage server caused path %x.%02x to go offline",
				device->path[pos].cssid,
				device->path[pos].chpid);
		else if (action == CUIR_RESUME)
			pr_info("Path %x.%02x is back online after service on the storage server",
				device->path[pos].cssid,
				device->path[pos].chpid);
		clear_bit(7 - pos, &paths);
	}
}

static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
				      struct dasd_cuir_message *cuir)
{
	unsigned long tbcpm;

	tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
	/* nothing to do if path is not in use */
	if (!(dasd_path_get_opm(device) & tbcpm))
		return 0;
	if (!(dasd_path_get_opm(device) & ~tbcpm)) {
		/* no path would be left if the CUIR action is taken;
		   return error */
		return -EINVAL;
	}
	/* remove path from the device's operational path mask */
	dasd_path_remove_opm(device, tbcpm);
	dasd_path_add_cuirpm(device, tbcpm);
	return tbcpm;
}

/*
 * walk through all devices and build a path mask to quiesce them
 * return an error if the last path to a device would be removed
 *
 * if only part of the devices are quiesced and an error
 * occurs no onlining necessary, the storage server will
 * notify the already set offline devices
again 5480 */ 5481 static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum, 5482 struct dasd_cuir_message *cuir) 5483 { 5484 struct dasd_eckd_private *private = device->private; 5485 struct alias_pav_group *pavgroup, *tempgroup; 5486 struct dasd_device *dev, *n; 5487 unsigned long paths = 0; 5488 unsigned long flags; 5489 int tbcpm; 5490 5491 /* active devices */ 5492 list_for_each_entry_safe(dev, n, &private->lcu->active_devices, 5493 alias_list) { 5494 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags); 5495 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir); 5496 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags); 5497 if (tbcpm < 0) 5498 goto out_err; 5499 paths |= tbcpm; 5500 } 5501 /* inactive devices */ 5502 list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices, 5503 alias_list) { 5504 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags); 5505 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir); 5506 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags); 5507 if (tbcpm < 0) 5508 goto out_err; 5509 paths |= tbcpm; 5510 } 5511 /* devices in PAV groups */ 5512 list_for_each_entry_safe(pavgroup, tempgroup, 5513 &private->lcu->grouplist, group) { 5514 list_for_each_entry_safe(dev, n, &pavgroup->baselist, 5515 alias_list) { 5516 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags); 5517 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir); 5518 spin_unlock_irqrestore( 5519 get_ccwdev_lock(dev->cdev), flags); 5520 if (tbcpm < 0) 5521 goto out_err; 5522 paths |= tbcpm; 5523 } 5524 list_for_each_entry_safe(dev, n, &pavgroup->aliaslist, 5525 alias_list) { 5526 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags); 5527 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir); 5528 spin_unlock_irqrestore( 5529 get_ccwdev_lock(dev->cdev), flags); 5530 if (tbcpm < 0) 5531 goto out_err; 5532 paths |= tbcpm; 5533 } 5534 } 5535 /* notify user about all paths affected by CUIR action */ 5536 dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE); 5537 return 0; 5538 out_err: 5539 return tbcpm; 5540 } 5541 5542 static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum, 5543 struct dasd_cuir_message *cuir) 5544 { 5545 struct dasd_eckd_private *private = device->private; 5546 struct alias_pav_group *pavgroup, *tempgroup; 5547 struct dasd_device *dev, *n; 5548 unsigned long paths = 0; 5549 int tbcpm; 5550 5551 /* 5552 * the path may have been added through a generic path event before 5553 * only trigger path verification if the path is not already in use 5554 */ 5555 list_for_each_entry_safe(dev, n, 5556 &private->lcu->active_devices, 5557 alias_list) { 5558 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); 5559 paths |= tbcpm; 5560 if (!(dasd_path_get_opm(dev) & tbcpm)) { 5561 dasd_path_add_tbvpm(dev, tbcpm); 5562 dasd_schedule_device_bh(dev); 5563 } 5564 } 5565 list_for_each_entry_safe(dev, n, 5566 &private->lcu->inactive_devices, 5567 alias_list) { 5568 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); 5569 paths |= tbcpm; 5570 if (!(dasd_path_get_opm(dev) & tbcpm)) { 5571 dasd_path_add_tbvpm(dev, tbcpm); 5572 dasd_schedule_device_bh(dev); 5573 } 5574 } 5575 /* devices in PAV groups */ 5576 list_for_each_entry_safe(pavgroup, tempgroup, 5577 &private->lcu->grouplist, 5578 group) { 5579 list_for_each_entry_safe(dev, n, 5580 &pavgroup->baselist, 5581 alias_list) { 5582 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); 5583 paths |= tbcpm; 5584 if (!(dasd_path_get_opm(dev) & tbcpm)) { 5585 dasd_path_add_tbvpm(dev, tbcpm); 5586 dasd_schedule_device_bh(dev); 5587 } 
5588 } 5589 list_for_each_entry_safe(dev, n, 5590 &pavgroup->aliaslist, 5591 alias_list) { 5592 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); 5593 paths |= tbcpm; 5594 if (!(dasd_path_get_opm(dev) & tbcpm)) { 5595 dasd_path_add_tbvpm(dev, tbcpm); 5596 dasd_schedule_device_bh(dev); 5597 } 5598 } 5599 } 5600 /* notify user about all paths affected by CUIR action */ 5601 dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME); 5602 return 0; 5603 } 5604 5605 static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages, 5606 __u8 lpum) 5607 { 5608 struct dasd_cuir_message *cuir = messages; 5609 int response; 5610 5611 DBF_DEV_EVENT(DBF_WARNING, device, 5612 "CUIR request: %016llx %016llx %016llx %08x", 5613 ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2], 5614 ((u32 *)cuir)[3]); 5615 5616 if (cuir->code == CUIR_QUIESCE) { 5617 /* quiesce */ 5618 if (dasd_eckd_cuir_quiesce(device, lpum, cuir)) 5619 response = PSF_CUIR_LAST_PATH; 5620 else 5621 response = PSF_CUIR_COMPLETED; 5622 } else if (cuir->code == CUIR_RESUME) { 5623 /* resume */ 5624 dasd_eckd_cuir_resume(device, lpum, cuir); 5625 response = PSF_CUIR_COMPLETED; 5626 } else 5627 response = PSF_CUIR_NOT_SUPPORTED; 5628 5629 dasd_eckd_psf_cuir_response(device, response, 5630 cuir->message_id, lpum); 5631 DBF_DEV_EVENT(DBF_WARNING, device, 5632 "CUIR response: %d on message ID %08x", response, 5633 cuir->message_id); 5634 /* to make sure there is no attention left schedule work again */ 5635 device->discipline->check_attention(device, lpum); 5636 } 5637 5638 static void dasd_eckd_check_attention_work(struct work_struct *work) 5639 { 5640 struct check_attention_work_data *data; 5641 struct dasd_rssd_messages *messages; 5642 struct dasd_device *device; 5643 int rc; 5644 5645 data = container_of(work, struct check_attention_work_data, worker); 5646 device = data->device; 5647 messages = kzalloc(sizeof(*messages), GFP_KERNEL); 5648 if (!messages) { 5649 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 5650 "Could not allocate attention message buffer"); 5651 goto out; 5652 } 5653 rc = dasd_eckd_read_message_buffer(device, messages, data->lpum); 5654 if (rc) 5655 goto out; 5656 if (messages->length == ATTENTION_LENGTH_CUIR && 5657 messages->format == ATTENTION_FORMAT_CUIR) 5658 dasd_eckd_handle_cuir(device, messages, data->lpum); 5659 out: 5660 dasd_put_device(device); 5661 kfree(messages); 5662 kfree(data); 5663 } 5664 5665 static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum) 5666 { 5667 struct check_attention_work_data *data; 5668 5669 data = kzalloc(sizeof(*data), GFP_ATOMIC); 5670 if (!data) 5671 return -ENOMEM; 5672 INIT_WORK(&data->worker, dasd_eckd_check_attention_work); 5673 dasd_get_device(device); 5674 data->device = device; 5675 data->lpum = lpum; 5676 schedule_work(&data->worker); 5677 return 0; 5678 } 5679 5680 static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum) 5681 { 5682 if (~lpum & dasd_path_get_opm(device)) { 5683 dasd_path_add_nohpfpm(device, lpum); 5684 dasd_path_remove_opm(device, lpum); 5685 dev_err(&device->cdev->dev, 5686 "Channel path %02X lost HPF functionality and is disabled\n", 5687 lpum); 5688 return 1; 5689 } 5690 return 0; 5691 } 5692 5693 static void dasd_eckd_disable_hpf_device(struct dasd_device *device) 5694 { 5695 struct dasd_eckd_private *private = device->private; 5696 5697 dev_err(&device->cdev->dev, 5698 "High Performance FICON disabled\n"); 5699 private->fcx_max_data = 0; 5700 } 5701 5702 static int dasd_eckd_hpf_enabled(struct dasd_device 
static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	dev_err(&device->cdev->dev,
		"High Performance FICON disabled\n");
	private->fcx_max_data = 0;
}

static int dasd_eckd_hpf_enabled(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->fcx_max_data ? 1 : 0;
}

static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
				       struct irb *irb)
{
	struct dasd_eckd_private *private = device->private;

	if (!private->fcx_max_data) {
		/* sanity check for no HPF, the error makes no sense */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Trying to disable HPF for a non HPF device");
		return;
	}
	if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
		dasd_eckd_disable_hpf_device(device);
	} else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
		if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
			return;
		dasd_eckd_disable_hpf_device(device);
		dasd_path_set_tbvpm(device,
				    dasd_path_get_hpfpm(device));
	}
	/*
	 * prevent any new I/O from being started on the device and
	 * schedule a requeue of existing requests
	 */
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
	dasd_schedule_requeue(device);
}

static struct ccw_driver dasd_eckd_driver = {
	.driver = {
		.name = "dasd-eckd",
		.owner = THIS_MODULE,
	},
	.ids = dasd_eckd_ids,
	.probe = dasd_eckd_probe,
	.remove = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online = dasd_eckd_set_online,
	.notify = dasd_generic_notify,
	.path_event = dasd_generic_path_event,
	.shutdown = dasd_generic_shutdown,
	.freeze = dasd_generic_pm_freeze,
	.thaw = dasd_generic_restore_device,
	.restore = dasd_generic_restore_device,
	.uc_handler = dasd_generic_uc_handler,
	.int_class = IRQIO_DAS,
};

/*
 * max_blocks is dependent on the amount of storage that is available
 * in the static io buffer for each device. Currently each device has
 * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
 * addition we have one define extent ccw + 16 bytes of data and one
 * locate record ccw + 16 bytes of data. That makes:
 * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
 * We want to fit two into the available memory so that we can immediately
 * start the next request if one finishes off. That makes 249.5 blocks
 * for one request. Give a little safety and the result is 240.
 */
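/*
 * Editor's note, working the numbers above: the fixed overhead is
 * 24 + 136 + 8 + 16 + 8 + 16 = 208 bytes, so (8192 - 208) / 16 = 499
 * blocks fit a single request, and 499 / 2 = 249.5 blocks per request
 * when two requests share the buffer, rounded down with a safety
 * margin to 240. Note that the discipline below sets max_blocks = 190,
 * which is more conservative still.
 */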
5769 */ 5770 static struct dasd_discipline dasd_eckd_discipline = { 5771 .owner = THIS_MODULE, 5772 .name = "ECKD", 5773 .ebcname = "ECKD", 5774 .max_blocks = 190, 5775 .check_device = dasd_eckd_check_characteristics, 5776 .uncheck_device = dasd_eckd_uncheck_device, 5777 .do_analysis = dasd_eckd_do_analysis, 5778 .verify_path = dasd_eckd_verify_path, 5779 .basic_to_ready = dasd_eckd_basic_to_ready, 5780 .online_to_ready = dasd_eckd_online_to_ready, 5781 .basic_to_known = dasd_eckd_basic_to_known, 5782 .fill_geometry = dasd_eckd_fill_geometry, 5783 .start_IO = dasd_start_IO, 5784 .term_IO = dasd_term_IO, 5785 .handle_terminated_request = dasd_eckd_handle_terminated_request, 5786 .format_device = dasd_eckd_format_device, 5787 .check_device_format = dasd_eckd_check_device_format, 5788 .erp_action = dasd_eckd_erp_action, 5789 .erp_postaction = dasd_eckd_erp_postaction, 5790 .check_for_device_change = dasd_eckd_check_for_device_change, 5791 .build_cp = dasd_eckd_build_alias_cp, 5792 .free_cp = dasd_eckd_free_alias_cp, 5793 .dump_sense = dasd_eckd_dump_sense, 5794 .dump_sense_dbf = dasd_eckd_dump_sense_dbf, 5795 .fill_info = dasd_eckd_fill_info, 5796 .ioctl = dasd_eckd_ioctl, 5797 .freeze = dasd_eckd_pm_freeze, 5798 .restore = dasd_eckd_restore_device, 5799 .reload = dasd_eckd_reload_device, 5800 .get_uid = dasd_eckd_get_uid, 5801 .kick_validate = dasd_eckd_kick_validate_server, 5802 .check_attention = dasd_eckd_check_attention, 5803 .host_access_count = dasd_eckd_host_access_count, 5804 .hosts_print = dasd_hosts_print, 5805 .handle_hpf_error = dasd_eckd_handle_hpf_error, 5806 .disable_hpf = dasd_eckd_disable_hpf_device, 5807 .hpf_enabled = dasd_eckd_hpf_enabled, 5808 .reset_path = dasd_eckd_reset_path, 5809 }; 5810 5811 static int __init 5812 dasd_eckd_init(void) 5813 { 5814 int ret; 5815 5816 ASCEBC(dasd_eckd_discipline.ebcname, 4); 5817 dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req), 5818 GFP_KERNEL | GFP_DMA); 5819 if (!dasd_reserve_req) 5820 return -ENOMEM; 5821 path_verification_worker = kmalloc(sizeof(*path_verification_worker), 5822 GFP_KERNEL | GFP_DMA); 5823 if (!path_verification_worker) { 5824 kfree(dasd_reserve_req); 5825 return -ENOMEM; 5826 } 5827 rawpadpage = (void *)__get_free_page(GFP_KERNEL); 5828 if (!rawpadpage) { 5829 kfree(path_verification_worker); 5830 kfree(dasd_reserve_req); 5831 return -ENOMEM; 5832 } 5833 ret = ccw_driver_register(&dasd_eckd_driver); 5834 if (!ret) 5835 wait_for_device_probe(); 5836 else { 5837 kfree(path_verification_worker); 5838 kfree(dasd_reserve_req); 5839 free_page((unsigned long)rawpadpage); 5840 } 5841 return ret; 5842 } 5843 5844 static void __exit 5845 dasd_eckd_cleanup(void) 5846 { 5847 ccw_driver_unregister(&dasd_eckd_driver); 5848 kfree(path_verification_worker); 5849 kfree(dasd_reserve_req); 5850 free_page((unsigned long)rawpadpage); 5851 } 5852 5853 module_init(dasd_eckd_init); 5854 module_exit(dasd_eckd_cleanup); 5855