/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
 */

#define KMSG_COMPONENT "dasd-eckd"

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/init.h>

#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>
#include <asm/schid.h>
#include <asm/chpid.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif				/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"

#define ECKD_C0(i) (i->home_bytes)
#define ECKD_F(i) (i->formula)
#define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
		    (i->factors.f_0x02.f1))
#define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
		    (i->factors.f_0x02.f2))
#define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
		    (i->factors.f_0x02.f3))
#define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
#define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
#define ECKD_F6(i) (i->factor6)
#define ECKD_F7(i) (i->factor7)
#define ECKD_F8(i) (i->factor8)

/*
 * raw track access always maps to 64k in memory
 * so it maps to 16 blocks of 4k per track
 */
#define DASD_RAW_BLOCK_PER_TRACK 16
#define DASD_RAW_BLOCKSIZE 4096
/* 64k are 128 x 512 byte sectors */
#define DASD_RAW_SECTORS_PER_TRACK 128

MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_eckd_discipline;

/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);

static struct ccw_driver dasd_eckd_driver; /* see below */

static void *rawpadpage;

#define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2

/* emergency request for reserve/release */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	char data[32];
} *dasd_reserve_req;
static DEFINE_MUTEX(dasd_reserve_mutex);
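/*
 * The request above is allocated once at driver initialization (in the
 * module init code, which is not part of this excerpt) and reused under
 * dasd_reserve_mutex, so a reserve/release can still be issued when the
 * system is too short on memory to build a fresh request.
 */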
/* definitions for the path verification worker */
struct path_verification_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
	int isglobal;
	__u8 tbvpm;
};
static struct path_verification_work_data *path_verification_worker;
static DEFINE_MUTEX(dasd_path_verification_mutex);

struct check_attention_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	__u8 lpum;
};

/* initial attempt at a probe function. this can be simplified once
 * the other detection code is gone */
static int
dasd_eckd_probe (struct ccw_device *cdev)
{
	int ret;

	/* set ECKD specific ccw-device options */
	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_eckd_probe: could not set "
				"ccw-device options");
		return ret;
	}
	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
	return ret;
}

static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}

static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140

/* head and record addresses of count_area read in analysis ccw */
static const int count_area_head[] = { 0, 0, 0, 0, 2 };
static const int count_area_rec[] = { 1, 2, 3, 4, 1 };

static inline unsigned int
round_up_multiple(unsigned int no, unsigned int mult)
{
	int rem = no % mult;
	return (rem ? no - rem + mult : no);
}

static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return (d1 + (d2 - 1)) / d2;
}

static unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}
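/*
 * The divisors above are the ECKD track capacity formulas of the
 * respective device types.  Worked example (illustrative numbers): a
 * 3390 with 4096-byte records and no key field gives
 * dn = ceil_quot(4102, 232) + 1 = 19 and
 * 1729 / (10 + 9 + ceil_quot(4096 + 6 * 19, 34)) = 1729 / 143 = 12
 * records per track, the familiar 12 x 4KB blocks of a 3390 track.
 */

/*
 * set_ch_t() packs a cylinder/head pair into a struct ch_t.  Cylinder
 * numbers can exceed 16 bits on large volumes; the excess bits 16-27
 * are kept in the upper 12 bits of the head field.
 */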
static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
	geo->cyl = (__u16) cyl;
	geo->head = cyl >> 16;
	geo->head <<= 4;
	geo->head |= head;
}

static int
check_XRC (struct ccw1 *de_ccw,
	   struct DE_eckd_data *data,
	   struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->rdc_data.facilities.XRC_supported)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

	rc = get_sync_clock(&data->ep_sys_time);
	/* Ignore return code if sync clock is switched off. */
	if (rc == -EOPNOTSUPP || rc == -EACCES)
		rc = 0;

	de_ccw->count = sizeof(struct DE_eckd_data);
	de_ccw->flags |= CCW_FLAG_SLI;
	return rc;
}

static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	int rc = 0;

	private = (struct dasd_eckd_private *) device->private;

	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = check_XRC (ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC (ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC (ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
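/*
 * A note on the Define Extent fields set above: reads run with
 * mask.perm 0x1, normal writes with 0x02, and destructive commands
 * (erase, write home address, write record zero) with 0x3 plus
 * mask.auth 0x1.  For sequential prestaging the end of the extent is
 * moved out by attrib.nr_cyl cylinders, capped at the last real
 * cylinder, so the control unit can stage data ahead of the request.
 */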
static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
			       struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->rdc_data.facilities.XRC_supported)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	pfxdata->define_extent.ga_extended |= 0x08; /* 'Time Stamp Valid'   */
	pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
	pfxdata->validity.time_stamp = 1;	    /* 'Time Stamp Valid'   */

	rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
	/* Ignore return code if sync clock is switched off. */
	if (rc == -EOPNOTSUPP || rc == -EACCES)
		rc = 0;
	return rc;
}

static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
			  unsigned int rec_on_trk, int count, int cmd,
			  struct dasd_device *device, unsigned int reclen,
			  unsigned int tlf)
{
	struct dasd_eckd_private *private;
	int sector;
	int dn, d;

	private = (struct dasd_eckd_private *) device->private;

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/* note: meaning of count depends on the operation
	 * for record based I/O it's the number of records, but for
	 * track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->operation.orientation = 0x0;
		data->operation.operation = 0x3F;
		data->extended_operation = 0x11;
		data->length = 0;
		data->extended_parameter_length = 0x02;
		if (data->count > 8) {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[1] = 0xFF;
			data->extended_parameter[1] <<= (16 - count);
		} else {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[0] <<= (8 - count);
			data->extended_parameter[1] = 0x00;
		}
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x0C;
		data->extended_parameter_length = 0;
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			      "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
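/*
 * The sector value computed in fill_LRE_data() is a rotational
 * position estimate for record rec_on_trk: the per-record overhead d
 * follows the same track layout formulas as recs_per_track(), and the
 * result tells the control unit at which angular sector the record
 * starts.  0xFF is used where no sector orientation is wanted, as in
 * the full-track cases above.
 */

/*
 * prefix_LRE() below builds the payload of a PFX ccw, which combines
 * a Define Extent and a Locate Record Extended in one command; with
 * format 0 only the define extent part is used, format 1 also fills
 * in the locate record part via fill_LRE_data().
 */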
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned char format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	int rc = 0;

	basepriv = (struct dasd_eckd_private *) basedev->private;
	startpriv = (struct dasd_eckd_private *) startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
		ccw->count = sizeof(*pfxdata) + 2;
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
	} else {
		ccw->count = sizeof(*pfxdata);
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata));
	}

	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type != UA_BASE_DEVICE) {
		pfxdata->validity.verify_base = 1;
		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
			pfxdata->validity.hyper_pav = 1;
	}
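	/*
	 * If the request is started on a PAV alias (startdev is not the
	 * base device), verify_base asks the control unit to check that
	 * the alias is still bound to the expected base address;
	 * hyper_pav additionally marks aliases taken from a HyperPAV
	 * pool.
	 */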
	/* define extent data (mostly) */
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
	case DASD_ECKD_CCW_READ_COUNT:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		dedata->mask.perm = 0x3;
		dedata->mask.auth = 0x1;
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		dedata->mask.perm = 0x03;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown opcode 0x%x", cmd);
		BUG();
		return -EINVAL;
	}

	dedata->attributes.mode = 0x3;	/* ECKD */

	if ((basepriv->rdc_data.cu_type == 0x2105 ||
	     basepriv->rdc_data.cu_type == 0x2107 ||
	     basepriv->rdc_data.cu_type == 0x1750)
	    && !(basepriv->uses_cdl && trk < 2))
		dedata->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	if (format == 1) {
		fill_LRE_data(lredata, trk, rec_on_trk, count, cmd,
			      basedev, blksize, tlf);
	}

	return rc;
}

static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}
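/*
 * prefix() is the format-0 convenience wrapper around prefix_LRE():
 * callers that only need the define extent part of a PFX command use
 * it instead of passing the unused locate record arguments themselves.
 */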
static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private;
	int sector;
	int dn, d;

	private = (struct dasd_eckd_private *) device->private;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}

/*
 * Returns 1 if the block is one of the special blocks that needs
 * to get read/written with the KD variant of the command.
 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant. So don't wonder about code like:
 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *         ccw->cmd_code |= 0x8;
 */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	if (recid < 3)
		return 1;
	if (recid < blk_per_trk)
		return 0;
	if (recid < 2 * blk_per_trk)
		return 1;
	return 0;
}

/*
 * Returns the record size for the special blocks of the cdl format.
 * Only returns something useful if dasd_eckd_cdl_special is true
 * for the recid.
 */
static inline int
dasd_eckd_cdl_reclen(int recid)
{
	if (recid < 3)
		return sizes_trk0[recid];
	return LABEL_SIZE;
}
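/*
 * Background for the two helpers above: in the compatible disk layout
 * (CDL) records 0-2 of track 0 have the special sizes 28, 148 and 84
 * bytes (sizes_trk0[]), and the special records of the second track
 * are 140-byte label records (LABEL_SIZE).
 */

/*
 * create_uid() below assembles the identifier that is also printed in
 * error messages: vendor.serial.ssid.unit_addr, extended by the
 * virtual device unit information token (vduit) when a virtual device
 * SNEQ is present.
 */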
/* create unique id from private structure. */
static void create_uid(struct dasd_eckd_private *private)
{
	int count;
	struct dasd_uid *uid;

	uid = &private->uid;
	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, private->ned->HDA_location,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (private->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
}

/*
 * Generate device unique id that specifies the physical device.
 */
static int dasd_eckd_generate_uid(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	unsigned long flags;

	private = (struct dasd_eckd_private *) device->private;
	if (!private)
		return -ENODEV;
	if (!private->ned || !private->gneq)
		return -ENODEV;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	create_uid(private);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return 0;
}

static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
{
	struct dasd_eckd_private *private;
	unsigned long flags;

	if (device->private) {
		private = (struct dasd_eckd_private *)device->private;
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		*uid = private->uid;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		return 0;
	}
	return -EINVAL;
}

/*
 * compare device UID with data of a given dasd_eckd_private structure
 * return 0 for match
 */
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
				      struct dasd_eckd_private *private)
{
	struct dasd_uid device_uid;

	create_uid(private);
	dasd_eckd_get_uid(device, &device_uid);

	return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
}

static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   __u8 *rcd_buffer,
				   __u8 lpm)
{
	struct ccw1 *ccw;
	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buffer[0] = 0xE5;
	rcd_buffer[1] = 0xF1;
	rcd_buffer[2] = 0x4B;
	rcd_buffer[3] = 0xF0;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RCD;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
	cqr->magic = DASD_ECKD_MAGIC;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}
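/*
 * Note on the request set up above: cqr->lpm pins the Read
 * Configuration Data request to a single channel path, and the
 * DASD_CQR_VERIFY_PATH flag should allow it to be started while that
 * path is not yet part of the operational path mask, which is exactly
 * the situation during path verification.
 */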
/*
 * Wakeup helper for read_conf
 * if the cqr is not done and needs some error recovery
 * the buffer has to be re-initialized with the EBCDIC "V1.0"
 * to show support for virtual device SNEQ
 */
static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct ccw1 *ccw;
	__u8 *rcd_buffer;

	if (cqr->status != DASD_CQR_DONE) {
		ccw = cqr->cpaddr;
		rcd_buffer = (__u8 *)((addr_t) ccw->cda);
		/* clear the whole RCD buffer, not just its first byte */
		memset(rcd_buffer, 0, DASD_ECKD_RCD_DATA_SIZE);

		rcd_buffer[0] = 0xE5;
		rcd_buffer[1] = 0xF1;
		rcd_buffer[2] = 0x4B;
		rcd_buffer[3] = 0xF0;
	}
	dasd_wakeup_cb(cqr, data);
}

static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
					   struct dasd_ccw_req *cqr,
					   __u8 *rcd_buffer,
					   __u8 lpm)
{
	struct ciw *ciw;
	int rc;
	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
		return -EOPNOTSUPP;

	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->callback = read_conf_cb;
	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}

static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
				   0, /* use rcd_buf as data area */
				   device);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		ret = -ENOMEM;
		goto out_error;
	}
	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
	cqr->callback = read_conf_cb;
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}

static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
{
	struct dasd_sneq *sneq;
	int i, count;

	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	count = private->conf_len / sizeof(struct dasd_sneq);
	sneq = (struct dasd_sneq *)private->conf_data;
	for (i = 0; i < count; ++i) {
		if (sneq->flags.identifier == 1 && sneq->format == 1)
			private->sneq = sneq;
		else if (sneq->flags.identifier == 1 && sneq->format == 4)
			private->vdsneq = (struct vd_sneq *)sneq;
		else if (sneq->flags.identifier == 2)
			private->gneq = (struct dasd_gneq *)sneq;
		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
			private->ned = (struct dasd_ned *)sneq;
		sneq++;
	}
	if (!private->ned || !private->gneq) {
		private->ned = NULL;
		private->sneq = NULL;
		private->vdsneq = NULL;
		private->gneq = NULL;
		return -EINVAL;
	}
	return 0;
}
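/*
 * The configuration data returned by RCD is a sequence of fixed-size
 * node element qualifiers; the loop above classifies each entry by its
 * identifier/format fields into the node element descriptor (ned), the
 * SNEQ, the virtual device SNEQ and the general NEQ.  Without both a
 * NED and a general NEQ the data is unusable, hence the -EINVAL.
 */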
static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
{
	struct dasd_gneq *gneq;
	int i, count, found;

	count = conf_len / sizeof(*gneq);
	gneq = (struct dasd_gneq *)conf_data;
	found = 0;
	for (i = 0; i < count; ++i) {
		if (gneq->flags.identifier == 2) {
			found = 1;
			break;
		}
		gneq++;
	}
	if (found)
		return ((char *)gneq)[18] & 0x07;
	else
		return 0;
}

static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc, path_err, pos;
	__u8 lpm, opm;
	struct dasd_eckd_private *private, path_private;
	struct dasd_path *path_data;
	struct dasd_uid *uid;
	char print_path_uid[60], print_device_uid[60];

	private = (struct dasd_eckd_private *) device->private;
	path_data = &device->path_data;
	opm = ccw_device_get_path_mask(device->cdev);
	conf_data_saved = 0;
	path_err = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
					     &conf_len, lpm);
		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data returned "
					"error %d", rc);
			return rc;
		}
		if (conf_data == NULL) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"No configuration data "
					"retrieved");
			/* no further analysis possible */
			path_data->opm |= lpm;
			continue;	/* no error */
		}
		/* translate path mask to position in mask */
		pos = 8 - ffs(lpm);
		kfree(private->path_conf_data[pos]);
		if ((__u8 *)private->path_conf_data[pos] ==
		    private->conf_data) {
			private->conf_data = NULL;
			private->conf_len = 0;
			conf_data_saved = 0;
		}
		private->path_conf_data[pos] =
			(struct dasd_conf_data *) conf_data;
		/* save first valid configuration data */
		if (!conf_data_saved) {
			kfree(private->conf_data);
			private->conf_data = conf_data;
			private->conf_len = conf_len;
			if (dasd_eckd_identify_conf_parts(private)) {
				private->conf_data = NULL;
				private->conf_len = 0;
				kfree(conf_data);
				continue;
			}
			/*
			 * build device UID so that the data of other
			 * paths can be compared to it
			 */
			dasd_eckd_generate_uid(device);
			conf_data_saved++;
		} else {
			path_private.conf_data = conf_data;
			path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
			if (dasd_eckd_identify_conf_parts(
				    &path_private)) {
				path_private.conf_data = NULL;
				path_private.conf_len = 0;
				kfree(conf_data);
				continue;
			}
			if (dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				uid = &private->uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"Not all channel paths lead to "
					"the same device, path %02X leads to "
					"device %s instead of %s\n", lpm,
					print_path_uid, print_device_uid);
				path_err = -EINVAL;
				path_data->cablepm |= lpm;
				continue;
			}

			path_private.conf_data = NULL;
			path_private.conf_len = 0;
		}
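		/*
		 * Reaching this point means the path's configuration
		 * data matched the device UID, i.e. the path really
		 * leads to the same physical device; a mismatch above
		 * marks the path as miscabled (cablepm) and keeps it
		 * out of the usable path masks.
		 */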
		switch (dasd_eckd_path_access(conf_data, conf_len)) {
		case 0x02:
			path_data->npm |= lpm;
			break;
		case 0x03:
			path_data->ppm |= lpm;
			break;
		}
		path_data->opm |= lpm;
		/*
		 * if the path is used
		 * it should not be in one of the negative lists
		 */
		path_data->cablepm &= ~lpm;
		path_data->hpfpm &= ~lpm;
		path_data->cuirpm &= ~lpm;
	}

	return path_err;
}

static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
	struct dasd_eckd_private *private;
	int mdc;
	u32 fcx_max_data;

	private = (struct dasd_eckd_private *) device->private;
	if (private->fcx_max_data) {
		mdc = ccw_device_get_mdc(device->cdev, lpm);
		if (mdc < 0) {
			dev_warn(&device->cdev->dev,
				 "Detecting the maximum data size for zHPF "
				 "requests failed (rc=%d) for a new path %x\n",
				 mdc, lpm);
			return mdc;
		}
		fcx_max_data = mdc * FCX_MAX_DATA_FACTOR;
		if (fcx_max_data < private->fcx_max_data) {
			dev_warn(&device->cdev->dev,
				 "The maximum data size for zHPF requests %u "
				 "on a new path %x is below the active maximum "
				 "%u\n", fcx_max_data, lpm,
				 private->fcx_max_data);
			return -EACCES;
		}
	}
	return 0;
}
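/*
 * A new path whose zHPF data size limit is smaller than the limit
 * already in use must not join the path group, because running
 * transport mode requests could exceed what the path can carry.  Such
 * paths are rejected with -EACCES and end up in the hpfpm mask of the
 * path verification worker below.
 */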
static int rebuild_device_uid(struct dasd_device *device,
			      struct path_verification_work_data *data)
{
	struct dasd_eckd_private *private;
	struct dasd_path *path_data;
	__u8 lpm, opm;
	int rc;

	rc = -ENODEV;
	private = (struct dasd_eckd_private *) device->private;
	path_data = &device->path_data;
	opm = device->path_data.opm;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);

		if (rc) {
			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
				continue;
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data "
					"returned error %d", rc);
			break;
		}
		memcpy(private->conf_data, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		if (dasd_eckd_identify_conf_parts(private)) {
			rc = -ENODEV;
		} else /* first valid path is enough */
			break;
	}

	if (!rc)
		rc = dasd_eckd_generate_uid(device);

	return rc;
}

static void do_path_verification_work(struct work_struct *work)
{
	struct path_verification_work_data *data;
	struct dasd_device *device;
	struct dasd_eckd_private path_private;
	struct dasd_uid *uid;
	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
	unsigned long flags;
	char print_uid[60];
	int rc;

	data = container_of(work, struct path_verification_work_data, worker);
	device = data->device;

	/* delay path verification until device was resumed */
	if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
		schedule_work(work);
		return;
	}
	/* check if path verification already running and delay if so */
	if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
		schedule_work(work);
		return;
	}
	opm = 0;
	npm = 0;
	ppm = 0;
	epm = 0;
	hpfpm = 0;
	cablepm = 0;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & data->tbvpm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);
		if (!rc) {
			switch (dasd_eckd_path_access(data->rcd_buffer,
						      DASD_ECKD_RCD_DATA_SIZE)
				) {
			case 0x02:
				npm |= lpm;
				break;
			case 0x03:
				ppm |= lpm;
				break;
			}
			opm |= lpm;
		} else if (rc == -EOPNOTSUPP) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: No configuration "
					"data retrieved");
			opm |= lpm;
		} else if (rc == -EAGAIN) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: device is stopped,"
					" try again later");
			epm |= lpm;
		} else {
			dev_warn(&device->cdev->dev,
				 "Reading device feature codes failed "
				 "(rc=%d) for new path %x\n", rc, lpm);
			continue;
		}
		if (verify_fcx_max_data(device, lpm)) {
			opm &= ~lpm;
			npm &= ~lpm;
			ppm &= ~lpm;
			hpfpm |= lpm;
			continue;
		}

		/*
		 * save conf_data for comparison after
		 * rebuild_device_uid may have changed
		 * the original data
		 */
		memcpy(&path_rcd_buf, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		path_private.conf_data = (void *) &path_rcd_buf;
		path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
		if (dasd_eckd_identify_conf_parts(&path_private)) {
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
			continue;
		}

		/*
		 * compare path UID with device UID only if at least
		 * one valid path is left
		 * in other case the device UID may have changed and
		 * the first working path UID will be used as device UID
		 */
		if (device->path_data.opm &&
		    dasd_eckd_compare_path_uid(device, &path_private)) {
			/*
			 * the comparison was not successful
			 * rebuild the device UID with at least one
			 * known path in case a z/VM hyperswap command
			 * has changed the device
			 *
			 * after this compare again
			 *
			 * if either the rebuild or the recompare fails
			 * the path can not be used
			 */
			if (rebuild_device_uid(device, data) ||
			    dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"The newly added channel path %02X "
					"will not be used because it leads "
					"to a different device %s\n",
					lpm, print_uid);
				opm &= ~lpm;
				npm &= ~lpm;
				ppm &= ~lpm;
				cablepm |= lpm;
				continue;
			}
		}

		/*
		 * There is a small chance that a path is lost again between
		 * above path verification and the following modification of
		 * the device opm mask.  We could avoid that race here by using
		 * yet another path mask, but we rather deal with this unlikely
		 * situation in dasd_start_IO.
		 */
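		/*
		 * Fold the per-path results into the device masks under
		 * the ccw device lock.  If the device had no operational
		 * path before and gained one now, it is put back into
		 * operation via dasd_generic_path_operational().
		 */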
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (!device->path_data.opm && opm) {
			device->path_data.opm = opm;
			device->path_data.cablepm &= ~opm;
			device->path_data.cuirpm &= ~opm;
			device->path_data.hpfpm &= ~opm;
			dasd_generic_path_operational(device);
		} else {
			device->path_data.opm |= opm;
			device->path_data.cablepm &= ~opm;
			device->path_data.cuirpm &= ~opm;
			device->path_data.hpfpm &= ~opm;
		}
		device->path_data.npm |= npm;
		device->path_data.ppm |= ppm;
		device->path_data.tbvpm |= epm;
		device->path_data.cablepm |= cablepm;
		device->path_data.hpfpm |= hpfpm;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	}
	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
	dasd_put_device(device);
	if (data->isglobal)
		mutex_unlock(&dasd_path_verification_mutex);
	else
		kfree(data);
}

static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
{
	struct path_verification_work_data *data;

	data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
	if (!data) {
		if (mutex_trylock(&dasd_path_verification_mutex)) {
			data = path_verification_worker;
			data->isglobal = 1;
		} else
			return -ENOMEM;
	} else {
		memset(data, 0, sizeof(*data));
		data->isglobal = 0;
	}
	INIT_WORK(&data->worker, do_path_verification_work);
	dasd_get_device(device);
	data->device = device;
	data->tbvpm = lpm;
	schedule_work(&data->worker);
	return 0;
}
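/*
 * dasd_eckd_verify_path() can be called from contexts that must not
 * sleep, hence the GFP_ATOMIC allocation.  If that fails, it falls back
 * to the one statically allocated worker (path_verification_worker),
 * serialized by dasd_path_verification_mutex, and only reports -ENOMEM
 * when that is busy as well.
 */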
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}


/*
 * Build CP for Perform Subsystem Function - SSC.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0xc0;
	if (enable_pav) {
		psf_ssc_data->suborder |= 0x08;
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;
	ccw->count = 66;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

/*
 * Perform Subsystem Function.
 * It is necessary to trigger CIO for channel revalidation since this
 * call might change behaviour of DASD devices.
 */
static int
dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
		  unsigned long flags)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	/*
	 * set flags e.g. turn on failfast, to prevent blocking
	 * the calling function should handle failed requests
	 */
	cqr->flags |= flags;

	rc = dasd_sleep_on(cqr);
	if (!rc)
		/* trigger CIO to reprobe devices */
		css_schedule_reprobe();
	else if (cqr->intrc == -EAGAIN)
		rc = -EAGAIN;

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
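/*
 * Both helpers above go through the Perform Subsystem Function
 * command: dasd_eckd_read_features() chains a PSF ccw (order PRSSD,
 * suborder 0x41 = feature codes) to a Read Subsystem Data ccw that
 * returns the answer, while the SSC order needs no answer and is sent
 * as a single PSF ccw.
 */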
/*
 * Validate storage server of current device.
 */
static int dasd_eckd_validate_server(struct dasd_device *device,
				     unsigned long flags)
{
	int rc;
	struct dasd_eckd_private *private;
	int enable_pav;

	private = (struct dasd_eckd_private *) device->private;
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;
	if (dasd_nopav || MACHINE_IS_VM)
		enable_pav = 0;
	else
		enable_pav = 1;
	rc = dasd_eckd_psf_ssc(device, enable_pav, flags);

	/* maybe the requested feature is not available on the server,
	 * therefore just report the error and go ahead */
	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
			"returned rc=%d", private->uid.ssid, rc);
	return rc;
}

/*
 * worker to do a validate server in case of a lost pathgroup
 */
static void dasd_eckd_do_validate_server(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  kick_validate);
	unsigned long flags = 0;

	set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
	if (dasd_eckd_validate_server(device, flags)
	    == -EAGAIN) {
		/* schedule worker again if failed */
		schedule_work(&device->kick_validate);
		return;
	}

	dasd_put_device(device);
}

static void dasd_eckd_kick_validate_server(struct dasd_device *device)
{
	dasd_get_device(device);
	/* exit if device not online or in offline processing */
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state < DASD_STATE_ONLINE) {
		dasd_put_device(device);
		return;
	}
	/* queue call to do_validate_server to the kernel event daemon. */
	if (!schedule_work(&device->kick_validate))
		dasd_put_device(device);
}

static u32 get_fcx_max_data(struct dasd_device *device)
{
	int tpm, mdc;
	int fcx_in_css, fcx_in_gneq, fcx_in_features;
	struct dasd_eckd_private *private;

	if (dasd_nofcx)
		return 0;
	/* is transport mode supported? */
	private = (struct dasd_eckd_private *) device->private;
	fcx_in_css = css_general_characteristics.fcx;
	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
	fcx_in_features = private->features.feature[40] & 0x80;
	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;

	if (!tpm)
		return 0;

	mdc = ccw_device_get_mdc(device->cdev, 0);
	if (mdc < 0) {
		dev_warn(&device->cdev->dev, "Detecting the maximum supported"
			 " data size for zHPF requests failed\n");
		return 0;
	} else
		return mdc * FCX_MAX_DATA_FACTOR;
}
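/*
 * Transport mode (zHPF/FCX) is only used when all three parties agree:
 * the channel subsystem (css general characteristics), the control
 * unit (a bit in the general NEQ) and the device feature codes.  If any
 * of them lacks support, get_fcx_max_data() returns 0 and the driver
 * stays in command mode.
 */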
/*
 * Check device characteristics.
 * If the device is accessible using ECKD discipline, the device is enabled.
 */
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct dasd_block *block;
	struct dasd_uid temp_uid;
	int rc, i;
	int readonly;
	unsigned long value;

	/* setup work queue for validate server*/
	INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);

	if (!ccw_device_is_pathgroup(device->cdev)) {
		dev_warn(&device->cdev->dev,
			 "A channel path group could not be established\n");
		return -EIO;
	}
	if (!ccw_device_is_multipath(device->cdev)) {
		dev_info(&device->cdev->dev,
			 "The DASD is not operating in multipath mode\n");
	}
	private = (struct dasd_eckd_private *) device->private;
	if (!private) {
		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
		if (!private) {
			dev_warn(&device->cdev->dev,
				 "Allocating memory for private DASD data "
				 "failed\n");
			return -ENOMEM;
		}
		device->private = (void *) private;
	} else {
		memset(private, 0, sizeof(*private));
	}
	/* Invalidate status of initial analysis. */
	private->init_cqr_status = -1;
	/* Set default cache operations. */
	private->attrib.operation = DASD_NORMAL_CACHE;
	private->attrib.nr_cyl = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err1;

	/* set default timeout */
	device->default_expires = DASD_EXPIRES;
	/* set default retry count */
	device->default_retries = DASD_RETRIES;

	if (private->gneq) {
		value = 1;
		for (i = 0; i < private->gneq->timeout.value; i++)
			value = 10 * value;
		value = value * private->gneq->timeout.number;
		/* do not accept useless values */
		if (value != 0 && value <= DASD_EXPIRES_MAX)
			device->default_expires = value;
	}
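	/*
	 * The GNEQ encodes the recommended timeout as a mantissa and a
	 * power-of-ten exponent, so the loop above computes
	 * number * 10^value; for example, number = 3 and value = 2
	 * (illustrative values) yield a default_expires of 300.
	 */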
	dasd_eckd_get_uid(device, &temp_uid);
	if (temp_uid.type == UA_BASE_DEVICE) {
		block = dasd_alloc_block();
		if (IS_ERR(block)) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"could not allocate dasd "
					"block structure");
			rc = PTR_ERR(block);
			goto out_err1;
		}
		device->block = block;
		block->base = device;
	}

	/* register lcu with alias handling, enable PAV */
	rc = dasd_alias_make_device_known_to_lcu(device);
	if (rc)
		goto out_err2;

	dasd_eckd_validate_server(device, 0);

	/* device may report different configuration data after LCU setup */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err3;

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &private->rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err3;
	}

	if ((device->features & DASD_FEATURE_USERAW) &&
	    !(private->rdc_data.facilities.RT_in_LR)) {
		dev_err(&device->cdev->dev, "The storage server does not "
			"support raw-track access\n");
		rc = -EINVAL;
		goto out_err3;
	}

	/* find the valid cylinder size */
	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
	    private->rdc_data.long_no_cyl)
		private->real_cyl = private->rdc_data.long_no_cyl;
	else
		private->real_cyl = private->rdc_data.no_cyl;

	private->fcx_max_data = get_fcx_max_data(device);

	readonly = dasd_device_is_ro(device);
	if (readonly)
		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);

	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
		 "with %d cylinders, %d heads, %d sectors%s\n",
		 private->rdc_data.dev_type,
		 private->rdc_data.dev_model,
		 private->rdc_data.cu_type,
		 private->rdc_data.cu_model.model,
		 private->real_cyl,
		 private->rdc_data.trk_per_cyl,
		 private->rdc_data.sec_per_trk,
		 readonly ? ", read-only device" : "");
	return 0;

out_err3:
	dasd_alias_disconnect_device_from_lcu(device);
out_err2:
	dasd_free_block(device->block);
	device->block = NULL;
out_err1:
	kfree(private->conf_data);
	kfree(device->private);
	device->private = NULL;
	return rc;
}

static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	int i;

	private = (struct dasd_eckd_private *) device->private;
	dasd_alias_disconnect_device_from_lcu(device);
	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	private->conf_len = 0;
	for (i = 0; i < 8; i++) {
		kfree(private->path_conf_data[i]);
		if ((__u8 *)private->path_conf_data[i] ==
		    private->conf_data) {
			private->conf_data = NULL;
			private->conf_len = 0;
		}
		private->path_conf_data[i] = NULL;
	}
	kfree(private->conf_data);
	private->conf_data = NULL;
}
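/*
 * The initial analysis below reads count areas to find out how the
 * volume is formatted: four Read Count ccws cover records 1-4 on
 * track 0 and one covers record 1 on track 2 (compare the
 * count_area_head/count_area_rec tables near the top of this file).
 */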
*/ 1890 ccw[-1].flags |= CCW_FLAG_CC; 1891 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; 1892 ccw->flags = 0; 1893 ccw->count = 8; 1894 ccw->cda = (__u32)(addr_t) count_data; 1895 1896 cqr->block = NULL; 1897 cqr->startdev = device; 1898 cqr->memdev = device; 1899 cqr->retries = 255; 1900 cqr->buildclk = get_tod_clock(); 1901 cqr->status = DASD_CQR_FILLED; 1902 return cqr; 1903 } 1904 1905 /* differentiate between 'no record found' and any other error */ 1906 static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr) 1907 { 1908 char *sense; 1909 if (init_cqr->status == DASD_CQR_DONE) 1910 return INIT_CQR_OK; 1911 else if (init_cqr->status == DASD_CQR_NEED_ERP || 1912 init_cqr->status == DASD_CQR_FAILED) { 1913 sense = dasd_get_sense(&init_cqr->irb); 1914 if (sense && (sense[1] & SNS1_NO_REC_FOUND)) 1915 return INIT_CQR_UNFORMATTED; 1916 else 1917 return INIT_CQR_ERROR; 1918 } else 1919 return INIT_CQR_ERROR; 1920 } 1921 1922 /* 1923 * This is the callback function for the init_analysis cqr. It saves 1924 * the status of the initial analysis ccw before it frees it and kicks 1925 * the device to continue the startup sequence. This will call 1926 * dasd_eckd_do_analysis again (if the device has not been marked 1927 * for deletion in the meantime). 1928 */ 1929 static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, 1930 void *data) 1931 { 1932 struct dasd_eckd_private *private; 1933 struct dasd_device *device; 1934 1935 device = init_cqr->startdev; 1936 private = (struct dasd_eckd_private *) device->private; 1937 private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr); 1938 dasd_sfree_request(init_cqr, device); 1939 dasd_kick_device(device); 1940 } 1941 1942 static int dasd_eckd_start_analysis(struct dasd_block *block) 1943 { 1944 struct dasd_ccw_req *init_cqr; 1945 1946 init_cqr = dasd_eckd_analysis_ccw(block->base); 1947 if (IS_ERR(init_cqr)) 1948 return PTR_ERR(init_cqr); 1949 init_cqr->callback = dasd_eckd_analysis_callback; 1950 init_cqr->callback_data = NULL; 1951 init_cqr->expires = 5*HZ; 1952 /* first try without ERP, so we can later handle unformatted 1953 * devices as a special case 1954 */ 1955 clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags); 1956 init_cqr->retries = 0; 1957 dasd_add_request_head(init_cqr); 1958 return -EAGAIN; 1959 } 1960 1961 static int dasd_eckd_end_analysis(struct dasd_block *block) 1962 { 1963 struct dasd_device *device; 1964 struct dasd_eckd_private *private; 1965 struct eckd_count *count_area; 1966 unsigned int sb, blk_per_trk; 1967 int status, i; 1968 struct dasd_ccw_req *init_cqr; 1969 1970 device = block->base; 1971 private = (struct dasd_eckd_private *) device->private; 1972 status = private->init_cqr_status; 1973 private->init_cqr_status = -1; 1974 if (status == INIT_CQR_ERROR) { 1975 /* try again, this time with full ERP */ 1976 init_cqr = dasd_eckd_analysis_ccw(device); 1977 dasd_sleep_on(init_cqr); 1978 status = dasd_eckd_analysis_evaluation(init_cqr); 1979 dasd_sfree_request(init_cqr, device); 1980 } 1981 1982 if (device->features & DASD_FEATURE_USERAW) { 1983 block->bp_block = DASD_RAW_BLOCKSIZE; 1984 blk_per_trk = DASD_RAW_BLOCK_PER_TRACK; 1985 block->s2b_shift = 3; 1986 goto raw; 1987 } 1988 1989 if (status == INIT_CQR_UNFORMATTED) { 1990 dev_warn(&device->cdev->dev, "The DASD is not formatted\n"); 1991 return -EMEDIUMTYPE; 1992 } else if (status == INIT_CQR_ERROR) { 1993 dev_err(&device->cdev->dev, 1994 "Detecting the DASD disk layout failed because " 1995 "of an I/O error\n"); 1996 return -EIO; 1997 } 1998
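/*
 * Classify the disk layout from the count areas read during analysis:
 * a compatible disk layout (CDL) has records 1-3 on track 0 with
 * 4 byte keys and the well-known CDL record sizes, while a Linux
 * disk layout (LDL) consists of equally sized, keyless records from
 * the very first record on.
 */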
1999 private->uses_cdl = 1; 2000 /* Check Track 0 for Compatible Disk Layout */ 2001 count_area = NULL; 2002 for (i = 0; i < 3; i++) { 2003 if (private->count_area[i].kl != 4 || 2004 private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 || 2005 private->count_area[i].cyl != 0 || 2006 private->count_area[i].head != count_area_head[i] || 2007 private->count_area[i].record != count_area_rec[i]) { 2008 private->uses_cdl = 0; 2009 break; 2010 } 2011 } 2012 if (i == 3) 2013 count_area = &private->count_area[4]; 2014 2015 if (private->uses_cdl == 0) { 2016 for (i = 0; i < 5; i++) { 2017 if ((private->count_area[i].kl != 0) || 2018 (private->count_area[i].dl != 2019 private->count_area[0].dl) || 2020 private->count_area[i].cyl != 0 || 2021 private->count_area[i].head != count_area_head[i] || 2022 private->count_area[i].record != count_area_rec[i]) 2023 break; 2024 } 2025 if (i == 5) 2026 count_area = &private->count_area[0]; 2027 } else { 2028 if (private->count_area[3].record == 1) 2029 dev_warn(&device->cdev->dev, 2030 "Track 0 has no records following the VTOC\n"); 2031 } 2032 2033 if (count_area != NULL && count_area->kl == 0) { 2034 /* we found nothing violating our disk layout */ 2035 if (dasd_check_blocksize(count_area->dl) == 0) 2036 block->bp_block = count_area->dl; 2037 } 2038 if (block->bp_block == 0) { 2039 dev_warn(&device->cdev->dev, 2040 "The disk layout of the DASD is not supported\n"); 2041 return -EMEDIUMTYPE; 2042 } 2043 block->s2b_shift = 0; /* bits to shift 512 to get a block */ 2044 for (sb = 512; sb < block->bp_block; sb = sb << 1) 2045 block->s2b_shift++; 2046 2047 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block); 2048 2049 raw: 2050 block->blocks = (private->real_cyl * 2051 private->rdc_data.trk_per_cyl * 2052 blk_per_trk); 2053 2054 dev_info(&device->cdev->dev, 2055 "DASD with %d KB/block, %d KB total size, %d KB/track, " 2056 "%s\n", (block->bp_block >> 10), 2057 ((private->real_cyl * 2058 private->rdc_data.trk_per_cyl * 2059 blk_per_trk * (block->bp_block >> 9)) >> 1), 2060 ((blk_per_trk * block->bp_block) >> 10), 2061 private->uses_cdl ?
2062 "compatible disk layout" : "linux disk layout"); 2063 2064 return 0; 2065 } 2066 2067 static int dasd_eckd_do_analysis(struct dasd_block *block) 2068 { 2069 struct dasd_eckd_private *private; 2070 2071 private = (struct dasd_eckd_private *) block->base->private; 2072 if (private->init_cqr_status < 0) 2073 return dasd_eckd_start_analysis(block); 2074 else 2075 return dasd_eckd_end_analysis(block); 2076 } 2077 2078 static int dasd_eckd_basic_to_ready(struct dasd_device *device) 2079 { 2080 return dasd_alias_add_device(device); 2081 }; 2082 2083 static int dasd_eckd_online_to_ready(struct dasd_device *device) 2084 { 2085 cancel_work_sync(&device->reload_device); 2086 cancel_work_sync(&device->kick_validate); 2087 return 0; 2088 }; 2089 2090 static int dasd_eckd_basic_to_known(struct dasd_device *device) 2091 { 2092 return dasd_alias_remove_device(device); 2093 }; 2094 2095 static int 2096 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo) 2097 { 2098 struct dasd_eckd_private *private; 2099 2100 private = (struct dasd_eckd_private *) block->base->private; 2101 if (dasd_check_blocksize(block->bp_block) == 0) { 2102 geo->sectors = recs_per_track(&private->rdc_data, 2103 0, block->bp_block); 2104 } 2105 geo->cylinders = private->rdc_data.no_cyl; 2106 geo->heads = private->rdc_data.trk_per_cyl; 2107 return 0; 2108 } 2109 2110 static struct dasd_ccw_req * 2111 dasd_eckd_build_format(struct dasd_device *base, 2112 struct format_data_t *fdata, 2113 int enable_pav) 2114 { 2115 struct dasd_eckd_private *base_priv; 2116 struct dasd_eckd_private *start_priv; 2117 struct dasd_device *startdev = NULL; 2118 struct dasd_ccw_req *fcp; 2119 struct eckd_count *ect; 2120 struct ch_t address; 2121 struct ccw1 *ccw; 2122 void *data; 2123 int rpt; 2124 int cplength, datasize; 2125 int i, j; 2126 int intensity = 0; 2127 int r0_perm; 2128 int nr_tracks; 2129 int use_prefix; 2130 2131 if (enable_pav) 2132 startdev = dasd_alias_get_start_dev(base); 2133 2134 if (!startdev) 2135 startdev = base; 2136 2137 start_priv = (struct dasd_eckd_private *) startdev->private; 2138 base_priv = (struct dasd_eckd_private *) base->private; 2139 2140 rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize); 2141 2142 nr_tracks = fdata->stop_unit - fdata->start_unit + 1; 2143 2144 /* 2145 * fdata->intensity is a bit string that tells us what to do: 2146 * Bit 0: write record zero 2147 * Bit 1: write home address, currently not supported 2148 * Bit 2: invalidate tracks 2149 * Bit 3: use OS/390 compatible disk layout (cdl) 2150 * Bit 4: do not allow storage subsystem to modify record zero 2151 * Only some bit combinations do make sense. 2152 */ 2153 if (fdata->intensity & 0x10) { 2154 r0_perm = 0; 2155 intensity = fdata->intensity & ~0x10; 2156 } else { 2157 r0_perm = 1; 2158 intensity = fdata->intensity; 2159 } 2160 2161 use_prefix = base_priv->features.feature[8] & 0x01; 2162 2163 switch (intensity) { 2164 case 0x00: /* Normal format */ 2165 case 0x08: /* Normal format, use cdl. */ 2166 cplength = 2 + (rpt*nr_tracks); 2167 if (use_prefix) 2168 datasize = sizeof(struct PFX_eckd_data) + 2169 sizeof(struct LO_eckd_data) + 2170 rpt * nr_tracks * sizeof(struct eckd_count); 2171 else 2172 datasize = sizeof(struct DE_eckd_data) + 2173 sizeof(struct LO_eckd_data) + 2174 rpt * nr_tracks * sizeof(struct eckd_count); 2175 break; 2176 case 0x01: /* Write record zero and format track. */ 2177 case 0x09: /* Write record zero and format track, use cdl. 
*/ 2178 cplength = 2 + rpt * nr_tracks; 2179 if (use_prefix) 2180 datasize = sizeof(struct PFX_eckd_data) + 2181 sizeof(struct LO_eckd_data) + 2182 sizeof(struct eckd_count) + 2183 rpt * nr_tracks * sizeof(struct eckd_count); 2184 else 2185 datasize = sizeof(struct DE_eckd_data) + 2186 sizeof(struct LO_eckd_data) + 2187 sizeof(struct eckd_count) + 2188 rpt * nr_tracks * sizeof(struct eckd_count); 2189 break; 2190 case 0x04: /* Invalidate track. */ 2191 case 0x0c: /* Invalidate track, use cdl. */ 2192 cplength = 3; 2193 if (use_prefix) 2194 datasize = sizeof(struct PFX_eckd_data) + 2195 sizeof(struct LO_eckd_data) + 2196 sizeof(struct eckd_count); 2197 else 2198 datasize = sizeof(struct DE_eckd_data) + 2199 sizeof(struct LO_eckd_data) + 2200 sizeof(struct eckd_count); 2201 break; 2202 default: 2203 dev_warn(&startdev->cdev->dev, 2204 "An I/O control call used incorrect flags 0x%x\n", 2205 fdata->intensity); 2206 return ERR_PTR(-EINVAL); 2207 } 2208 /* Allocate the format ccw request. */ 2209 fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, 2210 datasize, startdev); 2211 if (IS_ERR(fcp)) 2212 return fcp; 2213 2214 start_priv->count++; 2215 data = fcp->data; 2216 ccw = fcp->cpaddr; 2217 2218 switch (intensity & ~0x08) { 2219 case 0x00: /* Normal format. */ 2220 if (use_prefix) { 2221 prefix(ccw++, (struct PFX_eckd_data *) data, 2222 fdata->start_unit, fdata->stop_unit, 2223 DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2224 /* grant subsystem permission to format R0 */ 2225 if (r0_perm) 2226 ((struct PFX_eckd_data *)data) 2227 ->define_extent.ga_extended |= 0x04; 2228 data += sizeof(struct PFX_eckd_data); 2229 } else { 2230 define_extent(ccw++, (struct DE_eckd_data *) data, 2231 fdata->start_unit, fdata->stop_unit, 2232 DASD_ECKD_CCW_WRITE_CKD, startdev); 2233 /* grant subsystem permission to format R0 */ 2234 if (r0_perm) 2235 ((struct DE_eckd_data *) data) 2236 ->ga_extended |= 0x04; 2237 data += sizeof(struct DE_eckd_data); 2238 } 2239 ccw[-1].flags |= CCW_FLAG_CC; 2240 locate_record(ccw++, (struct LO_eckd_data *) data, 2241 fdata->start_unit, 0, rpt*nr_tracks, 2242 DASD_ECKD_CCW_WRITE_CKD, base, 2243 fdata->blksize); 2244 data += sizeof(struct LO_eckd_data); 2245 break; 2246 case 0x01: /* Write record zero + format track. */ 2247 if (use_prefix) { 2248 prefix(ccw++, (struct PFX_eckd_data *) data, 2249 fdata->start_unit, fdata->stop_unit, 2250 DASD_ECKD_CCW_WRITE_RECORD_ZERO, 2251 base, startdev); 2252 data += sizeof(struct PFX_eckd_data); 2253 } else { 2254 define_extent(ccw++, (struct DE_eckd_data *) data, 2255 fdata->start_unit, fdata->stop_unit, 2256 DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev); 2257 data += sizeof(struct DE_eckd_data); 2258 } 2259 ccw[-1].flags |= CCW_FLAG_CC; 2260 locate_record(ccw++, (struct LO_eckd_data *) data, 2261 fdata->start_unit, 0, rpt * nr_tracks + 1, 2262 DASD_ECKD_CCW_WRITE_RECORD_ZERO, base, 2263 base->block->bp_block); 2264 data += sizeof(struct LO_eckd_data); 2265 break; 2266 case 0x04: /* Invalidate track. 
*/ 2267 if (use_prefix) { 2268 prefix(ccw++, (struct PFX_eckd_data *) data, 2269 fdata->start_unit, fdata->stop_unit, 2270 DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2271 data += sizeof(struct PFX_eckd_data); 2272 } else { 2273 define_extent(ccw++, (struct DE_eckd_data *) data, 2274 fdata->start_unit, fdata->stop_unit, 2275 DASD_ECKD_CCW_WRITE_CKD, startdev); 2276 data += sizeof(struct DE_eckd_data); 2277 } 2278 ccw[-1].flags |= CCW_FLAG_CC; 2279 locate_record(ccw++, (struct LO_eckd_data *) data, 2280 fdata->start_unit, 0, 1, 2281 DASD_ECKD_CCW_WRITE_CKD, base, 8); 2282 data += sizeof(struct LO_eckd_data); 2283 break; 2284 } 2285 2286 for (j = 0; j < nr_tracks; j++) { 2287 /* calculate cylinder and head for the current track */ 2288 set_ch_t(&address, 2289 (fdata->start_unit + j) / 2290 base_priv->rdc_data.trk_per_cyl, 2291 (fdata->start_unit + j) % 2292 base_priv->rdc_data.trk_per_cyl); 2293 if (intensity & 0x01) { /* write record zero */ 2294 ect = (struct eckd_count *) data; 2295 data += sizeof(struct eckd_count); 2296 ect->cyl = address.cyl; 2297 ect->head = address.head; 2298 ect->record = 0; 2299 ect->kl = 0; 2300 ect->dl = 8; 2301 ccw[-1].flags |= CCW_FLAG_CC; 2302 ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO; 2303 ccw->flags = CCW_FLAG_SLI; 2304 ccw->count = 8; 2305 ccw->cda = (__u32)(addr_t) ect; 2306 ccw++; 2307 } 2308 if ((intensity & ~0x08) & 0x04) { /* erase track */ 2309 ect = (struct eckd_count *) data; 2310 data += sizeof(struct eckd_count); 2311 ect->cyl = address.cyl; 2312 ect->head = address.head; 2313 ect->record = 1; 2314 ect->kl = 0; 2315 ect->dl = 0; 2316 ccw[-1].flags |= CCW_FLAG_CC; 2317 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD; 2318 ccw->flags = CCW_FLAG_SLI; 2319 ccw->count = 8; 2320 ccw->cda = (__u32)(addr_t) ect; 2321 } else { /* write remaining records */ 2322 for (i = 0; i < rpt; i++) { 2323 ect = (struct eckd_count *) data; 2324 data += sizeof(struct eckd_count); 2325 ect->cyl = address.cyl; 2326 ect->head = address.head; 2327 ect->record = i + 1; 2328 ect->kl = 0; 2329 ect->dl = fdata->blksize; 2330 /* 2331 * Check for special tracks 0-1 2332 * when formatting CDL 2333 */ 2334 if ((intensity & 0x08) && 2335 fdata->start_unit == 0) { 2336 if (i < 3) { 2337 ect->kl = 4; 2338 ect->dl = sizes_trk0[i] - 4; 2339 } 2340 } 2341 if ((intensity & 0x08) && 2342 fdata->start_unit == 1) { 2343 ect->kl = 44; 2344 ect->dl = LABEL_SIZE - 44; 2345 } 2346 ccw[-1].flags |= CCW_FLAG_CC; 2347 if (i != 0 || j == 0) 2348 ccw->cmd_code = 2349 DASD_ECKD_CCW_WRITE_CKD; 2350 else 2351 ccw->cmd_code = 2352 DASD_ECKD_CCW_WRITE_CKD_MT; 2353 ccw->flags = CCW_FLAG_SLI; 2354 ccw->count = 8; 2355 ccw->cda = (__u32)(addr_t) ect; 2356 ccw++; 2357 } 2358 } 2359 } 2360 2361 fcp->startdev = startdev; 2362 fcp->memdev = startdev; 2363 fcp->basedev = base; 2364 fcp->retries = 256; 2365 fcp->expires = startdev->default_expires * HZ; 2366 fcp->buildclk = get_tod_clock(); 2367 fcp->status = DASD_CQR_FILLED; 2368 2369 return fcp; 2370 } 2371 2372 static int 2373 dasd_eckd_format_device(struct dasd_device *base, 2374 struct format_data_t *fdata, 2375 int enable_pav) 2376 { 2377 struct dasd_ccw_req *cqr, *n; 2378 struct dasd_block *block; 2379 struct dasd_eckd_private *private; 2380 struct list_head format_queue; 2381 struct dasd_device *device; 2382 int old_stop, format_step; 2383 int step, rc = 0, sleep_rc; 2384 2385 block = base->block; 2386 private = (struct dasd_eckd_private *) base->private; 2387 2388 /* Sanity checks. 
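 * Start and stop track must lie within the device, the track range
 * must not be inverted and the block size must be one of the
 * supported sizes.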
*/ 2389 if (fdata->start_unit >= 2390 (private->real_cyl * private->rdc_data.trk_per_cyl)) { 2391 dev_warn(&base->cdev->dev, 2392 "Start track number %u used in formatting is too big\n", 2393 fdata->start_unit); 2394 return -EINVAL; 2395 } 2396 if (fdata->stop_unit >= 2397 (private->real_cyl * private->rdc_data.trk_per_cyl)) { 2398 dev_warn(&base->cdev->dev, 2399 "Stop track number %u used in formatting is too big\n", 2400 fdata->stop_unit); 2401 return -EINVAL; 2402 } 2403 if (fdata->start_unit > fdata->stop_unit) { 2404 dev_warn(&base->cdev->dev, 2405 "Start track %u used in formatting exceeds end track\n", 2406 fdata->start_unit); 2407 return -EINVAL; 2408 } 2409 if (dasd_check_blocksize(fdata->blksize) != 0) { 2410 dev_warn(&base->cdev->dev, 2411 "The DASD cannot be formatted with block size %u\n", 2412 fdata->blksize); 2413 return -EINVAL; 2414 } 2415 2416 INIT_LIST_HEAD(&format_queue); 2417 2418 old_stop = fdata->stop_unit; 2419 while (fdata->start_unit <= 1) { 2420 fdata->stop_unit = fdata->start_unit; 2421 cqr = dasd_eckd_build_format(base, fdata, enable_pav); 2422 list_add(&cqr->blocklist, &format_queue); 2423 2424 fdata->stop_unit = old_stop; 2425 fdata->start_unit++; 2426 2427 if (fdata->start_unit > fdata->stop_unit) 2428 goto sleep; 2429 } 2430 2431 retry: 2432 format_step = 255 / recs_per_track(&private->rdc_data, 0, 2433 fdata->blksize); 2434 while (fdata->start_unit <= old_stop) { 2435 step = fdata->stop_unit - fdata->start_unit + 1; 2436 if (step > format_step) 2437 fdata->stop_unit = fdata->start_unit + format_step - 1; 2438 2439 cqr = dasd_eckd_build_format(base, fdata, enable_pav); 2440 if (IS_ERR(cqr)) { 2441 if (PTR_ERR(cqr) == -ENOMEM) { 2442 /* 2443 * not enough memory available 2444 * go to out and start requests 2445 * retry after first requests were finished 2446 */ 2447 fdata->stop_unit = old_stop; 2448 goto sleep; 2449 } else 2450 return PTR_ERR(cqr); 2451 } 2452 list_add(&cqr->blocklist, &format_queue); 2453 2454 fdata->start_unit = fdata->stop_unit + 1; 2455 fdata->stop_unit = old_stop; 2456 } 2457 2458 sleep: 2459 sleep_rc = dasd_sleep_on_queue(&format_queue); 2460 2461 list_for_each_entry_safe(cqr, n, &format_queue, blocklist) { 2462 device = cqr->startdev; 2463 private = (struct dasd_eckd_private *) device->private; 2464 if (cqr->status == DASD_CQR_FAILED) 2465 rc = -EIO; 2466 list_del_init(&cqr->blocklist); 2467 dasd_sfree_request(cqr, device); 2468 private->count--; 2469 } 2470 2471 if (sleep_rc) 2472 return sleep_rc; 2473 2474 /* 2475 * in case of ENOMEM we need to retry after 2476 * first requests are finished 2477 */ 2478 if (fdata->start_unit <= fdata->stop_unit) 2479 goto retry; 2480 2481 return rc; 2482 } 2483 2484 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr) 2485 { 2486 if (cqr->retries < 0) { 2487 cqr->status = DASD_CQR_FAILED; 2488 return; 2489 } 2490 cqr->status = DASD_CQR_FILLED; 2491 if (cqr->block && (cqr->startdev != cqr->block->base)) { 2492 dasd_eckd_reset_ccw_to_base_io(cqr); 2493 cqr->startdev = cqr->block->base; 2494 cqr->lpm = cqr->block->base->path_data.opm; 2495 } 2496 }; 2497 2498 static dasd_erp_fn_t 2499 dasd_eckd_erp_action(struct dasd_ccw_req * cqr) 2500 { 2501 struct dasd_device *device = (struct dasd_device *) cqr->startdev; 2502 struct ccw_device *cdev = device->cdev; 2503 2504 switch (cdev->id.cu_type) { 2505 case 0x3990: 2506 case 0x2105: 2507 case 0x2107: 2508 case 0x1750: 2509 return dasd_3990_erp_action; 2510 case 0x9343: 2511 case 0x3880: 2512 default: 2513 return dasd_default_erp_action; 
2514 } 2515 } 2516 2517 static dasd_erp_fn_t 2518 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr) 2519 { 2520 return dasd_default_erp_postaction; 2521 } 2522 2523 static void dasd_eckd_check_for_device_change(struct dasd_device *device, 2524 struct dasd_ccw_req *cqr, 2525 struct irb *irb) 2526 { 2527 char mask; 2528 char *sense = NULL; 2529 struct dasd_eckd_private *private; 2530 2531 private = (struct dasd_eckd_private *) device->private; 2532 /* first of all check for state change pending interrupt */ 2533 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 2534 if ((scsw_dstat(&irb->scsw) & mask) == mask) { 2535 /* 2536 * for alias only, not in offline processing 2537 * and only if not suspended 2538 */ 2539 if (!device->block && private->lcu && 2540 device->state == DASD_STATE_ONLINE && 2541 !test_bit(DASD_FLAG_OFFLINE, &device->flags) && 2542 !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) { 2543 /* 2544 * the state change could be caused by an alias 2545 * reassignment remove device from alias handling 2546 * to prevent new requests from being scheduled on 2547 * the wrong alias device 2548 */ 2549 dasd_alias_remove_device(device); 2550 2551 /* schedule worker to reload device */ 2552 dasd_reload_device(device); 2553 } 2554 dasd_generic_handle_state_change(device); 2555 return; 2556 } 2557 2558 sense = dasd_get_sense(irb); 2559 if (!sense) 2560 return; 2561 2562 /* summary unit check */ 2563 if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) && 2564 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) { 2565 dasd_alias_handle_summary_unit_check(device, irb); 2566 return; 2567 } 2568 2569 /* service information message SIM */ 2570 if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) && 2571 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { 2572 dasd_3990_erp_handle_sim(device, sense); 2573 return; 2574 } 2575 2576 /* loss of device reservation is handled via base devices only 2577 * as alias devices may be used with several bases 2578 */ 2579 if (device->block && (sense[27] & DASD_SENSE_BIT_0) && 2580 (sense[7] == 0x3F) && 2581 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) && 2582 test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) { 2583 if (device->features & DASD_FEATURE_FAILONSLCK) 2584 set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags); 2585 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags); 2586 dev_err(&device->cdev->dev, 2587 "The device reservation was lost\n"); 2588 } 2589 } 2590 2591 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( 2592 struct dasd_device *startdev, 2593 struct dasd_block *block, 2594 struct request *req, 2595 sector_t first_rec, 2596 sector_t last_rec, 2597 sector_t first_trk, 2598 sector_t last_trk, 2599 unsigned int first_offs, 2600 unsigned int last_offs, 2601 unsigned int blk_per_trk, 2602 unsigned int blksize) 2603 { 2604 struct dasd_eckd_private *private; 2605 unsigned long *idaws; 2606 struct LO_eckd_data *LO_data; 2607 struct dasd_ccw_req *cqr; 2608 struct ccw1 *ccw; 2609 struct req_iterator iter; 2610 struct bio_vec bv; 2611 char *dst; 2612 unsigned int off; 2613 int count, cidaw, cplength, datasize; 2614 sector_t recid; 2615 unsigned char cmd, rcmd; 2616 int use_prefix; 2617 struct dasd_device *basedev; 2618 2619 basedev = block->base; 2620 private = (struct dasd_eckd_private *) basedev->private; 2621 if (rq_data_dir(req) == READ) 2622 cmd = DASD_ECKD_CCW_READ_MT; 2623 else if (rq_data_dir(req) == WRITE) 2624 cmd = DASD_ECKD_CCW_WRITE_MT; 2625 else 2626 return ERR_PTR(-EINVAL); 2627 2628 /* Check struct bio and count the number of 
blocks for the request. */ 2629 count = 0; 2630 cidaw = 0; 2631 rq_for_each_segment(bv, req, iter) { 2632 if (bv.bv_len & (blksize - 1)) 2633 /* Eckd can only do full blocks. */ 2634 return ERR_PTR(-EINVAL); 2635 count += bv.bv_len >> (block->s2b_shift + 9); 2636 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) 2637 cidaw += bv.bv_len >> (block->s2b_shift + 9); 2638 } 2639 /* Paranoia. */ 2640 if (count != last_rec - first_rec + 1) 2641 return ERR_PTR(-EINVAL); 2642 2643 /* use the prefix command if available */ 2644 use_prefix = private->features.feature[8] & 0x01; 2645 if (use_prefix) { 2646 /* 1x prefix + number of blocks */ 2647 cplength = 2 + count; 2648 /* 1x prefix + cidaws*sizeof(long) */ 2649 datasize = sizeof(struct PFX_eckd_data) + 2650 sizeof(struct LO_eckd_data) + 2651 cidaw * sizeof(unsigned long); 2652 } else { 2653 /* 1x define extent + 1x locate record + number of blocks */ 2654 cplength = 2 + count; 2655 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */ 2656 datasize = sizeof(struct DE_eckd_data) + 2657 sizeof(struct LO_eckd_data) + 2658 cidaw * sizeof(unsigned long); 2659 } 2660 /* Find out the number of additional locate record ccws for cdl. */ 2661 if (private->uses_cdl && first_rec < 2*blk_per_trk) { 2662 if (last_rec >= 2*blk_per_trk) 2663 count = 2*blk_per_trk - first_rec; 2664 cplength += count; 2665 datasize += count*sizeof(struct LO_eckd_data); 2666 } 2667 /* Allocate the ccw request. */ 2668 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, 2669 startdev); 2670 if (IS_ERR(cqr)) 2671 return cqr; 2672 ccw = cqr->cpaddr; 2673 /* First ccw is define extent or prefix. */ 2674 if (use_prefix) { 2675 if (prefix(ccw++, cqr->data, first_trk, 2676 last_trk, cmd, basedev, startdev) == -EAGAIN) { 2677 /* Clock not in sync and XRC is enabled. 2678 * Try again later. 2679 */ 2680 dasd_sfree_request(cqr, startdev); 2681 return ERR_PTR(-EAGAIN); 2682 } 2683 idaws = (unsigned long *) (cqr->data + 2684 sizeof(struct PFX_eckd_data)); 2685 } else { 2686 if (define_extent(ccw++, cqr->data, first_trk, 2687 last_trk, cmd, basedev) == -EAGAIN) { 2688 /* Clock not in sync and XRC is enabled. 2689 * Try again later. 2690 */ 2691 dasd_sfree_request(cqr, startdev); 2692 return ERR_PTR(-EAGAIN); 2693 } 2694 idaws = (unsigned long *) (cqr->data + 2695 sizeof(struct DE_eckd_data)); 2696 } 2697 /* Build locate_record + read/write ccws. */ 2698 LO_data = (struct LO_eckd_data *) (idaws + cidaw); 2699 recid = first_rec; 2700 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) { 2701 /* Only standard blocks so there is just one locate record. */ 2702 ccw[-1].flags |= CCW_FLAG_CC; 2703 locate_record(ccw++, LO_data++, first_trk, first_offs + 1, 2704 last_rec - recid + 1, cmd, basedev, blksize); 2705 } 2706 rq_for_each_segment(bv, req, iter) { 2707 dst = page_address(bv.bv_page) + bv.bv_offset; 2708 if (dasd_page_cache) { 2709 char *copy = kmem_cache_alloc(dasd_page_cache, 2710 GFP_DMA | __GFP_NOWARN); 2711 if (copy && rq_data_dir(req) == WRITE) 2712 memcpy(copy + bv.bv_offset, dst, bv.bv_len); 2713 if (copy) 2714 dst = copy + bv.bv_offset; 2715 } 2716 for (off = 0; off < bv.bv_len; off += blksize) { 2717 sector_t trkid = recid; 2718 unsigned int recoffs = sector_div(trkid, blk_per_trk); 2719 rcmd = cmd; 2720 count = blksize; 2721 /* Locate record for cdl special block ?
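 * The first blocks of a CDL disk are key+data records that are
 * smaller than a regular block, so each of them gets its own
 * locate record and the channel command is switched to the keyed
 * variant (rcmd |= 0x8) with a reduced transfer length.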
*/ 2722 if (private->uses_cdl && recid < 2*blk_per_trk) { 2723 if (dasd_eckd_cdl_special(blk_per_trk, recid)){ 2724 rcmd |= 0x8; 2725 count = dasd_eckd_cdl_reclen(recid); 2726 if (count < blksize && 2727 rq_data_dir(req) == READ) 2728 memset(dst + count, 0xe5, 2729 blksize - count); 2730 } 2731 ccw[-1].flags |= CCW_FLAG_CC; 2732 locate_record(ccw++, LO_data++, 2733 trkid, recoffs + 1, 2734 1, rcmd, basedev, count); 2735 } 2736 /* Locate record for standard blocks ? */ 2737 if (private->uses_cdl && recid == 2*blk_per_trk) { 2738 ccw[-1].flags |= CCW_FLAG_CC; 2739 locate_record(ccw++, LO_data++, 2740 trkid, recoffs + 1, 2741 last_rec - recid + 1, 2742 cmd, basedev, count); 2743 } 2744 /* Read/write ccw. */ 2745 ccw[-1].flags |= CCW_FLAG_CC; 2746 ccw->cmd_code = rcmd; 2747 ccw->count = count; 2748 if (idal_is_needed(dst, blksize)) { 2749 ccw->cda = (__u32)(addr_t) idaws; 2750 ccw->flags = CCW_FLAG_IDA; 2751 idaws = idal_create_words(idaws, dst, blksize); 2752 } else { 2753 ccw->cda = (__u32)(addr_t) dst; 2754 ccw->flags = 0; 2755 } 2756 ccw++; 2757 dst += blksize; 2758 recid++; 2759 } 2760 } 2761 if (blk_noretry_request(req) || 2762 block->base->features & DASD_FEATURE_FAILFAST) 2763 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 2764 cqr->startdev = startdev; 2765 cqr->memdev = startdev; 2766 cqr->block = block; 2767 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 2768 cqr->lpm = startdev->path_data.ppm; 2769 cqr->retries = startdev->default_retries; 2770 cqr->buildclk = get_tod_clock(); 2771 cqr->status = DASD_CQR_FILLED; 2772 return cqr; 2773 } 2774 2775 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( 2776 struct dasd_device *startdev, 2777 struct dasd_block *block, 2778 struct request *req, 2779 sector_t first_rec, 2780 sector_t last_rec, 2781 sector_t first_trk, 2782 sector_t last_trk, 2783 unsigned int first_offs, 2784 unsigned int last_offs, 2785 unsigned int blk_per_trk, 2786 unsigned int blksize) 2787 { 2788 unsigned long *idaws; 2789 struct dasd_ccw_req *cqr; 2790 struct ccw1 *ccw; 2791 struct req_iterator iter; 2792 struct bio_vec bv; 2793 char *dst, *idaw_dst; 2794 unsigned int cidaw, cplength, datasize; 2795 unsigned int tlf; 2796 sector_t recid; 2797 unsigned char cmd; 2798 struct dasd_device *basedev; 2799 unsigned int trkcount, count, count_to_trk_end; 2800 unsigned int idaw_len, seg_len, part_len, len_to_track_end; 2801 unsigned char new_track, end_idaw; 2802 sector_t trkid; 2803 unsigned int recoffs; 2804 2805 basedev = block->base; 2806 if (rq_data_dir(req) == READ) 2807 cmd = DASD_ECKD_CCW_READ_TRACK_DATA; 2808 else if (rq_data_dir(req) == WRITE) 2809 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA; 2810 else 2811 return ERR_PTR(-EINVAL); 2812 2813 /* Track based I/O needs IDAWs for each page, and not just for 2814 * 64 bit addresses. We need additional idals for pages 2815 * that get filled from two tracks, so we use the number 2816 * of records as upper limit. 2817 */ 2818 cidaw = last_rec - first_rec + 1; 2819 trkcount = last_trk - first_trk + 1; 2820 2821 /* 1x prefix + one read/write ccw per track */ 2822 cplength = 1 + trkcount; 2823 2824 /* on 31-bit we need space for two 32 bit addresses per page 2825 * on 64-bit one 64 bit address 2826 */ 2827 datasize = sizeof(struct PFX_eckd_data) + 2828 cidaw * sizeof(unsigned long long); 2829 2830 /* Allocate the ccw request. 
*/ 2831 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, 2832 startdev); 2833 if (IS_ERR(cqr)) 2834 return cqr; 2835 ccw = cqr->cpaddr; 2836 /* transfer length factor: how many bytes to read from the last track */ 2837 if (first_trk == last_trk) 2838 tlf = last_offs - first_offs + 1; 2839 else 2840 tlf = last_offs + 1; 2841 tlf *= blksize; 2842 2843 if (prefix_LRE(ccw++, cqr->data, first_trk, 2844 last_trk, cmd, basedev, startdev, 2845 1 /* format */, first_offs + 1, 2846 trkcount, blksize, 2847 tlf) == -EAGAIN) { 2848 /* Clock not in sync and XRC is enabled. 2849 * Try again later. 2850 */ 2851 dasd_sfree_request(cqr, startdev); 2852 return ERR_PTR(-EAGAIN); 2853 } 2854 2855 /* 2856 * The translation of a request into ccw programs must meet the 2857 * following conditions: 2858 * - all idaws but the first and the last must address full pages 2859 * (or 2K blocks on 31-bit) 2860 * - the scope of a ccw and its idal ends with the track boundaries 2861 */ 2862 idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data)); 2863 recid = first_rec; 2864 new_track = 1; 2865 end_idaw = 0; 2866 len_to_track_end = 0; 2867 idaw_dst = NULL; 2868 idaw_len = 0; 2869 rq_for_each_segment(bv, req, iter) { 2870 dst = page_address(bv.bv_page) + bv.bv_offset; 2871 seg_len = bv.bv_len; 2872 while (seg_len) { 2873 if (new_track) { 2874 trkid = recid; 2875 recoffs = sector_div(trkid, blk_per_trk); 2876 count_to_trk_end = blk_per_trk - recoffs; 2877 count = min((last_rec - recid + 1), 2878 (sector_t)count_to_trk_end); 2879 len_to_track_end = count * blksize; 2880 ccw[-1].flags |= CCW_FLAG_CC; 2881 ccw->cmd_code = cmd; 2882 ccw->count = len_to_track_end; 2883 ccw->cda = (__u32)(addr_t)idaws; 2884 ccw->flags = CCW_FLAG_IDA; 2885 ccw++; 2886 recid += count; 2887 new_track = 0; 2888 /* first idaw for a ccw may start anywhere */ 2889 if (!idaw_dst) 2890 idaw_dst = dst; 2891 } 2892 /* If we start a new idaw, we must make sure that it 2893 * starts on an IDA_BLOCK_SIZE boundary.
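 * (The first idaw of a ccw may start anywhere, see above; for any
 * later idaw an unaligned start aborts the build with -ERANGE.)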
2894 * If we continue an idaw, we must make sure that the 2895 * current segment begins where the idaw accumulated so 2896 * far ends. 2897 */ 2898 if (!idaw_dst) { 2899 if (__pa(dst) & (IDA_BLOCK_SIZE-1)) { 2900 dasd_sfree_request(cqr, startdev); 2901 return ERR_PTR(-ERANGE); 2902 } else 2903 idaw_dst = dst; 2904 } 2905 if ((idaw_dst + idaw_len) != dst) { 2906 dasd_sfree_request(cqr, startdev); 2907 return ERR_PTR(-ERANGE); 2908 } 2909 part_len = min(seg_len, len_to_track_end); 2910 seg_len -= part_len; 2911 dst += part_len; 2912 idaw_len += part_len; 2913 len_to_track_end -= part_len; 2914 /* collected memory area ends on an IDA_BLOCK border, 2915 * -> create an idaw 2916 * idal_create_words will handle cases where idaw_len 2917 * is larger than IDA_BLOCK_SIZE 2918 */ 2919 if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1))) 2920 end_idaw = 1; 2921 /* We also need to end the idaw at track end */ 2922 if (!len_to_track_end) { 2923 new_track = 1; 2924 end_idaw = 1; 2925 } 2926 if (end_idaw) { 2927 idaws = idal_create_words(idaws, idaw_dst, 2928 idaw_len); 2929 idaw_dst = NULL; 2930 idaw_len = 0; 2931 end_idaw = 0; 2932 } 2933 } 2934 } 2935 2936 if (blk_noretry_request(req) || 2937 block->base->features & DASD_FEATURE_FAILFAST) 2938 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 2939 cqr->startdev = startdev; 2940 cqr->memdev = startdev; 2941 cqr->block = block; 2942 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 2943 cqr->lpm = startdev->path_data.ppm; 2944 cqr->retries = startdev->default_retries; 2945 cqr->buildclk = get_tod_clock(); 2946 cqr->status = DASD_CQR_FILLED; 2947 return cqr; 2948 } 2949 2950 static int prepare_itcw(struct itcw *itcw, 2951 unsigned int trk, unsigned int totrk, int cmd, 2952 struct dasd_device *basedev, 2953 struct dasd_device *startdev, 2954 unsigned int rec_on_trk, int count, 2955 unsigned int blksize, 2956 unsigned int total_data_size, 2957 unsigned int tlf, 2958 unsigned int blk_per_trk) 2959 { 2960 struct PFX_eckd_data pfxdata; 2961 struct dasd_eckd_private *basepriv, *startpriv; 2962 struct DE_eckd_data *dedata; 2963 struct LRE_eckd_data *lredata; 2964 struct dcw *dcw; 2965 2966 u32 begcyl, endcyl; 2967 u16 heads, beghead, endhead; 2968 u8 pfx_cmd; 2969 2970 int rc = 0; 2971 int sector = 0; 2972 int dn, d; 2973 2974 2975 /* setup prefix data */ 2976 basepriv = (struct dasd_eckd_private *) basedev->private; 2977 startpriv = (struct dasd_eckd_private *) startdev->private; 2978 dedata = &pfxdata.define_extent; 2979 lredata = &pfxdata.locate_record; 2980 2981 memset(&pfxdata, 0, sizeof(pfxdata)); 2982 pfxdata.format = 1; /* PFX with LRE */ 2983 pfxdata.base_address = basepriv->ned->unit_addr; 2984 pfxdata.base_lss = basepriv->ned->ID; 2985 pfxdata.validity.define_extent = 1; 2986 2987 /* private uid is kept up to date, conf_data may be outdated */ 2988 if (startpriv->uid.type != UA_BASE_DEVICE) { 2989 pfxdata.validity.verify_base = 1; 2990 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) 2991 pfxdata.validity.hyper_pav = 1; 2992 } 2993 2994 switch (cmd) { 2995 case DASD_ECKD_CCW_READ_TRACK_DATA: 2996 dedata->mask.perm = 0x1; 2997 dedata->attributes.operation = basepriv->attrib.operation; 2998 dedata->blk_size = blksize; 2999 dedata->ga_extended |= 0x42; 3000 lredata->operation.orientation = 0x0; 3001 lredata->operation.operation = 0x0C; 3002 lredata->auxiliary.check_bytes = 0x01; 3003 pfx_cmd = DASD_ECKD_CCW_PFX_READ; 3004 break; 3005 case DASD_ECKD_CCW_WRITE_TRACK_DATA: 3006 dedata->mask.perm = 0x02; 3007 dedata->attributes.operation =
basepriv->attrib.operation; 3008 dedata->blk_size = blksize; 3009 rc = check_XRC_on_prefix(&pfxdata, basedev); 3010 dedata->ga_extended |= 0x42; 3011 lredata->operation.orientation = 0x0; 3012 lredata->operation.operation = 0x3F; 3013 lredata->extended_operation = 0x23; 3014 lredata->auxiliary.check_bytes = 0x2; 3015 pfx_cmd = DASD_ECKD_CCW_PFX; 3016 break; 3017 default: 3018 DBF_DEV_EVENT(DBF_ERR, basedev, 3019 "prepare itcw, unknown opcode 0x%x", cmd); 3020 BUG(); 3021 break; 3022 } 3023 if (rc) 3024 return rc; 3025 3026 dedata->attributes.mode = 0x3; /* ECKD */ 3027 3028 heads = basepriv->rdc_data.trk_per_cyl; 3029 begcyl = trk / heads; 3030 beghead = trk % heads; 3031 endcyl = totrk / heads; 3032 endhead = totrk % heads; 3033 3034 /* check for sequential prestage - enhance cylinder range */ 3035 if (dedata->attributes.operation == DASD_SEQ_PRESTAGE || 3036 dedata->attributes.operation == DASD_SEQ_ACCESS) { 3037 3038 if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl) 3039 endcyl += basepriv->attrib.nr_cyl; 3040 else 3041 endcyl = (basepriv->real_cyl - 1); 3042 } 3043 3044 set_ch_t(&dedata->beg_ext, begcyl, beghead); 3045 set_ch_t(&dedata->end_ext, endcyl, endhead); 3046 3047 dedata->ep_format = 0x20; /* records per track is valid */ 3048 dedata->ep_rec_per_track = blk_per_trk; 3049 3050 if (rec_on_trk) { 3051 switch (basepriv->rdc_data.dev_type) { 3052 case 0x3390: 3053 dn = ceil_quot(blksize + 6, 232); 3054 d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34); 3055 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8; 3056 break; 3057 case 0x3380: 3058 d = 7 + ceil_quot(blksize + 12, 32); 3059 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7; 3060 break; 3061 } 3062 } 3063 3064 lredata->auxiliary.length_valid = 1; 3065 lredata->auxiliary.length_scope = 1; 3066 lredata->auxiliary.imbedded_ccw_valid = 1; 3067 lredata->length = tlf; 3068 lredata->imbedded_ccw = cmd; 3069 lredata->count = count; 3070 lredata->sector = sector; 3071 set_ch_t(&lredata->seek_addr, begcyl, beghead); 3072 lredata->search_arg.cyl = lredata->seek_addr.cyl; 3073 lredata->search_arg.head = lredata->seek_addr.head; 3074 lredata->search_arg.record = rec_on_trk; 3075 3076 dcw = itcw_add_dcw(itcw, pfx_cmd, 0, 3077 &pfxdata, sizeof(pfxdata), total_data_size); 3078 return PTR_RET(dcw); 3079 } 3080 3081 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( 3082 struct dasd_device *startdev, 3083 struct dasd_block *block, 3084 struct request *req, 3085 sector_t first_rec, 3086 sector_t last_rec, 3087 sector_t first_trk, 3088 sector_t last_trk, 3089 unsigned int first_offs, 3090 unsigned int last_offs, 3091 unsigned int blk_per_trk, 3092 unsigned int blksize) 3093 { 3094 struct dasd_ccw_req *cqr; 3095 struct req_iterator iter; 3096 struct bio_vec bv; 3097 char *dst; 3098 unsigned int trkcount, ctidaw; 3099 unsigned char cmd; 3100 struct dasd_device *basedev; 3101 unsigned int tlf; 3102 struct itcw *itcw; 3103 struct tidaw *last_tidaw = NULL; 3104 int itcw_op; 3105 size_t itcw_size; 3106 u8 tidaw_flags; 3107 unsigned int seg_len, part_len, len_to_track_end; 3108 unsigned char new_track; 3109 sector_t recid, trkid; 3110 unsigned int offs; 3111 unsigned int count, count_to_trk_end; 3112 int ret; 3113 3114 basedev = block->base; 3115 if (rq_data_dir(req) == READ) { 3116 cmd = DASD_ECKD_CCW_READ_TRACK_DATA; 3117 itcw_op = ITCW_OP_READ; 3118 } else if (rq_data_dir(req) == WRITE) { 3119 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA; 3120 itcw_op = ITCW_OP_WRITE; 3121 } else 3122 return ERR_PTR(-EINVAL); 3123 3124 /* trackbased I/O needs 
to address all memory via TIDAWs, 3125 * not just for 64 bit addresses. This allows us to map 3126 * each segment directly to one tidaw. 3127 * In the case of write requests, additional tidaws may 3128 * be needed when a segment crosses a track boundary. 3129 */ 3130 trkcount = last_trk - first_trk + 1; 3131 ctidaw = 0; 3132 rq_for_each_segment(bv, req, iter) { 3133 ++ctidaw; 3134 } 3135 if (rq_data_dir(req) == WRITE) 3136 ctidaw += (last_trk - first_trk); 3137 3138 /* Allocate the ccw request. */ 3139 itcw_size = itcw_calc_size(0, ctidaw, 0); 3140 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev); 3141 if (IS_ERR(cqr)) 3142 return cqr; 3143 3144 /* transfer length factor: how many bytes to read from the last track */ 3145 if (first_trk == last_trk) 3146 tlf = last_offs - first_offs + 1; 3147 else 3148 tlf = last_offs + 1; 3149 tlf *= blksize; 3150 3151 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0); 3152 if (IS_ERR(itcw)) { 3153 ret = -EINVAL; 3154 goto out_error; 3155 } 3156 cqr->cpaddr = itcw_get_tcw(itcw); 3157 if (prepare_itcw(itcw, first_trk, last_trk, 3158 cmd, basedev, startdev, 3159 first_offs + 1, 3160 trkcount, blksize, 3161 (last_rec - first_rec + 1) * blksize, 3162 tlf, blk_per_trk) == -EAGAIN) { 3163 /* Clock not in sync and XRC is enabled. 3164 * Try again later. 3165 */ 3166 ret = -EAGAIN; 3167 goto out_error; 3168 } 3169 len_to_track_end = 0; 3170 /* 3171 * A tidaw can address 4k of memory, but must not cross page boundaries. 3172 * We can let the block layer handle this by setting 3173 * blk_queue_segment_boundary to page boundaries and 3174 * blk_max_segment_size to page size when setting up the request queue. 3175 * For write requests, a TIDAW must not cross track boundaries, because 3176 * we have to set the CBC flag on the last tidaw for each track.
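 * This is also why ctidaw above reserves one additional tidaw per
 * crossed track boundary for write requests.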
3177 */ 3178 if (rq_data_dir(req) == WRITE) { 3179 new_track = 1; 3180 recid = first_rec; 3181 rq_for_each_segment(bv, req, iter) { 3182 dst = page_address(bv.bv_page) + bv.bv_offset; 3183 seg_len = bv.bv_len; 3184 while (seg_len) { 3185 if (new_track) { 3186 trkid = recid; 3187 offs = sector_div(trkid, blk_per_trk); 3188 count_to_trk_end = blk_per_trk - offs; 3189 count = min((last_rec - recid + 1), 3190 (sector_t)count_to_trk_end); 3191 len_to_track_end = count * blksize; 3192 recid += count; 3193 new_track = 0; 3194 } 3195 part_len = min(seg_len, len_to_track_end); 3196 seg_len -= part_len; 3197 len_to_track_end -= part_len; 3198 /* We need to end the tidaw at track end */ 3199 if (!len_to_track_end) { 3200 new_track = 1; 3201 tidaw_flags = TIDAW_FLAGS_INSERT_CBC; 3202 } else 3203 tidaw_flags = 0; 3204 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags, 3205 dst, part_len); 3206 if (IS_ERR(last_tidaw)) { 3207 ret = -EINVAL; 3208 goto out_error; 3209 } 3210 dst += part_len; 3211 } 3212 } 3213 } else { 3214 rq_for_each_segment(bv, req, iter) { 3215 dst = page_address(bv.bv_page) + bv.bv_offset; 3216 last_tidaw = itcw_add_tidaw(itcw, 0x00, 3217 dst, bv.bv_len); 3218 if (IS_ERR(last_tidaw)) { 3219 ret = -EINVAL; 3220 goto out_error; 3221 } 3222 } 3223 } 3224 last_tidaw->flags |= TIDAW_FLAGS_LAST; 3225 last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC; 3226 itcw_finalize(itcw); 3227 3228 if (blk_noretry_request(req) || 3229 block->base->features & DASD_FEATURE_FAILFAST) 3230 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3231 cqr->cpmode = 1; 3232 cqr->startdev = startdev; 3233 cqr->memdev = startdev; 3234 cqr->block = block; 3235 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 3236 cqr->lpm = startdev->path_data.ppm; 3237 cqr->retries = startdev->default_retries; 3238 cqr->buildclk = get_tod_clock(); 3239 cqr->status = DASD_CQR_FILLED; 3240 return cqr; 3241 out_error: 3242 dasd_sfree_request(cqr, startdev); 3243 return ERR_PTR(ret); 3244 } 3245 3246 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, 3247 struct dasd_block *block, 3248 struct request *req) 3249 { 3250 int cmdrtd, cmdwtd; 3251 int use_prefix; 3252 int fcx_multitrack; 3253 struct dasd_eckd_private *private; 3254 struct dasd_device *basedev; 3255 sector_t first_rec, last_rec; 3256 sector_t first_trk, last_trk; 3257 unsigned int first_offs, last_offs; 3258 unsigned int blk_per_trk, blksize; 3259 int cdlspecial; 3260 unsigned int data_size; 3261 struct dasd_ccw_req *cqr; 3262 3263 basedev = block->base; 3264 private = (struct dasd_eckd_private *) basedev->private; 3265 3266 /* Calculate number of blocks/records per track. */ 3267 blksize = block->bp_block; 3268 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 3269 if (blk_per_trk == 0) 3270 return ERR_PTR(-EINVAL); 3271 /* Calculate record id of first and last block. 
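 * Example: with 4k blocks (s2b_shift = 3) and 12 blocks per track
 * on a 3390, a request starting at 512 byte sector 240 begins at
 * record 30, i.e. block offset 6 on track 2.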
*/ 3272 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift; 3273 first_offs = sector_div(first_trk, blk_per_trk); 3274 last_rec = last_trk = 3275 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; 3276 last_offs = sector_div(last_trk, blk_per_trk); 3277 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk); 3278 3279 fcx_multitrack = private->features.feature[40] & 0x20; 3280 data_size = blk_rq_bytes(req); 3281 if (data_size % blksize) 3282 return ERR_PTR(-EINVAL); 3283 /* tpm write requests add CBC data on each track boundary */ 3284 if (rq_data_dir(req) == WRITE) 3285 data_size += (last_trk - first_trk) * 4; 3286 3287 /* is read track data and write track data in command mode supported? */ 3288 cmdrtd = private->features.feature[9] & 0x20; 3289 cmdwtd = private->features.feature[12] & 0x40; 3290 use_prefix = private->features.feature[8] & 0x01; 3291 3292 cqr = NULL; 3293 if (cdlspecial || dasd_page_cache) { 3294 /* do nothing, just fall through to the cmd mode single case */ 3295 } else if ((data_size <= private->fcx_max_data) 3296 && (fcx_multitrack || (first_trk == last_trk))) { 3297 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req, 3298 first_rec, last_rec, 3299 first_trk, last_trk, 3300 first_offs, last_offs, 3301 blk_per_trk, blksize); 3302 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) && 3303 (PTR_ERR(cqr) != -ENOMEM)) 3304 cqr = NULL; 3305 } else if (use_prefix && 3306 (((rq_data_dir(req) == READ) && cmdrtd) || 3307 ((rq_data_dir(req) == WRITE) && cmdwtd))) { 3308 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req, 3309 first_rec, last_rec, 3310 first_trk, last_trk, 3311 first_offs, last_offs, 3312 blk_per_trk, blksize); 3313 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) && 3314 (PTR_ERR(cqr) != -ENOMEM)) 3315 cqr = NULL; 3316 } 3317 if (!cqr) 3318 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req, 3319 first_rec, last_rec, 3320 first_trk, last_trk, 3321 first_offs, last_offs, 3322 blk_per_trk, blksize); 3323 return cqr; 3324 } 3325 3326 static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev, 3327 struct dasd_block *block, 3328 struct request *req) 3329 { 3330 unsigned long *idaws; 3331 struct dasd_device *basedev; 3332 struct dasd_ccw_req *cqr; 3333 struct ccw1 *ccw; 3334 struct req_iterator iter; 3335 struct bio_vec bv; 3336 char *dst; 3337 unsigned char cmd; 3338 unsigned int trkcount; 3339 unsigned int seg_len, len_to_track_end; 3340 unsigned int first_offs; 3341 unsigned int cidaw, cplength, datasize; 3342 sector_t first_trk, last_trk, sectors; 3343 sector_t start_padding_sectors, end_sector_offset, end_padding_sectors; 3344 unsigned int pfx_datasize; 3345 3346 /* 3347 * raw track access needs to be a multiple of 64k and on a 64k boundary. 3348 * For read requests we can fix an incorrect alignment by padding 3349 * the request with dummy pages.
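 * Example: a 200 sector read starting at sector 100 touches tracks
 * 0 to 2 and is padded with 100 leading and 84 trailing dummy
 * sectors so that complete tracks (3 * 128 sectors) are read.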
3350 */ 3351 start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK; 3352 end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) % 3353 DASD_RAW_SECTORS_PER_TRACK; 3354 end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) % 3355 DASD_RAW_SECTORS_PER_TRACK; 3356 basedev = block->base; 3357 if ((start_padding_sectors || end_padding_sectors) && 3358 (rq_data_dir(req) == WRITE)) { 3359 DBF_DEV_EVENT(DBF_ERR, basedev, 3360 "raw write not track aligned (%lu,%lu) req %p", 3361 start_padding_sectors, end_padding_sectors, req); 3362 cqr = ERR_PTR(-EINVAL); 3363 goto out; 3364 } 3365 3366 first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK; 3367 last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) / 3368 DASD_RAW_SECTORS_PER_TRACK; 3369 trkcount = last_trk - first_trk + 1; 3370 first_offs = 0; 3371 3372 if (rq_data_dir(req) == READ) 3373 cmd = DASD_ECKD_CCW_READ_TRACK; 3374 else if (rq_data_dir(req) == WRITE) 3375 cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK; 3376 else { 3377 cqr = ERR_PTR(-EINVAL); 3378 goto out; 3379 } 3380 3381 /* 3382 * Raw track based I/O needs IDAWs for each page, 3383 * and not just for 64 bit addresses. 3384 */ 3385 cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK; 3386 3387 /* 1x prefix + one read/write ccw per track */ 3388 cplength = 1 + trkcount; 3389 3390 /* 3391 * struct PFX_eckd_data has up to 2 byte as extended parameter 3392 * this is needed for write full track and has to be mentioned 3393 * separately 3394 * add 8 instead of 2 to keep 8 byte boundary 3395 */ 3396 pfx_datasize = sizeof(struct PFX_eckd_data) + 8; 3397 3398 datasize = pfx_datasize + cidaw * sizeof(unsigned long long); 3399 3400 /* Allocate the ccw request. */ 3401 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, 3402 datasize, startdev); 3403 if (IS_ERR(cqr)) 3404 goto out; 3405 ccw = cqr->cpaddr; 3406 3407 if (prefix_LRE(ccw++, cqr->data, first_trk, last_trk, cmd, 3408 basedev, startdev, 1 /* format */, first_offs + 1, 3409 trkcount, 0, 0) == -EAGAIN) { 3410 /* Clock not in sync and XRC is enabled. 3411 * Try again later. 
3412 */ 3413 dasd_sfree_request(cqr, startdev); 3414 cqr = ERR_PTR(-EAGAIN); 3415 goto out; 3416 } 3417 3418 idaws = (unsigned long *)(cqr->data + pfx_datasize); 3419 len_to_track_end = 0; 3420 if (start_padding_sectors) { 3421 ccw[-1].flags |= CCW_FLAG_CC; 3422 ccw->cmd_code = cmd; 3423 /* maximum 3390 track size */ 3424 ccw->count = 57326; 3425 /* 64k map to one track */ 3426 len_to_track_end = 65536 - start_padding_sectors * 512; 3427 ccw->cda = (__u32)(addr_t)idaws; 3428 ccw->flags |= CCW_FLAG_IDA; 3429 ccw->flags |= CCW_FLAG_SLI; 3430 ccw++; 3431 for (sectors = 0; sectors < start_padding_sectors; sectors += 8) 3432 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE); 3433 } 3434 rq_for_each_segment(bv, req, iter) { 3435 dst = page_address(bv.bv_page) + bv.bv_offset; 3436 seg_len = bv.bv_len; 3437 if (cmd == DASD_ECKD_CCW_READ_TRACK) 3438 memset(dst, 0, seg_len); 3439 if (!len_to_track_end) { 3440 ccw[-1].flags |= CCW_FLAG_CC; 3441 ccw->cmd_code = cmd; 3442 /* maximum 3390 track size */ 3443 ccw->count = 57326; 3444 /* 64k map to one track */ 3445 len_to_track_end = 65536; 3446 ccw->cda = (__u32)(addr_t)idaws; 3447 ccw->flags |= CCW_FLAG_IDA; 3448 ccw->flags |= CCW_FLAG_SLI; 3449 ccw++; 3450 } 3451 len_to_track_end -= seg_len; 3452 idaws = idal_create_words(idaws, dst, seg_len); 3453 } 3454 for (sectors = 0; sectors < end_padding_sectors; sectors += 8) 3455 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE); 3456 if (blk_noretry_request(req) || 3457 block->base->features & DASD_FEATURE_FAILFAST) 3458 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3459 cqr->startdev = startdev; 3460 cqr->memdev = startdev; 3461 cqr->block = block; 3462 cqr->expires = startdev->default_expires * HZ; 3463 cqr->lpm = startdev->path_data.ppm; 3464 cqr->retries = startdev->default_retries; 3465 cqr->buildclk = get_tod_clock(); 3466 cqr->status = DASD_CQR_FILLED; 3467 3468 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN) 3469 cqr = NULL; 3470 out: 3471 return cqr; 3472 } 3473 3474 3475 static int 3476 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) 3477 { 3478 struct dasd_eckd_private *private; 3479 struct ccw1 *ccw; 3480 struct req_iterator iter; 3481 struct bio_vec bv; 3482 char *dst, *cda; 3483 unsigned int blksize, blk_per_trk, off; 3484 sector_t recid; 3485 int status; 3486 3487 if (!dasd_page_cache) 3488 goto out; 3489 private = (struct dasd_eckd_private *) cqr->block->base->private; 3490 blksize = cqr->block->bp_block; 3491 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 3492 recid = blk_rq_pos(req) >> cqr->block->s2b_shift; 3493 ccw = cqr->cpaddr; 3494 /* Skip over define extent & locate record. */ 3495 ccw++; 3496 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) 3497 ccw++; 3498 rq_for_each_segment(bv, req, iter) { 3499 dst = page_address(bv.bv_page) + bv.bv_offset; 3500 for (off = 0; off < bv.bv_len; off += blksize) { 3501 /* Skip locate record. 
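 * Each block on the first two tracks of a CDL disk was given its
 * own locate record ccw in dasd_eckd_build_cp_cmd_single(), so an
 * extra ccw has to be skipped per block here.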
*/ 3502 if (private->uses_cdl && recid <= 2*blk_per_trk) 3503 ccw++; 3504 if (dst) { 3505 if (ccw->flags & CCW_FLAG_IDA) 3506 cda = *((char **)((addr_t) ccw->cda)); 3507 else 3508 cda = (char *)((addr_t) ccw->cda); 3509 if (dst != cda) { 3510 if (rq_data_dir(req) == READ) 3511 memcpy(dst, cda, bv.bv_len); 3512 kmem_cache_free(dasd_page_cache, 3513 (void *)((addr_t)cda & PAGE_MASK)); 3514 } 3515 dst = NULL; 3516 } 3517 ccw++; 3518 recid++; 3519 } 3520 } 3521 out: 3522 status = cqr->status == DASD_CQR_DONE; 3523 dasd_sfree_request(cqr, cqr->memdev); 3524 return status; 3525 } 3526 3527 /* 3528 * Modify ccw/tcw in cqr so it can be started on a base device. 3529 * 3530 * Note that this is not enough to restart the cqr! 3531 * Either reset cqr->startdev as well (summary unit check handling) 3532 * or restart via separate cqr (as in ERP handling). 3533 */ 3534 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr) 3535 { 3536 struct ccw1 *ccw; 3537 struct PFX_eckd_data *pfxdata; 3538 struct tcw *tcw; 3539 struct tccb *tccb; 3540 struct dcw *dcw; 3541 3542 if (cqr->cpmode == 1) { 3543 tcw = cqr->cpaddr; 3544 tccb = tcw_get_tccb(tcw); 3545 dcw = (struct dcw *)&tccb->tca[0]; 3546 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0]; 3547 pfxdata->validity.verify_base = 0; 3548 pfxdata->validity.hyper_pav = 0; 3549 } else { 3550 ccw = cqr->cpaddr; 3551 pfxdata = cqr->data; 3552 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) { 3553 pfxdata->validity.verify_base = 0; 3554 pfxdata->validity.hyper_pav = 0; 3555 } 3556 } 3557 } 3558 3559 #define DASD_ECKD_CHANQ_MAX_SIZE 4 3560 3561 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base, 3562 struct dasd_block *block, 3563 struct request *req) 3564 { 3565 struct dasd_eckd_private *private; 3566 struct dasd_device *startdev; 3567 unsigned long flags; 3568 struct dasd_ccw_req *cqr; 3569 3570 startdev = dasd_alias_get_start_dev(base); 3571 if (!startdev) 3572 startdev = base; 3573 private = (struct dasd_eckd_private *) startdev->private; 3574 if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE) 3575 return ERR_PTR(-EBUSY); 3576 3577 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags); 3578 private->count++; 3579 if ((base->features & DASD_FEATURE_USERAW)) 3580 cqr = dasd_raw_build_cp(startdev, block, req); 3581 else 3582 cqr = dasd_eckd_build_cp(startdev, block, req); 3583 if (IS_ERR(cqr)) 3584 private->count--; 3585 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags); 3586 return cqr; 3587 } 3588 3589 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr, 3590 struct request *req) 3591 { 3592 struct dasd_eckd_private *private; 3593 unsigned long flags; 3594 3595 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags); 3596 private = (struct dasd_eckd_private *) cqr->memdev->private; 3597 private->count--; 3598 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags); 3599 return dasd_eckd_free_cp(cqr, req); 3600 } 3601 3602 static int 3603 dasd_eckd_fill_info(struct dasd_device * device, 3604 struct dasd_information2_t * info) 3605 { 3606 struct dasd_eckd_private *private; 3607 3608 private = (struct dasd_eckd_private *) device->private; 3609 info->label_block = 2; 3610 info->FBA_layout = private->uses_cdl ? 0 : 1; 3611 info->format = private->uses_cdl ? 
DASD_FORMAT_CDL : DASD_FORMAT_LDL; 3612 info->characteristics_size = sizeof(struct dasd_eckd_characteristics); 3613 memcpy(info->characteristics, &private->rdc_data, 3614 sizeof(struct dasd_eckd_characteristics)); 3615 info->confdata_size = min((unsigned long)private->conf_len, 3616 sizeof(info->configuration_data)); 3617 memcpy(info->configuration_data, private->conf_data, 3618 info->confdata_size); 3619 return 0; 3620 } 3621 3622 /* 3623 * SECTION: ioctl functions for eckd devices. 3624 */ 3625 3626 /* 3627 * Release device ioctl. 3628 * Builds a channel program to release a device that was previously 3629 * reserved (see dasd_eckd_reserve). 3630 */ 3631 static int 3632 dasd_eckd_release(struct dasd_device *device) 3633 { 3634 struct dasd_ccw_req *cqr; 3635 int rc; 3636 struct ccw1 *ccw; 3637 int useglobal; 3638 3639 if (!capable(CAP_SYS_ADMIN)) 3640 return -EACCES; 3641 3642 useglobal = 0; 3643 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); 3644 if (IS_ERR(cqr)) { 3645 mutex_lock(&dasd_reserve_mutex); 3646 useglobal = 1; 3647 cqr = &dasd_reserve_req->cqr; 3648 memset(cqr, 0, sizeof(*cqr)); 3649 memset(&dasd_reserve_req->ccw, 0, 3650 sizeof(dasd_reserve_req->ccw)); 3651 cqr->cpaddr = &dasd_reserve_req->ccw; 3652 cqr->data = &dasd_reserve_req->data; 3653 cqr->magic = DASD_ECKD_MAGIC; 3654 } 3655 ccw = cqr->cpaddr; 3656 ccw->cmd_code = DASD_ECKD_CCW_RELEASE; 3657 ccw->flags |= CCW_FLAG_SLI; 3658 ccw->count = 32; 3659 ccw->cda = (__u32)(addr_t) cqr->data; 3660 cqr->startdev = device; 3661 cqr->memdev = device; 3662 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 3663 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3664 cqr->retries = 2; /* set retry counter to enable basic ERP */ 3665 cqr->expires = 2 * HZ; 3666 cqr->buildclk = get_tod_clock(); 3667 cqr->status = DASD_CQR_FILLED; 3668 3669 rc = dasd_sleep_on_immediatly(cqr); 3670 if (!rc) 3671 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags); 3672 3673 if (useglobal) 3674 mutex_unlock(&dasd_reserve_mutex); 3675 else 3676 dasd_sfree_request(cqr, cqr->memdev); 3677 return rc; 3678 } 3679 3680 /* 3681 * Reserve device ioctl. 3682 * Options are set to 'synchronous wait for interrupt' and 3683 * 'timeout the request'. This leads to a terminate IO if 3684 * the interrupt is outstanding for a certain time.
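 * If memory is tight, the preallocated dasd_reserve_req (serialized
 * by dasd_reserve_mutex) is used instead of a freshly allocated
 * request, so the reserve still works while I/O is blocked.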
3685 */ 3686 static int 3687 dasd_eckd_reserve(struct dasd_device *device) 3688 { 3689 struct dasd_ccw_req *cqr; 3690 int rc; 3691 struct ccw1 *ccw; 3692 int useglobal; 3693 3694 if (!capable(CAP_SYS_ADMIN)) 3695 return -EACCES; 3696 3697 useglobal = 0; 3698 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); 3699 if (IS_ERR(cqr)) { 3700 mutex_lock(&dasd_reserve_mutex); 3701 useglobal = 1; 3702 cqr = &dasd_reserve_req->cqr; 3703 memset(cqr, 0, sizeof(*cqr)); 3704 memset(&dasd_reserve_req->ccw, 0, 3705 sizeof(dasd_reserve_req->ccw)); 3706 cqr->cpaddr = &dasd_reserve_req->ccw; 3707 cqr->data = &dasd_reserve_req->data; 3708 cqr->magic = DASD_ECKD_MAGIC; 3709 } 3710 ccw = cqr->cpaddr; 3711 ccw->cmd_code = DASD_ECKD_CCW_RESERVE; 3712 ccw->flags |= CCW_FLAG_SLI; 3713 ccw->count = 32; 3714 ccw->cda = (__u32)(addr_t) cqr->data; 3715 cqr->startdev = device; 3716 cqr->memdev = device; 3717 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 3718 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3719 cqr->retries = 2; /* set retry counter to enable basic ERP */ 3720 cqr->expires = 2 * HZ; 3721 cqr->buildclk = get_tod_clock(); 3722 cqr->status = DASD_CQR_FILLED; 3723 3724 rc = dasd_sleep_on_immediatly(cqr); 3725 if (!rc) 3726 set_bit(DASD_FLAG_IS_RESERVED, &device->flags); 3727 3728 if (useglobal) 3729 mutex_unlock(&dasd_reserve_mutex); 3730 else 3731 dasd_sfree_request(cqr, cqr->memdev); 3732 return rc; 3733 } 3734 3735 /* 3736 * Steal lock ioctl - unconditional reserve device. 3737 * Builds a channel program to break a device's reservation 3738 * (unconditional reserve). 3739 */ 3740 static int 3741 dasd_eckd_steal_lock(struct dasd_device *device) 3742 { 3743 struct dasd_ccw_req *cqr; 3744 int rc; 3745 struct ccw1 *ccw; 3746 int useglobal; 3747 3748 if (!capable(CAP_SYS_ADMIN)) 3749 return -EACCES; 3750 3751 useglobal = 0; 3752 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); 3753 if (IS_ERR(cqr)) { 3754 mutex_lock(&dasd_reserve_mutex); 3755 useglobal = 1; 3756 cqr = &dasd_reserve_req->cqr; 3757 memset(cqr, 0, sizeof(*cqr)); 3758 memset(&dasd_reserve_req->ccw, 0, 3759 sizeof(dasd_reserve_req->ccw)); 3760 cqr->cpaddr = &dasd_reserve_req->ccw; 3761 cqr->data = &dasd_reserve_req->data; 3762 cqr->magic = DASD_ECKD_MAGIC; 3763 } 3764 ccw = cqr->cpaddr; 3765 ccw->cmd_code = DASD_ECKD_CCW_SLCK; 3766 ccw->flags |= CCW_FLAG_SLI; 3767 ccw->count = 32; 3768 ccw->cda = (__u32)(addr_t) cqr->data; 3769 cqr->startdev = device; 3770 cqr->memdev = device; 3771 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 3772 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3773 cqr->retries = 2; /* set retry counter to enable basic ERP */ 3774 cqr->expires = 2 * HZ; 3775 cqr->buildclk = get_tod_clock(); 3776 cqr->status = DASD_CQR_FILLED; 3777 3778 rc = dasd_sleep_on_immediatly(cqr); 3779 if (!rc) 3780 set_bit(DASD_FLAG_IS_RESERVED, &device->flags); 3781 3782 if (useglobal) 3783 mutex_unlock(&dasd_reserve_mutex); 3784 else 3785 dasd_sfree_request(cqr, cqr->memdev); 3786 return rc; 3787 } 3788 3789 /* 3790 * SNID - Sense Path Group ID 3791 * This ioctl may be used in situations where I/O is stalled due to 3792 * a reserve, so if the normal dasd_smalloc_request fails, we use the 3793 * preallocated dasd_reserve_req.
3794 */ 3795 static int dasd_eckd_snid(struct dasd_device *device, 3796 void __user *argp) 3797 { 3798 struct dasd_ccw_req *cqr; 3799 int rc; 3800 struct ccw1 *ccw; 3801 int useglobal; 3802 struct dasd_snid_ioctl_data usrparm; 3803 3804 if (!capable(CAP_SYS_ADMIN)) 3805 return -EACCES; 3806 3807 if (copy_from_user(&usrparm, argp, sizeof(usrparm))) 3808 return -EFAULT; 3809 3810 useglobal = 0; 3811 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 3812 sizeof(struct dasd_snid_data), device); 3813 if (IS_ERR(cqr)) { 3814 mutex_lock(&dasd_reserve_mutex); 3815 useglobal = 1; 3816 cqr = &dasd_reserve_req->cqr; 3817 memset(cqr, 0, sizeof(*cqr)); 3818 memset(&dasd_reserve_req->ccw, 0, 3819 sizeof(dasd_reserve_req->ccw)); 3820 cqr->cpaddr = &dasd_reserve_req->ccw; 3821 cqr->data = &dasd_reserve_req->data; 3822 cqr->magic = DASD_ECKD_MAGIC; 3823 } 3824 ccw = cqr->cpaddr; 3825 ccw->cmd_code = DASD_ECKD_CCW_SNID; 3826 ccw->flags |= CCW_FLAG_SLI; 3827 ccw->count = 12; 3828 ccw->cda = (__u32)(addr_t) cqr->data; 3829 cqr->startdev = device; 3830 cqr->memdev = device; 3831 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 3832 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3833 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags); 3834 cqr->retries = 5; 3835 cqr->expires = 10 * HZ; 3836 cqr->buildclk = get_tod_clock(); 3837 cqr->status = DASD_CQR_FILLED; 3838 cqr->lpm = usrparm.path_mask; 3839 3840 rc = dasd_sleep_on_immediatly(cqr); 3841 /* verify that I/O processing didn't modify the path mask */ 3842 if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask)) 3843 rc = -EIO; 3844 if (!rc) { 3845 usrparm.data = *((struct dasd_snid_data *)cqr->data); 3846 if (copy_to_user(argp, &usrparm, sizeof(usrparm))) 3847 rc = -EFAULT; 3848 } 3849 3850 if (useglobal) 3851 mutex_unlock(&dasd_reserve_mutex); 3852 else 3853 dasd_sfree_request(cqr, cqr->memdev); 3854 return rc; 3855 } 3856 3857 /* 3858 * Read performance statistics 3859 */ 3860 static int 3861 dasd_eckd_performance(struct dasd_device *device, void __user *argp) 3862 { 3863 struct dasd_psf_prssd_data *prssdp; 3864 struct dasd_rssd_perf_stats_t *stats; 3865 struct dasd_ccw_req *cqr; 3866 struct ccw1 *ccw; 3867 int rc; 3868 3869 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, 3870 (sizeof(struct dasd_psf_prssd_data) + 3871 sizeof(struct dasd_rssd_perf_stats_t)), 3872 device); 3873 if (IS_ERR(cqr)) { 3874 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3875 "Could not allocate initialization request"); 3876 return PTR_ERR(cqr); 3877 } 3878 cqr->startdev = device; 3879 cqr->memdev = device; 3880 cqr->retries = 0; 3881 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 3882 cqr->expires = 10 * HZ; 3883 3884 /* Prepare for Read Subsystem Data */ 3885 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 3886 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data)); 3887 prssdp->order = PSF_ORDER_PRSSD; 3888 prssdp->suborder = 0x01; /* Performance Statistics */ 3889 prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */ 3890 3891 ccw = cqr->cpaddr; 3892 ccw->cmd_code = DASD_ECKD_CCW_PSF; 3893 ccw->count = sizeof(struct dasd_psf_prssd_data); 3894 ccw->flags |= CCW_FLAG_CC; 3895 ccw->cda = (__u32)(addr_t) prssdp; 3896 3897 /* Read Subsystem Data - Performance Statistics */ 3898 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); 3899 memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t)); 3900 3901 ccw++; 3902 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 3903 ccw->count = sizeof(struct dasd_rssd_perf_stats_t); 3904 ccw->cda = (__u32)(addr_t) stats; 3905 
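	/*
	 * At this point the channel program consists of two chained CCWs:
	 * CCW_FLAG_CC on the PSF CCW above makes the channel continue with
	 * this RSSD CCW, so the storage server first prepares the
	 * performance statistics and then returns them into 'stats'.
	 */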
3906 cqr->buildclk = get_tod_clock(); 3907 cqr->status = DASD_CQR_FILLED; 3908 rc = dasd_sleep_on(cqr); 3909 if (rc == 0) { 3910 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 3911 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); 3912 if (copy_to_user(argp, stats, 3913 sizeof(struct dasd_rssd_perf_stats_t))) 3914 rc = -EFAULT; 3915 } 3916 dasd_sfree_request(cqr, cqr->memdev); 3917 return rc; 3918 } 3919 3920 /* 3921 * Get attributes (cache operations) 3922 * Returns the cache attributes used in Define Extent (DE). 3923 */ 3924 static int 3925 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp) 3926 { 3927 struct dasd_eckd_private *private = 3928 (struct dasd_eckd_private *)device->private; 3929 struct attrib_data_t attrib = private->attrib; 3930 int rc; 3931 3932 if (!capable(CAP_SYS_ADMIN)) 3933 return -EACCES; 3934 if (!argp) 3935 return -EINVAL; 3936 3937 rc = 0; 3938 if (copy_to_user(argp, (long *) &attrib, 3939 sizeof(struct attrib_data_t))) 3940 rc = -EFAULT; 3941 3942 return rc; 3943 } 3944 3945 /* 3946 * Set attributes (cache operations) 3947 * Stores the attributes for cache operation to be used in Define Extent (DE). 3948 */ 3949 static int 3950 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp) 3951 { 3952 struct dasd_eckd_private *private = 3953 (struct dasd_eckd_private *)device->private; 3954 struct attrib_data_t attrib; 3955 3956 if (!capable(CAP_SYS_ADMIN)) 3957 return -EACCES; 3958 if (!argp) 3959 return -EINVAL; 3960 3961 if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t))) 3962 return -EFAULT; 3963 private->attrib = attrib; 3964 3965 dev_info(&device->cdev->dev, 3966 "The DASD cache mode was set to %x (%i cylinder prestage)\n", 3967 private->attrib.operation, private->attrib.nr_cyl); 3968 return 0; 3969 } 3970 3971 /* 3972 * Issue syscall I/O to EMC Symmetrix array. 3973 * CCWs are PSF and RSSD 3974 */ 3975 static int dasd_symm_io(struct dasd_device *device, void __user *argp) 3976 { 3977 struct dasd_symmio_parms usrparm; 3978 char *psf_data, *rssd_result; 3979 struct dasd_ccw_req *cqr; 3980 struct ccw1 *ccw; 3981 char psf0, psf1; 3982 int rc; 3983 3984 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO)) 3985 return -EACCES; 3986 psf0 = psf1 = 0; 3987 3988 /* Copy parms from caller */ 3989 rc = -EFAULT; 3990 if (copy_from_user(&usrparm, argp, sizeof(usrparm))) 3991 goto out; 3992 if (is_compat_task()) { 3993 /* Make sure pointers are sane even on 31 bit.
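	   A 31-bit compat caller can only pass user addresses below 2 GiB,
	   but bit 31 (the 31-bit addressing-mode bit) may be set in the
	   value; the checks below therefore reject anything with bits 32-63
	   set, and the masking with 0x7fffffff clears bit 31.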
*/ 3994 rc = -EINVAL; 3995 if ((usrparm.psf_data >> 32) != 0) 3996 goto out; 3997 if ((usrparm.rssd_result >> 32) != 0) 3998 goto out; 3999 usrparm.psf_data &= 0x7fffffffULL; 4000 usrparm.rssd_result &= 0x7fffffffULL; 4001 } 4002 /* alloc I/O data area */ 4003 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); 4004 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); 4005 if (!psf_data || !rssd_result) { 4006 rc = -ENOMEM; 4007 goto out_free; 4008 } 4009 4010 /* get syscall header from user space */ 4011 rc = -EFAULT; 4012 if (copy_from_user(psf_data, 4013 (void __user *)(unsigned long) usrparm.psf_data, 4014 usrparm.psf_data_len)) 4015 goto out_free; 4016 psf0 = psf_data[0]; 4017 psf1 = psf_data[1]; 4018 4019 /* setup CCWs for PSF + RSSD */ 4020 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device); 4021 if (IS_ERR(cqr)) { 4022 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 4023 "Could not allocate initialization request"); 4024 rc = PTR_ERR(cqr); 4025 goto out_free; 4026 } 4027 4028 cqr->startdev = device; 4029 cqr->memdev = device; 4030 cqr->retries = 3; 4031 cqr->expires = 10 * HZ; 4032 cqr->buildclk = get_tod_clock(); 4033 cqr->status = DASD_CQR_FILLED; 4034 4035 /* Build the ccws */ 4036 ccw = cqr->cpaddr; 4037 4038 /* PSF ccw */ 4039 ccw->cmd_code = DASD_ECKD_CCW_PSF; 4040 ccw->count = usrparm.psf_data_len; 4041 ccw->flags |= CCW_FLAG_CC; 4042 ccw->cda = (__u32)(addr_t) psf_data; 4043 4044 ccw++; 4045 4046 /* RSSD ccw */ 4047 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 4048 ccw->count = usrparm.rssd_result_len; 4049 ccw->flags = CCW_FLAG_SLI; 4050 ccw->cda = (__u32)(addr_t) rssd_result; 4051 4052 rc = dasd_sleep_on(cqr); 4053 if (rc) 4054 goto out_sfree; 4055 4056 rc = -EFAULT; 4057 if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result, 4058 rssd_result, usrparm.rssd_result_len)) 4059 goto out_sfree; 4060 rc = 0; 4061 4062 out_sfree: 4063 dasd_sfree_request(cqr, cqr->memdev); 4064 out_free: 4065 kfree(rssd_result); 4066 kfree(psf_data); 4067 out: 4068 DBF_DEV_EVENT(DBF_WARNING, device, 4069 "Symmetrix ioctl (0x%02x 0x%02x): rc=%d", 4070 (int) psf0, (int) psf1, rc); 4071 return rc; 4072 } 4073 4074 static int 4075 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp) 4076 { 4077 struct dasd_device *device = block->base; 4078 4079 switch (cmd) { 4080 case BIODASDGATTR: 4081 return dasd_eckd_get_attrib(device, argp); 4082 case BIODASDSATTR: 4083 return dasd_eckd_set_attrib(device, argp); 4084 case BIODASDPSRD: 4085 return dasd_eckd_performance(device, argp); 4086 case BIODASDRLSE: 4087 return dasd_eckd_release(device); 4088 case BIODASDRSRV: 4089 return dasd_eckd_reserve(device); 4090 case BIODASDSLCK: 4091 return dasd_eckd_steal_lock(device); 4092 case BIODASDSNID: 4093 return dasd_eckd_snid(device, argp); 4094 case BIODASDSYMMIO: 4095 return dasd_symm_io(device, argp); 4096 default: 4097 return -ENOTTY; 4098 } 4099 } 4100 4101 /* 4102 * Dump the range of CCWs into 'page' buffer 4103 * and return number of printed chars.
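 *
 * A dumped line looks roughly like this (illustrative values only):
 *
 *	dasd(eckd): CCW 000000001f2e3d40: 06400020 1f2e4000 DAT:  00112233 44556677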
4104 */ 4105 static int 4106 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) 4107 { 4108 int len, count; 4109 char *datap; 4110 4111 len = 0; 4112 while (from <= to) { 4113 len += sprintf(page + len, PRINTK_HEADER 4114 " CCW %p: %08X %08X DAT:", 4115 from, ((int *) from)[0], ((int *) from)[1]); 4116 4117 /* get pointer to data (consider IDALs) */ 4118 if (from->flags & CCW_FLAG_IDA) 4119 datap = (char *) *((addr_t *) (addr_t) from->cda); 4120 else 4121 datap = (char *) ((addr_t) from->cda); 4122 4123 /* dump data (max 32 bytes) */ 4124 for (count = 0; count < from->count && count < 32; count++) { 4125 if (count % 8 == 0) len += sprintf(page + len, " "); 4126 if (count % 4 == 0) len += sprintf(page + len, " "); 4127 len += sprintf(page + len, "%02x", datap[count]); 4128 } 4129 len += sprintf(page + len, "\n"); 4130 from++; 4131 } 4132 return len; 4133 } 4134 4135 static void 4136 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb, 4137 char *reason) 4138 { 4139 u64 *sense; 4140 u64 *stat; 4141 4142 sense = (u64 *) dasd_get_sense(irb); 4143 stat = (u64 *) &irb->scsw; 4144 if (sense) { 4145 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : " 4146 "%016llx %016llx %016llx %016llx", 4147 reason, *stat, *((u32 *) (stat + 1)), 4148 sense[0], sense[1], sense[2], sense[3]); 4149 } else { 4150 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s", 4151 reason, *stat, *((u32 *) (stat + 1)), 4152 "NO VALID SENSE"); 4153 } 4154 } 4155 4156 /* 4157 * Print sense data and related channel program. 4158 * Parts are printed because the printk buffer is only 1024 bytes. 4159 */ 4160 static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, 4161 struct dasd_ccw_req *req, struct irb *irb) 4162 { 4163 char *page; 4164 struct ccw1 *first, *last, *fail, *from, *to; 4165 int len, sl, sct; 4166 4167 page = (char *) get_zeroed_page(GFP_ATOMIC); 4168 if (page == NULL) { 4169 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 4170 "No memory to dump sense data\n"); 4171 return; 4172 } 4173 /* dump the sense data */ 4174 len = sprintf(page, PRINTK_HEADER 4175 " I/O status report for device %s:\n", 4176 dev_name(&device->cdev->dev)); 4177 len += sprintf(page + len, PRINTK_HEADER 4178 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " 4179 "CS:%02X RC:%d\n", 4180 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), 4181 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), 4182 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), 4183 req ? req->intrc : 0); 4184 len += sprintf(page + len, PRINTK_HEADER 4185 " device %s: Failing CCW: %p\n", 4186 dev_name(&device->cdev->dev), 4187 (void *) (addr_t) irb->scsw.cmd.cpa); 4188 if (irb->esw.esw0.erw.cons) { 4189 for (sl = 0; sl < 4; sl++) { 4190 len += sprintf(page + len, PRINTK_HEADER 4191 " Sense(hex) %2d-%2d:", 4192 (8 * sl), ((8 * sl) + 7)); 4193 4194 for (sct = 0; sct < 8; sct++) { 4195 len += sprintf(page + len, " %02x", 4196 irb->ecw[8 * sl + sct]); 4197 } 4198 len += sprintf(page + len, "\n"); 4199 } 4200 4201 if (irb->ecw[27] & DASD_SENSE_BIT_0) { 4202 /* 24 Byte Sense Data */ 4203 sprintf(page + len, PRINTK_HEADER 4204 " 24 Byte: %x MSG %x, " 4205 "%s MSGb to SYSOP\n", 4206 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f, 4207 irb->ecw[1] & 0x10 ? "" : "no"); 4208 } else { 4209 /* 32 Byte Sense Data */ 4210 sprintf(page + len, PRINTK_HEADER 4211 " 32 Byte: Format: %x " 4212 "Exception class %x\n", 4213 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4); 4214 } 4215 } else { 4216 sprintf(page + len, PRINTK_HEADER 4217 " SORRY - NO VALID SENSE AVAILABLE\n"); 4218 } 4219 printk(KERN_ERR "%s", page); 4220 4221 if (req) { 4222 /* req == NULL for unsolicited interrupts */ 4223 /* dump the Channel Program (max 140 Bytes per line) */ 4224 /* Count CCW and print first CCWs (maximum 1024 / 140 = 7) */ 4225 first = req->cpaddr; 4226 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); 4227 to = min(first + 6, last); 4228 len = sprintf(page, PRINTK_HEADER 4229 " Related CP in req: %p\n", req); 4230 dasd_eckd_dump_ccw_range(first, to, page + len); 4231 printk(KERN_ERR "%s", page); 4232 4233 /* print failing CCW area (maximum 4) */ 4234 /* scsw->cda is either valid or zero */ 4235 len = 0; 4236 from = ++to; 4237 fail = (struct ccw1 *)(addr_t) 4238 irb->scsw.cmd.cpa; /* failing CCW */ 4239 if (from < fail - 2) { 4240 from = fail - 2; /* there is a gap - print header */ 4241 len += sprintf(page, PRINTK_HEADER "......\n"); 4242 } 4243 to = min(fail + 1, last); 4244 len += dasd_eckd_dump_ccw_range(from, to, page + len); 4245 4246 /* print last CCWs (maximum 2) */ 4247 from = max(from, ++to); 4248 if (from < last - 1) { 4249 from = last - 1; /* there is a gap - print header */ 4250 len += sprintf(page + len, PRINTK_HEADER "......\n"); 4251 } 4252 len += dasd_eckd_dump_ccw_range(from, last, page + len); 4253 if (len > 0) 4254 printk(KERN_ERR "%s", page); 4255 } 4256 free_page((unsigned long) page); 4257 } 4258 4259 4260 /* 4261 * Print sense data from a tcw. 4262 */ 4263 static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, 4264 struct dasd_ccw_req *req, struct irb *irb) 4265 { 4266 char *page; 4267 int len, sl, sct, residual; 4268 struct tsb *tsb; 4269 u8 *sense, *rcq; 4270 4271 page = (char *) get_zeroed_page(GFP_ATOMIC); 4272 if (page == NULL) { 4273 DBF_DEV_EVENT(DBF_WARNING, device, " %s", 4274 "No memory to dump sense data"); 4275 return; 4276 } 4277 /* dump the sense data */ 4278 len = sprintf(page, PRINTK_HEADER 4279 " I/O status report for device %s:\n", 4280 dev_name(&device->cdev->dev)); 4281 len += sprintf(page + len, PRINTK_HEADER 4282 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " 4283 "CS:%02X fcxs:%02X schxs:%02X RC:%d\n", 4284 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), 4285 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), 4286 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), 4287 irb->scsw.tm.fcxs, irb->scsw.tm.schxs, 4288 req ?
req->intrc : 0); 4289 len += sprintf(page + len, PRINTK_HEADER 4290 " device %s: Failing TCW: %p\n", 4291 dev_name(&device->cdev->dev), 4292 (void *) (addr_t) irb->scsw.tm.tcw); 4293 4294 tsb = NULL; 4295 sense = NULL; 4296 if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01)) 4297 tsb = tcw_get_tsb( 4298 (struct tcw *)(unsigned long)irb->scsw.tm.tcw); 4299 4300 if (tsb) { 4301 len += sprintf(page + len, PRINTK_HEADER 4302 " tsb->length %d\n", tsb->length); 4303 len += sprintf(page + len, PRINTK_HEADER 4304 " tsb->flags %x\n", tsb->flags); 4305 len += sprintf(page + len, PRINTK_HEADER 4306 " tsb->dcw_offset %d\n", tsb->dcw_offset); 4307 len += sprintf(page + len, PRINTK_HEADER 4308 " tsb->count %d\n", tsb->count); 4309 residual = tsb->count - 28; 4310 len += sprintf(page + len, PRINTK_HEADER 4311 " residual %d\n", residual); 4312 4313 switch (tsb->flags & 0x07) { 4314 case 1: /* tsa_iostat */ 4315 len += sprintf(page + len, PRINTK_HEADER 4316 " tsb->tsa.iostat.dev_time %d\n", 4317 tsb->tsa.iostat.dev_time); 4318 len += sprintf(page + len, PRINTK_HEADER 4319 " tsb->tsa.iostat.def_time %d\n", 4320 tsb->tsa.iostat.def_time); 4321 len += sprintf(page + len, PRINTK_HEADER 4322 " tsb->tsa.iostat.queue_time %d\n", 4323 tsb->tsa.iostat.queue_time); 4324 len += sprintf(page + len, PRINTK_HEADER 4325 " tsb->tsa.iostat.dev_busy_time %d\n", 4326 tsb->tsa.iostat.dev_busy_time); 4327 len += sprintf(page + len, PRINTK_HEADER 4328 " tsb->tsa.iostat.dev_act_time %d\n", 4329 tsb->tsa.iostat.dev_act_time); 4330 sense = tsb->tsa.iostat.sense; 4331 break; 4332 case 2: /* ts_ddpc */ 4333 len += sprintf(page + len, PRINTK_HEADER 4334 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc); 4335 for (sl = 0; sl < 2; sl++) { 4336 len += sprintf(page + len, PRINTK_HEADER 4337 " tsb->tsa.ddpc.rcq %2d-%2d: ", 4338 (8 * sl), ((8 * sl) + 7)); 4339 rcq = tsb->tsa.ddpc.rcq; 4340 for (sct = 0; sct < 8; sct++) { 4341 len += sprintf(page + len, " %02x", 4342 rcq[8 * sl + sct]); 4343 } 4344 len += sprintf(page + len, "\n"); 4345 } 4346 sense = tsb->tsa.ddpc.sense; 4347 break; 4348 case 3: /* tsa_intrg */ 4349 len += sprintf(page + len, PRINTK_HEADER 4350 " tsb->tsa.intrg.: not supported yet\n"); 4351 break; 4352 } 4353 4354 if (sense) { 4355 for (sl = 0; sl < 4; sl++) { 4356 len += sprintf(page + len, PRINTK_HEADER 4357 " Sense(hex) %2d-%2d:", 4358 (8 * sl), ((8 * sl) + 7)); 4359 for (sct = 0; sct < 8; sct++) { 4360 len += sprintf(page + len, " %02x", 4361 sense[8 * sl + sct]); 4362 } 4363 len += sprintf(page + len, "\n"); 4364 } 4365 4366 if (sense[27] & DASD_SENSE_BIT_0) { 4367 /* 24 Byte Sense Data */ 4368 sprintf(page + len, PRINTK_HEADER 4369 " 24 Byte: %x MSG %x, " 4370 "%s MSGb to SYSOP\n", 4371 sense[7] >> 4, sense[7] & 0x0f, 4372 sense[1] & 0x10 ? "" : "no"); 4373 } else { 4374 /* 32 Byte Sense Data */ 4375 sprintf(page + len, PRINTK_HEADER 4376 " 32 Byte: Format: %x " 4377 "Exception class %x\n", 4378 sense[6] & 0x0f, sense[22] >> 4); 4379 } 4380 } else { 4381 sprintf(page + len, PRINTK_HEADER 4382 " SORRY - NO VALID SENSE AVAILABLE\n"); 4383 } 4384 } else { 4385 sprintf(page + len, PRINTK_HEADER 4386 " SORRY - NO TSB DATA AVAILABLE\n"); 4387 } 4388 printk(KERN_ERR "%s", page); 4389 free_page((unsigned long) page); 4390 } 4391 4392 static void dasd_eckd_dump_sense(struct dasd_device *device, 4393 struct dasd_ccw_req *req, struct irb *irb) 4394 { 4395 if (scsw_is_tm(&irb->scsw)) 4396 dasd_eckd_dump_sense_tcw(device, req, irb); 4397 else 4398 dasd_eckd_dump_sense_ccw(device, req, irb); 4399 } 4400 4401 static int dasd_eckd_pm_freeze(struct dasd_device *device) 4402 { 4403 /* 4404 * the device should be disconnected from our LCU structure; 4405 * on restore we will reconnect it and reread LCU-specific 4406 * information like PAV support that might have changed 4407 */ 4408 dasd_alias_remove_device(device); 4409 dasd_alias_disconnect_device_from_lcu(device); 4410 4411 return 0; 4412 } 4413 4414 static int dasd_eckd_restore_device(struct dasd_device *device) 4415 { 4416 struct dasd_eckd_private *private; 4417 struct dasd_eckd_characteristics temp_rdc_data; 4418 int rc; 4419 struct dasd_uid temp_uid; 4420 unsigned long flags; 4421 unsigned long cqr_flags = 0; 4422 4423 private = (struct dasd_eckd_private *) device->private; 4424 4425 /* Read Configuration Data */ 4426 dasd_eckd_read_conf(device); 4427 4428 dasd_eckd_get_uid(device, &temp_uid); 4429 /* Generate device unique id */ 4430 rc = dasd_eckd_generate_uid(device); 4431 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 4432 if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0) 4433 dev_err(&device->cdev->dev, "The UID of the DASD has " 4434 "changed\n"); 4435 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 4436 if (rc) 4437 goto out_err; 4438 4439 /* register lcu with alias handling, enable PAV if this is a new lcu */ 4440 rc = dasd_alias_make_device_known_to_lcu(device); 4441 if (rc) 4442 return rc; 4443 4444 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr_flags); 4445 dasd_eckd_validate_server(device, cqr_flags); 4446 4447 /* RE-Read Configuration Data */ 4448 dasd_eckd_read_conf(device); 4449 4450 /* Read Feature Codes */ 4451 dasd_eckd_read_features(device); 4452 4453 /* Read Device Characteristics */ 4454 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, 4455 &temp_rdc_data, 64); 4456 if (rc) { 4457 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 4458 "Read device characteristic failed, rc=%d", rc); 4459 goto out_err; 4460 } 4461 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 4462 memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data)); 4463 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 4464 4465 /* add device to alias management */ 4466 dasd_alias_add_device(device); 4467 4468 return 0; 4469 4470 out_err: 4471 return -1; 4472 } 4473 4474 static int dasd_eckd_reload_device(struct dasd_device *device) 4475 { 4476 struct dasd_eckd_private *private; 4477 int rc, old_base; 4478 char print_uid[60]; 4479 struct dasd_uid uid; 4480 unsigned long flags; 4481 4482 private = (struct dasd_eckd_private *) device->private; 4483 4484 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 4485 old_base = private->uid.base_unit_addr; 4486 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 4487 4488 /* Read
Configuration Data */ 4489 rc = dasd_eckd_read_conf(device); 4490 if (rc) 4491 goto out_err; 4492 4493 rc = dasd_eckd_generate_uid(device); 4494 if (rc) 4495 goto out_err; 4496 /* 4497 * update unit address configuration and 4498 * add device to alias management 4499 */ 4500 dasd_alias_update_add_device(device); 4501 4502 dasd_eckd_get_uid(device, &uid); 4503 4504 if (old_base != uid.base_unit_addr) { 4505 if (strlen(uid.vduit) > 0) 4506 snprintf(print_uid, sizeof(print_uid), 4507 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial, 4508 uid.ssid, uid.base_unit_addr, uid.vduit); 4509 else 4510 snprintf(print_uid, sizeof(print_uid), 4511 "%s.%s.%04x.%02x", uid.vendor, uid.serial, 4512 uid.ssid, uid.base_unit_addr); 4513 4514 dev_info(&device->cdev->dev, 4515 "An Alias device was reassigned to a new base device " 4516 "with UID: %s\n", print_uid); 4517 } 4518 return 0; 4519 4520 out_err: 4521 return -1; 4522 } 4523 4524 static int dasd_eckd_read_message_buffer(struct dasd_device *device, 4525 struct dasd_rssd_messages *messages, 4526 __u8 lpum) 4527 { 4528 struct dasd_rssd_messages *message_buf; 4529 struct dasd_psf_prssd_data *prssdp; 4530 struct dasd_eckd_private *private; 4531 struct dasd_ccw_req *cqr; 4532 struct ccw1 *ccw; 4533 int rc; 4534 4535 private = (struct dasd_eckd_private *) device->private; 4536 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, 4537 (sizeof(struct dasd_psf_prssd_data) + 4538 sizeof(struct dasd_rssd_messages)), 4539 device); 4540 if (IS_ERR(cqr)) { 4541 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 4542 "Could not allocate read message buffer request"); 4543 return PTR_ERR(cqr); 4544 } 4545 4546 cqr->startdev = device; 4547 cqr->memdev = device; 4548 cqr->block = NULL; 4549 cqr->expires = 10 * HZ; 4550 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags); 4551 /* dasd_sleep_on_immediatly does not do complex error 4552 * recovery, so clear the erp flag and set the retry 4553 * counter to do basic erp */ 4554 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 4555 cqr->retries = 256; 4556 4557 /* Prepare for Read Subsystem Data */ 4558 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 4559 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data)); 4560 prssdp->order = PSF_ORDER_PRSSD; 4561 prssdp->suborder = 0x03; /* Message Buffer */ 4562 /* all other bytes of prssdp must be zero */ 4563 4564 ccw = cqr->cpaddr; 4565 ccw->cmd_code = DASD_ECKD_CCW_PSF; 4566 ccw->count = sizeof(struct dasd_psf_prssd_data); 4567 ccw->flags |= CCW_FLAG_CC; 4568 ccw->flags |= CCW_FLAG_SLI; 4569 ccw->cda = (__u32)(addr_t) prssdp; 4570 4571 /* Read Subsystem Data - message buffer */ 4572 message_buf = (struct dasd_rssd_messages *) (prssdp + 1); 4573 memset(message_buf, 0, sizeof(struct dasd_rssd_messages)); 4574 4575 ccw++; 4576 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 4577 ccw->count = sizeof(struct dasd_rssd_messages); 4578 ccw->flags |= CCW_FLAG_SLI; 4579 ccw->cda = (__u32)(addr_t) message_buf; 4580 4581 cqr->buildclk = get_tod_clock(); 4582 cqr->status = DASD_CQR_FILLED; 4583 rc = dasd_sleep_on_immediatly(cqr); 4584 if (rc == 0) { 4585 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 4586 message_buf = (struct dasd_rssd_messages *) 4587 (prssdp + 1); 4588 memcpy(messages, message_buf, 4589 sizeof(struct dasd_rssd_messages)); 4590 } else 4591 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 4592 "Reading messages failed with rc=%d\n", 4593 rc); 4594 dasd_sfree_request(cqr, cqr->memdev); 4595 return rc; 4596 } 4597 4598 /* 4599 * Perform Subsystem Function - CUIR response 4600 */ 4601 static int
4602 dasd_eckd_psf_cuir_response(struct dasd_device *device, int response, 4603 __u32 message_id, 4604 struct channel_path_desc *desc, 4605 struct subchannel_id sch_id) 4606 { 4607 struct dasd_psf_cuir_response *psf_cuir; 4608 struct dasd_ccw_req *cqr; 4609 struct ccw1 *ccw; 4610 int rc; 4611 4612 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */, 4613 sizeof(struct dasd_psf_cuir_response), 4614 device); 4615 4616 if (IS_ERR(cqr)) { 4617 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 4618 "Could not allocate PSF-CUIR request"); 4619 return PTR_ERR(cqr); 4620 } 4621 4622 psf_cuir = (struct dasd_psf_cuir_response *)cqr->data; 4623 psf_cuir->order = PSF_ORDER_CUIR_RESPONSE; 4624 psf_cuir->cc = response; 4625 if (desc) 4626 psf_cuir->chpid = desc->chpid; 4627 psf_cuir->message_id = message_id; 4628 psf_cuir->cssid = sch_id.cssid; 4629 psf_cuir->ssid = sch_id.ssid; 4630 ccw = cqr->cpaddr; 4631 ccw->cmd_code = DASD_ECKD_CCW_PSF; 4632 ccw->cda = (__u32)(addr_t)psf_cuir; 4633 ccw->flags = CCW_FLAG_SLI; 4634 ccw->count = sizeof(struct dasd_psf_cuir_response); 4635 4636 cqr->startdev = device; 4637 cqr->memdev = device; 4638 cqr->block = NULL; 4639 cqr->retries = 256; 4640 cqr->expires = 10*HZ; 4641 cqr->buildclk = get_tod_clock(); 4642 cqr->status = DASD_CQR_FILLED; 4643 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags); 4644 4645 rc = dasd_sleep_on(cqr); 4646 4647 dasd_sfree_request(cqr, cqr->memdev); 4648 return rc; 4649 } 4650 4651 /* 4652 * Return the configuration data referenced by the record selector if a 4653 * record selector is specified; by default return the conf_data pointer 4654 * for the path specified by lpum. 4655 */ 4656 static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device, 4657 __u8 lpum, 4658 struct dasd_cuir_message *cuir) 4659 { 4660 struct dasd_eckd_private *private; 4661 struct dasd_conf_data *conf_data; 4662 int path, pos; 4663 4664 private = (struct dasd_eckd_private *) device->private; 4665 if (cuir->record_selector == 0) 4666 goto out; 4667 for (path = 0x80, pos = 0; path; path >>= 1, pos++) { 4668 conf_data = private->path_conf_data[pos]; 4669 if (conf_data->gneq.record_selector == 4670 cuir->record_selector) 4671 return conf_data; 4672 } 4673 out: 4674 return private->path_conf_data[8 - ffs(lpum)]; 4675 } 4676 4677 /* 4678 * This function determines the scope of a reconfiguration request by 4679 * analysing the path and device selection data provided in the CUIR request. 4680 * Returns a path mask containing CUIR affected paths for the given device. 4681 * 4682 * If the CUIR request does not contain the required information, return 4683 * the path mask of the path on which the attention message for the CUIR 4684 * request was received.
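 *
 * Worked example (hypothetical values): lpum 0x80 denotes the first
 * channel path, so 8 - ffs(lpum) = 0 indexes the first path_conf_data
 * entry. A neq_map of {0x01, 0x02, 0x03} is assembled below into the
 * 24 bit mask 0x010203, whose bit 0 (LSB) selects byte 31 of the gneq
 * for comparison and whose bit 23 selects byte 8.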
4685 */ 4686 static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum, 4687 struct dasd_cuir_message *cuir) 4688 { 4689 struct dasd_conf_data *ref_conf_data; 4690 unsigned long bitmask = 0, mask = 0; 4691 struct dasd_eckd_private *private; 4692 struct dasd_conf_data *conf_data; 4693 unsigned int pos, path; 4694 char *ref_gneq, *gneq; 4695 char *ref_ned, *ned; 4696 int tbcpm = 0; 4697 4698 /* if the CUIR request does not specify the scope, use the path 4699 the attention message was presented on */ 4700 if (!cuir->ned_map || 4701 !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2])) 4702 return lpum; 4703 4704 private = (struct dasd_eckd_private *) device->private; 4705 /* get reference conf data */ 4706 ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir); 4707 /* reference ned is determined by ned_map field */ 4708 pos = 8 - ffs(cuir->ned_map); 4709 ref_ned = (char *)&ref_conf_data->neds[pos]; 4710 ref_gneq = (char *)&ref_conf_data->gneq; 4711 /* transfer 24 bit neq_map to mask */ 4712 mask = cuir->neq_map[2]; 4713 mask |= cuir->neq_map[1] << 8; 4714 mask |= cuir->neq_map[0] << 16; 4715 4716 for (path = 0x80; path; path >>= 1) { 4717 /* initialise data per path */ 4718 bitmask = mask; 4719 pos = 8 - ffs(path); 4720 conf_data = private->path_conf_data[pos]; 4721 pos = 8 - ffs(cuir->ned_map); 4722 ned = (char *) &conf_data->neds[pos]; 4723 /* compare reference ned and per path ned */ 4724 if (memcmp(ref_ned, ned, sizeof(*ned)) != 0) 4725 continue; 4726 gneq = (char *)&conf_data->gneq; 4727 /* compare reference gneq and per path gneq under the 4728 24 bit mask, where mask bit 0 (LSB) corresponds to 4729 byte 31 of the gneq and mask bit 23 to byte 8 */ 4730 while (bitmask) { 4731 pos = ffs(bitmask) - 1; 4732 if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1) 4733 != 0) 4734 break; 4735 clear_bit(pos, &bitmask); 4736 } 4737 if (bitmask) 4738 continue; 4739 /* device and path match the reference values, 4740 add path to CUIR scope */ 4741 tbcpm |= path; 4742 } 4743 return tbcpm; 4744 } 4745 4746 static void dasd_eckd_cuir_notify_user(struct dasd_device *device, 4747 unsigned long paths, 4748 struct subchannel_id sch_id, int action) 4749 { 4750 struct channel_path_desc *desc; 4751 int pos; 4752 4753 while (paths) { 4754 /* get position of bit in mask */ 4755 pos = ffs(paths) - 1; 4756 /* get channel path descriptor from this position */ 4757 desc = ccw_device_get_chp_desc(device->cdev, 7 - pos); 4758 if (action == CUIR_QUIESCE) 4759 pr_warn("Service on the storage server caused path " 4760 "%x.%02x to go offline", sch_id.cssid, 4761 desc ? desc->chpid : 0); 4762 else if (action == CUIR_RESUME) 4763 pr_info("Path %x.%02x is back online after service " 4764 "on the storage server", sch_id.cssid, 4765 desc ?
desc->chpid : 0); 4766 kfree(desc); 4767 clear_bit(pos, &paths); 4768 } 4769 } 4770 4771 static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum, 4772 struct dasd_cuir_message *cuir) 4773 { 4774 unsigned long tbcpm; 4775 4776 tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir); 4777 /* nothing to do if path is not in use */ 4778 if (!(device->path_data.opm & tbcpm)) 4779 return 0; 4780 if (!(device->path_data.opm & ~tbcpm)) { 4781 /* no path would be left if the CUIR action is taken; 4782 return an error */ 4783 return -EINVAL; 4784 } 4785 /* remove the paths from the operational path mask */ 4786 device->path_data.opm &= ~tbcpm; 4787 device->path_data.cuirpm |= tbcpm; 4788 return tbcpm; 4789 } 4790 4791 /* 4792 * Walk through all devices and build a path mask to quiesce them. 4793 * Return an error if the last path to a device would be removed. 4794 * 4795 * If only part of the devices are quiesced and an error 4796 * occurs, no onlining is necessary; the storage server will 4797 * notify the devices already set offline again 4798 */ 4799 static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum, 4800 struct subchannel_id sch_id, 4801 struct dasd_cuir_message *cuir) 4802 { 4803 struct alias_pav_group *pavgroup, *tempgroup; 4804 struct dasd_eckd_private *private; 4805 struct dasd_device *dev, *n; 4806 unsigned long paths = 0; 4807 unsigned long flags; 4808 int tbcpm; 4809 4810 private = (struct dasd_eckd_private *) device->private; 4811 /* active devices */ 4812 list_for_each_entry_safe(dev, n, &private->lcu->active_devices, 4813 alias_list) { 4814 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags); 4815 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir); 4816 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags); 4817 if (tbcpm < 0) 4818 goto out_err; 4819 paths |= tbcpm; 4820 } 4821 /* inactive devices */ 4822 list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices, 4823 alias_list) { 4824 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags); 4825 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir); 4826 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags); 4827 if (tbcpm < 0) 4828 goto out_err; 4829 paths |= tbcpm; 4830 } 4831 /* devices in PAV groups */ 4832 list_for_each_entry_safe(pavgroup, tempgroup, 4833 &private->lcu->grouplist, group) { 4834 list_for_each_entry_safe(dev, n, &pavgroup->baselist, 4835 alias_list) { 4836 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags); 4837 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir); 4838 spin_unlock_irqrestore( 4839 get_ccwdev_lock(dev->cdev), flags); 4840 if (tbcpm < 0) 4841 goto out_err; 4842 paths |= tbcpm; 4843 } 4844 list_for_each_entry_safe(dev, n, &pavgroup->aliaslist, 4845 alias_list) { 4846 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags); 4847 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir); 4848 spin_unlock_irqrestore( 4849 get_ccwdev_lock(dev->cdev), flags); 4850 if (tbcpm < 0) 4851 goto out_err; 4852 paths |= tbcpm; 4853 } 4854 } 4855 /* notify user about all paths affected by CUIR action */ 4856 dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_QUIESCE); 4857 return 0; 4858 out_err: 4859 return tbcpm; 4860 } 4861 4862 static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum, 4863 struct subchannel_id sch_id, 4864 struct dasd_cuir_message *cuir) 4865 { 4866 struct alias_pav_group *pavgroup, *tempgroup; 4867 struct dasd_eckd_private *private; 4868 struct dasd_device *dev, *n; 4869 unsigned long paths = 0; 4870 int tbcpm; 4871 4872 private = (struct
dasd_eckd_private *) device->private; 4873 /* 4874 * the path may have been added through a generic path event before; 4875 * only trigger path verification if the path is not already in use 4876 */ 4877 list_for_each_entry_safe(dev, n, 4878 &private->lcu->active_devices, 4879 alias_list) { 4880 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); 4881 paths |= tbcpm; 4882 if (!(dev->path_data.opm & tbcpm)) { 4883 dev->path_data.tbvpm |= tbcpm; 4884 dasd_schedule_device_bh(dev); 4885 } 4886 } 4887 list_for_each_entry_safe(dev, n, 4888 &private->lcu->inactive_devices, 4889 alias_list) { 4890 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); 4891 paths |= tbcpm; 4892 if (!(dev->path_data.opm & tbcpm)) { 4893 dev->path_data.tbvpm |= tbcpm; 4894 dasd_schedule_device_bh(dev); 4895 } 4896 } 4897 /* devices in PAV groups */ 4898 list_for_each_entry_safe(pavgroup, tempgroup, 4899 &private->lcu->grouplist, 4900 group) { 4901 list_for_each_entry_safe(dev, n, 4902 &pavgroup->baselist, 4903 alias_list) { 4904 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); 4905 paths |= tbcpm; 4906 if (!(dev->path_data.opm & tbcpm)) { 4907 dev->path_data.tbvpm |= tbcpm; 4908 dasd_schedule_device_bh(dev); 4909 } 4910 } 4911 list_for_each_entry_safe(dev, n, 4912 &pavgroup->aliaslist, 4913 alias_list) { 4914 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); 4915 paths |= tbcpm; 4916 if (!(dev->path_data.opm & tbcpm)) { 4917 dev->path_data.tbvpm |= tbcpm; 4918 dasd_schedule_device_bh(dev); 4919 } 4920 } 4921 } 4922 /* notify user about all paths affected by CUIR action */ 4923 dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_RESUME); 4924 return 0; 4925 } 4926 4927 static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages, 4928 __u8 lpum) 4929 { 4930 struct dasd_cuir_message *cuir = messages; 4931 struct channel_path_desc *desc; 4932 struct subchannel_id sch_id; 4933 int pos, response; 4934 4935 DBF_DEV_EVENT(DBF_WARNING, device, 4936 "CUIR request: %016llx %016llx %016llx %08x", 4937 ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2], 4938 ((u32 *)cuir)[3]); 4939 ccw_device_get_schid(device->cdev, &sch_id); 4940 /* get position of path in mask */ 4941 pos = 8 - ffs(lpum); 4942 /* get channel path descriptor from this position */ 4943 desc = ccw_device_get_chp_desc(device->cdev, pos); 4944 4945 if (cuir->code == CUIR_QUIESCE) { 4946 /* quiesce */ 4947 if (dasd_eckd_cuir_quiesce(device, lpum, sch_id, cuir)) 4948 response = PSF_CUIR_LAST_PATH; 4949 else 4950 response = PSF_CUIR_COMPLETED; 4951 } else if (cuir->code == CUIR_RESUME) { 4952 /* resume */ 4953 dasd_eckd_cuir_resume(device, lpum, sch_id, cuir); 4954 response = PSF_CUIR_COMPLETED; 4955 } else 4956 response = PSF_CUIR_NOT_SUPPORTED; 4957 4958 dasd_eckd_psf_cuir_response(device, response, 4959 cuir->message_id, desc, sch_id); 4960 DBF_DEV_EVENT(DBF_WARNING, device, 4961 "CUIR response: %d on message ID %08x", response, 4962 cuir->message_id); 4963 /* free descriptor copy */ 4964 kfree(desc); 4965 /* to make sure there is no attention left, schedule the work again */ 4966 device->discipline->check_attention(device, lpum); 4967 } 4968 4969 static void dasd_eckd_check_attention_work(struct work_struct *work) 4970 { 4971 struct check_attention_work_data *data; 4972 struct dasd_rssd_messages *messages; 4973 struct dasd_device *device; 4974 int rc; 4975 4976 data = container_of(work, struct check_attention_work_data, worker); 4977 device = data->device; 4978 messages = kzalloc(sizeof(*messages), GFP_KERNEL); 4979 if (!messages) { 4980 DBF_DEV_EVENT(DBF_WARNING,
device, "%s", 4981 "Could not allocate attention message buffer"); 4982 goto out; 4983 } 4984 rc = dasd_eckd_read_message_buffer(device, messages, data->lpum); 4985 if (rc) 4986 goto out; 4987 if (messages->length == ATTENTION_LENGTH_CUIR && 4988 messages->format == ATTENTION_FORMAT_CUIR) 4989 dasd_eckd_handle_cuir(device, messages, data->lpum); 4990 out: 4991 dasd_put_device(device); 4992 kfree(messages); 4993 kfree(data); 4994 } 4995 4996 static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum) 4997 { 4998 struct check_attention_work_data *data; 4999 5000 data = kzalloc(sizeof(*data), GFP_ATOMIC); 5001 if (!data) 5002 return -ENOMEM; 5003 INIT_WORK(&data->worker, dasd_eckd_check_attention_work); 5004 dasd_get_device(device); 5005 data->device = device; 5006 data->lpum = lpum; 5007 schedule_work(&data->worker); 5008 return 0; 5009 } 5010 5011 static struct ccw_driver dasd_eckd_driver = { 5012 .driver = { 5013 .name = "dasd-eckd", 5014 .owner = THIS_MODULE, 5015 }, 5016 .ids = dasd_eckd_ids, 5017 .probe = dasd_eckd_probe, 5018 .remove = dasd_generic_remove, 5019 .set_offline = dasd_generic_set_offline, 5020 .set_online = dasd_eckd_set_online, 5021 .notify = dasd_generic_notify, 5022 .path_event = dasd_generic_path_event, 5023 .shutdown = dasd_generic_shutdown, 5024 .freeze = dasd_generic_pm_freeze, 5025 .thaw = dasd_generic_restore_device, 5026 .restore = dasd_generic_restore_device, 5027 .uc_handler = dasd_generic_uc_handler, 5028 .int_class = IRQIO_DAS, 5029 }; 5030 5031 /* 5032 * max_blocks depends on the amount of storage that is available 5033 * in the static io buffer for each device. Currently each device has 5034 * 8192 bytes (=2 pages). For 64 bit one struct dasd_mchunk has 5035 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use 5036 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In 5037 * addition we have one define extent ccw + 16 bytes of data and one 5038 * locate record ccw + 16 bytes of data. That makes: 5039 * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum. 5040 * We want to fit two into the available memory so that we can immediately 5041 * start the next request if one finishes. That makes 249.5 blocks 5042 * for one request. Give a little safety and the result is 240 (the 
 * discipline definition below actually uses the even more conservative 190).
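 *
 * The same arithmetic as a code sketch (illustrative only; the macro
 * name is invented for the example and is not used by the driver):
 *
 *	#define ECKD_STATIC_IOBUF_SIZE	8192
 *	blocks = (ECKD_STATIC_IOBUF_SIZE - 24 - 136 - 8 - 16 - 8 - 16) / 16;
 *	per_request = blocks / 2;	499 / 2 = 249, minus a safety margin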
5043 */ 5044 static struct dasd_discipline dasd_eckd_discipline = { 5045 .owner = THIS_MODULE, 5046 .name = "ECKD", 5047 .ebcname = "ECKD", 5048 .max_blocks = 190, 5049 .check_device = dasd_eckd_check_characteristics, 5050 .uncheck_device = dasd_eckd_uncheck_device, 5051 .do_analysis = dasd_eckd_do_analysis, 5052 .verify_path = dasd_eckd_verify_path, 5053 .basic_to_ready = dasd_eckd_basic_to_ready, 5054 .online_to_ready = dasd_eckd_online_to_ready, 5055 .basic_to_known = dasd_eckd_basic_to_known, 5056 .fill_geometry = dasd_eckd_fill_geometry, 5057 .start_IO = dasd_start_IO, 5058 .term_IO = dasd_term_IO, 5059 .handle_terminated_request = dasd_eckd_handle_terminated_request, 5060 .format_device = dasd_eckd_format_device, 5061 .erp_action = dasd_eckd_erp_action, 5062 .erp_postaction = dasd_eckd_erp_postaction, 5063 .check_for_device_change = dasd_eckd_check_for_device_change, 5064 .build_cp = dasd_eckd_build_alias_cp, 5065 .free_cp = dasd_eckd_free_alias_cp, 5066 .dump_sense = dasd_eckd_dump_sense, 5067 .dump_sense_dbf = dasd_eckd_dump_sense_dbf, 5068 .fill_info = dasd_eckd_fill_info, 5069 .ioctl = dasd_eckd_ioctl, 5070 .freeze = dasd_eckd_pm_freeze, 5071 .restore = dasd_eckd_restore_device, 5072 .reload = dasd_eckd_reload_device, 5073 .get_uid = dasd_eckd_get_uid, 5074 .kick_validate = dasd_eckd_kick_validate_server, 5075 .check_attention = dasd_eckd_check_attention, 5076 }; 5077 5078 static int __init 5079 dasd_eckd_init(void) 5080 { 5081 int ret; 5082 5083 ASCEBC(dasd_eckd_discipline.ebcname, 4); 5084 dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req), 5085 GFP_KERNEL | GFP_DMA); 5086 if (!dasd_reserve_req) 5087 return -ENOMEM; 5088 path_verification_worker = kmalloc(sizeof(*path_verification_worker), 5089 GFP_KERNEL | GFP_DMA); 5090 if (!path_verification_worker) { 5091 kfree(dasd_reserve_req); 5092 return -ENOMEM; 5093 } 5094 rawpadpage = (void *)__get_free_page(GFP_KERNEL); 5095 if (!rawpadpage) { 5096 kfree(path_verification_worker); 5097 kfree(dasd_reserve_req); 5098 return -ENOMEM; 5099 } 5100 ret = ccw_driver_register(&dasd_eckd_driver); 5101 if (!ret) 5102 wait_for_device_probe(); 5103 else { 5104 kfree(path_verification_worker); 5105 kfree(dasd_reserve_req); 5106 free_page((unsigned long)rawpadpage); 5107 } 5108 return ret; 5109 } 5110 5111 static void __exit 5112 dasd_eckd_cleanup(void) 5113 { 5114 ccw_driver_unregister(&dasd_eckd_driver); 5115 kfree(path_verification_worker); 5116 kfree(dasd_reserve_req); 5117 free_page((unsigned long)rawpadpage); 5118 } 5119 5120 module_init(dasd_eckd_init); 5121 module_exit(dasd_eckd_cleanup); 5122
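
/*
 * Typical deployment sketch (illustrative; assumes the usual s390 module
 * naming from the kernel build, the chccwdev tool from s390-tools, and a
 * made-up bus ID):
 *
 *	modprobe dasd_eckd_mod
 *	chccwdev -e 0.0.1234
 *
 * Setting the device online runs dasd_eckd_probe()/dasd_eckd_set_online(),
 * and the volume appears as a /dev/dasd* block device once analysis
 * completes.
 */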