1 /* 2 * File...........: linux/drivers/s390/block/dasd_eckd.c 3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 4 * Horst Hummel <Horst.Hummel@de.ibm.com> 5 * Carsten Otte <Cotte@de.ibm.com> 6 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com> 8 * Copyright IBM Corp. 1999, 2009 9 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008 10 * Author.........: Nigel Hislop <hislop_nigel@emc.com> 11 */ 12 13 #define KMSG_COMPONENT "dasd" 14 15 #include <linux/stddef.h> 16 #include <linux/kernel.h> 17 #include <linux/slab.h> 18 #include <linux/hdreg.h> /* HDIO_GETGEO */ 19 #include <linux/bio.h> 20 #include <linux/module.h> 21 #include <linux/init.h> 22 23 #include <asm/debug.h> 24 #include <asm/idals.h> 25 #include <asm/ebcdic.h> 26 #include <asm/io.h> 27 #include <asm/todclk.h> 28 #include <asm/uaccess.h> 29 #include <asm/cio.h> 30 #include <asm/ccwdev.h> 31 #include <asm/itcw.h> 32 33 #include "dasd_int.h" 34 #include "dasd_eckd.h" 35 #include "../cio/chsc.h" 36 37 38 #ifdef PRINTK_HEADER 39 #undef PRINTK_HEADER 40 #endif /* PRINTK_HEADER */ 41 #define PRINTK_HEADER "dasd(eckd):" 42 43 #define ECKD_C0(i) (i->home_bytes) 44 #define ECKD_F(i) (i->formula) 45 #define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\ 46 (i->factors.f_0x02.f1)) 47 #define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\ 48 (i->factors.f_0x02.f2)) 49 #define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\ 50 (i->factors.f_0x02.f3)) 51 #define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0) 52 #define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0) 53 #define ECKD_F6(i) (i->factor6) 54 #define ECKD_F7(i) (i->factor7) 55 #define ECKD_F8(i) (i->factor8) 56 57 MODULE_LICENSE("GPL"); 58 59 static struct dasd_discipline dasd_eckd_discipline; 60 61 /* The ccw bus type uses this table to find devices that it sends to 62 * dasd_eckd_probe */ 63 static struct ccw_device_id dasd_eckd_ids[] = { 64 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1}, 65 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2}, 66 { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3}, 67 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4}, 68 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5}, 69 { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6}, 70 { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7}, 71 { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8}, 72 { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9}, 73 { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa}, 74 { /* end of list */ }, 75 }; 76 77 MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids); 78 79 static struct ccw_driver dasd_eckd_driver; /* see below */ 80 81 /* initial attempt at a probe function. 
this can be simplified once 82 * the other detection code is gone */ 83 static int 84 dasd_eckd_probe (struct ccw_device *cdev) 85 { 86 int ret; 87 88 /* set ECKD specific ccw-device options */ 89 ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE); 90 if (ret) { 91 DBF_EVENT(DBF_WARNING, 92 "dasd_eckd_probe: could not set ccw-device options " 93 "for %s\n", dev_name(&cdev->dev)); 94 return ret; 95 } 96 ret = dasd_generic_probe(cdev, &dasd_eckd_discipline); 97 return ret; 98 } 99 100 static int 101 dasd_eckd_set_online(struct ccw_device *cdev) 102 { 103 return dasd_generic_set_online(cdev, &dasd_eckd_discipline); 104 } 105 106 static const int sizes_trk0[] = { 28, 148, 84 }; 107 #define LABEL_SIZE 140 108 109 static inline unsigned int 110 round_up_multiple(unsigned int no, unsigned int mult) 111 { 112 int rem = no % mult; 113 return (rem ? no - rem + mult : no); 114 } 115 116 static inline unsigned int 117 ceil_quot(unsigned int d1, unsigned int d2) 118 { 119 return (d1 + (d2 - 1)) / d2; 120 } 121 122 static unsigned int 123 recs_per_track(struct dasd_eckd_characteristics * rdc, 124 unsigned int kl, unsigned int dl) 125 { 126 int dn, kn; 127 128 switch (rdc->dev_type) { 129 case 0x3380: 130 if (kl) 131 return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) + 132 ceil_quot(dl + 12, 32)); 133 else 134 return 1499 / (15 + ceil_quot(dl + 12, 32)); 135 case 0x3390: 136 dn = ceil_quot(dl + 6, 232) + 1; 137 if (kl) { 138 kn = ceil_quot(kl + 6, 232) + 1; 139 return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) + 140 9 + ceil_quot(dl + 6 * dn, 34)); 141 } else 142 return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34)); 143 case 0x9345: 144 dn = ceil_quot(dl + 6, 232) + 1; 145 if (kl) { 146 kn = ceil_quot(kl + 6, 232) + 1; 147 return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) + 148 ceil_quot(dl + 6 * dn, 34)); 149 } else 150 return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34)); 151 } 152 return 0; 153 } 154 155 static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head) 156 { 157 geo->cyl = (__u16) cyl; 158 geo->head = cyl >> 16; 159 geo->head <<= 4; 160 geo->head |= head; 161 } 162 163 static int 164 check_XRC (struct ccw1 *de_ccw, 165 struct DE_eckd_data *data, 166 struct dasd_device *device) 167 { 168 struct dasd_eckd_private *private; 169 int rc; 170 171 private = (struct dasd_eckd_private *) device->private; 172 if (!private->rdc_data.facilities.XRC_supported) 173 return 0; 174 175 /* switch on System Time Stamp - needed for XRC Support */ 176 data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */ 177 data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */ 178 179 rc = get_sync_clock(&data->ep_sys_time); 180 /* Ignore return code if sync clock is switched off. 
*/ 181 if (rc == -ENOSYS || rc == -EACCES) 182 rc = 0; 183 184 de_ccw->count = sizeof(struct DE_eckd_data); 185 de_ccw->flags |= CCW_FLAG_SLI; 186 return rc; 187 } 188 189 static int 190 define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk, 191 unsigned int totrk, int cmd, struct dasd_device *device) 192 { 193 struct dasd_eckd_private *private; 194 u32 begcyl, endcyl; 195 u16 heads, beghead, endhead; 196 int rc = 0; 197 198 private = (struct dasd_eckd_private *) device->private; 199 200 ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT; 201 ccw->flags = 0; 202 ccw->count = 16; 203 ccw->cda = (__u32) __pa(data); 204 205 memset(data, 0, sizeof(struct DE_eckd_data)); 206 switch (cmd) { 207 case DASD_ECKD_CCW_READ_HOME_ADDRESS: 208 case DASD_ECKD_CCW_READ_RECORD_ZERO: 209 case DASD_ECKD_CCW_READ: 210 case DASD_ECKD_CCW_READ_MT: 211 case DASD_ECKD_CCW_READ_CKD: 212 case DASD_ECKD_CCW_READ_CKD_MT: 213 case DASD_ECKD_CCW_READ_KD: 214 case DASD_ECKD_CCW_READ_KD_MT: 215 case DASD_ECKD_CCW_READ_COUNT: 216 data->mask.perm = 0x1; 217 data->attributes.operation = private->attrib.operation; 218 break; 219 case DASD_ECKD_CCW_WRITE: 220 case DASD_ECKD_CCW_WRITE_MT: 221 case DASD_ECKD_CCW_WRITE_KD: 222 case DASD_ECKD_CCW_WRITE_KD_MT: 223 data->mask.perm = 0x02; 224 data->attributes.operation = private->attrib.operation; 225 rc = check_XRC (ccw, data, device); 226 break; 227 case DASD_ECKD_CCW_WRITE_CKD: 228 case DASD_ECKD_CCW_WRITE_CKD_MT: 229 data->attributes.operation = DASD_BYPASS_CACHE; 230 rc = check_XRC (ccw, data, device); 231 break; 232 case DASD_ECKD_CCW_ERASE: 233 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: 234 case DASD_ECKD_CCW_WRITE_RECORD_ZERO: 235 data->mask.perm = 0x3; 236 data->mask.auth = 0x1; 237 data->attributes.operation = DASD_BYPASS_CACHE; 238 rc = check_XRC (ccw, data, device); 239 break; 240 default: 241 dev_err(&device->cdev->dev, 242 "0x%x is not a known command\n", cmd); 243 break; 244 } 245 246 data->attributes.mode = 0x3; /* ECKD */ 247 248 if ((private->rdc_data.cu_type == 0x2105 || 249 private->rdc_data.cu_type == 0x2107 || 250 private->rdc_data.cu_type == 0x1750) 251 && !(private->uses_cdl && trk < 2)) 252 data->ga_extended |= 0x40; /* Regular Data Format Mode */ 253 254 heads = private->rdc_data.trk_per_cyl; 255 begcyl = trk / heads; 256 beghead = trk % heads; 257 endcyl = totrk / heads; 258 endhead = totrk % heads; 259 260 /* check for sequential prestage - enhance cylinder range */ 261 if (data->attributes.operation == DASD_SEQ_PRESTAGE || 262 data->attributes.operation == DASD_SEQ_ACCESS) { 263 264 if (endcyl + private->attrib.nr_cyl < private->real_cyl) 265 endcyl += private->attrib.nr_cyl; 266 else 267 endcyl = (private->real_cyl - 1); 268 } 269 270 set_ch_t(&data->beg_ext, begcyl, beghead); 271 set_ch_t(&data->end_ext, endcyl, endhead); 272 return rc; 273 } 274 275 static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata, 276 struct dasd_device *device) 277 { 278 struct dasd_eckd_private *private; 279 int rc; 280 281 private = (struct dasd_eckd_private *) device->private; 282 if (!private->rdc_data.facilities.XRC_supported) 283 return 0; 284 285 /* switch on System Time Stamp - needed for XRC Support */ 286 pfxdata->define_extent.ga_extended |= 0x08; /* 'Time Stamp Valid' */ 287 pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */ 288 pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */ 289 290 rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time); 291 /* Ignore return code if sync clock is switched off. 
*/ 292 if (rc == -ENOSYS || rc == -EACCES) 293 rc = 0; 294 return rc; 295 } 296 297 static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk, 298 unsigned int rec_on_trk, int count, int cmd, 299 struct dasd_device *device, unsigned int reclen, 300 unsigned int tlf) 301 { 302 struct dasd_eckd_private *private; 303 int sector; 304 int dn, d; 305 306 private = (struct dasd_eckd_private *) device->private; 307 308 memset(data, 0, sizeof(*data)); 309 sector = 0; 310 if (rec_on_trk) { 311 switch (private->rdc_data.dev_type) { 312 case 0x3390: 313 dn = ceil_quot(reclen + 6, 232); 314 d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34); 315 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8; 316 break; 317 case 0x3380: 318 d = 7 + ceil_quot(reclen + 12, 32); 319 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7; 320 break; 321 } 322 } 323 data->sector = sector; 324 /* note: meaning of count depends on the operation 325 * for record based I/O it's the number of records, but for 326 * track based I/O it's the number of tracks 327 */ 328 data->count = count; 329 switch (cmd) { 330 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: 331 data->operation.orientation = 0x3; 332 data->operation.operation = 0x03; 333 break; 334 case DASD_ECKD_CCW_READ_HOME_ADDRESS: 335 data->operation.orientation = 0x3; 336 data->operation.operation = 0x16; 337 break; 338 case DASD_ECKD_CCW_WRITE_RECORD_ZERO: 339 data->operation.orientation = 0x1; 340 data->operation.operation = 0x03; 341 data->count++; 342 break; 343 case DASD_ECKD_CCW_READ_RECORD_ZERO: 344 data->operation.orientation = 0x3; 345 data->operation.operation = 0x16; 346 data->count++; 347 break; 348 case DASD_ECKD_CCW_WRITE: 349 case DASD_ECKD_CCW_WRITE_MT: 350 case DASD_ECKD_CCW_WRITE_KD: 351 case DASD_ECKD_CCW_WRITE_KD_MT: 352 data->auxiliary.length_valid = 0x1; 353 data->length = reclen; 354 data->operation.operation = 0x01; 355 break; 356 case DASD_ECKD_CCW_WRITE_CKD: 357 case DASD_ECKD_CCW_WRITE_CKD_MT: 358 data->auxiliary.length_valid = 0x1; 359 data->length = reclen; 360 data->operation.operation = 0x03; 361 break; 362 case DASD_ECKD_CCW_WRITE_TRACK_DATA: 363 data->auxiliary.length_valid = 0x1; 364 data->length = reclen; /* not tlf, as one might think */ 365 data->operation.operation = 0x3F; 366 data->extended_operation = 0x23; 367 break; 368 case DASD_ECKD_CCW_READ: 369 case DASD_ECKD_CCW_READ_MT: 370 case DASD_ECKD_CCW_READ_KD: 371 case DASD_ECKD_CCW_READ_KD_MT: 372 data->auxiliary.length_valid = 0x1; 373 data->length = reclen; 374 data->operation.operation = 0x06; 375 break; 376 case DASD_ECKD_CCW_READ_CKD: 377 case DASD_ECKD_CCW_READ_CKD_MT: 378 data->auxiliary.length_valid = 0x1; 379 data->length = reclen; 380 data->operation.operation = 0x16; 381 break; 382 case DASD_ECKD_CCW_READ_COUNT: 383 data->operation.operation = 0x06; 384 break; 385 case DASD_ECKD_CCW_READ_TRACK_DATA: 386 data->auxiliary.length_valid = 0x1; 387 data->length = tlf; 388 data->operation.operation = 0x0C; 389 break; 390 case DASD_ECKD_CCW_ERASE: 391 data->length = reclen; 392 data->auxiliary.length_valid = 0x1; 393 data->operation.operation = 0x0b; 394 break; 395 default: 396 DBF_DEV_EVENT(DBF_ERR, device, 397 "fill LRE unknown opcode 0x%x", cmd); 398 BUG(); 399 } 400 set_ch_t(&data->seek_addr, 401 trk / private->rdc_data.trk_per_cyl, 402 trk % private->rdc_data.trk_per_cyl); 403 data->search_arg.cyl = data->seek_addr.cyl; 404 data->search_arg.head = data->seek_addr.head; 405 data->search_arg.record = rec_on_trk; 406 } 407 408 static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data 
*pfxdata, 409 unsigned int trk, unsigned int totrk, int cmd, 410 struct dasd_device *basedev, struct dasd_device *startdev, 411 unsigned char format, unsigned int rec_on_trk, int count, 412 unsigned int blksize, unsigned int tlf) 413 { 414 struct dasd_eckd_private *basepriv, *startpriv; 415 struct DE_eckd_data *dedata; 416 struct LRE_eckd_data *lredata; 417 u32 begcyl, endcyl; 418 u16 heads, beghead, endhead; 419 int rc = 0; 420 421 basepriv = (struct dasd_eckd_private *) basedev->private; 422 startpriv = (struct dasd_eckd_private *) startdev->private; 423 dedata = &pfxdata->define_extent; 424 lredata = &pfxdata->locate_record; 425 426 ccw->cmd_code = DASD_ECKD_CCW_PFX; 427 ccw->flags = 0; 428 ccw->count = sizeof(*pfxdata); 429 ccw->cda = (__u32) __pa(pfxdata); 430 431 memset(pfxdata, 0, sizeof(*pfxdata)); 432 /* prefix data */ 433 if (format > 1) { 434 DBF_DEV_EVENT(DBF_ERR, basedev, 435 "PFX LRE unknown format 0x%x", format); 436 BUG(); 437 return -EINVAL; 438 } 439 pfxdata->format = format; 440 pfxdata->base_address = basepriv->ned->unit_addr; 441 pfxdata->base_lss = basepriv->ned->ID; 442 pfxdata->validity.define_extent = 1; 443 444 /* private uid is kept up to date, conf_data may be outdated */ 445 if (startpriv->uid.type != UA_BASE_DEVICE) { 446 pfxdata->validity.verify_base = 1; 447 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) 448 pfxdata->validity.hyper_pav = 1; 449 } 450 451 /* define extend data (mostly)*/ 452 switch (cmd) { 453 case DASD_ECKD_CCW_READ_HOME_ADDRESS: 454 case DASD_ECKD_CCW_READ_RECORD_ZERO: 455 case DASD_ECKD_CCW_READ: 456 case DASD_ECKD_CCW_READ_MT: 457 case DASD_ECKD_CCW_READ_CKD: 458 case DASD_ECKD_CCW_READ_CKD_MT: 459 case DASD_ECKD_CCW_READ_KD: 460 case DASD_ECKD_CCW_READ_KD_MT: 461 case DASD_ECKD_CCW_READ_COUNT: 462 dedata->mask.perm = 0x1; 463 dedata->attributes.operation = basepriv->attrib.operation; 464 break; 465 case DASD_ECKD_CCW_READ_TRACK_DATA: 466 dedata->mask.perm = 0x1; 467 dedata->attributes.operation = basepriv->attrib.operation; 468 dedata->blk_size = 0; 469 break; 470 case DASD_ECKD_CCW_WRITE: 471 case DASD_ECKD_CCW_WRITE_MT: 472 case DASD_ECKD_CCW_WRITE_KD: 473 case DASD_ECKD_CCW_WRITE_KD_MT: 474 dedata->mask.perm = 0x02; 475 dedata->attributes.operation = basepriv->attrib.operation; 476 rc = check_XRC_on_prefix(pfxdata, basedev); 477 break; 478 case DASD_ECKD_CCW_WRITE_CKD: 479 case DASD_ECKD_CCW_WRITE_CKD_MT: 480 dedata->attributes.operation = DASD_BYPASS_CACHE; 481 rc = check_XRC_on_prefix(pfxdata, basedev); 482 break; 483 case DASD_ECKD_CCW_ERASE: 484 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: 485 case DASD_ECKD_CCW_WRITE_RECORD_ZERO: 486 dedata->mask.perm = 0x3; 487 dedata->mask.auth = 0x1; 488 dedata->attributes.operation = DASD_BYPASS_CACHE; 489 rc = check_XRC_on_prefix(pfxdata, basedev); 490 break; 491 case DASD_ECKD_CCW_WRITE_TRACK_DATA: 492 dedata->mask.perm = 0x02; 493 dedata->attributes.operation = basepriv->attrib.operation; 494 dedata->blk_size = blksize; 495 rc = check_XRC_on_prefix(pfxdata, basedev); 496 break; 497 default: 498 DBF_DEV_EVENT(DBF_ERR, basedev, 499 "PFX LRE unknown opcode 0x%x", cmd); 500 BUG(); 501 return -EINVAL; 502 } 503 504 dedata->attributes.mode = 0x3; /* ECKD */ 505 506 if ((basepriv->rdc_data.cu_type == 0x2105 || 507 basepriv->rdc_data.cu_type == 0x2107 || 508 basepriv->rdc_data.cu_type == 0x1750) 509 && !(basepriv->uses_cdl && trk < 2)) 510 dedata->ga_extended |= 0x40; /* Regular Data Format Mode */ 511 512 heads = basepriv->rdc_data.trk_per_cyl; 513 begcyl = trk / heads; 514 beghead = trk % heads; 515 
endcyl = totrk / heads; 516 endhead = totrk % heads; 517 518 /* check for sequential prestage - enhance cylinder range */ 519 if (dedata->attributes.operation == DASD_SEQ_PRESTAGE || 520 dedata->attributes.operation == DASD_SEQ_ACCESS) { 521 522 if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl) 523 endcyl += basepriv->attrib.nr_cyl; 524 else 525 endcyl = (basepriv->real_cyl - 1); 526 } 527 528 set_ch_t(&dedata->beg_ext, begcyl, beghead); 529 set_ch_t(&dedata->end_ext, endcyl, endhead); 530 531 if (format == 1) { 532 fill_LRE_data(lredata, trk, rec_on_trk, count, cmd, 533 basedev, blksize, tlf); 534 } 535 536 return rc; 537 } 538 539 static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, 540 unsigned int trk, unsigned int totrk, int cmd, 541 struct dasd_device *basedev, struct dasd_device *startdev) 542 { 543 return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev, 544 0, 0, 0, 0, 0); 545 } 546 547 static void 548 locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk, 549 unsigned int rec_on_trk, int no_rec, int cmd, 550 struct dasd_device * device, int reclen) 551 { 552 struct dasd_eckd_private *private; 553 int sector; 554 int dn, d; 555 556 private = (struct dasd_eckd_private *) device->private; 557 558 DBF_DEV_EVENT(DBF_INFO, device, 559 "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d", 560 trk, rec_on_trk, no_rec, cmd, reclen); 561 562 ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD; 563 ccw->flags = 0; 564 ccw->count = 16; 565 ccw->cda = (__u32) __pa(data); 566 567 memset(data, 0, sizeof(struct LO_eckd_data)); 568 sector = 0; 569 if (rec_on_trk) { 570 switch (private->rdc_data.dev_type) { 571 case 0x3390: 572 dn = ceil_quot(reclen + 6, 232); 573 d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34); 574 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8; 575 break; 576 case 0x3380: 577 d = 7 + ceil_quot(reclen + 12, 32); 578 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7; 579 break; 580 } 581 } 582 data->sector = sector; 583 data->count = no_rec; 584 switch (cmd) { 585 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: 586 data->operation.orientation = 0x3; 587 data->operation.operation = 0x03; 588 break; 589 case DASD_ECKD_CCW_READ_HOME_ADDRESS: 590 data->operation.orientation = 0x3; 591 data->operation.operation = 0x16; 592 break; 593 case DASD_ECKD_CCW_WRITE_RECORD_ZERO: 594 data->operation.orientation = 0x1; 595 data->operation.operation = 0x03; 596 data->count++; 597 break; 598 case DASD_ECKD_CCW_READ_RECORD_ZERO: 599 data->operation.orientation = 0x3; 600 data->operation.operation = 0x16; 601 data->count++; 602 break; 603 case DASD_ECKD_CCW_WRITE: 604 case DASD_ECKD_CCW_WRITE_MT: 605 case DASD_ECKD_CCW_WRITE_KD: 606 case DASD_ECKD_CCW_WRITE_KD_MT: 607 data->auxiliary.last_bytes_used = 0x1; 608 data->length = reclen; 609 data->operation.operation = 0x01; 610 break; 611 case DASD_ECKD_CCW_WRITE_CKD: 612 case DASD_ECKD_CCW_WRITE_CKD_MT: 613 data->auxiliary.last_bytes_used = 0x1; 614 data->length = reclen; 615 data->operation.operation = 0x03; 616 break; 617 case DASD_ECKD_CCW_READ: 618 case DASD_ECKD_CCW_READ_MT: 619 case DASD_ECKD_CCW_READ_KD: 620 case DASD_ECKD_CCW_READ_KD_MT: 621 data->auxiliary.last_bytes_used = 0x1; 622 data->length = reclen; 623 data->operation.operation = 0x06; 624 break; 625 case DASD_ECKD_CCW_READ_CKD: 626 case DASD_ECKD_CCW_READ_CKD_MT: 627 data->auxiliary.last_bytes_used = 0x1; 628 data->length = reclen; 629 data->operation.operation = 0x16; 630 break; 631 case DASD_ECKD_CCW_READ_COUNT: 632 data->operation.operation = 
0x06; 633 break; 634 case DASD_ECKD_CCW_ERASE: 635 data->length = reclen; 636 data->auxiliary.last_bytes_used = 0x1; 637 data->operation.operation = 0x0b; 638 break; 639 default: 640 DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record " 641 "opcode 0x%x", cmd); 642 } 643 set_ch_t(&data->seek_addr, 644 trk / private->rdc_data.trk_per_cyl, 645 trk % private->rdc_data.trk_per_cyl); 646 data->search_arg.cyl = data->seek_addr.cyl; 647 data->search_arg.head = data->seek_addr.head; 648 data->search_arg.record = rec_on_trk; 649 } 650 651 /* 652 * Returns 1 if the block is one of the special blocks that needs 653 * to get read/written with the KD variant of the command. 654 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and 655 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT. 656 * Luckily the KD variants differ only by one bit (0x08) from the 657 * normal variant. So don't wonder about code like: 658 * if (dasd_eckd_cdl_special(blk_per_trk, recid)) 659 * ccw->cmd_code |= 0x8; 660 */ 661 static inline int 662 dasd_eckd_cdl_special(int blk_per_trk, int recid) 663 { 664 if (recid < 3) 665 return 1; 666 if (recid < blk_per_trk) 667 return 0; 668 if (recid < 2 * blk_per_trk) 669 return 1; 670 return 0; 671 } 672 673 /* 674 * Returns the record size for the special blocks of the cdl format. 675 * Only returns something useful if dasd_eckd_cdl_special is true 676 * for the recid. 677 */ 678 static inline int 679 dasd_eckd_cdl_reclen(int recid) 680 { 681 if (recid < 3) 682 return sizes_trk0[recid]; 683 return LABEL_SIZE; 684 } 685 686 /* 687 * Generate device unique id that specifies the physical device. 688 */ 689 static int dasd_eckd_generate_uid(struct dasd_device *device, 690 struct dasd_uid *uid) 691 { 692 struct dasd_eckd_private *private; 693 int count; 694 695 private = (struct dasd_eckd_private *) device->private; 696 if (!private) 697 return -ENODEV; 698 if (!private->ned || !private->gneq) 699 return -ENODEV; 700 701 memset(uid, 0, sizeof(struct dasd_uid)); 702 memcpy(uid->vendor, private->ned->HDA_manufacturer, 703 sizeof(uid->vendor) - 1); 704 EBCASC(uid->vendor, sizeof(uid->vendor) - 1); 705 memcpy(uid->serial, private->ned->HDA_location, 706 sizeof(uid->serial) - 1); 707 EBCASC(uid->serial, sizeof(uid->serial) - 1); 708 uid->ssid = private->gneq->subsystemID; 709 uid->real_unit_addr = private->ned->unit_addr;; 710 if (private->sneq) { 711 uid->type = private->sneq->sua_flags; 712 if (uid->type == UA_BASE_PAV_ALIAS) 713 uid->base_unit_addr = private->sneq->base_unit_addr; 714 } else { 715 uid->type = UA_BASE_DEVICE; 716 } 717 if (private->vdsneq) { 718 for (count = 0; count < 16; count++) { 719 sprintf(uid->vduit+2*count, "%02x", 720 private->vdsneq->uit[count]); 721 } 722 } 723 return 0; 724 } 725 726 static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device, 727 void *rcd_buffer, 728 struct ciw *ciw, __u8 lpm) 729 { 730 struct dasd_ccw_req *cqr; 731 struct ccw1 *ccw; 732 733 cqr = dasd_smalloc_request("ECKD", 1 /* RCD */, ciw->count, device); 734 735 if (IS_ERR(cqr)) { 736 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 737 "Could not allocate RCD request"); 738 return cqr; 739 } 740 741 ccw = cqr->cpaddr; 742 ccw->cmd_code = ciw->cmd; 743 ccw->cda = (__u32)(addr_t)rcd_buffer; 744 ccw->count = ciw->count; 745 746 cqr->startdev = device; 747 cqr->memdev = device; 748 cqr->block = NULL; 749 cqr->expires = 10*HZ; 750 cqr->lpm = lpm; 751 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 752 cqr->retries = 2; 753 cqr->buildclk = get_clock(); 754 cqr->status = 
DASD_CQR_FILLED; 755 return cqr; 756 } 757 758 static int dasd_eckd_read_conf_lpm(struct dasd_device *device, 759 void **rcd_buffer, 760 int *rcd_buffer_size, __u8 lpm) 761 { 762 struct ciw *ciw; 763 char *rcd_buf = NULL; 764 int ret; 765 struct dasd_ccw_req *cqr; 766 767 /* 768 * scan for RCD command in extended SenseID data 769 */ 770 ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD); 771 if (!ciw || ciw->cmd == 0) { 772 ret = -EOPNOTSUPP; 773 goto out_error; 774 } 775 rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA); 776 if (!rcd_buf) { 777 ret = -ENOMEM; 778 goto out_error; 779 } 780 781 /* 782 * buffer has to start with EBCDIC "V1.0" to show 783 * support for virtual device SNEQ 784 */ 785 rcd_buf[0] = 0xE5; 786 rcd_buf[1] = 0xF1; 787 rcd_buf[2] = 0x4B; 788 rcd_buf[3] = 0xF0; 789 cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm); 790 if (IS_ERR(cqr)) { 791 ret = PTR_ERR(cqr); 792 goto out_error; 793 } 794 ret = dasd_sleep_on(cqr); 795 /* 796 * on success we update the user input parms 797 */ 798 dasd_sfree_request(cqr, cqr->memdev); 799 if (ret) 800 goto out_error; 801 802 *rcd_buffer_size = ciw->count; 803 *rcd_buffer = rcd_buf; 804 return 0; 805 out_error: 806 kfree(rcd_buf); 807 *rcd_buffer = NULL; 808 *rcd_buffer_size = 0; 809 return ret; 810 } 811 812 static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private) 813 { 814 815 struct dasd_sneq *sneq; 816 int i, count; 817 818 private->ned = NULL; 819 private->sneq = NULL; 820 private->vdsneq = NULL; 821 private->gneq = NULL; 822 count = private->conf_len / sizeof(struct dasd_sneq); 823 sneq = (struct dasd_sneq *)private->conf_data; 824 for (i = 0; i < count; ++i) { 825 if (sneq->flags.identifier == 1 && sneq->format == 1) 826 private->sneq = sneq; 827 else if (sneq->flags.identifier == 1 && sneq->format == 4) 828 private->vdsneq = (struct vd_sneq *)sneq; 829 else if (sneq->flags.identifier == 2) 830 private->gneq = (struct dasd_gneq *)sneq; 831 else if (sneq->flags.identifier == 3 && sneq->res1 == 1) 832 private->ned = (struct dasd_ned *)sneq; 833 sneq++; 834 } 835 if (!private->ned || !private->gneq) { 836 private->ned = NULL; 837 private->sneq = NULL; 838 private->vdsneq = NULL; 839 private->gneq = NULL; 840 return -EINVAL; 841 } 842 return 0; 843 844 }; 845 846 static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len) 847 { 848 struct dasd_gneq *gneq; 849 int i, count, found; 850 851 count = conf_len / sizeof(*gneq); 852 gneq = (struct dasd_gneq *)conf_data; 853 found = 0; 854 for (i = 0; i < count; ++i) { 855 if (gneq->flags.identifier == 2) { 856 found = 1; 857 break; 858 } 859 gneq++; 860 } 861 if (found) 862 return ((char *)gneq)[18] & 0x07; 863 else 864 return 0; 865 } 866 867 static int dasd_eckd_read_conf(struct dasd_device *device) 868 { 869 void *conf_data; 870 int conf_len, conf_data_saved; 871 int rc; 872 __u8 lpm; 873 struct dasd_eckd_private *private; 874 struct dasd_eckd_path *path_data; 875 876 private = (struct dasd_eckd_private *) device->private; 877 path_data = (struct dasd_eckd_path *) &private->path_data; 878 path_data->opm = ccw_device_get_path_mask(device->cdev); 879 lpm = 0x80; 880 conf_data_saved = 0; 881 /* get configuration data per operational path */ 882 for (lpm = 0x80; lpm; lpm>>= 1) { 883 if (lpm & path_data->opm){ 884 rc = dasd_eckd_read_conf_lpm(device, &conf_data, 885 &conf_len, lpm); 886 if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */ 887 DBF_EVENT(DBF_WARNING, 888 "Read configuration data returned " 889 "error %d for device: %s", rc, 890 
dev_name(&device->cdev->dev)); 891 return rc; 892 } 893 if (conf_data == NULL) { 894 DBF_EVENT(DBF_WARNING, "No configuration " 895 "data retrieved for device: %s", 896 dev_name(&device->cdev->dev)); 897 continue; /* no error */ 898 } 899 /* save first valid configuration data */ 900 if (!conf_data_saved) { 901 kfree(private->conf_data); 902 private->conf_data = conf_data; 903 private->conf_len = conf_len; 904 if (dasd_eckd_identify_conf_parts(private)) { 905 private->conf_data = NULL; 906 private->conf_len = 0; 907 kfree(conf_data); 908 continue; 909 } 910 conf_data_saved++; 911 } 912 switch (dasd_eckd_path_access(conf_data, conf_len)) { 913 case 0x02: 914 path_data->npm |= lpm; 915 break; 916 case 0x03: 917 path_data->ppm |= lpm; 918 break; 919 } 920 if (conf_data != private->conf_data) 921 kfree(conf_data); 922 } 923 } 924 return 0; 925 } 926 927 static int dasd_eckd_read_features(struct dasd_device *device) 928 { 929 struct dasd_psf_prssd_data *prssdp; 930 struct dasd_rssd_features *features; 931 struct dasd_ccw_req *cqr; 932 struct ccw1 *ccw; 933 int rc; 934 struct dasd_eckd_private *private; 935 936 private = (struct dasd_eckd_private *) device->private; 937 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 938 1 /* PSF */ + 1 /* RSSD */ , 939 (sizeof(struct dasd_psf_prssd_data) + 940 sizeof(struct dasd_rssd_features)), 941 device); 942 if (IS_ERR(cqr)) { 943 DBF_EVENT(DBF_WARNING, "Could not allocate initialization " 944 "request for device: %s", 945 dev_name(&device->cdev->dev)); 946 return PTR_ERR(cqr); 947 } 948 cqr->startdev = device; 949 cqr->memdev = device; 950 cqr->block = NULL; 951 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 952 cqr->retries = 5; 953 cqr->expires = 10 * HZ; 954 955 /* Prepare for Read Subsystem Data */ 956 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 957 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data)); 958 prssdp->order = PSF_ORDER_PRSSD; 959 prssdp->suborder = 0x41; /* Read Feature Codes */ 960 /* all other bytes of prssdp must be zero */ 961 962 ccw = cqr->cpaddr; 963 ccw->cmd_code = DASD_ECKD_CCW_PSF; 964 ccw->count = sizeof(struct dasd_psf_prssd_data); 965 ccw->flags |= CCW_FLAG_CC; 966 ccw->cda = (__u32)(addr_t) prssdp; 967 968 /* Read Subsystem Data - feature codes */ 969 features = (struct dasd_rssd_features *) (prssdp + 1); 970 memset(features, 0, sizeof(struct dasd_rssd_features)); 971 972 ccw++; 973 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 974 ccw->count = sizeof(struct dasd_rssd_features); 975 ccw->cda = (__u32)(addr_t) features; 976 977 cqr->buildclk = get_clock(); 978 cqr->status = DASD_CQR_FILLED; 979 rc = dasd_sleep_on(cqr); 980 if (rc == 0) { 981 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 982 features = (struct dasd_rssd_features *) (prssdp + 1); 983 memcpy(&private->features, features, 984 sizeof(struct dasd_rssd_features)); 985 } 986 dasd_sfree_request(cqr, cqr->memdev); 987 return rc; 988 } 989 990 991 /* 992 * Build CP for Perform Subsystem Function - SSC. 
993 */ 994 static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device, 995 int enable_pav) 996 { 997 struct dasd_ccw_req *cqr; 998 struct dasd_psf_ssc_data *psf_ssc_data; 999 struct ccw1 *ccw; 1000 1001 cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ , 1002 sizeof(struct dasd_psf_ssc_data), 1003 device); 1004 1005 if (IS_ERR(cqr)) { 1006 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1007 "Could not allocate PSF-SSC request"); 1008 return cqr; 1009 } 1010 psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data; 1011 psf_ssc_data->order = PSF_ORDER_SSC; 1012 psf_ssc_data->suborder = 0x40; 1013 if (enable_pav) { 1014 psf_ssc_data->suborder |= 0x88; 1015 psf_ssc_data->reserved[0] = 0x88; 1016 } 1017 ccw = cqr->cpaddr; 1018 ccw->cmd_code = DASD_ECKD_CCW_PSF; 1019 ccw->cda = (__u32)(addr_t)psf_ssc_data; 1020 ccw->count = 66; 1021 1022 cqr->startdev = device; 1023 cqr->memdev = device; 1024 cqr->block = NULL; 1025 cqr->expires = 10*HZ; 1026 cqr->buildclk = get_clock(); 1027 cqr->status = DASD_CQR_FILLED; 1028 return cqr; 1029 } 1030 1031 /* 1032 * Perform Subsystem Function. 1033 * It is necessary to trigger CIO for channel revalidation since this 1034 * call might change behaviour of DASD devices. 1035 */ 1036 static int 1037 dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav) 1038 { 1039 struct dasd_ccw_req *cqr; 1040 int rc; 1041 1042 cqr = dasd_eckd_build_psf_ssc(device, enable_pav); 1043 if (IS_ERR(cqr)) 1044 return PTR_ERR(cqr); 1045 1046 rc = dasd_sleep_on(cqr); 1047 if (!rc) 1048 /* trigger CIO to reprobe devices */ 1049 css_schedule_reprobe(); 1050 dasd_sfree_request(cqr, cqr->memdev); 1051 return rc; 1052 } 1053 1054 /* 1055 * Valide storage server of current device. 1056 */ 1057 static int dasd_eckd_validate_server(struct dasd_device *device) 1058 { 1059 int rc; 1060 struct dasd_eckd_private *private; 1061 int enable_pav; 1062 1063 if (dasd_nopav || MACHINE_IS_VM) 1064 enable_pav = 0; 1065 else 1066 enable_pav = 1; 1067 rc = dasd_eckd_psf_ssc(device, enable_pav); 1068 /* may be requested feature is not available on server, 1069 * therefore just report error and go ahead */ 1070 private = (struct dasd_eckd_private *) device->private; 1071 DBF_EVENT(DBF_WARNING, "PSF-SSC on storage subsystem %s.%s.%04x " 1072 "returned rc=%d for device: %s", 1073 private->uid.vendor, private->uid.serial, 1074 private->uid.ssid, rc, dev_name(&device->cdev->dev)); 1075 /* RE-Read Configuration Data */ 1076 return dasd_eckd_read_conf(device); 1077 } 1078 1079 /* 1080 * Check device characteristics. 1081 * If the device is accessible using ECKD discipline, the device is enabled. 1082 */ 1083 static int 1084 dasd_eckd_check_characteristics(struct dasd_device *device) 1085 { 1086 struct dasd_eckd_private *private; 1087 struct dasd_block *block; 1088 int is_known, rc; 1089 1090 private = (struct dasd_eckd_private *) device->private; 1091 if (!private) { 1092 private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA); 1093 if (!private) { 1094 dev_warn(&device->cdev->dev, 1095 "Allocating memory for private DASD data " 1096 "failed\n"); 1097 return -ENOMEM; 1098 } 1099 device->private = (void *) private; 1100 } else { 1101 memset(private, 0, sizeof(*private)); 1102 } 1103 /* Invalidate status of initial analysis. */ 1104 private->init_cqr_status = -1; 1105 /* Set default cache operations. 
*/ 1106 private->attrib.operation = DASD_NORMAL_CACHE; 1107 private->attrib.nr_cyl = 0; 1108 1109 /* Read Configuration Data */ 1110 rc = dasd_eckd_read_conf(device); 1111 if (rc) 1112 goto out_err1; 1113 1114 /* Generate device unique id and register in devmap */ 1115 rc = dasd_eckd_generate_uid(device, &private->uid); 1116 if (rc) 1117 goto out_err1; 1118 dasd_set_uid(device->cdev, &private->uid); 1119 1120 if (private->uid.type == UA_BASE_DEVICE) { 1121 block = dasd_alloc_block(); 1122 if (IS_ERR(block)) { 1123 DBF_EVENT(DBF_WARNING, "could not allocate dasd " 1124 "block structure for device: %s", 1125 dev_name(&device->cdev->dev)); 1126 rc = PTR_ERR(block); 1127 goto out_err1; 1128 } 1129 device->block = block; 1130 block->base = device; 1131 } 1132 1133 /* register lcu with alias handling, enable PAV if this is a new lcu */ 1134 is_known = dasd_alias_make_device_known_to_lcu(device); 1135 if (is_known < 0) { 1136 rc = is_known; 1137 goto out_err2; 1138 } 1139 if (!is_known) { 1140 /* new lcu found */ 1141 rc = dasd_eckd_validate_server(device); /* will switch pav on */ 1142 if (rc) 1143 goto out_err3; 1144 } 1145 1146 /* Read Feature Codes */ 1147 rc = dasd_eckd_read_features(device); 1148 if (rc) 1149 goto out_err3; 1150 1151 /* Read Device Characteristics */ 1152 rc = dasd_generic_read_dev_chars(device, "ECKD", &private->rdc_data, 1153 64); 1154 if (rc) { 1155 DBF_EVENT(DBF_WARNING, 1156 "Read device characteristics failed, rc=%d for " 1157 "device: %s", rc, dev_name(&device->cdev->dev)); 1158 goto out_err3; 1159 } 1160 /* find the vaild cylinder size */ 1161 if (private->rdc_data.no_cyl == LV_COMPAT_CYL && 1162 private->rdc_data.long_no_cyl) 1163 private->real_cyl = private->rdc_data.long_no_cyl; 1164 else 1165 private->real_cyl = private->rdc_data.no_cyl; 1166 1167 dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) " 1168 "with %d cylinders, %d heads, %d sectors\n", 1169 private->rdc_data.dev_type, 1170 private->rdc_data.dev_model, 1171 private->rdc_data.cu_type, 1172 private->rdc_data.cu_model.model, 1173 private->real_cyl, 1174 private->rdc_data.trk_per_cyl, 1175 private->rdc_data.sec_per_trk); 1176 return 0; 1177 1178 out_err3: 1179 dasd_alias_disconnect_device_from_lcu(device); 1180 out_err2: 1181 dasd_free_block(device->block); 1182 device->block = NULL; 1183 out_err1: 1184 kfree(private->conf_data); 1185 kfree(device->private); 1186 device->private = NULL; 1187 return rc; 1188 } 1189 1190 static void dasd_eckd_uncheck_device(struct dasd_device *device) 1191 { 1192 struct dasd_eckd_private *private; 1193 1194 private = (struct dasd_eckd_private *) device->private; 1195 dasd_alias_disconnect_device_from_lcu(device); 1196 private->ned = NULL; 1197 private->sneq = NULL; 1198 private->vdsneq = NULL; 1199 private->gneq = NULL; 1200 private->conf_len = 0; 1201 kfree(private->conf_data); 1202 private->conf_data = NULL; 1203 } 1204 1205 static struct dasd_ccw_req * 1206 dasd_eckd_analysis_ccw(struct dasd_device *device) 1207 { 1208 struct dasd_eckd_private *private; 1209 struct eckd_count *count_data; 1210 struct LO_eckd_data *LO_data; 1211 struct dasd_ccw_req *cqr; 1212 struct ccw1 *ccw; 1213 int cplength, datasize; 1214 int i; 1215 1216 private = (struct dasd_eckd_private *) device->private; 1217 1218 cplength = 8; 1219 datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data); 1220 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 1221 cplength, datasize, device); 1222 if (IS_ERR(cqr)) 1223 return cqr; 1224 ccw = cqr->cpaddr; 1225 /* Define extent 
for the first 3 tracks. */ 1226 define_extent(ccw++, cqr->data, 0, 2, 1227 DASD_ECKD_CCW_READ_COUNT, device); 1228 LO_data = cqr->data + sizeof(struct DE_eckd_data); 1229 /* Locate record for the first 4 records on track 0. */ 1230 ccw[-1].flags |= CCW_FLAG_CC; 1231 locate_record(ccw++, LO_data++, 0, 0, 4, 1232 DASD_ECKD_CCW_READ_COUNT, device, 0); 1233 1234 count_data = private->count_area; 1235 for (i = 0; i < 4; i++) { 1236 ccw[-1].flags |= CCW_FLAG_CC; 1237 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; 1238 ccw->flags = 0; 1239 ccw->count = 8; 1240 ccw->cda = (__u32)(addr_t) count_data; 1241 ccw++; 1242 count_data++; 1243 } 1244 1245 /* Locate record for the first record on track 2. */ 1246 ccw[-1].flags |= CCW_FLAG_CC; 1247 locate_record(ccw++, LO_data++, 2, 0, 1, 1248 DASD_ECKD_CCW_READ_COUNT, device, 0); 1249 /* Read count ccw. */ 1250 ccw[-1].flags |= CCW_FLAG_CC; 1251 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; 1252 ccw->flags = 0; 1253 ccw->count = 8; 1254 ccw->cda = (__u32)(addr_t) count_data; 1255 1256 cqr->block = NULL; 1257 cqr->startdev = device; 1258 cqr->memdev = device; 1259 cqr->retries = 0; 1260 cqr->buildclk = get_clock(); 1261 cqr->status = DASD_CQR_FILLED; 1262 return cqr; 1263 } 1264 1265 /* 1266 * This is the callback function for the init_analysis cqr. It saves 1267 * the status of the initial analysis ccw before it frees it and kicks 1268 * the device to continue the startup sequence. This will call 1269 * dasd_eckd_do_analysis again (if the devices has not been marked 1270 * for deletion in the meantime). 1271 */ 1272 static void 1273 dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data) 1274 { 1275 struct dasd_eckd_private *private; 1276 struct dasd_device *device; 1277 1278 device = init_cqr->startdev; 1279 private = (struct dasd_eckd_private *) device->private; 1280 private->init_cqr_status = init_cqr->status; 1281 dasd_sfree_request(init_cqr, device); 1282 dasd_kick_device(device); 1283 } 1284 1285 static int 1286 dasd_eckd_start_analysis(struct dasd_block *block) 1287 { 1288 struct dasd_eckd_private *private; 1289 struct dasd_ccw_req *init_cqr; 1290 1291 private = (struct dasd_eckd_private *) block->base->private; 1292 init_cqr = dasd_eckd_analysis_ccw(block->base); 1293 if (IS_ERR(init_cqr)) 1294 return PTR_ERR(init_cqr); 1295 init_cqr->callback = dasd_eckd_analysis_callback; 1296 init_cqr->callback_data = NULL; 1297 init_cqr->expires = 5*HZ; 1298 dasd_add_request_head(init_cqr); 1299 return -EAGAIN; 1300 } 1301 1302 static int 1303 dasd_eckd_end_analysis(struct dasd_block *block) 1304 { 1305 struct dasd_device *device; 1306 struct dasd_eckd_private *private; 1307 struct eckd_count *count_area; 1308 unsigned int sb, blk_per_trk; 1309 int status, i; 1310 1311 device = block->base; 1312 private = (struct dasd_eckd_private *) device->private; 1313 status = private->init_cqr_status; 1314 private->init_cqr_status = -1; 1315 if (status != DASD_CQR_DONE) { 1316 dev_warn(&device->cdev->dev, 1317 "The DASD is not formatted\n"); 1318 return -EMEDIUMTYPE; 1319 } 1320 1321 private->uses_cdl = 1; 1322 /* Check Track 0 for Compatible Disk Layout */ 1323 count_area = NULL; 1324 for (i = 0; i < 3; i++) { 1325 if (private->count_area[i].kl != 4 || 1326 private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) { 1327 private->uses_cdl = 0; 1328 break; 1329 } 1330 } 1331 if (i == 3) 1332 count_area = &private->count_area[4]; 1333 1334 if (private->uses_cdl == 0) { 1335 for (i = 0; i < 5; i++) { 1336 if ((private->count_area[i].kl != 0) || 1337 
(private->count_area[i].dl != 1338 private->count_area[0].dl)) 1339 break; 1340 } 1341 if (i == 5) 1342 count_area = &private->count_area[0]; 1343 } else { 1344 if (private->count_area[3].record == 1) 1345 dev_warn(&device->cdev->dev, 1346 "Track 0 has no records following the VTOC\n"); 1347 } 1348 if (count_area != NULL && count_area->kl == 0) { 1349 /* we found notthing violating our disk layout */ 1350 if (dasd_check_blocksize(count_area->dl) == 0) 1351 block->bp_block = count_area->dl; 1352 } 1353 if (block->bp_block == 0) { 1354 dev_warn(&device->cdev->dev, 1355 "The disk layout of the DASD is not supported\n"); 1356 return -EMEDIUMTYPE; 1357 } 1358 block->s2b_shift = 0; /* bits to shift 512 to get a block */ 1359 for (sb = 512; sb < block->bp_block; sb = sb << 1) 1360 block->s2b_shift++; 1361 1362 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block); 1363 block->blocks = (private->real_cyl * 1364 private->rdc_data.trk_per_cyl * 1365 blk_per_trk); 1366 1367 dev_info(&device->cdev->dev, 1368 "DASD with %d KB/block, %d KB total size, %d KB/track, " 1369 "%s\n", (block->bp_block >> 10), 1370 ((private->real_cyl * 1371 private->rdc_data.trk_per_cyl * 1372 blk_per_trk * (block->bp_block >> 9)) >> 1), 1373 ((blk_per_trk * block->bp_block) >> 10), 1374 private->uses_cdl ? 1375 "compatible disk layout" : "linux disk layout"); 1376 1377 return 0; 1378 } 1379 1380 static int dasd_eckd_do_analysis(struct dasd_block *block) 1381 { 1382 struct dasd_eckd_private *private; 1383 1384 private = (struct dasd_eckd_private *) block->base->private; 1385 if (private->init_cqr_status < 0) 1386 return dasd_eckd_start_analysis(block); 1387 else 1388 return dasd_eckd_end_analysis(block); 1389 } 1390 1391 static int dasd_eckd_ready_to_online(struct dasd_device *device) 1392 { 1393 return dasd_alias_add_device(device); 1394 }; 1395 1396 static int dasd_eckd_online_to_ready(struct dasd_device *device) 1397 { 1398 return dasd_alias_remove_device(device); 1399 }; 1400 1401 static int 1402 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo) 1403 { 1404 struct dasd_eckd_private *private; 1405 1406 private = (struct dasd_eckd_private *) block->base->private; 1407 if (dasd_check_blocksize(block->bp_block) == 0) { 1408 geo->sectors = recs_per_track(&private->rdc_data, 1409 0, block->bp_block); 1410 } 1411 geo->cylinders = private->rdc_data.no_cyl; 1412 geo->heads = private->rdc_data.trk_per_cyl; 1413 return 0; 1414 } 1415 1416 static struct dasd_ccw_req * 1417 dasd_eckd_format_device(struct dasd_device * device, 1418 struct format_data_t * fdata) 1419 { 1420 struct dasd_eckd_private *private; 1421 struct dasd_ccw_req *fcp; 1422 struct eckd_count *ect; 1423 struct ccw1 *ccw; 1424 void *data; 1425 int rpt; 1426 struct ch_t address; 1427 int cplength, datasize; 1428 int i; 1429 int intensity = 0; 1430 int r0_perm; 1431 1432 private = (struct dasd_eckd_private *) device->private; 1433 rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize); 1434 set_ch_t(&address, 1435 fdata->start_unit / private->rdc_data.trk_per_cyl, 1436 fdata->start_unit % private->rdc_data.trk_per_cyl); 1437 1438 /* Sanity checks. 
*/ 1439 if (fdata->start_unit >= 1440 (private->real_cyl * private->rdc_data.trk_per_cyl)) { 1441 dev_warn(&device->cdev->dev, "Start track number %d used in " 1442 "formatting is too big\n", fdata->start_unit); 1443 return ERR_PTR(-EINVAL); 1444 } 1445 if (fdata->start_unit > fdata->stop_unit) { 1446 dev_warn(&device->cdev->dev, "Start track %d used in " 1447 "formatting exceeds end track\n", fdata->start_unit); 1448 return ERR_PTR(-EINVAL); 1449 } 1450 if (dasd_check_blocksize(fdata->blksize) != 0) { 1451 dev_warn(&device->cdev->dev, 1452 "The DASD cannot be formatted with block size %d\n", 1453 fdata->blksize); 1454 return ERR_PTR(-EINVAL); 1455 } 1456 1457 /* 1458 * fdata->intensity is a bit string that tells us what to do: 1459 * Bit 0: write record zero 1460 * Bit 1: write home address, currently not supported 1461 * Bit 2: invalidate tracks 1462 * Bit 3: use OS/390 compatible disk layout (cdl) 1463 * Bit 4: do not allow storage subsystem to modify record zero 1464 * Only some bit combinations do make sense. 1465 */ 1466 if (fdata->intensity & 0x10) { 1467 r0_perm = 0; 1468 intensity = fdata->intensity & ~0x10; 1469 } else { 1470 r0_perm = 1; 1471 intensity = fdata->intensity; 1472 } 1473 switch (intensity) { 1474 case 0x00: /* Normal format */ 1475 case 0x08: /* Normal format, use cdl. */ 1476 cplength = 2 + rpt; 1477 datasize = sizeof(struct DE_eckd_data) + 1478 sizeof(struct LO_eckd_data) + 1479 rpt * sizeof(struct eckd_count); 1480 break; 1481 case 0x01: /* Write record zero and format track. */ 1482 case 0x09: /* Write record zero and format track, use cdl. */ 1483 cplength = 3 + rpt; 1484 datasize = sizeof(struct DE_eckd_data) + 1485 sizeof(struct LO_eckd_data) + 1486 sizeof(struct eckd_count) + 1487 rpt * sizeof(struct eckd_count); 1488 break; 1489 case 0x04: /* Invalidate track. */ 1490 case 0x0c: /* Invalidate track, use cdl. */ 1491 cplength = 3; 1492 datasize = sizeof(struct DE_eckd_data) + 1493 sizeof(struct LO_eckd_data) + 1494 sizeof(struct eckd_count); 1495 break; 1496 default: 1497 dev_warn(&device->cdev->dev, "An I/O control call used " 1498 "incorrect flags 0x%x\n", fdata->intensity); 1499 return ERR_PTR(-EINVAL); 1500 } 1501 /* Allocate the format ccw request. */ 1502 fcp = dasd_smalloc_request(dasd_eckd_discipline.name, 1503 cplength, datasize, device); 1504 if (IS_ERR(fcp)) 1505 return fcp; 1506 1507 data = fcp->data; 1508 ccw = fcp->cpaddr; 1509 1510 switch (intensity & ~0x08) { 1511 case 0x00: /* Normal format. */ 1512 define_extent(ccw++, (struct DE_eckd_data *) data, 1513 fdata->start_unit, fdata->start_unit, 1514 DASD_ECKD_CCW_WRITE_CKD, device); 1515 /* grant subsystem permission to format R0 */ 1516 if (r0_perm) 1517 ((struct DE_eckd_data *)data)->ga_extended |= 0x04; 1518 data += sizeof(struct DE_eckd_data); 1519 ccw[-1].flags |= CCW_FLAG_CC; 1520 locate_record(ccw++, (struct LO_eckd_data *) data, 1521 fdata->start_unit, 0, rpt, 1522 DASD_ECKD_CCW_WRITE_CKD, device, 1523 fdata->blksize); 1524 data += sizeof(struct LO_eckd_data); 1525 break; 1526 case 0x01: /* Write record zero + format track. 
*/ 1527 define_extent(ccw++, (struct DE_eckd_data *) data, 1528 fdata->start_unit, fdata->start_unit, 1529 DASD_ECKD_CCW_WRITE_RECORD_ZERO, 1530 device); 1531 data += sizeof(struct DE_eckd_data); 1532 ccw[-1].flags |= CCW_FLAG_CC; 1533 locate_record(ccw++, (struct LO_eckd_data *) data, 1534 fdata->start_unit, 0, rpt + 1, 1535 DASD_ECKD_CCW_WRITE_RECORD_ZERO, device, 1536 device->block->bp_block); 1537 data += sizeof(struct LO_eckd_data); 1538 break; 1539 case 0x04: /* Invalidate track. */ 1540 define_extent(ccw++, (struct DE_eckd_data *) data, 1541 fdata->start_unit, fdata->start_unit, 1542 DASD_ECKD_CCW_WRITE_CKD, device); 1543 data += sizeof(struct DE_eckd_data); 1544 ccw[-1].flags |= CCW_FLAG_CC; 1545 locate_record(ccw++, (struct LO_eckd_data *) data, 1546 fdata->start_unit, 0, 1, 1547 DASD_ECKD_CCW_WRITE_CKD, device, 8); 1548 data += sizeof(struct LO_eckd_data); 1549 break; 1550 } 1551 if (intensity & 0x01) { /* write record zero */ 1552 ect = (struct eckd_count *) data; 1553 data += sizeof(struct eckd_count); 1554 ect->cyl = address.cyl; 1555 ect->head = address.head; 1556 ect->record = 0; 1557 ect->kl = 0; 1558 ect->dl = 8; 1559 ccw[-1].flags |= CCW_FLAG_CC; 1560 ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO; 1561 ccw->flags = CCW_FLAG_SLI; 1562 ccw->count = 8; 1563 ccw->cda = (__u32)(addr_t) ect; 1564 ccw++; 1565 } 1566 if ((intensity & ~0x08) & 0x04) { /* erase track */ 1567 ect = (struct eckd_count *) data; 1568 data += sizeof(struct eckd_count); 1569 ect->cyl = address.cyl; 1570 ect->head = address.head; 1571 ect->record = 1; 1572 ect->kl = 0; 1573 ect->dl = 0; 1574 ccw[-1].flags |= CCW_FLAG_CC; 1575 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD; 1576 ccw->flags = CCW_FLAG_SLI; 1577 ccw->count = 8; 1578 ccw->cda = (__u32)(addr_t) ect; 1579 } else { /* write remaining records */ 1580 for (i = 0; i < rpt; i++) { 1581 ect = (struct eckd_count *) data; 1582 data += sizeof(struct eckd_count); 1583 ect->cyl = address.cyl; 1584 ect->head = address.head; 1585 ect->record = i + 1; 1586 ect->kl = 0; 1587 ect->dl = fdata->blksize; 1588 /* Check for special tracks 0-1 when formatting CDL */ 1589 if ((intensity & 0x08) && 1590 fdata->start_unit == 0) { 1591 if (i < 3) { 1592 ect->kl = 4; 1593 ect->dl = sizes_trk0[i] - 4; 1594 } 1595 } 1596 if ((intensity & 0x08) && 1597 fdata->start_unit == 1) { 1598 ect->kl = 44; 1599 ect->dl = LABEL_SIZE - 44; 1600 } 1601 ccw[-1].flags |= CCW_FLAG_CC; 1602 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD; 1603 ccw->flags = CCW_FLAG_SLI; 1604 ccw->count = 8; 1605 ccw->cda = (__u32)(addr_t) ect; 1606 ccw++; 1607 } 1608 } 1609 fcp->startdev = device; 1610 fcp->memdev = device; 1611 clear_bit(DASD_CQR_FLAGS_USE_ERP, &fcp->flags); 1612 fcp->retries = 5; /* set retry counter to enable default ERP */ 1613 fcp->buildclk = get_clock(); 1614 fcp->status = DASD_CQR_FILLED; 1615 return fcp; 1616 } 1617 1618 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr) 1619 { 1620 cqr->status = DASD_CQR_FILLED; 1621 if (cqr->block && (cqr->startdev != cqr->block->base)) { 1622 dasd_eckd_reset_ccw_to_base_io(cqr); 1623 cqr->startdev = cqr->block->base; 1624 } 1625 }; 1626 1627 static dasd_erp_fn_t 1628 dasd_eckd_erp_action(struct dasd_ccw_req * cqr) 1629 { 1630 struct dasd_device *device = (struct dasd_device *) cqr->startdev; 1631 struct ccw_device *cdev = device->cdev; 1632 1633 switch (cdev->id.cu_type) { 1634 case 0x3990: 1635 case 0x2105: 1636 case 0x2107: 1637 case 0x1750: 1638 return dasd_3990_erp_action; 1639 case 0x9343: 1640 case 0x3880: 1641 default: 1642 
return dasd_default_erp_action; 1643 } 1644 } 1645 1646 static dasd_erp_fn_t 1647 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr) 1648 { 1649 return dasd_default_erp_postaction; 1650 } 1651 1652 1653 static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device, 1654 struct irb *irb) 1655 { 1656 char mask; 1657 char *sense = NULL; 1658 1659 /* first of all check for state change pending interrupt */ 1660 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 1661 if ((scsw_dstat(&irb->scsw) & mask) == mask) { 1662 dasd_generic_handle_state_change(device); 1663 return; 1664 } 1665 1666 /* summary unit check */ 1667 if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) && 1668 (irb->ecw[7] == 0x0D)) { 1669 dasd_alias_handle_summary_unit_check(device, irb); 1670 return; 1671 } 1672 1673 sense = dasd_get_sense(irb); 1674 /* service information message SIM */ 1675 if (sense && !(sense[27] & DASD_SENSE_BIT_0) && 1676 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { 1677 dasd_3990_erp_handle_sim(device, sense); 1678 dasd_schedule_device_bh(device); 1679 return; 1680 } 1681 1682 if ((scsw_cc(&irb->scsw) == 1) && 1683 (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) && 1684 (scsw_actl(&irb->scsw) & SCSW_ACTL_START_PEND) && 1685 (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) { 1686 /* fake irb do nothing, they are handled elsewhere */ 1687 dasd_schedule_device_bh(device); 1688 return; 1689 } 1690 1691 if (!sense) { 1692 /* just report other unsolicited interrupts */ 1693 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1694 "unsolicited interrupt received"); 1695 } else { 1696 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1697 "unsolicited interrupt received " 1698 "(sense available)"); 1699 device->discipline->dump_sense_dbf(device, irb, "unsolicited"); 1700 } 1701 1702 dasd_schedule_device_bh(device); 1703 return; 1704 }; 1705 1706 1707 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( 1708 struct dasd_device *startdev, 1709 struct dasd_block *block, 1710 struct request *req, 1711 sector_t first_rec, 1712 sector_t last_rec, 1713 sector_t first_trk, 1714 sector_t last_trk, 1715 unsigned int first_offs, 1716 unsigned int last_offs, 1717 unsigned int blk_per_trk, 1718 unsigned int blksize) 1719 { 1720 struct dasd_eckd_private *private; 1721 unsigned long *idaws; 1722 struct LO_eckd_data *LO_data; 1723 struct dasd_ccw_req *cqr; 1724 struct ccw1 *ccw; 1725 struct req_iterator iter; 1726 struct bio_vec *bv; 1727 char *dst; 1728 unsigned int off; 1729 int count, cidaw, cplength, datasize; 1730 sector_t recid; 1731 unsigned char cmd, rcmd; 1732 int use_prefix; 1733 struct dasd_device *basedev; 1734 1735 basedev = block->base; 1736 private = (struct dasd_eckd_private *) basedev->private; 1737 if (rq_data_dir(req) == READ) 1738 cmd = DASD_ECKD_CCW_READ_MT; 1739 else if (rq_data_dir(req) == WRITE) 1740 cmd = DASD_ECKD_CCW_WRITE_MT; 1741 else 1742 return ERR_PTR(-EINVAL); 1743 1744 /* Check struct bio and count the number of blocks for the request. */ 1745 count = 0; 1746 cidaw = 0; 1747 rq_for_each_segment(bv, req, iter) { 1748 if (bv->bv_len & (blksize - 1)) 1749 /* Eckd can only do full blocks. */ 1750 return ERR_PTR(-EINVAL); 1751 count += bv->bv_len >> (block->s2b_shift + 9); 1752 #if defined(CONFIG_64BIT) 1753 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) 1754 cidaw += bv->bv_len >> (block->s2b_shift + 9); 1755 #endif 1756 } 1757 /* Paranoia. 
*/ 1758 if (count != last_rec - first_rec + 1) 1759 return ERR_PTR(-EINVAL); 1760 1761 /* use the prefix command if available */ 1762 use_prefix = private->features.feature[8] & 0x01; 1763 if (use_prefix) { 1764 /* 1x prefix + number of blocks */ 1765 cplength = 2 + count; 1766 /* 1x prefix + cidaws*sizeof(long) */ 1767 datasize = sizeof(struct PFX_eckd_data) + 1768 sizeof(struct LO_eckd_data) + 1769 cidaw * sizeof(unsigned long); 1770 } else { 1771 /* 1x define extent + 1x locate record + number of blocks */ 1772 cplength = 2 + count; 1773 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */ 1774 datasize = sizeof(struct DE_eckd_data) + 1775 sizeof(struct LO_eckd_data) + 1776 cidaw * sizeof(unsigned long); 1777 } 1778 /* Find out the number of additional locate record ccws for cdl. */ 1779 if (private->uses_cdl && first_rec < 2*blk_per_trk) { 1780 if (last_rec >= 2*blk_per_trk) 1781 count = 2*blk_per_trk - first_rec; 1782 cplength += count; 1783 datasize += count*sizeof(struct LO_eckd_data); 1784 } 1785 /* Allocate the ccw request. */ 1786 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 1787 cplength, datasize, startdev); 1788 if (IS_ERR(cqr)) 1789 return cqr; 1790 ccw = cqr->cpaddr; 1791 /* First ccw is define extent or prefix. */ 1792 if (use_prefix) { 1793 if (prefix(ccw++, cqr->data, first_trk, 1794 last_trk, cmd, basedev, startdev) == -EAGAIN) { 1795 /* Clock not in sync and XRC is enabled. 1796 * Try again later. 1797 */ 1798 dasd_sfree_request(cqr, startdev); 1799 return ERR_PTR(-EAGAIN); 1800 } 1801 idaws = (unsigned long *) (cqr->data + 1802 sizeof(struct PFX_eckd_data)); 1803 } else { 1804 if (define_extent(ccw++, cqr->data, first_trk, 1805 last_trk, cmd, startdev) == -EAGAIN) { 1806 /* Clock not in sync and XRC is enabled. 1807 * Try again later. 1808 */ 1809 dasd_sfree_request(cqr, startdev); 1810 return ERR_PTR(-EAGAIN); 1811 } 1812 idaws = (unsigned long *) (cqr->data + 1813 sizeof(struct DE_eckd_data)); 1814 } 1815 /* Build locate_record+read/write/ccws. */ 1816 LO_data = (struct LO_eckd_data *) (idaws + cidaw); 1817 recid = first_rec; 1818 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) { 1819 /* Only standard blocks so there is just one locate record. */ 1820 ccw[-1].flags |= CCW_FLAG_CC; 1821 locate_record(ccw++, LO_data++, first_trk, first_offs + 1, 1822 last_rec - recid + 1, cmd, basedev, blksize); 1823 } 1824 rq_for_each_segment(bv, req, iter) { 1825 dst = page_address(bv->bv_page) + bv->bv_offset; 1826 if (dasd_page_cache) { 1827 char *copy = kmem_cache_alloc(dasd_page_cache, 1828 GFP_DMA | __GFP_NOWARN); 1829 if (copy && rq_data_dir(req) == WRITE) 1830 memcpy(copy + bv->bv_offset, dst, bv->bv_len); 1831 if (copy) 1832 dst = copy + bv->bv_offset; 1833 } 1834 for (off = 0; off < bv->bv_len; off += blksize) { 1835 sector_t trkid = recid; 1836 unsigned int recoffs = sector_div(trkid, blk_per_trk); 1837 rcmd = cmd; 1838 count = blksize; 1839 /* Locate record for cdl special block ? */ 1840 if (private->uses_cdl && recid < 2*blk_per_trk) { 1841 if (dasd_eckd_cdl_special(blk_per_trk, recid)){ 1842 rcmd |= 0x8; 1843 count = dasd_eckd_cdl_reclen(recid); 1844 if (count < blksize && 1845 rq_data_dir(req) == READ) 1846 memset(dst + count, 0xe5, 1847 blksize - count); 1848 } 1849 ccw[-1].flags |= CCW_FLAG_CC; 1850 locate_record(ccw++, LO_data++, 1851 trkid, recoffs + 1, 1852 1, rcmd, basedev, count); 1853 } 1854 /* Locate record for standard blocks ? 
*/ 1855 if (private->uses_cdl && recid == 2*blk_per_trk) { 1856 ccw[-1].flags |= CCW_FLAG_CC; 1857 locate_record(ccw++, LO_data++, 1858 trkid, recoffs + 1, 1859 last_rec - recid + 1, 1860 cmd, basedev, count); 1861 } 1862 /* Read/write ccw. */ 1863 ccw[-1].flags |= CCW_FLAG_CC; 1864 ccw->cmd_code = rcmd; 1865 ccw->count = count; 1866 if (idal_is_needed(dst, blksize)) { 1867 ccw->cda = (__u32)(addr_t) idaws; 1868 ccw->flags = CCW_FLAG_IDA; 1869 idaws = idal_create_words(idaws, dst, blksize); 1870 } else { 1871 ccw->cda = (__u32)(addr_t) dst; 1872 ccw->flags = 0; 1873 } 1874 ccw++; 1875 dst += blksize; 1876 recid++; 1877 } 1878 } 1879 if (blk_noretry_request(req) || 1880 block->base->features & DASD_FEATURE_FAILFAST) 1881 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1882 cqr->startdev = startdev; 1883 cqr->memdev = startdev; 1884 cqr->block = block; 1885 cqr->expires = 5 * 60 * HZ; /* 5 minutes */ 1886 cqr->lpm = private->path_data.ppm; 1887 cqr->retries = 256; 1888 cqr->buildclk = get_clock(); 1889 cqr->status = DASD_CQR_FILLED; 1890 return cqr; 1891 } 1892 1893 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( 1894 struct dasd_device *startdev, 1895 struct dasd_block *block, 1896 struct request *req, 1897 sector_t first_rec, 1898 sector_t last_rec, 1899 sector_t first_trk, 1900 sector_t last_trk, 1901 unsigned int first_offs, 1902 unsigned int last_offs, 1903 unsigned int blk_per_trk, 1904 unsigned int blksize) 1905 { 1906 struct dasd_eckd_private *private; 1907 unsigned long *idaws; 1908 struct dasd_ccw_req *cqr; 1909 struct ccw1 *ccw; 1910 struct req_iterator iter; 1911 struct bio_vec *bv; 1912 char *dst, *idaw_dst; 1913 unsigned int cidaw, cplength, datasize; 1914 unsigned int tlf; 1915 sector_t recid; 1916 unsigned char cmd; 1917 struct dasd_device *basedev; 1918 unsigned int trkcount, count, count_to_trk_end; 1919 unsigned int idaw_len, seg_len, part_len, len_to_track_end; 1920 unsigned char new_track, end_idaw; 1921 sector_t trkid; 1922 unsigned int recoffs; 1923 1924 basedev = block->base; 1925 private = (struct dasd_eckd_private *) basedev->private; 1926 if (rq_data_dir(req) == READ) 1927 cmd = DASD_ECKD_CCW_READ_TRACK_DATA; 1928 else if (rq_data_dir(req) == WRITE) 1929 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA; 1930 else 1931 return ERR_PTR(-EINVAL); 1932 1933 /* Track based I/O needs IDAWs for each page, and not just for 1934 * 64 bit addresses. We need additional idals for pages 1935 * that get filled from two tracks, so we use the number 1936 * of records as upper limit. 1937 */ 1938 cidaw = last_rec - first_rec + 1; 1939 trkcount = last_trk - first_trk + 1; 1940 1941 /* 1x prefix + one read/write ccw per track */ 1942 cplength = 1 + trkcount; 1943 1944 /* on 31-bit we need space for two 32 bit addresses per page 1945 * on 64-bit one 64 bit address 1946 */ 1947 datasize = sizeof(struct PFX_eckd_data) + 1948 cidaw * sizeof(unsigned long long); 1949 1950 /* Allocate the ccw request. */ 1951 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 1952 cplength, datasize, startdev); 1953 if (IS_ERR(cqr)) 1954 return cqr; 1955 ccw = cqr->cpaddr; 1956 /* transfer length factor: how many bytes to read from the last track */ 1957 if (first_trk == last_trk) 1958 tlf = last_offs - first_offs + 1; 1959 else 1960 tlf = last_offs + 1; 1961 tlf *= blksize; 1962 1963 if (prefix_LRE(ccw++, cqr->data, first_trk, 1964 last_trk, cmd, basedev, startdev, 1965 1 /* format */, first_offs + 1, 1966 trkcount, blksize, 1967 tlf) == -EAGAIN) { 1968 /* Clock not in sync and XRC is enabled. 
1969 * Try again later. 1970 */ 1971 dasd_sfree_request(cqr, startdev); 1972 return ERR_PTR(-EAGAIN); 1973 } 1974 1975 /* 1976 * The translation of request into ccw programs must meet the 1977 * following conditions: 1978 * - all idaws but the first and the last must address full pages 1979 * (or 2K blocks on 31-bit) 1980 * - the scope of a ccw and it's idal ends with the track boundaries 1981 */ 1982 idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data)); 1983 recid = first_rec; 1984 new_track = 1; 1985 end_idaw = 0; 1986 len_to_track_end = 0; 1987 idaw_dst = 0; 1988 idaw_len = 0; 1989 rq_for_each_segment(bv, req, iter) { 1990 dst = page_address(bv->bv_page) + bv->bv_offset; 1991 seg_len = bv->bv_len; 1992 while (seg_len) { 1993 if (new_track) { 1994 trkid = recid; 1995 recoffs = sector_div(trkid, blk_per_trk); 1996 count_to_trk_end = blk_per_trk - recoffs; 1997 count = min((last_rec - recid + 1), 1998 (sector_t)count_to_trk_end); 1999 len_to_track_end = count * blksize; 2000 ccw[-1].flags |= CCW_FLAG_CC; 2001 ccw->cmd_code = cmd; 2002 ccw->count = len_to_track_end; 2003 ccw->cda = (__u32)(addr_t)idaws; 2004 ccw->flags = CCW_FLAG_IDA; 2005 ccw++; 2006 recid += count; 2007 new_track = 0; 2008 /* first idaw for a ccw may start anywhere */ 2009 if (!idaw_dst) 2010 idaw_dst = dst; 2011 } 2012 /* If we start a new idaw, we must make sure that it 2013 * starts on an IDA_BLOCK_SIZE boundary. 2014 * If we continue an idaw, we must make sure that the 2015 * current segment begins where the so far accumulated 2016 * idaw ends 2017 */ 2018 if (!idaw_dst) { 2019 if (__pa(dst) & (IDA_BLOCK_SIZE-1)) { 2020 dasd_sfree_request(cqr, startdev); 2021 return ERR_PTR(-ERANGE); 2022 } else 2023 idaw_dst = dst; 2024 } 2025 if ((idaw_dst + idaw_len) != dst) { 2026 dasd_sfree_request(cqr, startdev); 2027 return ERR_PTR(-ERANGE); 2028 } 2029 part_len = min(seg_len, len_to_track_end); 2030 seg_len -= part_len; 2031 dst += part_len; 2032 idaw_len += part_len; 2033 len_to_track_end -= part_len; 2034 /* collected memory area ends on an IDA_BLOCK border, 2035 * -> create an idaw 2036 * idal_create_words will handle cases where idaw_len 2037 * is larger then IDA_BLOCK_SIZE 2038 */ 2039 if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1))) 2040 end_idaw = 1; 2041 /* We also need to end the idaw at track end */ 2042 if (!len_to_track_end) { 2043 new_track = 1; 2044 end_idaw = 1; 2045 } 2046 if (end_idaw) { 2047 idaws = idal_create_words(idaws, idaw_dst, 2048 idaw_len); 2049 idaw_dst = 0; 2050 idaw_len = 0; 2051 end_idaw = 0; 2052 } 2053 } 2054 } 2055 2056 if (blk_noretry_request(req) || 2057 block->base->features & DASD_FEATURE_FAILFAST) 2058 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 2059 cqr->startdev = startdev; 2060 cqr->memdev = startdev; 2061 cqr->block = block; 2062 cqr->expires = 5 * 60 * HZ; /* 5 minutes */ 2063 cqr->lpm = private->path_data.ppm; 2064 cqr->retries = 256; 2065 cqr->buildclk = get_clock(); 2066 cqr->status = DASD_CQR_FILLED; 2067 return cqr; 2068 } 2069 2070 static int prepare_itcw(struct itcw *itcw, 2071 unsigned int trk, unsigned int totrk, int cmd, 2072 struct dasd_device *basedev, 2073 struct dasd_device *startdev, 2074 unsigned int rec_on_trk, int count, 2075 unsigned int blksize, 2076 unsigned int total_data_size, 2077 unsigned int tlf, 2078 unsigned int blk_per_trk) 2079 { 2080 struct PFX_eckd_data pfxdata; 2081 struct dasd_eckd_private *basepriv, *startpriv; 2082 struct DE_eckd_data *dedata; 2083 struct LRE_eckd_data *lredata; 2084 struct dcw *dcw; 2085 2086 u32 begcyl, 
endcyl; 2087 u16 heads, beghead, endhead; 2088 u8 pfx_cmd; 2089 2090 int rc = 0; 2091 int sector = 0; 2092 int dn, d; 2093 2094 2095 /* setup prefix data */ 2096 basepriv = (struct dasd_eckd_private *) basedev->private; 2097 startpriv = (struct dasd_eckd_private *) startdev->private; 2098 dedata = &pfxdata.define_extent; 2099 lredata = &pfxdata.locate_record; 2100 2101 memset(&pfxdata, 0, sizeof(pfxdata)); 2102 pfxdata.format = 1; /* PFX with LRE */ 2103 pfxdata.base_address = basepriv->ned->unit_addr; 2104 pfxdata.base_lss = basepriv->ned->ID; 2105 pfxdata.validity.define_extent = 1; 2106 2107 /* private uid is kept up to date, conf_data may be outdated */ 2108 if (startpriv->uid.type != UA_BASE_DEVICE) { 2109 pfxdata.validity.verify_base = 1; 2110 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) 2111 pfxdata.validity.hyper_pav = 1; 2112 } 2113 2114 switch (cmd) { 2115 case DASD_ECKD_CCW_READ_TRACK_DATA: 2116 dedata->mask.perm = 0x1; 2117 dedata->attributes.operation = basepriv->attrib.operation; 2118 dedata->blk_size = blksize; 2119 dedata->ga_extended |= 0x42; 2120 lredata->operation.orientation = 0x0; 2121 lredata->operation.operation = 0x0C; 2122 lredata->auxiliary.check_bytes = 0x01; 2123 pfx_cmd = DASD_ECKD_CCW_PFX_READ; 2124 break; 2125 case DASD_ECKD_CCW_WRITE_TRACK_DATA: 2126 dedata->mask.perm = 0x02; 2127 dedata->attributes.operation = basepriv->attrib.operation; 2128 dedata->blk_size = blksize; 2129 rc = check_XRC_on_prefix(&pfxdata, basedev); 2130 dedata->ga_extended |= 0x42; 2131 lredata->operation.orientation = 0x0; 2132 lredata->operation.operation = 0x3F; 2133 lredata->extended_operation = 0x23; 2134 lredata->auxiliary.check_bytes = 0x2; 2135 pfx_cmd = DASD_ECKD_CCW_PFX; 2136 break; 2137 default: 2138 DBF_DEV_EVENT(DBF_ERR, basedev, 2139 "prepare itcw, unknown opcode 0x%x", cmd); 2140 BUG(); 2141 break; 2142 } 2143 if (rc) 2144 return rc; 2145 2146 dedata->attributes.mode = 0x3; /* ECKD */ 2147 2148 heads = basepriv->rdc_data.trk_per_cyl; 2149 begcyl = trk / heads; 2150 beghead = trk % heads; 2151 endcyl = totrk / heads; 2152 endhead = totrk % heads; 2153 2154 /* check for sequential prestage - enhance cylinder range */ 2155 if (dedata->attributes.operation == DASD_SEQ_PRESTAGE || 2156 dedata->attributes.operation == DASD_SEQ_ACCESS) { 2157 2158 if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl) 2159 endcyl += basepriv->attrib.nr_cyl; 2160 else 2161 endcyl = (basepriv->real_cyl - 1); 2162 } 2163 2164 set_ch_t(&dedata->beg_ext, begcyl, beghead); 2165 set_ch_t(&dedata->end_ext, endcyl, endhead); 2166 2167 dedata->ep_format = 0x20; /* records per track is valid */ 2168 dedata->ep_rec_per_track = blk_per_trk; 2169 2170 if (rec_on_trk) { 2171 switch (basepriv->rdc_data.dev_type) { 2172 case 0x3390: 2173 dn = ceil_quot(blksize + 6, 232); 2174 d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34); 2175 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8; 2176 break; 2177 case 0x3380: 2178 d = 7 + ceil_quot(blksize + 12, 32); 2179 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7; 2180 break; 2181 } 2182 } 2183 2184 lredata->auxiliary.length_valid = 1; 2185 lredata->auxiliary.length_scope = 1; 2186 lredata->auxiliary.imbedded_ccw_valid = 1; 2187 lredata->length = tlf; 2188 lredata->imbedded_ccw = cmd; 2189 lredata->count = count; 2190 lredata->sector = sector; 2191 set_ch_t(&lredata->seek_addr, begcyl, beghead); 2192 lredata->search_arg.cyl = lredata->seek_addr.cyl; 2193 lredata->search_arg.head = lredata->seek_addr.head; 2194 lredata->search_arg.record = rec_on_trk; 2195 2196 dcw = 
itcw_add_dcw(itcw, pfx_cmd, 0, 2197 &pfxdata, sizeof(pfxdata), total_data_size); 2198 2199 return rc; 2200 } 2201 2202 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( 2203 struct dasd_device *startdev, 2204 struct dasd_block *block, 2205 struct request *req, 2206 sector_t first_rec, 2207 sector_t last_rec, 2208 sector_t first_trk, 2209 sector_t last_trk, 2210 unsigned int first_offs, 2211 unsigned int last_offs, 2212 unsigned int blk_per_trk, 2213 unsigned int blksize) 2214 { 2215 struct dasd_eckd_private *private; 2216 struct dasd_ccw_req *cqr; 2217 struct req_iterator iter; 2218 struct bio_vec *bv; 2219 char *dst; 2220 unsigned int trkcount, ctidaw; 2221 unsigned char cmd; 2222 struct dasd_device *basedev; 2223 unsigned int tlf; 2224 struct itcw *itcw; 2225 struct tidaw *last_tidaw = NULL; 2226 int itcw_op; 2227 size_t itcw_size; 2228 2229 basedev = block->base; 2230 private = (struct dasd_eckd_private *) basedev->private; 2231 if (rq_data_dir(req) == READ) { 2232 cmd = DASD_ECKD_CCW_READ_TRACK_DATA; 2233 itcw_op = ITCW_OP_READ; 2234 } else if (rq_data_dir(req) == WRITE) { 2235 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA; 2236 itcw_op = ITCW_OP_WRITE; 2237 } else 2238 return ERR_PTR(-EINVAL); 2239 2240 /* trackbased I/O needs address all memory via TIDAWs, 2241 * not just for 64 bit addresses. This allows us to map 2242 * each segment directly to one tidaw. 2243 */ 2244 trkcount = last_trk - first_trk + 1; 2245 ctidaw = 0; 2246 rq_for_each_segment(bv, req, iter) { 2247 ++ctidaw; 2248 } 2249 2250 /* Allocate the ccw request. */ 2251 itcw_size = itcw_calc_size(0, ctidaw, 0); 2252 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2253 0, itcw_size, startdev); 2254 if (IS_ERR(cqr)) 2255 return cqr; 2256 2257 cqr->cpmode = 1; 2258 cqr->startdev = startdev; 2259 cqr->memdev = startdev; 2260 cqr->block = block; 2261 cqr->expires = 100*HZ; 2262 cqr->buildclk = get_clock(); 2263 cqr->status = DASD_CQR_FILLED; 2264 cqr->retries = 10; 2265 2266 /* transfer length factor: how many bytes to read from the last track */ 2267 if (first_trk == last_trk) 2268 tlf = last_offs - first_offs + 1; 2269 else 2270 tlf = last_offs + 1; 2271 tlf *= blksize; 2272 2273 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0); 2274 cqr->cpaddr = itcw_get_tcw(itcw); 2275 2276 if (prepare_itcw(itcw, first_trk, last_trk, 2277 cmd, basedev, startdev, 2278 first_offs + 1, 2279 trkcount, blksize, 2280 (last_rec - first_rec + 1) * blksize, 2281 tlf, blk_per_trk) == -EAGAIN) { 2282 /* Clock not in sync and XRC is enabled. 2283 * Try again later. 2284 */ 2285 dasd_sfree_request(cqr, startdev); 2286 return ERR_PTR(-EAGAIN); 2287 } 2288 2289 /* 2290 * A tidaw can address 4k of memory, but must not cross page boundaries 2291 * We can let the block layer handle this by setting 2292 * blk_queue_segment_boundary to page boundaries and 2293 * blk_max_segment_size to page size when setting up the request queue. 
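 * A queue setup along these lines (done where the request queue is
 * configured, not in this function) is assumed here:
 *	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
 *	blk_queue_max_segment_size(q, PAGE_SIZE);
 * With those limits in place a bio segment never crosses a page
 * boundary, which is why the loop below can map each segment to
 * exactly one tidaw.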
2294 */ 2295 rq_for_each_segment(bv, req, iter) { 2296 dst = page_address(bv->bv_page) + bv->bv_offset; 2297 last_tidaw = itcw_add_tidaw(itcw, 0x00, dst, bv->bv_len); 2298 if (IS_ERR(last_tidaw)) 2299 return (struct dasd_ccw_req *)last_tidaw; 2300 } 2301 2302 last_tidaw->flags |= 0x80; 2303 itcw_finalize(itcw); 2304 2305 if (blk_noretry_request(req) || 2306 block->base->features & DASD_FEATURE_FAILFAST) 2307 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 2308 cqr->startdev = startdev; 2309 cqr->memdev = startdev; 2310 cqr->block = block; 2311 cqr->expires = 5 * 60 * HZ; /* 5 minutes */ 2312 cqr->lpm = private->path_data.ppm; 2313 cqr->retries = 256; 2314 cqr->buildclk = get_clock(); 2315 cqr->status = DASD_CQR_FILLED; 2316 return cqr; 2317 } 2318 2319 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, 2320 struct dasd_block *block, 2321 struct request *req) 2322 { 2323 int tpm, cmdrtd, cmdwtd; 2324 int use_prefix; 2325 #if defined(CONFIG_64BIT) 2326 int fcx_in_css, fcx_in_gneq, fcx_in_features; 2327 #endif 2328 struct dasd_eckd_private *private; 2329 struct dasd_device *basedev; 2330 sector_t first_rec, last_rec; 2331 sector_t first_trk, last_trk; 2332 unsigned int first_offs, last_offs; 2333 unsigned int blk_per_trk, blksize; 2334 int cdlspecial; 2335 struct dasd_ccw_req *cqr; 2336 2337 basedev = block->base; 2338 private = (struct dasd_eckd_private *) basedev->private; 2339 2340 /* Calculate number of blocks/records per track. */ 2341 blksize = block->bp_block; 2342 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 2343 /* Calculate record id of first and last block. */ 2344 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift; 2345 first_offs = sector_div(first_trk, blk_per_trk); 2346 last_rec = last_trk = 2347 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; 2348 last_offs = sector_div(last_trk, blk_per_trk); 2349 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk); 2350 2351 /* is transport mode supported? */ 2352 #if defined(CONFIG_64BIT) 2353 fcx_in_css = css_general_characteristics.fcx; 2354 fcx_in_gneq = private->gneq->reserved2[7] & 0x04; 2355 fcx_in_features = private->features.feature[40] & 0x80; 2356 tpm = fcx_in_css && fcx_in_gneq && fcx_in_features; 2357 #else 2358 tpm = 0; 2359 #endif 2360 2361 /* is read track data and write track data in command mode supported? 
*/ 2362 cmdrtd = private->features.feature[9] & 0x20; 2363 cmdwtd = private->features.feature[12] & 0x40; 2364 use_prefix = private->features.feature[8] & 0x01; 2365 2366 cqr = NULL; 2367 if (cdlspecial || dasd_page_cache) { 2368 /* do nothing, just fall through to the cmd mode single case */ 2369 } else if (!dasd_nofcx && tpm && (first_trk == last_trk)) { 2370 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req, 2371 first_rec, last_rec, 2372 first_trk, last_trk, 2373 first_offs, last_offs, 2374 blk_per_trk, blksize); 2375 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN) 2376 cqr = NULL; 2377 } else if (use_prefix && 2378 (((rq_data_dir(req) == READ) && cmdrtd) || 2379 ((rq_data_dir(req) == WRITE) && cmdwtd))) { 2380 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req, 2381 first_rec, last_rec, 2382 first_trk, last_trk, 2383 first_offs, last_offs, 2384 blk_per_trk, blksize); 2385 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN) 2386 cqr = NULL; 2387 } 2388 if (!cqr) 2389 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req, 2390 first_rec, last_rec, 2391 first_trk, last_trk, 2392 first_offs, last_offs, 2393 blk_per_trk, blksize); 2394 return cqr; 2395 } 2396 2397 static int 2398 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) 2399 { 2400 struct dasd_eckd_private *private; 2401 struct ccw1 *ccw; 2402 struct req_iterator iter; 2403 struct bio_vec *bv; 2404 char *dst, *cda; 2405 unsigned int blksize, blk_per_trk, off; 2406 sector_t recid; 2407 int status; 2408 2409 if (!dasd_page_cache) 2410 goto out; 2411 private = (struct dasd_eckd_private *) cqr->block->base->private; 2412 blksize = cqr->block->bp_block; 2413 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 2414 recid = blk_rq_pos(req) >> cqr->block->s2b_shift; 2415 ccw = cqr->cpaddr; 2416 /* Skip over define extent & locate record. */ 2417 ccw++; 2418 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) 2419 ccw++; 2420 rq_for_each_segment(bv, req, iter) { 2421 dst = page_address(bv->bv_page) + bv->bv_offset; 2422 for (off = 0; off < bv->bv_len; off += blksize) { 2423 /* Skip locate record. */ 2424 if (private->uses_cdl && recid <= 2*blk_per_trk) 2425 ccw++; 2426 if (dst) { 2427 if (ccw->flags & CCW_FLAG_IDA) 2428 cda = *((char **)((addr_t) ccw->cda)); 2429 else 2430 cda = (char *)((addr_t) ccw->cda); 2431 if (dst != cda) { 2432 if (rq_data_dir(req) == READ) 2433 memcpy(dst, cda, bv->bv_len); 2434 kmem_cache_free(dasd_page_cache, 2435 (void *)((addr_t)cda & PAGE_MASK)); 2436 } 2437 dst = NULL; 2438 } 2439 ccw++; 2440 recid++; 2441 } 2442 } 2443 out: 2444 status = cqr->status == DASD_CQR_DONE; 2445 dasd_sfree_request(cqr, cqr->memdev); 2446 return status; 2447 } 2448 2449 /* 2450 * Modify ccw/tcw in cqr so it can be started on a base device. 2451 * 2452 * Note that this is not enough to restart the cqr! 2453 * Either reset cqr->startdev as well (summary unit check handling) 2454 * or restart via separate cqr (as in ERP handling). 
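 * The function below only clears the verify_base and hyper_pav
 * validity bits in the prefix data, both for transport mode (tcw)
 * channel programs and for command mode channel programs that start
 * with a PFX command.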
2455 */ 2456 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr) 2457 { 2458 struct ccw1 *ccw; 2459 struct PFX_eckd_data *pfxdata; 2460 struct tcw *tcw; 2461 struct tccb *tccb; 2462 struct dcw *dcw; 2463 2464 if (cqr->cpmode == 1) { 2465 tcw = cqr->cpaddr; 2466 tccb = tcw_get_tccb(tcw); 2467 dcw = (struct dcw *)&tccb->tca[0]; 2468 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0]; 2469 pfxdata->validity.verify_base = 0; 2470 pfxdata->validity.hyper_pav = 0; 2471 } else { 2472 ccw = cqr->cpaddr; 2473 pfxdata = cqr->data; 2474 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) { 2475 pfxdata->validity.verify_base = 0; 2476 pfxdata->validity.hyper_pav = 0; 2477 } 2478 } 2479 } 2480 2481 #define DASD_ECKD_CHANQ_MAX_SIZE 4 2482 2483 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base, 2484 struct dasd_block *block, 2485 struct request *req) 2486 { 2487 struct dasd_eckd_private *private; 2488 struct dasd_device *startdev; 2489 unsigned long flags; 2490 struct dasd_ccw_req *cqr; 2491 2492 startdev = dasd_alias_get_start_dev(base); 2493 if (!startdev) 2494 startdev = base; 2495 private = (struct dasd_eckd_private *) startdev->private; 2496 if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE) 2497 return ERR_PTR(-EBUSY); 2498 2499 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags); 2500 private->count++; 2501 cqr = dasd_eckd_build_cp(startdev, block, req); 2502 if (IS_ERR(cqr)) 2503 private->count--; 2504 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags); 2505 return cqr; 2506 } 2507 2508 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr, 2509 struct request *req) 2510 { 2511 struct dasd_eckd_private *private; 2512 unsigned long flags; 2513 2514 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags); 2515 private = (struct dasd_eckd_private *) cqr->memdev->private; 2516 private->count--; 2517 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags); 2518 return dasd_eckd_free_cp(cqr, req); 2519 } 2520 2521 static int 2522 dasd_eckd_fill_info(struct dasd_device * device, 2523 struct dasd_information2_t * info) 2524 { 2525 struct dasd_eckd_private *private; 2526 2527 private = (struct dasd_eckd_private *) device->private; 2528 info->label_block = 2; 2529 info->FBA_layout = private->uses_cdl ? 0 : 1; 2530 info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL; 2531 info->characteristics_size = sizeof(struct dasd_eckd_characteristics); 2532 memcpy(info->characteristics, &private->rdc_data, 2533 sizeof(struct dasd_eckd_characteristics)); 2534 info->confdata_size = min((unsigned long)private->conf_len, 2535 sizeof(info->configuration_data)); 2536 memcpy(info->configuration_data, private->conf_data, 2537 info->confdata_size); 2538 return 0; 2539 } 2540 2541 /* 2542 * SECTION: ioctl functions for eckd devices. 2543 */ 2544 2545 /* 2546 * Release device ioctl. 2547 * Buils a channel programm to releases a prior reserved 2548 * (see dasd_eckd_reserve) device. 
2549 */ 2550 static int 2551 dasd_eckd_release(struct dasd_device *device) 2552 { 2553 struct dasd_ccw_req *cqr; 2554 int rc; 2555 struct ccw1 *ccw; 2556 2557 if (!capable(CAP_SYS_ADMIN)) 2558 return -EACCES; 2559 2560 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2561 1, 32, device); 2562 if (IS_ERR(cqr)) { 2563 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2564 "Could not allocate initialization request"); 2565 return PTR_ERR(cqr); 2566 } 2567 ccw = cqr->cpaddr; 2568 ccw->cmd_code = DASD_ECKD_CCW_RELEASE; 2569 ccw->flags |= CCW_FLAG_SLI; 2570 ccw->count = 32; 2571 ccw->cda = (__u32)(addr_t) cqr->data; 2572 cqr->startdev = device; 2573 cqr->memdev = device; 2574 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 2575 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 2576 cqr->retries = 2; /* set retry counter to enable basic ERP */ 2577 cqr->expires = 2 * HZ; 2578 cqr->buildclk = get_clock(); 2579 cqr->status = DASD_CQR_FILLED; 2580 2581 rc = dasd_sleep_on_immediatly(cqr); 2582 2583 dasd_sfree_request(cqr, cqr->memdev); 2584 return rc; 2585 } 2586 2587 /* 2588 * Reserve device ioctl. 2589 * Options are set to 'synchronous wait for interrupt' and 2590 * 'timeout the request'. This leads to a terminate IO if 2591 * the interrupt is outstanding for a certain time. 2592 */ 2593 static int 2594 dasd_eckd_reserve(struct dasd_device *device) 2595 { 2596 struct dasd_ccw_req *cqr; 2597 int rc; 2598 struct ccw1 *ccw; 2599 2600 if (!capable(CAP_SYS_ADMIN)) 2601 return -EACCES; 2602 2603 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2604 1, 32, device); 2605 if (IS_ERR(cqr)) { 2606 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2607 "Could not allocate initialization request"); 2608 return PTR_ERR(cqr); 2609 } 2610 ccw = cqr->cpaddr; 2611 ccw->cmd_code = DASD_ECKD_CCW_RESERVE; 2612 ccw->flags |= CCW_FLAG_SLI; 2613 ccw->count = 32; 2614 ccw->cda = (__u32)(addr_t) cqr->data; 2615 cqr->startdev = device; 2616 cqr->memdev = device; 2617 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 2618 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 2619 cqr->retries = 2; /* set retry counter to enable basic ERP */ 2620 cqr->expires = 2 * HZ; 2621 cqr->buildclk = get_clock(); 2622 cqr->status = DASD_CQR_FILLED; 2623 2624 rc = dasd_sleep_on_immediatly(cqr); 2625 2626 dasd_sfree_request(cqr, cqr->memdev); 2627 return rc; 2628 } 2629 2630 /* 2631 * Steal lock ioctl - unconditional reserve device. 2632 * Buils a channel programm to break a device's reservation. 
2633 * (unconditional reserve) 2634 */ 2635 static int 2636 dasd_eckd_steal_lock(struct dasd_device *device) 2637 { 2638 struct dasd_ccw_req *cqr; 2639 int rc; 2640 struct ccw1 *ccw; 2641 2642 if (!capable(CAP_SYS_ADMIN)) 2643 return -EACCES; 2644 2645 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2646 1, 32, device); 2647 if (IS_ERR(cqr)) { 2648 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2649 "Could not allocate initialization request"); 2650 return PTR_ERR(cqr); 2651 } 2652 ccw = cqr->cpaddr; 2653 ccw->cmd_code = DASD_ECKD_CCW_SLCK; 2654 ccw->flags |= CCW_FLAG_SLI; 2655 ccw->count = 32; 2656 ccw->cda = (__u32)(addr_t) cqr->data; 2657 cqr->startdev = device; 2658 cqr->memdev = device; 2659 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 2660 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 2661 cqr->retries = 2; /* set retry counter to enable basic ERP */ 2662 cqr->expires = 2 * HZ; 2663 cqr->buildclk = get_clock(); 2664 cqr->status = DASD_CQR_FILLED; 2665 2666 rc = dasd_sleep_on_immediatly(cqr); 2667 2668 dasd_sfree_request(cqr, cqr->memdev); 2669 return rc; 2670 } 2671 2672 /* 2673 * Read performance statistics 2674 */ 2675 static int 2676 dasd_eckd_performance(struct dasd_device *device, void __user *argp) 2677 { 2678 struct dasd_psf_prssd_data *prssdp; 2679 struct dasd_rssd_perf_stats_t *stats; 2680 struct dasd_ccw_req *cqr; 2681 struct ccw1 *ccw; 2682 int rc; 2683 2684 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2685 1 /* PSF */ + 1 /* RSSD */ , 2686 (sizeof(struct dasd_psf_prssd_data) + 2687 sizeof(struct dasd_rssd_perf_stats_t)), 2688 device); 2689 if (IS_ERR(cqr)) { 2690 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2691 "Could not allocate initialization request"); 2692 return PTR_ERR(cqr); 2693 } 2694 cqr->startdev = device; 2695 cqr->memdev = device; 2696 cqr->retries = 0; 2697 cqr->expires = 10 * HZ; 2698 2699 /* Prepare for Read Subsystem Data */ 2700 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 2701 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data)); 2702 prssdp->order = PSF_ORDER_PRSSD; 2703 prssdp->suborder = 0x01; /* Performance Statistics */ 2704 prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */ 2705 2706 ccw = cqr->cpaddr; 2707 ccw->cmd_code = DASD_ECKD_CCW_PSF; 2708 ccw->count = sizeof(struct dasd_psf_prssd_data); 2709 ccw->flags |= CCW_FLAG_CC; 2710 ccw->cda = (__u32)(addr_t) prssdp; 2711 2712 /* Read Subsystem Data - Performance Statistics */ 2713 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); 2714 memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t)); 2715 2716 ccw++; 2717 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 2718 ccw->count = sizeof(struct dasd_rssd_perf_stats_t); 2719 ccw->cda = (__u32)(addr_t) stats; 2720 2721 cqr->buildclk = get_clock(); 2722 cqr->status = DASD_CQR_FILLED; 2723 rc = dasd_sleep_on(cqr); 2724 if (rc == 0) { 2725 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 2726 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); 2727 if (copy_to_user(argp, stats, 2728 sizeof(struct dasd_rssd_perf_stats_t))) 2729 rc = -EFAULT; 2730 } 2731 dasd_sfree_request(cqr, cqr->memdev); 2732 return rc; 2733 } 2734 2735 /* 2736 * Get attributes (cache operations) 2737 * Returnes the cache attributes used in Define Extend (DE). 
2738 */ 2739 static int 2740 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp) 2741 { 2742 struct dasd_eckd_private *private = 2743 (struct dasd_eckd_private *)device->private; 2744 struct attrib_data_t attrib = private->attrib; 2745 int rc; 2746 2747 if (!capable(CAP_SYS_ADMIN)) 2748 return -EACCES; 2749 if (!argp) 2750 return -EINVAL; 2751 2752 rc = 0; 2753 if (copy_to_user(argp, (long *) &attrib, 2754 sizeof(struct attrib_data_t))) 2755 rc = -EFAULT; 2756 2757 return rc; 2758 } 2759 2760 /* 2761 * Set attributes (cache operations) 2762 * Stores the attributes for cache operation to be used in Define Extend (DE). 2763 */ 2764 static int 2765 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp) 2766 { 2767 struct dasd_eckd_private *private = 2768 (struct dasd_eckd_private *)device->private; 2769 struct attrib_data_t attrib; 2770 2771 if (!capable(CAP_SYS_ADMIN)) 2772 return -EACCES; 2773 if (!argp) 2774 return -EINVAL; 2775 2776 if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t))) 2777 return -EFAULT; 2778 private->attrib = attrib; 2779 2780 dev_info(&device->cdev->dev, 2781 "The DASD cache mode was set to %x (%i cylinder prestage)\n", 2782 private->attrib.operation, private->attrib.nr_cyl); 2783 return 0; 2784 } 2785 2786 /* 2787 * Issue syscall I/O to EMC Symmetrix array. 2788 * CCWs are PSF and RSSD 2789 */ 2790 static int dasd_symm_io(struct dasd_device *device, void __user *argp) 2791 { 2792 struct dasd_symmio_parms usrparm; 2793 char *psf_data, *rssd_result; 2794 struct dasd_ccw_req *cqr; 2795 struct ccw1 *ccw; 2796 int rc; 2797 2798 /* Copy parms from caller */ 2799 rc = -EFAULT; 2800 if (copy_from_user(&usrparm, argp, sizeof(usrparm))) 2801 goto out; 2802 #ifndef CONFIG_64BIT 2803 /* Make sure pointers are sane even on 31 bit. 
*/ 2804 if ((usrparm.psf_data >> 32) != 0 || (usrparm.rssd_result >> 32) != 0) { 2805 rc = -EINVAL; 2806 goto out; 2807 } 2808 #endif 2809 /* alloc I/O data area */ 2810 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); 2811 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); 2812 if (!psf_data || !rssd_result) { 2813 rc = -ENOMEM; 2814 goto out_free; 2815 } 2816 2817 /* get syscall header from user space */ 2818 rc = -EFAULT; 2819 if (copy_from_user(psf_data, 2820 (void __user *)(unsigned long) usrparm.psf_data, 2821 usrparm.psf_data_len)) 2822 goto out_free; 2823 2824 /* sanity check on syscall header */ 2825 if (psf_data[0] != 0x17 && psf_data[1] != 0xce) { 2826 rc = -EINVAL; 2827 goto out_free; 2828 } 2829 2830 /* setup CCWs for PSF + RSSD */ 2831 cqr = dasd_smalloc_request("ECKD", 2 , 0, device); 2832 if (IS_ERR(cqr)) { 2833 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2834 "Could not allocate initialization request"); 2835 rc = PTR_ERR(cqr); 2836 goto out_free; 2837 } 2838 2839 cqr->startdev = device; 2840 cqr->memdev = device; 2841 cqr->retries = 3; 2842 cqr->expires = 10 * HZ; 2843 cqr->buildclk = get_clock(); 2844 cqr->status = DASD_CQR_FILLED; 2845 2846 /* Build the ccws */ 2847 ccw = cqr->cpaddr; 2848 2849 /* PSF ccw */ 2850 ccw->cmd_code = DASD_ECKD_CCW_PSF; 2851 ccw->count = usrparm.psf_data_len; 2852 ccw->flags |= CCW_FLAG_CC; 2853 ccw->cda = (__u32)(addr_t) psf_data; 2854 2855 ccw++; 2856 2857 /* RSSD ccw */ 2858 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 2859 ccw->count = usrparm.rssd_result_len; 2860 ccw->flags = CCW_FLAG_SLI ; 2861 ccw->cda = (__u32)(addr_t) rssd_result; 2862 2863 rc = dasd_sleep_on(cqr); 2864 if (rc) 2865 goto out_sfree; 2866 2867 rc = -EFAULT; 2868 if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result, 2869 rssd_result, usrparm.rssd_result_len)) 2870 goto out_sfree; 2871 rc = 0; 2872 2873 out_sfree: 2874 dasd_sfree_request(cqr, cqr->memdev); 2875 out_free: 2876 kfree(rssd_result); 2877 kfree(psf_data); 2878 out: 2879 DBF_DEV_EVENT(DBF_WARNING, device, "Symmetrix ioctl: rc=%d", rc); 2880 return rc; 2881 } 2882 2883 static int 2884 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp) 2885 { 2886 struct dasd_device *device = block->base; 2887 2888 switch (cmd) { 2889 case BIODASDGATTR: 2890 return dasd_eckd_get_attrib(device, argp); 2891 case BIODASDSATTR: 2892 return dasd_eckd_set_attrib(device, argp); 2893 case BIODASDPSRD: 2894 return dasd_eckd_performance(device, argp); 2895 case BIODASDRLSE: 2896 return dasd_eckd_release(device); 2897 case BIODASDRSRV: 2898 return dasd_eckd_reserve(device); 2899 case BIODASDSLCK: 2900 return dasd_eckd_steal_lock(device); 2901 case BIODASDSYMMIO: 2902 return dasd_symm_io(device, argp); 2903 default: 2904 return -ENOIOCTLCMD; 2905 } 2906 } 2907 2908 /* 2909 * Dump the range of CCWs into 'page' buffer 2910 * and return number of printed chars. 
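 * For every CCW in the range the two CCW words are printed, followed
 * by up to 32 bytes of its data area; the IDAL pointer is resolved
 * when CCW_FLAG_IDA is set.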
2911 */ 2912 static int 2913 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) 2914 { 2915 int len, count; 2916 char *datap; 2917 2918 len = 0; 2919 while (from <= to) { 2920 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 2921 " CCW %p: %08X %08X DAT:", 2922 from, ((int *) from)[0], ((int *) from)[1]); 2923 2924 /* get pointer to data (consider IDALs) */ 2925 if (from->flags & CCW_FLAG_IDA) 2926 datap = (char *) *((addr_t *) (addr_t) from->cda); 2927 else 2928 datap = (char *) ((addr_t) from->cda); 2929 2930 /* dump data (max 32 bytes) */ 2931 for (count = 0; count < from->count && count < 32; count++) { 2932 if (count % 8 == 0) len += sprintf(page + len, " "); 2933 if (count % 4 == 0) len += sprintf(page + len, " "); 2934 len += sprintf(page + len, "%02x", datap[count]); 2935 } 2936 len += sprintf(page + len, "\n"); 2937 from++; 2938 } 2939 return len; 2940 } 2941 2942 static void 2943 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb, 2944 char *reason) 2945 { 2946 u64 *sense; 2947 2948 sense = (u64 *) dasd_get_sense(irb); 2949 if (sense) { 2950 DBF_DEV_EVENT(DBF_EMERG, device, 2951 "%s: %s %02x%02x%02x %016llx %016llx %016llx " 2952 "%016llx", reason, 2953 scsw_is_tm(&irb->scsw) ? "t" : "c", 2954 scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw), 2955 scsw_dstat(&irb->scsw), sense[0], sense[1], 2956 sense[2], sense[3]); 2957 } else { 2958 DBF_DEV_EVENT(DBF_EMERG, device, "%s", 2959 "SORRY - NO VALID SENSE AVAILABLE\n"); 2960 } 2961 } 2962 2963 /* 2964 * Print sense data and related channel program. 2965 * Parts are printed because printk buffer is only 1024 bytes. 2966 */ 2967 static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, 2968 struct dasd_ccw_req *req, struct irb *irb) 2969 { 2970 char *page; 2971 struct ccw1 *first, *last, *fail, *from, *to; 2972 int len, sl, sct; 2973 2974 page = (char *) get_zeroed_page(GFP_ATOMIC); 2975 if (page == NULL) { 2976 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2977 "No memory to dump sense data\n"); 2978 return; 2979 } 2980 /* dump the sense data */ 2981 len = sprintf(page, KERN_ERR PRINTK_HEADER 2982 " I/O status report for device %s:\n", 2983 dev_name(&device->cdev->dev)); 2984 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 2985 " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n", 2986 req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), 2987 scsw_cc(&irb->scsw), req->intrc); 2988 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 2989 " device %s: Failing CCW: %p\n", 2990 dev_name(&device->cdev->dev), 2991 (void *) (addr_t) irb->scsw.cmd.cpa); 2992 if (irb->esw.esw0.erw.cons) { 2993 for (sl = 0; sl < 4; sl++) { 2994 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 2995 " Sense(hex) %2d-%2d:", 2996 (8 * sl), ((8 * sl) + 7)); 2997 2998 for (sct = 0; sct < 8; sct++) { 2999 len += sprintf(page + len, " %02x", 3000 irb->ecw[8 * sl + sct]); 3001 } 3002 len += sprintf(page + len, "\n"); 3003 } 3004 3005 if (irb->ecw[27] & DASD_SENSE_BIT_0) { 3006 /* 24 Byte Sense Data */ 3007 sprintf(page + len, KERN_ERR PRINTK_HEADER 3008 " 24 Byte: %x MSG %x, " 3009 "%s MSGb to SYSOP\n", 3010 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f, 3011 irb->ecw[1] & 0x10 ? 
"" : "no"); 3012 } else { 3013 /* 32 Byte Sense Data */ 3014 sprintf(page + len, KERN_ERR PRINTK_HEADER 3015 " 32 Byte: Format: %x " 3016 "Exception class %x\n", 3017 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4); 3018 } 3019 } else { 3020 sprintf(page + len, KERN_ERR PRINTK_HEADER 3021 " SORRY - NO VALID SENSE AVAILABLE\n"); 3022 } 3023 printk("%s", page); 3024 3025 if (req) { 3026 /* req == NULL for unsolicited interrupts */ 3027 /* dump the Channel Program (max 140 Bytes per line) */ 3028 /* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */ 3029 first = req->cpaddr; 3030 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); 3031 to = min(first + 6, last); 3032 len = sprintf(page, KERN_ERR PRINTK_HEADER 3033 " Related CP in req: %p\n", req); 3034 dasd_eckd_dump_ccw_range(first, to, page + len); 3035 printk("%s", page); 3036 3037 /* print failing CCW area (maximum 4) */ 3038 /* scsw->cda is either valid or zero */ 3039 len = 0; 3040 from = ++to; 3041 fail = (struct ccw1 *)(addr_t) 3042 irb->scsw.cmd.cpa; /* failing CCW */ 3043 if (from < fail - 2) { 3044 from = fail - 2; /* there is a gap - print header */ 3045 len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n"); 3046 } 3047 to = min(fail + 1, last); 3048 len += dasd_eckd_dump_ccw_range(from, to, page + len); 3049 3050 /* print last CCWs (maximum 2) */ 3051 from = max(from, ++to); 3052 if (from < last - 1) { 3053 from = last - 1; /* there is a gap - print header */ 3054 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); 3055 } 3056 len += dasd_eckd_dump_ccw_range(from, last, page + len); 3057 if (len > 0) 3058 printk("%s", page); 3059 } 3060 free_page((unsigned long) page); 3061 } 3062 3063 3064 /* 3065 * Print sense data from a tcw. 3066 */ 3067 static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, 3068 struct dasd_ccw_req *req, struct irb *irb) 3069 { 3070 char *page; 3071 int len, sl, sct, residual; 3072 3073 struct tsb *tsb; 3074 u8 *sense; 3075 3076 3077 page = (char *) get_zeroed_page(GFP_ATOMIC); 3078 if (page == NULL) { 3079 DBF_DEV_EVENT(DBF_WARNING, device, " %s", 3080 "No memory to dump sense data"); 3081 return; 3082 } 3083 /* dump the sense data */ 3084 len = sprintf(page, KERN_ERR PRINTK_HEADER 3085 " I/O status report for device %s:\n", 3086 dev_name(&device->cdev->dev)); 3087 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3088 " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d " 3089 "fcxs: 0x%02X schxs: 0x%02X\n", req, 3090 scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), 3091 scsw_cc(&irb->scsw), req->intrc, 3092 irb->scsw.tm.fcxs, irb->scsw.tm.schxs); 3093 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3094 " device %s: Failing TCW: %p\n", 3095 dev_name(&device->cdev->dev), 3096 (void *) (addr_t) irb->scsw.tm.tcw); 3097 3098 tsb = NULL; 3099 sense = NULL; 3100 if (irb->scsw.tm.tcw) 3101 tsb = tcw_get_tsb( 3102 (struct tcw *)(unsigned long)irb->scsw.tm.tcw); 3103 3104 if (tsb && (irb->scsw.tm.fcxs == 0x01)) { 3105 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3106 " tsb->length %d\n", tsb->length); 3107 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3108 " tsb->flags %x\n", tsb->flags); 3109 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3110 " tsb->dcw_offset %d\n", tsb->dcw_offset); 3111 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3112 " tsb->count %d\n", tsb->count); 3113 residual = tsb->count - 28; 3114 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3115 " residual %d\n", residual); 3116 3117 switch (tsb->flags & 0x07) { 3118 case 1: /* 
tsa_iostat */ 3119 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3120 " tsb->tsa.iostat.dev_time %d\n", 3121 tsb->tsa.iostat.dev_time); 3122 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3123 " tsb->tsa.iostat.def_time %d\n", 3124 tsb->tsa.iostat.def_time); 3125 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3126 " tsb->tsa.iostat.queue_time %d\n", 3127 tsb->tsa.iostat.queue_time); 3128 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3129 " tsb->tsa.iostat.dev_busy_time %d\n", 3130 tsb->tsa.iostat.dev_busy_time); 3131 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3132 " tsb->tsa.iostat.dev_act_time %d\n", 3133 tsb->tsa.iostat.dev_act_time); 3134 sense = tsb->tsa.iostat.sense; 3135 break; 3136 case 2: /* ts_ddpc */ 3137 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3138 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc); 3139 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3140 " tsb->tsa.ddpc.rcq: "); 3141 for (sl = 0; sl < 16; sl++) { 3142 for (sct = 0; sct < 8; sct++) { 3143 len += sprintf(page + len, " %02x", 3144 tsb->tsa.ddpc.rcq[sl]); 3145 } 3146 len += sprintf(page + len, "\n"); 3147 } 3148 sense = tsb->tsa.ddpc.sense; 3149 break; 3150 case 3: /* tsa_intrg */ 3151 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3152 " tsb->tsa.intrg.: not supportet yet \n"); 3153 break; 3154 } 3155 3156 if (sense) { 3157 for (sl = 0; sl < 4; sl++) { 3158 len += sprintf(page + len, 3159 KERN_ERR PRINTK_HEADER 3160 " Sense(hex) %2d-%2d:", 3161 (8 * sl), ((8 * sl) + 7)); 3162 for (sct = 0; sct < 8; sct++) { 3163 len += sprintf(page + len, " %02x", 3164 sense[8 * sl + sct]); 3165 } 3166 len += sprintf(page + len, "\n"); 3167 } 3168 3169 if (sense[27] & DASD_SENSE_BIT_0) { 3170 /* 24 Byte Sense Data */ 3171 sprintf(page + len, KERN_ERR PRINTK_HEADER 3172 " 24 Byte: %x MSG %x, " 3173 "%s MSGb to SYSOP\n", 3174 sense[7] >> 4, sense[7] & 0x0f, 3175 sense[1] & 0x10 ? 
"" : "no"); 3176 } else { 3177 /* 32 Byte Sense Data */ 3178 sprintf(page + len, KERN_ERR PRINTK_HEADER 3179 " 32 Byte: Format: %x " 3180 "Exception class %x\n", 3181 sense[6] & 0x0f, sense[22] >> 4); 3182 } 3183 } else { 3184 sprintf(page + len, KERN_ERR PRINTK_HEADER 3185 " SORRY - NO VALID SENSE AVAILABLE\n"); 3186 } 3187 } else { 3188 sprintf(page + len, KERN_ERR PRINTK_HEADER 3189 " SORRY - NO TSB DATA AVAILABLE\n"); 3190 } 3191 printk("%s", page); 3192 free_page((unsigned long) page); 3193 } 3194 3195 static void dasd_eckd_dump_sense(struct dasd_device *device, 3196 struct dasd_ccw_req *req, struct irb *irb) 3197 { 3198 if (req && scsw_is_tm(&req->irb.scsw)) 3199 dasd_eckd_dump_sense_tcw(device, req, irb); 3200 else 3201 dasd_eckd_dump_sense_ccw(device, req, irb); 3202 } 3203 3204 int dasd_eckd_pm_freeze(struct dasd_device *device) 3205 { 3206 /* 3207 * the device should be disconnected from our LCU structure 3208 * on restore we will reconnect it and reread LCU specific 3209 * information like PAV support that might have changed 3210 */ 3211 dasd_alias_remove_device(device); 3212 dasd_alias_disconnect_device_from_lcu(device); 3213 3214 return 0; 3215 } 3216 3217 int dasd_eckd_restore_device(struct dasd_device *device) 3218 { 3219 struct dasd_eckd_private *private; 3220 int is_known, rc; 3221 struct dasd_uid temp_uid; 3222 3223 private = (struct dasd_eckd_private *) device->private; 3224 3225 /* Read Configuration Data */ 3226 rc = dasd_eckd_read_conf(device); 3227 if (rc) 3228 goto out_err; 3229 3230 /* Generate device unique id and register in devmap */ 3231 rc = dasd_eckd_generate_uid(device, &private->uid); 3232 dasd_get_uid(device->cdev, &temp_uid); 3233 if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0) 3234 dev_err(&device->cdev->dev, "The UID of the DASD has changed\n"); 3235 if (rc) 3236 goto out_err; 3237 dasd_set_uid(device->cdev, &private->uid); 3238 3239 /* register lcu with alias handling, enable PAV if this is a new lcu */ 3240 is_known = dasd_alias_make_device_known_to_lcu(device); 3241 if (is_known < 0) 3242 return is_known; 3243 if (!is_known) { 3244 /* new lcu found */ 3245 rc = dasd_eckd_validate_server(device); /* will switch pav on */ 3246 if (rc) 3247 goto out_err; 3248 } 3249 3250 /* Read Feature Codes */ 3251 rc = dasd_eckd_read_features(device); 3252 if (rc) 3253 goto out_err; 3254 3255 /* Read Device Characteristics */ 3256 memset(&private->rdc_data, 0, sizeof(private->rdc_data)); 3257 rc = dasd_generic_read_dev_chars(device, "ECKD", 3258 &private->rdc_data, 64); 3259 if (rc) { 3260 DBF_EVENT(DBF_WARNING, 3261 "Read device characteristics failed, rc=%d for " 3262 "device: %s", rc, dev_name(&device->cdev->dev)); 3263 goto out_err; 3264 } 3265 3266 /* add device to alias management */ 3267 dasd_alias_add_device(device); 3268 3269 return 0; 3270 3271 out_err: 3272 return -1; 3273 } 3274 3275 static struct ccw_driver dasd_eckd_driver = { 3276 .name = "dasd-eckd", 3277 .owner = THIS_MODULE, 3278 .ids = dasd_eckd_ids, 3279 .probe = dasd_eckd_probe, 3280 .remove = dasd_generic_remove, 3281 .set_offline = dasd_generic_set_offline, 3282 .set_online = dasd_eckd_set_online, 3283 .notify = dasd_generic_notify, 3284 .freeze = dasd_generic_pm_freeze, 3285 .thaw = dasd_generic_restore_device, 3286 .restore = dasd_generic_restore_device, 3287 }; 3288 3289 /* 3290 * max_blocks is dependent on the amount of storage that is available 3291 * in the static io buffer for each device. Currently each device has 3292 * 8192 bytes (=2 pages). 
For 64 bit one dasd_mchunkt_t structure has 3293 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use 3294 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In 3295 * addition we have one define extent ccw + 16 bytes of data and one 3296 * locate record ccw + 16 bytes of data. That makes: 3297 * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum. 3298 * We want to fit two into the available memory so that we can immediately 3299 * start the next request if one finishes off. That makes 249.5 blocks 3300 * for one request. Give a little safety and the result is 240. 3301 */ 3302 static struct dasd_discipline dasd_eckd_discipline = { 3303 .owner = THIS_MODULE, 3304 .name = "ECKD", 3305 .ebcname = "ECKD", 3306 .max_blocks = 240, 3307 .check_device = dasd_eckd_check_characteristics, 3308 .uncheck_device = dasd_eckd_uncheck_device, 3309 .do_analysis = dasd_eckd_do_analysis, 3310 .ready_to_online = dasd_eckd_ready_to_online, 3311 .online_to_ready = dasd_eckd_online_to_ready, 3312 .fill_geometry = dasd_eckd_fill_geometry, 3313 .start_IO = dasd_start_IO, 3314 .term_IO = dasd_term_IO, 3315 .handle_terminated_request = dasd_eckd_handle_terminated_request, 3316 .format_device = dasd_eckd_format_device, 3317 .erp_action = dasd_eckd_erp_action, 3318 .erp_postaction = dasd_eckd_erp_postaction, 3319 .handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt, 3320 .build_cp = dasd_eckd_build_alias_cp, 3321 .free_cp = dasd_eckd_free_alias_cp, 3322 .dump_sense = dasd_eckd_dump_sense, 3323 .dump_sense_dbf = dasd_eckd_dump_sense_dbf, 3324 .fill_info = dasd_eckd_fill_info, 3325 .ioctl = dasd_eckd_ioctl, 3326 .freeze = dasd_eckd_pm_freeze, 3327 .restore = dasd_eckd_restore_device, 3328 }; 3329 3330 static int __init 3331 dasd_eckd_init(void) 3332 { 3333 int ret; 3334 3335 ASCEBC(dasd_eckd_discipline.ebcname, 4); 3336 ret = ccw_driver_register(&dasd_eckd_driver); 3337 if (!ret) 3338 wait_for_device_probe(); 3339 3340 return ret; 3341 } 3342 3343 static void __exit 3344 dasd_eckd_cleanup(void) 3345 { 3346 ccw_driver_unregister(&dasd_eckd_driver); 3347 } 3348 3349 module_init(dasd_eckd_init); 3350 module_exit(dasd_eckd_cleanup); 3351
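/*
 * Illustrative user space companion to the cache attribute ioctls above
 * (dasd_eckd_get_attrib/dasd_eckd_set_attrib): a minimal sketch, not part
 * of the driver. It assumes the s390 <asm/dasd.h> header provides
 * attrib_data_t, BIODASDGATTR/BIODASDSATTR and DASD_SEQ_PRESTAGE as used
 * in this file, and that /dev/dasda is an ECKD volume the caller may
 * administer (the driver requires CAP_SYS_ADMIN for these ioctls).
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/dasd.h>

int main(void)
{
	attrib_data_t attrib;
	int fd;

	fd = open("/dev/dasda", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* read the current cache attributes (BIODASDGATTR) */
	if (ioctl(fd, BIODASDGATTR, &attrib) == 0)
		printf("cache mode %x, %d cylinder prestage\n",
		       attrib.operation, attrib.nr_cyl);
	/* request sequential prestaging of two cylinders (BIODASDSATTR) */
	attrib.operation = DASD_SEQ_PRESTAGE;
	attrib.nr_cyl = 2;
	if (ioctl(fd, BIODASDSATTR, &attrib) != 0)
		perror("BIODASDSATTR");
	close(fd);
	return 0;
}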