xref: /linux/drivers/s390/block/dasd_eckd.c (revision 0d3b051adbb72ed81956447d0d1e54d5943ee6f5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4  *		    Horst Hummel <Horst.Hummel@de.ibm.com>
5  *		    Carsten Otte <Cotte@de.ibm.com>
6  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
7  * Bugreports.to..: <Linux390@de.ibm.com>
8  * Copyright IBM Corp. 1999, 2009
9  * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
10  * Author.........: Nigel Hislop <hislop_nigel@emc.com>
11  */
12 
13 #define KMSG_COMPONENT "dasd-eckd"
14 
15 #include <linux/stddef.h>
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/hdreg.h>	/* HDIO_GETGEO			    */
19 #include <linux/bio.h>
20 #include <linux/module.h>
21 #include <linux/compat.h>
22 #include <linux/init.h>
23 #include <linux/seq_file.h>
24 
25 #include <asm/css_chars.h>
26 #include <asm/debug.h>
27 #include <asm/idals.h>
28 #include <asm/ebcdic.h>
29 #include <asm/io.h>
30 #include <linux/uaccess.h>
31 #include <asm/cio.h>
32 #include <asm/ccwdev.h>
33 #include <asm/itcw.h>
34 #include <asm/schid.h>
35 #include <asm/chpid.h>
36 
37 #include "dasd_int.h"
38 #include "dasd_eckd.h"
39 
40 #ifdef PRINTK_HEADER
41 #undef PRINTK_HEADER
42 #endif				/* PRINTK_HEADER */
43 #define PRINTK_HEADER "dasd(eckd):"
44 
45 /*
46  * raw track access always maps to 64k in memory,
47  * i.e. 16 blocks of 4k per track
48  */
49 #define DASD_RAW_BLOCK_PER_TRACK 16
50 #define DASD_RAW_BLOCKSIZE 4096
51 /* 64k are 128 x 512 byte sectors  */
52 #define DASD_RAW_SECTORS_PER_TRACK 128
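/*
 * Sanity of the constants above: 16 blocks * 4096 bytes = 64 KiB per
 * track, which is the same amount of data as 128 * 512-byte sectors.
 */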
53 
54 MODULE_LICENSE("GPL");
55 
56 static struct dasd_discipline dasd_eckd_discipline;
57 
58 /* The ccw bus type uses this table to find devices that it passes to
59  * dasd_eckd_probe. */
60 static struct ccw_device_id dasd_eckd_ids[] = {
61 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
62 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
63 	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
64 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
65 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
66 	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
67 	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
68 	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
69 	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
70 	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
71 	{ /* end of list */ },
72 };
73 
74 MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
75 
76 static struct ccw_driver dasd_eckd_driver; /* see below */
77 
78 static void *rawpadpage;
79 
80 #define INIT_CQR_OK 0
81 #define INIT_CQR_UNFORMATTED 1
82 #define INIT_CQR_ERROR 2
83 
84 /* emergency request for reserve/release */
85 static struct {
86 	struct dasd_ccw_req cqr;
87 	struct ccw1 ccw;
88 	char data[32];
89 } *dasd_reserve_req;
90 static DEFINE_MUTEX(dasd_reserve_mutex);
91 
92 static struct {
93 	struct dasd_ccw_req cqr;
94 	struct ccw1 ccw[2];
95 	char data[40];
96 } *dasd_vol_info_req;
97 static DEFINE_MUTEX(dasd_vol_info_mutex);
98 
99 struct ext_pool_exhaust_work_data {
100 	struct work_struct worker;
101 	struct dasd_device *device;
102 	struct dasd_device *base;
103 };
104 
105 /* definitions for the path verification worker */
106 struct pe_handler_work_data {
107 	struct work_struct worker;
108 	struct dasd_device *device;
109 	struct dasd_ccw_req cqr;
110 	struct ccw1 ccw;
111 	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
112 	int isglobal;
113 	__u8 tbvpm;
114 	__u8 fcsecpm;
115 };
116 static struct pe_handler_work_data *pe_handler_worker;
117 static DEFINE_MUTEX(dasd_pe_handler_mutex);
118 
119 struct check_attention_work_data {
120 	struct work_struct worker;
121 	struct dasd_device *device;
122 	__u8 lpum;
123 };
124 
125 static int dasd_eckd_ext_pool_id(struct dasd_device *);
126 static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
127 			struct dasd_device *, struct dasd_device *,
128 			unsigned int, int, unsigned int, unsigned int,
129 			unsigned int, unsigned int);
130 
131 /* initial attempt at a probe function. this can be simplified once
132  * the other detection code is gone */
133 static int
134 dasd_eckd_probe(struct ccw_device *cdev)
135 {
136 	int ret;
137 
138 	/* set ECKD specific ccw-device options */
139 	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
140 				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
141 	if (ret) {
142 		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
143 				"dasd_eckd_probe: could not set "
144 				"ccw-device options");
145 		return ret;
146 	}
147 	ret = dasd_generic_probe(cdev);
148 	return ret;
149 }
150 
151 static int
152 dasd_eckd_set_online(struct ccw_device *cdev)
153 {
154 	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
155 }
156 
157 static const int sizes_trk0[] = { 28, 148, 84 };
158 #define LABEL_SIZE 140
159 
160 /* head and record addresses of count_area read in analysis ccw */
161 static const int count_area_head[] = { 0, 0, 0, 0, 1 };
162 static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
163 
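/*
 * Integer division rounded up, e.g. ceil_quot(7, 3) == 3. Used by the
 * track-capacity formulas below to account for per-record padding.
 */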
164 static inline unsigned int
165 ceil_quot(unsigned int d1, unsigned int d2)
166 {
167 	return (d1 + (d2 - 1)) / d2;
168 }
169 
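/*
 * Maximum number of records per track for a given key length (kl) and
 * data length (dl). The constants implement the device-type specific
 * track-capacity formulas: each record costs a fixed overhead plus its
 * key and data payload rounded up to the device's allocation unit
 * (32-byte units on a 3380, 34-byte units on 3390 and 9345).
 */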
170 static unsigned int
171 recs_per_track(struct dasd_eckd_characteristics *rdc,
172 	       unsigned int kl, unsigned int dl)
173 {
174 	int dn, kn;
175 
176 	switch (rdc->dev_type) {
177 	case 0x3380:
178 		if (kl)
179 			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
180 				       ceil_quot(dl + 12, 32));
181 		else
182 			return 1499 / (15 + ceil_quot(dl + 12, 32));
183 	case 0x3390:
184 		dn = ceil_quot(dl + 6, 232) + 1;
185 		if (kl) {
186 			kn = ceil_quot(kl + 6, 232) + 1;
187 			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
188 				       9 + ceil_quot(dl + 6 * dn, 34));
189 		} else
190 			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
191 	case 0x9345:
192 		dn = ceil_quot(dl + 6, 232) + 1;
193 		if (kl) {
194 			kn = ceil_quot(kl + 6, 232) + 1;
195 			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
196 				       ceil_quot(dl + 6 * dn, 34));
197 		} else
198 			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
199 	}
200 	return 0;
201 }
202 
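/*
 * Pack a cylinder/head pair into the CCHH format: the low 16 cylinder
 * bits go into geo->cyl, while the upper cylinder bits (used by large
 * EAV volumes) share geo->head with the 4-bit head number.
 */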
203 static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
204 {
205 	geo->cyl = (__u16) cyl;
206 	geo->head = cyl >> 16;
207 	geo->head <<= 4;
208 	geo->head |= head;
209 }
210 
211 /*
212  * calculate the failing track from sense data, depending on whether
213  * it is an EAV device or not
214  */
215 static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
216 				    sector_t *track)
217 {
218 	struct dasd_eckd_private *private = device->private;
219 	u8 *sense = NULL;
220 	u32 cyl;
221 	u8 head;
222 
223 	sense = dasd_get_sense(irb);
224 	if (!sense) {
225 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
226 			      "ESE error no sense data\n");
227 		return -EINVAL;
228 	}
229 	if (!(sense[27] & DASD_SENSE_BIT_2)) {
230 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
231 			      "ESE error no valid track data\n");
232 		return -EINVAL;
233 	}
234 
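	/*
	 * Sense bytes 28-31 carry the failing CCHH; with sense bit 3 set
	 * the cylinder uses the extended (EAV) layout, i.e. the inverse
	 * of the packing done by set_ch_t() above.
	 */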
235 	if (sense[27] & DASD_SENSE_BIT_3) {
236 		/* enhanced addressing */
237 		cyl = sense[30] << 20;
238 		cyl |= (sense[31] & 0xF0) << 12;
239 		cyl |= sense[28] << 8;
240 		cyl |= sense[29];
241 	} else {
242 		cyl = sense[29] << 8;
243 		cyl |= sense[30];
244 	}
245 	head = sense[31] & 0x0F;
246 	*track = cyl * private->rdc_data.trk_per_cyl + head;
247 	return 0;
248 }
249 
250 static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
251 		     struct dasd_device *device)
252 {
253 	struct dasd_eckd_private *private = device->private;
254 	int rc;
255 
256 	rc = get_phys_clock(&data->ep_sys_time);
257 	/*
258 	 * Ignore return code if XRC is not supported or
259 	 * sync clock is switched off
260 	 */
261 	if ((rc && !private->rdc_data.facilities.XRC_supported) ||
262 	    rc == -EOPNOTSUPP || rc == -EACCES)
263 		return 0;
264 
265 	/* switch on System Time Stamp - needed for XRC Support */
266 	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
267 	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
268 
269 	if (ccw) {
270 		ccw->count = sizeof(struct DE_eckd_data);
271 		ccw->flags |= CCW_FLAG_SLI;
272 	}
273 
274 	return rc;
275 }
276 
277 static int
278 define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
279 	      unsigned int totrk, int cmd, struct dasd_device *device,
280 	      int blksize)
281 {
282 	struct dasd_eckd_private *private = device->private;
283 	u16 heads, beghead, endhead;
284 	u32 begcyl, endcyl;
285 	int rc = 0;
286 
287 	if (ccw) {
288 		ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
289 		ccw->flags = 0;
290 		ccw->count = 16;
291 		ccw->cda = (__u32)__pa(data);
292 	}
293 
294 	memset(data, 0, sizeof(struct DE_eckd_data));
295 	switch (cmd) {
296 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
297 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
298 	case DASD_ECKD_CCW_READ:
299 	case DASD_ECKD_CCW_READ_MT:
300 	case DASD_ECKD_CCW_READ_CKD:
301 	case DASD_ECKD_CCW_READ_CKD_MT:
302 	case DASD_ECKD_CCW_READ_KD:
303 	case DASD_ECKD_CCW_READ_KD_MT:
304 		data->mask.perm = 0x1;
305 		data->attributes.operation = private->attrib.operation;
306 		break;
307 	case DASD_ECKD_CCW_READ_COUNT:
308 		data->mask.perm = 0x1;
309 		data->attributes.operation = DASD_BYPASS_CACHE;
310 		break;
311 	case DASD_ECKD_CCW_READ_TRACK:
312 	case DASD_ECKD_CCW_READ_TRACK_DATA:
313 		data->mask.perm = 0x1;
314 		data->attributes.operation = private->attrib.operation;
315 		data->blk_size = 0;
316 		break;
317 	case DASD_ECKD_CCW_WRITE:
318 	case DASD_ECKD_CCW_WRITE_MT:
319 	case DASD_ECKD_CCW_WRITE_KD:
320 	case DASD_ECKD_CCW_WRITE_KD_MT:
321 		data->mask.perm = 0x02;
322 		data->attributes.operation = private->attrib.operation;
323 		rc = set_timestamp(ccw, data, device);
324 		break;
325 	case DASD_ECKD_CCW_WRITE_CKD:
326 	case DASD_ECKD_CCW_WRITE_CKD_MT:
327 		data->attributes.operation = DASD_BYPASS_CACHE;
328 		rc = set_timestamp(ccw, data, device);
329 		break;
330 	case DASD_ECKD_CCW_ERASE:
331 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
332 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
333 		data->mask.perm = 0x3;
334 		data->mask.auth = 0x1;
335 		data->attributes.operation = DASD_BYPASS_CACHE;
336 		rc = set_timestamp(ccw, data, device);
337 		break;
338 	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
339 		data->mask.perm = 0x03;
340 		data->attributes.operation = private->attrib.operation;
341 		data->blk_size = 0;
342 		break;
343 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
344 		data->mask.perm = 0x02;
345 		data->attributes.operation = private->attrib.operation;
346 		data->blk_size = blksize;
347 		rc = set_timestamp(ccw, data, device);
348 		break;
349 	default:
350 		dev_err(&device->cdev->dev,
351 			"0x%x is not a known command\n", cmd);
352 		break;
353 	}
354 
355 	data->attributes.mode = 0x3;	/* ECKD */
356 
357 	if ((private->rdc_data.cu_type == 0x2105 ||
358 	     private->rdc_data.cu_type == 0x2107 ||
359 	     private->rdc_data.cu_type == 0x1750)
360 	    && !(private->uses_cdl && trk < 2))
361 		data->ga_extended |= 0x40; /* Regular Data Format Mode */
362 
363 	heads = private->rdc_data.trk_per_cyl;
364 	begcyl = trk / heads;
365 	beghead = trk % heads;
366 	endcyl = totrk / heads;
367 	endhead = totrk % heads;
368 
369 	/* check for sequential prestage - enhance cylinder range */
370 	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
371 	    data->attributes.operation == DASD_SEQ_ACCESS) {
372 
373 		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
374 			endcyl += private->attrib.nr_cyl;
375 		else
376 			endcyl = (private->real_cyl - 1);
377 	}
378 
379 	set_ch_t(&data->beg_ext, begcyl, beghead);
380 	set_ch_t(&data->end_ext, endcyl, endhead);
381 	return rc;
382 }
383 
384 
385 static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
386 			      unsigned int trk, unsigned int rec_on_trk,
387 			      int count, int cmd, struct dasd_device *device,
388 			      unsigned int reclen, unsigned int tlf)
389 {
390 	struct dasd_eckd_private *private = device->private;
391 	int sector;
392 	int dn, d;
393 
394 	if (ccw) {
395 		ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
396 		ccw->flags = 0;
397 		if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
398 			ccw->count = 22;
399 		else
400 			ccw->count = 20;
401 		ccw->cda = (__u32)__pa(data);
402 	}
403 
404 	memset(data, 0, sizeof(*data));
405 	sector = 0;
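	/*
	 * Estimate the angular (sector) position of record rec_on_trk
	 * from the per-record byte cost d, so that the search can start
	 * close to the target record.
	 */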
406 	if (rec_on_trk) {
407 		switch (private->rdc_data.dev_type) {
408 		case 0x3390:
409 			dn = ceil_quot(reclen + 6, 232);
410 			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
411 			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
412 			break;
413 		case 0x3380:
414 			d = 7 + ceil_quot(reclen + 12, 32);
415 			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
416 			break;
417 		}
418 	}
419 	data->sector = sector;
420 	/* note: the meaning of count depends on the operation:
421 	 *	 for record based I/O it's the number of records, but for
422 	 *	 track based I/O it's the number of tracks
423 	 */
424 	data->count = count;
425 	switch (cmd) {
426 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
427 		data->operation.orientation = 0x3;
428 		data->operation.operation = 0x03;
429 		break;
430 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
431 		data->operation.orientation = 0x3;
432 		data->operation.operation = 0x16;
433 		break;
434 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
435 		data->operation.orientation = 0x1;
436 		data->operation.operation = 0x03;
437 		data->count++;
438 		break;
439 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
440 		data->operation.orientation = 0x3;
441 		data->operation.operation = 0x16;
442 		data->count++;
443 		break;
444 	case DASD_ECKD_CCW_WRITE:
445 	case DASD_ECKD_CCW_WRITE_MT:
446 	case DASD_ECKD_CCW_WRITE_KD:
447 	case DASD_ECKD_CCW_WRITE_KD_MT:
448 		data->auxiliary.length_valid = 0x1;
449 		data->length = reclen;
450 		data->operation.operation = 0x01;
451 		break;
452 	case DASD_ECKD_CCW_WRITE_CKD:
453 	case DASD_ECKD_CCW_WRITE_CKD_MT:
454 		data->auxiliary.length_valid = 0x1;
455 		data->length = reclen;
456 		data->operation.operation = 0x03;
457 		break;
458 	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
459 		data->operation.orientation = 0x0;
460 		data->operation.operation = 0x3F;
461 		data->extended_operation = 0x11;
462 		data->length = 0;
463 		data->extended_parameter_length = 0x02;
464 		if (data->count > 8) {
465 			data->extended_parameter[0] = 0xFF;
466 			data->extended_parameter[1] = 0xFF;
467 			data->extended_parameter[1] <<= (16 - count);
468 		} else {
469 			data->extended_parameter[0] = 0xFF;
470 			data->extended_parameter[0] <<= (8 - count);
471 			data->extended_parameter[1] = 0x00;
472 		}
473 		data->sector = 0xFF;
474 		break;
475 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
476 		data->auxiliary.length_valid = 0x1;
477 		data->length = reclen;	/* not tlf, as one might think */
478 		data->operation.operation = 0x3F;
479 		data->extended_operation = 0x23;
480 		break;
481 	case DASD_ECKD_CCW_READ:
482 	case DASD_ECKD_CCW_READ_MT:
483 	case DASD_ECKD_CCW_READ_KD:
484 	case DASD_ECKD_CCW_READ_KD_MT:
485 		data->auxiliary.length_valid = 0x1;
486 		data->length = reclen;
487 		data->operation.operation = 0x06;
488 		break;
489 	case DASD_ECKD_CCW_READ_CKD:
490 	case DASD_ECKD_CCW_READ_CKD_MT:
491 		data->auxiliary.length_valid = 0x1;
492 		data->length = reclen;
493 		data->operation.operation = 0x16;
494 		break;
495 	case DASD_ECKD_CCW_READ_COUNT:
496 		data->operation.operation = 0x06;
497 		break;
498 	case DASD_ECKD_CCW_READ_TRACK:
499 		data->operation.orientation = 0x1;
500 		data->operation.operation = 0x0C;
501 		data->extended_parameter_length = 0;
502 		data->sector = 0xFF;
503 		break;
504 	case DASD_ECKD_CCW_READ_TRACK_DATA:
505 		data->auxiliary.length_valid = 0x1;
506 		data->length = tlf;
507 		data->operation.operation = 0x0C;
508 		break;
509 	case DASD_ECKD_CCW_ERASE:
510 		data->length = reclen;
511 		data->auxiliary.length_valid = 0x1;
512 		data->operation.operation = 0x0b;
513 		break;
514 	default:
515 		DBF_DEV_EVENT(DBF_ERR, device,
516 			    "fill LRE unknown opcode 0x%x", cmd);
517 		BUG();
518 	}
519 	set_ch_t(&data->seek_addr,
520 		 trk / private->rdc_data.trk_per_cyl,
521 		 trk % private->rdc_data.trk_per_cyl);
522 	data->search_arg.cyl = data->seek_addr.cyl;
523 	data->search_arg.head = data->seek_addr.head;
524 	data->search_arg.record = rec_on_trk;
525 }
526 
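/*
 * Build a Prefix CCW. Format 0 embeds only the define extent data,
 * format 1 additionally embeds the locate record extended data; any
 * other format is rejected below.
 */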
527 static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
528 		      unsigned int trk, unsigned int totrk, int cmd,
529 		      struct dasd_device *basedev, struct dasd_device *startdev,
530 		      unsigned int format, unsigned int rec_on_trk, int count,
531 		      unsigned int blksize, unsigned int tlf)
532 {
533 	struct dasd_eckd_private *basepriv, *startpriv;
534 	struct LRE_eckd_data *lredata;
535 	struct DE_eckd_data *dedata;
536 	int rc = 0;
537 
538 	basepriv = basedev->private;
539 	startpriv = startdev->private;
540 	dedata = &pfxdata->define_extent;
541 	lredata = &pfxdata->locate_record;
542 
543 	ccw->cmd_code = DASD_ECKD_CCW_PFX;
544 	ccw->flags = 0;
545 	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
546 		ccw->count = sizeof(*pfxdata) + 2;
547 		ccw->cda = (__u32) __pa(pfxdata);
548 		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
549 	} else {
550 		ccw->count = sizeof(*pfxdata);
551 		ccw->cda = (__u32) __pa(pfxdata);
552 		memset(pfxdata, 0, sizeof(*pfxdata));
553 	}
554 
555 	/* prefix data */
556 	if (format > 1) {
557 		DBF_DEV_EVENT(DBF_ERR, basedev,
558 			      "PFX LRE unknown format 0x%x", format);
559 		BUG();
560 		return -EINVAL;
561 	}
562 	pfxdata->format = format;
563 	pfxdata->base_address = basepriv->ned->unit_addr;
564 	pfxdata->base_lss = basepriv->ned->ID;
565 	pfxdata->validity.define_extent = 1;
566 
567 	/* private uid is kept up to date, conf_data may be outdated */
568 	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
569 		pfxdata->validity.verify_base = 1;
570 
571 	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
572 		pfxdata->validity.verify_base = 1;
573 		pfxdata->validity.hyper_pav = 1;
574 	}
575 
576 	rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);
577 
578 	/*
579 	 * For some commands the System Time Stamp is set in the define extent
580 	 * data when XRC is supported. The validity of the time stamp must be
581 	 * reflected in the prefix data as well.
582 	 */
583 	if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
584 		pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid'   */
585 
586 	if (format == 1) {
587 		locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
588 				  basedev, blksize, tlf);
589 	}
590 
591 	return rc;
592 }
593 
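/* Convenience wrapper for a format-0 prefix (define extent only). */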
594 static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
595 		  unsigned int trk, unsigned int totrk, int cmd,
596 		  struct dasd_device *basedev, struct dasd_device *startdev)
597 {
598 	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
599 			  0, 0, 0, 0, 0);
600 }
601 
602 static void
603 locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
604 	      unsigned int rec_on_trk, int no_rec, int cmd,
605 	      struct dasd_device *device, int reclen)
606 {
607 	struct dasd_eckd_private *private = device->private;
608 	int sector;
609 	int dn, d;
610 
611 	DBF_DEV_EVENT(DBF_INFO, device,
612 		  "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
613 		  trk, rec_on_trk, no_rec, cmd, reclen);
614 
615 	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
616 	ccw->flags = 0;
617 	ccw->count = 16;
618 	ccw->cda = (__u32) __pa(data);
619 
620 	memset(data, 0, sizeof(struct LO_eckd_data));
621 	sector = 0;
622 	if (rec_on_trk) {
623 		switch (private->rdc_data.dev_type) {
624 		case 0x3390:
625 			dn = ceil_quot(reclen + 6, 232);
626 			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
627 			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
628 			break;
629 		case 0x3380:
630 			d = 7 + ceil_quot(reclen + 12, 32);
631 			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
632 			break;
633 		}
634 	}
635 	data->sector = sector;
636 	data->count = no_rec;
637 	switch (cmd) {
638 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
639 		data->operation.orientation = 0x3;
640 		data->operation.operation = 0x03;
641 		break;
642 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
643 		data->operation.orientation = 0x3;
644 		data->operation.operation = 0x16;
645 		break;
646 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
647 		data->operation.orientation = 0x1;
648 		data->operation.operation = 0x03;
649 		data->count++;
650 		break;
651 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
652 		data->operation.orientation = 0x3;
653 		data->operation.operation = 0x16;
654 		data->count++;
655 		break;
656 	case DASD_ECKD_CCW_WRITE:
657 	case DASD_ECKD_CCW_WRITE_MT:
658 	case DASD_ECKD_CCW_WRITE_KD:
659 	case DASD_ECKD_CCW_WRITE_KD_MT:
660 		data->auxiliary.last_bytes_used = 0x1;
661 		data->length = reclen;
662 		data->operation.operation = 0x01;
663 		break;
664 	case DASD_ECKD_CCW_WRITE_CKD:
665 	case DASD_ECKD_CCW_WRITE_CKD_MT:
666 		data->auxiliary.last_bytes_used = 0x1;
667 		data->length = reclen;
668 		data->operation.operation = 0x03;
669 		break;
670 	case DASD_ECKD_CCW_READ:
671 	case DASD_ECKD_CCW_READ_MT:
672 	case DASD_ECKD_CCW_READ_KD:
673 	case DASD_ECKD_CCW_READ_KD_MT:
674 		data->auxiliary.last_bytes_used = 0x1;
675 		data->length = reclen;
676 		data->operation.operation = 0x06;
677 		break;
678 	case DASD_ECKD_CCW_READ_CKD:
679 	case DASD_ECKD_CCW_READ_CKD_MT:
680 		data->auxiliary.last_bytes_used = 0x1;
681 		data->length = reclen;
682 		data->operation.operation = 0x16;
683 		break;
684 	case DASD_ECKD_CCW_READ_COUNT:
685 		data->operation.operation = 0x06;
686 		break;
687 	case DASD_ECKD_CCW_ERASE:
688 		data->length = reclen;
689 		data->auxiliary.last_bytes_used = 0x1;
690 		data->operation.operation = 0x0b;
691 		break;
692 	default:
693 		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
694 			      "opcode 0x%x", cmd);
695 	}
696 	set_ch_t(&data->seek_addr,
697 		 trk / private->rdc_data.trk_per_cyl,
698 		 trk % private->rdc_data.trk_per_cyl);
699 	data->search_arg.cyl = data->seek_addr.cyl;
700 	data->search_arg.head = data->seek_addr.head;
701 	data->search_arg.record = rec_on_trk;
702 }
703 
704 /*
705  * Returns 1 if the block is one of the special blocks that need
706  * to get read/written with the KD variant of the command.
707  * That is DASD_ECKD_CCW_READ_KD_MT instead of DASD_ECKD_CCW_READ_MT and
708  * DASD_ECKD_CCW_WRITE_KD_MT instead of DASD_ECKD_CCW_WRITE_MT.
709  * Luckily the KD variants differ only by one bit (0x08) from the
710  * normal variant. So don't wonder about code like:
711  * if (dasd_eckd_cdl_special(blk_per_trk, recid))
712  *         ccw->cmd_code |= 0x8;
713  */
714 static inline int
715 dasd_eckd_cdl_special(int blk_per_trk, int recid)
716 {
717 	if (recid < 3)
718 		return 1;
719 	if (recid < blk_per_trk)
720 		return 0;
721 	if (recid < 2 * blk_per_trk)
722 		return 1;
723 	return 0;
724 }
725 
726 /*
727  * Returns the record size for the special blocks of the cdl format.
728  * Only returns something useful if dasd_eckd_cdl_special is true
729  * for the recid.
730  */
731 static inline int
732 dasd_eckd_cdl_reclen(int recid)
733 {
734 	if (recid < 3)
735 		return sizes_trk0[recid];
736 	return LABEL_SIZE;
737 }
738 /* create unique id from private structure. */
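/*
 * The printable form of this UID is
 *   vendor.serial.ssid.real_unit_addr[.vduit]
 * (cf. the snprintf calls in dasd_eckd_read_conf()).
 */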
739 static void create_uid(struct dasd_eckd_private *private)
740 {
741 	int count;
742 	struct dasd_uid *uid;
743 
744 	uid = &private->uid;
745 	memset(uid, 0, sizeof(struct dasd_uid));
746 	memcpy(uid->vendor, private->ned->HDA_manufacturer,
747 	       sizeof(uid->vendor) - 1);
748 	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
749 	memcpy(uid->serial, private->ned->HDA_location,
750 	       sizeof(uid->serial) - 1);
751 	EBCASC(uid->serial, sizeof(uid->serial) - 1);
752 	uid->ssid = private->gneq->subsystemID;
753 	uid->real_unit_addr = private->ned->unit_addr;
754 	if (private->sneq) {
755 		uid->type = private->sneq->sua_flags;
756 		if (uid->type == UA_BASE_PAV_ALIAS)
757 			uid->base_unit_addr = private->sneq->base_unit_addr;
758 	} else {
759 		uid->type = UA_BASE_DEVICE;
760 	}
761 	if (private->vdsneq) {
762 		for (count = 0; count < 16; count++) {
763 			sprintf(uid->vduit+2*count, "%02x",
764 				private->vdsneq->uit[count]);
765 		}
766 	}
767 }
768 
769 /*
770  * Generate device unique id that specifies the physical device.
771  */
772 static int dasd_eckd_generate_uid(struct dasd_device *device)
773 {
774 	struct dasd_eckd_private *private = device->private;
775 	unsigned long flags;
776 
777 	if (!private)
778 		return -ENODEV;
779 	if (!private->ned || !private->gneq)
780 		return -ENODEV;
781 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
782 	create_uid(private);
783 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
784 	return 0;
785 }
786 
787 static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
788 {
789 	struct dasd_eckd_private *private = device->private;
790 	unsigned long flags;
791 
792 	if (private) {
793 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
794 		*uid = private->uid;
795 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
796 		return 0;
797 	}
798 	return -EINVAL;
799 }
800 
801 /*
802  * compare device UID with data of a given dasd_eckd_private structure
803  * return 0 for match
804  */
805 static int dasd_eckd_compare_path_uid(struct dasd_device *device,
806 				      struct dasd_eckd_private *private)
807 {
808 	struct dasd_uid device_uid;
809 
810 	create_uid(private);
811 	dasd_eckd_get_uid(device, &device_uid);
812 
813 	return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
814 }
815 
816 static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
817 				   struct dasd_ccw_req *cqr,
818 				   __u8 *rcd_buffer,
819 				   __u8 lpm)
820 {
821 	struct ccw1 *ccw;
822 	/*
823 	 * buffer has to start with EBCDIC "V1.0" to show
824 	 * support for virtual device SNEQ
825 	 */
826 	rcd_buffer[0] = 0xE5;
827 	rcd_buffer[1] = 0xF1;
828 	rcd_buffer[2] = 0x4B;
829 	rcd_buffer[3] = 0xF0;
830 
831 	ccw = cqr->cpaddr;
832 	ccw->cmd_code = DASD_ECKD_CCW_RCD;
833 	ccw->flags = 0;
834 	ccw->cda = (__u32)(addr_t)rcd_buffer;
835 	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
836 	cqr->magic = DASD_ECKD_MAGIC;
837 
838 	cqr->startdev = device;
839 	cqr->memdev = device;
840 	cqr->block = NULL;
841 	cqr->expires = 10*HZ;
842 	cqr->lpm = lpm;
843 	cqr->retries = 256;
844 	cqr->buildclk = get_tod_clock();
845 	cqr->status = DASD_CQR_FILLED;
846 	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
847 }
848 
849 /*
850  * Wakeup helper for read_conf.
851  * If the cqr is not done and needs some error recovery,
852  * the buffer has to be re-initialized with the EBCDIC "V1.0"
853  * string to show support for virtual device SNEQ.
854  */
855 static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
856 {
857 	struct ccw1 *ccw;
858 	__u8 *rcd_buffer;
859 
860 	if (cqr->status != DASD_CQR_DONE) {
861 		ccw = cqr->cpaddr;
862 		rcd_buffer = (__u8 *)((addr_t) ccw->cda);
863 		memset(rcd_buffer, 0, DASD_ECKD_RCD_DATA_SIZE);
864 
865 		rcd_buffer[0] = 0xE5;
866 		rcd_buffer[1] = 0xF1;
867 		rcd_buffer[2] = 0x4B;
868 		rcd_buffer[3] = 0xF0;
869 	}
870 	dasd_wakeup_cb(cqr, data);
871 }
872 
873 static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
874 					   struct dasd_ccw_req *cqr,
875 					   __u8 *rcd_buffer,
876 					   __u8 lpm)
877 {
878 	struct ciw *ciw;
879 	int rc;
880 	/*
881 	 * sanity check: scan for RCD command in extended SenseID data
882 	 * some devices do not support RCD
883 	 */
884 	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
885 	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
886 		return -EOPNOTSUPP;
887 
888 	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
889 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
890 	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
891 	cqr->retries = 5;
892 	cqr->callback = read_conf_cb;
893 	rc = dasd_sleep_on_immediatly(cqr);
894 	return rc;
895 }
896 
897 static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
898 				   void **rcd_buffer,
899 				   int *rcd_buffer_size, __u8 lpm)
900 {
901 	struct ciw *ciw;
902 	char *rcd_buf = NULL;
903 	int ret;
904 	struct dasd_ccw_req *cqr;
905 
906 	/*
907 	 * sanity check: scan for RCD command in extended SenseID data
908 	 * some devices do not support RCD
909 	 */
910 	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
911 	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
912 		ret = -EOPNOTSUPP;
913 		goto out_error;
914 	}
915 	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
916 	if (!rcd_buf) {
917 		ret = -ENOMEM;
918 		goto out_error;
919 	}
920 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
921 				   0, /* use rcd_buf as data area */
922 				   device, NULL);
923 	if (IS_ERR(cqr)) {
924 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
925 			      "Could not allocate RCD request");
926 		ret = -ENOMEM;
927 		goto out_error;
928 	}
929 	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
930 	cqr->callback = read_conf_cb;
931 	ret = dasd_sleep_on(cqr);
932 	/*
933 	 * on success we update the user input parameters
934 	 */
935 	dasd_sfree_request(cqr, cqr->memdev);
936 	if (ret)
937 		goto out_error;
938 
939 	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
940 	*rcd_buffer = rcd_buf;
941 	return 0;
942 out_error:
943 	kfree(rcd_buf);
944 	*rcd_buffer = NULL;
945 	*rcd_buffer_size = 0;
946 	return ret;
947 }
948 
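/*
 * The configuration data is a concatenation of equally sized records.
 * Walk them and remember pointers to the node element descriptor (NED)
 * and the subsystem, virtual device and general node element
 * qualifiers; the NED and the general NEQ are mandatory.
 */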
949 static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
950 {
951 
952 	struct dasd_sneq *sneq;
953 	int i, count;
954 
955 	private->ned = NULL;
956 	private->sneq = NULL;
957 	private->vdsneq = NULL;
958 	private->gneq = NULL;
959 	count = private->conf_len / sizeof(struct dasd_sneq);
960 	sneq = (struct dasd_sneq *)private->conf_data;
961 	for (i = 0; i < count; ++i) {
962 		if (sneq->flags.identifier == 1 && sneq->format == 1)
963 			private->sneq = sneq;
964 		else if (sneq->flags.identifier == 1 && sneq->format == 4)
965 			private->vdsneq = (struct vd_sneq *)sneq;
966 		else if (sneq->flags.identifier == 2)
967 			private->gneq = (struct dasd_gneq *)sneq;
968 		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
969 			private->ned = (struct dasd_ned *)sneq;
970 		sneq++;
971 	}
972 	if (!private->ned || !private->gneq) {
973 		private->ned = NULL;
974 		private->sneq = NULL;
975 		private->vdsneq = NULL;
976 		private->gneq = NULL;
977 		return -EINVAL;
978 	}
979 	return 0;
980 
981 }
982 
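/*
 * Look up the general NEQ in the configuration data and return its path
 * access byte (byte 18, low three bits); dasd_eckd_read_conf() treats
 * 0x02 as a non-preferred and 0x03 as a preferred path.
 */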
983 static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
984 {
985 	struct dasd_gneq *gneq;
986 	int i, count, found;
987 
988 	count = conf_len / sizeof(*gneq);
989 	gneq = (struct dasd_gneq *)conf_data;
990 	found = 0;
991 	for (i = 0; i < count; ++i) {
992 		if (gneq->flags.identifier == 2) {
993 			found = 1;
994 			break;
995 		}
996 		gneq++;
997 	}
998 	if (found)
999 		return ((char *)gneq)[18] & 0x07;
1000 	else
1001 		return 0;
1002 }
1003 
1004 static void dasd_eckd_store_conf_data(struct dasd_device *device,
1005 				      struct dasd_conf_data *conf_data, int chp)
1006 {
1007 	struct channel_path_desc_fmt0 *chp_desc;
1008 	struct subchannel_id sch_id;
1009 
1010 	ccw_device_get_schid(device->cdev, &sch_id);
1011 	/*
1012 	 * path handling and read_conf allocate data;
1013 	 * free it before replacing the pointer
1014 	 */
1015 	kfree(device->path[chp].conf_data);
1016 	device->path[chp].conf_data = conf_data;
1017 	device->path[chp].cssid = sch_id.cssid;
1018 	device->path[chp].ssid = sch_id.ssid;
1019 	chp_desc = ccw_device_get_chp_desc(device->cdev, chp);
1020 	if (chp_desc)
1021 		device->path[chp].chpid = chp_desc->chpid;
1022 	kfree(chp_desc);
1023 }
1024 
1025 static void dasd_eckd_clear_conf_data(struct dasd_device *device)
1026 {
1027 	struct dasd_eckd_private *private = device->private;
1028 	int i;
1029 
1030 	private->conf_data = NULL;
1031 	private->conf_len = 0;
1032 	for (i = 0; i < 8; i++) {
1033 		kfree(device->path[i].conf_data);
1034 		device->path[i].conf_data = NULL;
1035 		device->path[i].cssid = 0;
1036 		device->path[i].ssid = 0;
1037 		device->path[i].chpid = 0;
1038 		dasd_path_notoper(device, i);
1039 		dasd_path_remove_kobj(device, i);
1040 	}
1041 }
1042 
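/*
 * Query the fibre channel endpoint security state of each channel path
 * via chsc_scud(). esm_valid has one bit per path, with the same
 * 0x80-is-path-0 convention as the lpm masks used elsewhere.
 */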
1043 static void dasd_eckd_read_fc_security(struct dasd_device *device)
1044 {
1045 	struct dasd_eckd_private *private = device->private;
1046 	u8 esm_valid;
1047 	u8 esm[8];
1048 	int chp;
1049 	int rc;
1050 
1051 	rc = chsc_scud(private->uid.ssid, (u64 *)esm, &esm_valid);
1052 	if (rc) {
1053 		for (chp = 0; chp < 8; chp++)
1054 			device->path[chp].fc_security = 0;
1055 		return;
1056 	}
1057 
1058 	for (chp = 0; chp < 8; chp++) {
1059 		if (esm_valid & (0x80 >> chp))
1060 			device->path[chp].fc_security = esm[chp];
1061 		else
1062 			device->path[chp].fc_security = 0;
1063 	}
1064 }
1065 
1066 static int dasd_eckd_read_conf(struct dasd_device *device)
1067 {
1068 	void *conf_data;
1069 	int conf_len, conf_data_saved;
1070 	int rc, path_err, pos;
1071 	__u8 lpm, opm;
1072 	struct dasd_eckd_private *private, path_private;
1073 	struct dasd_uid *uid;
1074 	char print_path_uid[60], print_device_uid[60];
1075 
1076 	private = device->private;
1077 	opm = ccw_device_get_path_mask(device->cdev);
1078 	conf_data_saved = 0;
1079 	path_err = 0;
1080 	/* get configuration data per operational path */
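	/*
	 * lpm walks the eight path bits from 0x80 (path 0) down to 0x01
	 * (path 7); pathmask_to_pos() maps a single-bit mask back to its
	 * array index, e.g. pathmask_to_pos(0x80) == 0.
	 */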
1081 	for (lpm = 0x80; lpm; lpm >>= 1) {
1082 		if (!(lpm & opm))
1083 			continue;
1084 		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
1085 					     &conf_len, lpm);
1086 		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
1087 			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1088 					"Read configuration data returned "
1089 					"error %d", rc);
1090 			return rc;
1091 		}
1092 		if (conf_data == NULL) {
1093 			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1094 					"No configuration data "
1095 					"retrieved");
1096 			/* no further analysis possible */
1097 			dasd_path_add_opm(device, opm);
1098 			continue;	/* no error */
1099 		}
1100 		/* save first valid configuration data */
1101 		if (!conf_data_saved) {
1102 			/* initially clear previously stored conf_data */
1103 			dasd_eckd_clear_conf_data(device);
1104 			private->conf_data = conf_data;
1105 			private->conf_len = conf_len;
1106 			if (dasd_eckd_identify_conf_parts(private)) {
1107 				private->conf_data = NULL;
1108 				private->conf_len = 0;
1109 				kfree(conf_data);
1110 				continue;
1111 			}
1112 			/*
1113 			 * build device UID so that data from other
1114 			 * paths can be compared to it
1115 			 */
1116 			dasd_eckd_generate_uid(device);
1117 			conf_data_saved++;
1118 		} else {
1119 			path_private.conf_data = conf_data;
1120 			path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
1121 			if (dasd_eckd_identify_conf_parts(
1122 				    &path_private)) {
1123 				path_private.conf_data = NULL;
1124 				path_private.conf_len = 0;
1125 				kfree(conf_data);
1126 				continue;
1127 			}
1128 			if (dasd_eckd_compare_path_uid(
1129 				    device, &path_private)) {
1130 				uid = &path_private.uid;
1131 				if (strlen(uid->vduit) > 0)
1132 					snprintf(print_path_uid,
1133 						 sizeof(print_path_uid),
1134 						 "%s.%s.%04x.%02x.%s",
1135 						 uid->vendor, uid->serial,
1136 						 uid->ssid, uid->real_unit_addr,
1137 						 uid->vduit);
1138 				else
1139 					snprintf(print_path_uid,
1140 						 sizeof(print_path_uid),
1141 						 "%s.%s.%04x.%02x",
1142 						 uid->vendor, uid->serial,
1143 						 uid->ssid,
1144 						 uid->real_unit_addr);
1145 				uid = &private->uid;
1146 				if (strlen(uid->vduit) > 0)
1147 					snprintf(print_device_uid,
1148 						 sizeof(print_device_uid),
1149 						 "%s.%s.%04x.%02x.%s",
1150 						 uid->vendor, uid->serial,
1151 						 uid->ssid, uid->real_unit_addr,
1152 						 uid->vduit);
1153 				else
1154 					snprintf(print_device_uid,
1155 						 sizeof(print_device_uid),
1156 						 "%s.%s.%04x.%02x",
1157 						 uid->vendor, uid->serial,
1158 						 uid->ssid,
1159 						 uid->real_unit_addr);
1160 				dev_err(&device->cdev->dev,
1161 					"Not all channel paths lead to "
1162 					"the same device, path %02X leads to "
1163 					"device %s instead of %s\n", lpm,
1164 					print_path_uid, print_device_uid);
1165 				path_err = -EINVAL;
1166 				dasd_path_add_cablepm(device, lpm);
1167 				continue;
1168 			}
1169 			path_private.conf_data = NULL;
1170 			path_private.conf_len = 0;
1171 		}
1172 
1173 		pos = pathmask_to_pos(lpm);
1174 		dasd_eckd_store_conf_data(device, conf_data, pos);
1175 
1176 		switch (dasd_eckd_path_access(conf_data, conf_len)) {
1177 		case 0x02:
1178 			dasd_path_add_nppm(device, lpm);
1179 			break;
1180 		case 0x03:
1181 			dasd_path_add_ppm(device, lpm);
1182 			break;
1183 		}
1184 		if (!dasd_path_get_opm(device)) {
1185 			dasd_path_set_opm(device, lpm);
1186 			dasd_generic_path_operational(device);
1187 		} else {
1188 			dasd_path_add_opm(device, lpm);
1189 		}
1190 	}
1191 
1192 	dasd_eckd_read_fc_security(device);
1193 
1194 	return path_err;
1195 }
1196 
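/*
 * Upper bound for the payload of a single transport-mode (zHPF)
 * request. Transport mode requires support in the channel subsystem,
 * in the configuration data (gneq) and in the device feature codes;
 * the maximum data count reported by the CIO layer is scaled by
 * FCX_MAX_DATA_FACTOR to get a byte count.
 */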
1197 static u32 get_fcx_max_data(struct dasd_device *device)
1198 {
1199 	struct dasd_eckd_private *private = device->private;
1200 	int fcx_in_css, fcx_in_gneq, fcx_in_features;
1201 	unsigned int mdc;
1202 	int tpm;
1203 
1204 	if (dasd_nofcx)
1205 		return 0;
1206 	/* is transport mode supported? */
1207 	fcx_in_css = css_general_characteristics.fcx;
1208 	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
1209 	fcx_in_features = private->features.feature[40] & 0x80;
1210 	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
1211 
1212 	if (!tpm)
1213 		return 0;
1214 
1215 	mdc = ccw_device_get_mdc(device->cdev, 0);
1216 	if (mdc == 0) {
1217 		dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
1218 		return 0;
1219 	} else {
1220 		return (u32)mdc * FCX_MAX_DATA_FACTOR;
1221 	}
1222 }
1223 
1224 static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
1225 {
1226 	struct dasd_eckd_private *private = device->private;
1227 	unsigned int mdc;
1228 	u32 fcx_max_data;
1229 
1230 	if (private->fcx_max_data) {
1231 		mdc = ccw_device_get_mdc(device->cdev, lpm);
1232 		if (mdc == 0) {
1233 			dev_warn(&device->cdev->dev,
1234 				 "Detecting the maximum data size for zHPF "
1235 				 "requests failed (rc=%d) for a new path %x\n",
1236 				 mdc, lpm);
1237 			return mdc;
1238 		}
1239 		fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
1240 		if (fcx_max_data < private->fcx_max_data) {
1241 			dev_warn(&device->cdev->dev,
1242 				 "The maximum data size for zHPF requests %u "
1243 				 "on a new path %x is below the active maximum "
1244 				 "%u\n", fcx_max_data, lpm,
1245 				 private->fcx_max_data);
1246 			return -EACCES;
1247 		}
1248 	}
1249 	return 0;
1250 }
1251 
1252 static int rebuild_device_uid(struct dasd_device *device,
1253 			      struct pe_handler_work_data *data)
1254 {
1255 	struct dasd_eckd_private *private = device->private;
1256 	__u8 lpm, opm = dasd_path_get_opm(device);
1257 	int rc = -ENODEV;
1258 
1259 	for (lpm = 0x80; lpm; lpm >>= 1) {
1260 		if (!(lpm & opm))
1261 			continue;
1262 		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
1263 		memset(&data->cqr, 0, sizeof(data->cqr));
1264 		data->cqr.cpaddr = &data->ccw;
1265 		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1266 						     data->rcd_buffer,
1267 						     lpm);
1268 
1269 		if (rc) {
1270 			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
1271 				continue;
1272 			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1273 					"Read configuration data "
1274 					"returned error %d", rc);
1275 			break;
1276 		}
1277 		memcpy(private->conf_data, data->rcd_buffer,
1278 		       DASD_ECKD_RCD_DATA_SIZE);
1279 		if (dasd_eckd_identify_conf_parts(private)) {
1280 			rc = -ENODEV;
1281 		} else /* first valid path is enough */
1282 			break;
1283 	}
1284 
1285 	if (!rc)
1286 		rc = dasd_eckd_generate_uid(device);
1287 
1288 	return rc;
1289 }
1290 
1291 static void dasd_eckd_path_available_action(struct dasd_device *device,
1292 					    struct pe_handler_work_data *data)
1293 {
1294 	struct dasd_eckd_private path_private;
1295 	struct dasd_uid *uid;
1296 	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
1297 	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
1298 	struct dasd_conf_data *conf_data;
1299 	unsigned long flags;
1300 	char print_uid[60];
1301 	int rc, pos;
1302 
1303 	opm = 0;
1304 	npm = 0;
1305 	ppm = 0;
1306 	epm = 0;
1307 	hpfpm = 0;
1308 	cablepm = 0;
1309 
1310 	for (lpm = 0x80; lpm; lpm >>= 1) {
1311 		if (!(lpm & data->tbvpm))
1312 			continue;
1313 		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
1314 		memset(&data->cqr, 0, sizeof(data->cqr));
1315 		data->cqr.cpaddr = &data->ccw;
1316 		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1317 						     data->rcd_buffer,
1318 						     lpm);
1319 		if (!rc) {
1320 			switch (dasd_eckd_path_access(data->rcd_buffer,
1321 						      DASD_ECKD_RCD_DATA_SIZE)
1322 				) {
1323 			case 0x02:
1324 				npm |= lpm;
1325 				break;
1326 			case 0x03:
1327 				ppm |= lpm;
1328 				break;
1329 			}
1330 			opm |= lpm;
1331 		} else if (rc == -EOPNOTSUPP) {
1332 			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1333 					"path verification: No configuration "
1334 					"data retrieved");
1335 			opm |= lpm;
1336 		} else if (rc == -EAGAIN) {
1337 			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1338 					"path verification: device is stopped,"
1339 					" try again later");
1340 			epm |= lpm;
1341 		} else {
1342 			dev_warn(&device->cdev->dev,
1343 				 "Reading device feature codes failed "
1344 				 "(rc=%d) for new path %x\n", rc, lpm);
1345 			continue;
1346 		}
1347 		if (verify_fcx_max_data(device, lpm)) {
1348 			opm &= ~lpm;
1349 			npm &= ~lpm;
1350 			ppm &= ~lpm;
1351 			hpfpm |= lpm;
1352 			continue;
1353 		}
1354 
1355 		/*
1356 		 * save conf_data for later comparison, since
1357 		 * rebuild_device_uid may change the
1358 		 * original data
1359 		 */
1360 		memcpy(&path_rcd_buf, data->rcd_buffer,
1361 		       DASD_ECKD_RCD_DATA_SIZE);
1362 		path_private.conf_data = (void *) &path_rcd_buf;
1363 		path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
1364 		if (dasd_eckd_identify_conf_parts(&path_private)) {
1365 			path_private.conf_data = NULL;
1366 			path_private.conf_len = 0;
1367 			continue;
1368 		}
1369 
1370 		/*
1371 		 * compare path UID with device UID only if at least
1372 		 * one valid path is left;
1373 		 * otherwise the device UID may have changed and
1374 		 * the first working path UID will be used as device UID
1375 		 */
1376 		if (dasd_path_get_opm(device) &&
1377 		    dasd_eckd_compare_path_uid(device, &path_private)) {
1378 			/*
1379 			 * the comparison was not successful;
1380 			 * rebuild the device UID with at least one
1381 			 * known path, in case a z/VM hyperswap command
1382 			 * has changed the device,
1383 			 *
1384 			 * then compare again;
1385 			 *
1386 			 * if either the rebuild or the recompare fails,
1387 			 * the path cannot be used
1388 			 */
1389 			if (rebuild_device_uid(device, data) ||
1390 			    dasd_eckd_compare_path_uid(
1391 				    device, &path_private)) {
1392 				uid = &path_private.uid;
1393 				if (strlen(uid->vduit) > 0)
1394 					snprintf(print_uid, sizeof(print_uid),
1395 						 "%s.%s.%04x.%02x.%s",
1396 						 uid->vendor, uid->serial,
1397 						 uid->ssid, uid->real_unit_addr,
1398 						 uid->vduit);
1399 				else
1400 					snprintf(print_uid, sizeof(print_uid),
1401 						 "%s.%s.%04x.%02x",
1402 						 uid->vendor, uid->serial,
1403 						 uid->ssid,
1404 						 uid->real_unit_addr);
1405 				dev_err(&device->cdev->dev,
1406 					"The newly added channel path %02X "
1407 					"will not be used because it leads "
1408 					"to a different device %s\n",
1409 					lpm, print_uid);
1410 				opm &= ~lpm;
1411 				npm &= ~lpm;
1412 				ppm &= ~lpm;
1413 				cablepm |= lpm;
1414 				continue;
1415 			}
1416 		}
1417 
1418 		conf_data = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL);
1419 		if (conf_data) {
1420 			memcpy(conf_data, data->rcd_buffer,
1421 			       DASD_ECKD_RCD_DATA_SIZE);
1422 		}
1423 		pos = pathmask_to_pos(lpm);
1424 		dasd_eckd_store_conf_data(device, conf_data, pos);
1425 
1426 		/*
1427 		 * There is a small chance that a path is lost again between
1428 		 * above path verification and the following modification of
1429 		 * the device opm mask. We could avoid that race here by using
1430 		 * yet another path mask, but we rather deal with this unlikely
1431 		 * situation in dasd_start_IO.
1432 		 */
1433 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1434 		if (!dasd_path_get_opm(device) && opm) {
1435 			dasd_path_set_opm(device, opm);
1436 			dasd_generic_path_operational(device);
1437 		} else {
1438 			dasd_path_add_opm(device, opm);
1439 		}
1440 		dasd_path_add_nppm(device, npm);
1441 		dasd_path_add_ppm(device, ppm);
1442 		dasd_path_add_tbvpm(device, epm);
1443 		dasd_path_add_cablepm(device, cablepm);
1444 		dasd_path_add_nohpfpm(device, hpfpm);
1445 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1446 
1447 		dasd_path_create_kobj(device, pos);
1448 	}
1449 }
1450 
1451 static void do_pe_handler_work(struct work_struct *work)
1452 {
1453 	struct pe_handler_work_data *data;
1454 	struct dasd_device *device;
1455 
1456 	data = container_of(work, struct pe_handler_work_data, worker);
1457 	device = data->device;
1458 
1459 	/* delay path verification until device was resumed */
1460 	if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
1461 		schedule_work(work);
1462 		return;
1463 	}
1464 	/* check if path verification already running and delay if so */
1465 	if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
1466 		schedule_work(work);
1467 		return;
1468 	}
1469 
1470 	if (data->tbvpm)
1471 		dasd_eckd_path_available_action(device, data);
1472 	if (data->fcsecpm)
1473 		dasd_eckd_read_fc_security(device);
1474 
1475 	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
1476 	dasd_put_device(device);
1477 	if (data->isglobal)
1478 		mutex_unlock(&dasd_pe_handler_mutex);
1479 	else
1480 		kfree(data);
1481 }
1482 
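/*
 * Schedule path verification and/or a re-read of the FC security state.
 * May be called from atomic context, hence GFP_ATOMIC; if the
 * allocation fails, fall back to the statically allocated
 * pe_handler_worker, serialized by dasd_pe_handler_mutex.
 */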
1483 static int dasd_eckd_pe_handler(struct dasd_device *device,
1484 				__u8 tbvpm, __u8 fcsecpm)
1485 {
1486 	struct pe_handler_work_data *data;
1487 
1488 	data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
1489 	if (!data) {
1490 		if (mutex_trylock(&dasd_pe_handler_mutex)) {
1491 			data = pe_handler_worker;
1492 			data->isglobal = 1;
1493 		} else {
1494 			return -ENOMEM;
1495 		}
1496 	} else {
1497 		memset(data, 0, sizeof(*data));
1498 		data->isglobal = 0;
1499 	}
1500 	INIT_WORK(&data->worker, do_pe_handler_work);
1501 	dasd_get_device(device);
1502 	data->device = device;
1503 	data->tbvpm = tbvpm;
1504 	data->fcsecpm = fcsecpm;
1505 	schedule_work(&data->worker);
1506 	return 0;
1507 }
1508 
1509 static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
1510 {
1511 	struct dasd_eckd_private *private = device->private;
1512 	unsigned long flags;
1513 
1514 	if (!private->fcx_max_data)
1515 		private->fcx_max_data = get_fcx_max_data(device);
1516 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1517 	dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
1518 	dasd_schedule_device_bh(device);
1519 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1520 }
1521 
1522 static int dasd_eckd_read_features(struct dasd_device *device)
1523 {
1524 	struct dasd_eckd_private *private = device->private;
1525 	struct dasd_psf_prssd_data *prssdp;
1526 	struct dasd_rssd_features *features;
1527 	struct dasd_ccw_req *cqr;
1528 	struct ccw1 *ccw;
1529 	int rc;
1530 
1531 	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
1532 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
1533 				   (sizeof(struct dasd_psf_prssd_data) +
1534 				    sizeof(struct dasd_rssd_features)),
1535 				   device, NULL);
1536 	if (IS_ERR(cqr)) {
1537 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
1538 				"allocate initialization request");
1539 		return PTR_ERR(cqr);
1540 	}
1541 	cqr->startdev = device;
1542 	cqr->memdev = device;
1543 	cqr->block = NULL;
1544 	cqr->retries = 256;
1545 	cqr->expires = 10 * HZ;
1546 
1547 	/* Prepare for Read Subsystem Data */
1548 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1549 	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
1550 	prssdp->order = PSF_ORDER_PRSSD;
1551 	prssdp->suborder = 0x41;	/* Read Feature Codes */
1552 	/* all other bytes of prssdp must be zero */
1553 
1554 	ccw = cqr->cpaddr;
1555 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
1556 	ccw->count = sizeof(struct dasd_psf_prssd_data);
1557 	ccw->flags |= CCW_FLAG_CC;
1558 	ccw->cda = (__u32)(addr_t) prssdp;
1559 
1560 	/* Read Subsystem Data - feature codes */
1561 	features = (struct dasd_rssd_features *) (prssdp + 1);
1562 	memset(features, 0, sizeof(struct dasd_rssd_features));
1563 
1564 	ccw++;
1565 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1566 	ccw->count = sizeof(struct dasd_rssd_features);
1567 	ccw->cda = (__u32)(addr_t) features;
1568 
1569 	cqr->buildclk = get_tod_clock();
1570 	cqr->status = DASD_CQR_FILLED;
1571 	rc = dasd_sleep_on(cqr);
1572 	if (rc == 0) {
1573 		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1574 		features = (struct dasd_rssd_features *) (prssdp + 1);
1575 		memcpy(&private->features, features,
1576 		       sizeof(struct dasd_rssd_features));
1577 	} else
1578 		dev_warn(&device->cdev->dev, "Reading device feature codes"
1579 			 " failed with rc=%d\n", rc);
1580 	dasd_sfree_request(cqr, cqr->memdev);
1581 	return rc;
1582 }
1583 
1584 /* Read Volume Information - Volume Storage Query */
1585 static int dasd_eckd_read_vol_info(struct dasd_device *device)
1586 {
1587 	struct dasd_eckd_private *private = device->private;
1588 	struct dasd_psf_prssd_data *prssdp;
1589 	struct dasd_rssd_vsq *vsq;
1590 	struct dasd_ccw_req *cqr;
1591 	struct ccw1 *ccw;
1592 	int useglobal;
1593 	int rc;
1594 
1595 	/* This command cannot be executed on an alias device */
1596 	if (private->uid.type == UA_BASE_PAV_ALIAS ||
1597 	    private->uid.type == UA_HYPER_PAV_ALIAS)
1598 		return 0;
1599 
1600 	useglobal = 0;
1601 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
1602 				   sizeof(*prssdp) + sizeof(*vsq), device, NULL);
1603 	if (IS_ERR(cqr)) {
1604 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1605 				"Could not allocate initialization request");
1606 		mutex_lock(&dasd_vol_info_mutex);
1607 		useglobal = 1;
1608 		cqr = &dasd_vol_info_req->cqr;
1609 		memset(cqr, 0, sizeof(*cqr));
1610 		memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
1611 		cqr->cpaddr = &dasd_vol_info_req->ccw;
1612 		cqr->data = &dasd_vol_info_req->data;
1613 		cqr->magic = DASD_ECKD_MAGIC;
1614 	}
1615 
1616 	/* Prepare for Read Subsystem Data */
1617 	prssdp = cqr->data;
1618 	prssdp->order = PSF_ORDER_PRSSD;
1619 	prssdp->suborder = PSF_SUBORDER_VSQ;	/* Volume Storage Query */
1620 	prssdp->lss = private->ned->ID;
1621 	prssdp->volume = private->ned->unit_addr;
1622 
1623 	ccw = cqr->cpaddr;
1624 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
1625 	ccw->count = sizeof(*prssdp);
1626 	ccw->flags |= CCW_FLAG_CC;
1627 	ccw->cda = (__u32)(addr_t)prssdp;
1628 
1629 	/* Read Subsystem Data - Volume Storage Query */
1630 	vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
1631 	memset(vsq, 0, sizeof(*vsq));
1632 
1633 	ccw++;
1634 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1635 	ccw->count = sizeof(*vsq);
1636 	ccw->flags |= CCW_FLAG_SLI;
1637 	ccw->cda = (__u32)(addr_t)vsq;
1638 
1639 	cqr->buildclk = get_tod_clock();
1640 	cqr->status = DASD_CQR_FILLED;
1641 	cqr->startdev = device;
1642 	cqr->memdev = device;
1643 	cqr->block = NULL;
1644 	cqr->retries = 256;
1645 	cqr->expires = device->default_expires * HZ;
1646 	/* The command might not be supported. Suppress the error output */
1647 	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
1648 
1649 	rc = dasd_sleep_on_interruptible(cqr);
1650 	if (rc == 0) {
1651 		memcpy(&private->vsq, vsq, sizeof(*vsq));
1652 	} else {
1653 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1654 				"Reading the volume storage information failed with rc=%d", rc);
1655 	}
1656 
1657 	if (useglobal)
1658 		mutex_unlock(&dasd_vol_info_mutex);
1659 	else
1660 		dasd_sfree_request(cqr, cqr->memdev);
1661 
1662 	return rc;
1663 }
1664 
1665 static int dasd_eckd_is_ese(struct dasd_device *device)
1666 {
1667 	struct dasd_eckd_private *private = device->private;
1668 
1669 	return private->vsq.vol_info.ese;
1670 }
1671 
1672 static int dasd_eckd_ext_pool_id(struct dasd_device *device)
1673 {
1674 	struct dasd_eckd_private *private = device->private;
1675 
1676 	return private->vsq.extent_pool_id;
1677 }
1678 
1679 /*
1680  * This value represents the total amount of available space. As more space is
1681  * allocated by ESE volumes, this value will decrease.
1682  * The data for this value is therefore updated on any call.
1683  */
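/* Note: 'rc ? : x' is the GNU ?: extension; it yields rc if non-zero, else x. */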
1684 static int dasd_eckd_space_configured(struct dasd_device *device)
1685 {
1686 	struct dasd_eckd_private *private = device->private;
1687 	int rc;
1688 
1689 	rc = dasd_eckd_read_vol_info(device);
1690 
1691 	return rc ? : private->vsq.space_configured;
1692 }
1693 
1694 /*
1695  * The value of space allocated by an ESE volume may have changed and is
1696  * therefore updated on any call.
1697  */
1698 static int dasd_eckd_space_allocated(struct dasd_device *device)
1699 {
1700 	struct dasd_eckd_private *private = device->private;
1701 	int rc;
1702 
1703 	rc = dasd_eckd_read_vol_info(device);
1704 
1705 	return rc ? : private->vsq.space_allocated;
1706 }
1707 
1708 static int dasd_eckd_logical_capacity(struct dasd_device *device)
1709 {
1710 	struct dasd_eckd_private *private = device->private;
1711 
1712 	return private->vsq.logical_capacity;
1713 }
1714 
1715 static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
1716 {
1717 	struct ext_pool_exhaust_work_data *data;
1718 	struct dasd_device *device;
1719 	struct dasd_device *base;
1720 
1721 	data = container_of(work, struct ext_pool_exhaust_work_data, worker);
1722 	device = data->device;
1723 	base = data->base;
1724 
1725 	if (!base)
1726 		base = device;
1727 	if (dasd_eckd_space_configured(base) != 0) {
1728 		dasd_generic_space_avail(device);
1729 	} else {
1730 		dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
1731 		DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
1732 	}
1733 
1734 	dasd_put_device(device);
1735 	kfree(data);
1736 }
1737 
1738 static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
1739 				      struct dasd_ccw_req *cqr)
1740 {
1741 	struct ext_pool_exhaust_work_data *data;
1742 
1743 	data = kzalloc(sizeof(*data), GFP_ATOMIC);
1744 	if (!data)
1745 		return -ENOMEM;
1746 	INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
1747 	dasd_get_device(device);
1748 	data->device = device;
1749 
1750 	if (cqr->block)
1751 		data->base = cqr->block->base;
1752 	else if (cqr->basedev)
1753 		data->base = cqr->basedev;
1754 	else
1755 		data->base = NULL;
1756 
1757 	schedule_work(&data->worker);
1758 
1759 	return 0;
1760 }
1761 
1762 static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
1763 					struct dasd_rssd_lcq *lcq)
1764 {
1765 	struct dasd_eckd_private *private = device->private;
1766 	int pool_id = dasd_eckd_ext_pool_id(device);
1767 	struct dasd_ext_pool_sum eps;
1768 	int i;
1769 
1770 	for (i = 0; i < lcq->pool_count; i++) {
1771 		eps = lcq->ext_pool_sum[i];
1772 		if (eps.pool_id == pool_id) {
1773 			memcpy(&private->eps, &eps,
1774 			       sizeof(struct dasd_ext_pool_sum));
1775 		}
1776 	}
1777 }
1778 
1779 /* Read Extent Pool Information - Logical Configuration Query */
1780 static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
1781 {
1782 	struct dasd_eckd_private *private = device->private;
1783 	struct dasd_psf_prssd_data *prssdp;
1784 	struct dasd_rssd_lcq *lcq;
1785 	struct dasd_ccw_req *cqr;
1786 	struct ccw1 *ccw;
1787 	int rc;
1788 
1789 	/* This command cannot be executed on an alias device */
1790 	if (private->uid.type == UA_BASE_PAV_ALIAS ||
1791 	    private->uid.type == UA_HYPER_PAV_ALIAS)
1792 		return 0;
1793 
1794 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
1795 				   sizeof(*prssdp) + sizeof(*lcq), device, NULL);
1796 	if (IS_ERR(cqr)) {
1797 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1798 				"Could not allocate initialization request");
1799 		return PTR_ERR(cqr);
1800 	}
1801 
1802 	/* Prepare for Read Subsystem Data */
1803 	prssdp = cqr->data;
1804 	memset(prssdp, 0, sizeof(*prssdp));
1805 	prssdp->order = PSF_ORDER_PRSSD;
1806 	prssdp->suborder = PSF_SUBORDER_LCQ;	/* Logical Configuration Query */
1807 
1808 	ccw = cqr->cpaddr;
1809 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
1810 	ccw->count = sizeof(*prssdp);
1811 	ccw->flags |= CCW_FLAG_CC;
1812 	ccw->cda = (__u32)(addr_t)prssdp;
1813 
1814 	lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
1815 	memset(lcq, 0, sizeof(*lcq));
1816 
1817 	ccw++;
1818 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1819 	ccw->count = sizeof(*lcq);
1820 	ccw->flags |= CCW_FLAG_SLI;
1821 	ccw->cda = (__u32)(addr_t)lcq;
1822 
1823 	cqr->buildclk = get_tod_clock();
1824 	cqr->status = DASD_CQR_FILLED;
1825 	cqr->startdev = device;
1826 	cqr->memdev = device;
1827 	cqr->block = NULL;
1828 	cqr->retries = 256;
1829 	cqr->expires = device->default_expires * HZ;
1830 	/* The command might not be supported. Suppress the error output */
1831 	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
1832 
1833 	rc = dasd_sleep_on_interruptible(cqr);
1834 	if (rc == 0) {
1835 		dasd_eckd_cpy_ext_pool_data(device, lcq);
1836 	} else {
1837 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1838 				"Reading the logical configuration failed with rc=%d", rc);
1839 	}
1840 
1841 	dasd_sfree_request(cqr, cqr->memdev);
1842 
1843 	return rc;
1844 }
1845 
1846 /*
1847  * Depending on the device type, the extent size is specified either as
1848  * cylinders per extent (CKD) or size per extent (FBA).
1849  * A 1 GB extent corresponds to 1113 cylinders, a 16 MB extent to 21 cylinders.
1850  */
1851 static int dasd_eckd_ext_size(struct dasd_device *device)
1852 {
1853 	struct dasd_eckd_private *private = device->private;
1854 	struct dasd_ext_pool_sum eps = private->eps;
1855 
1856 	if (!eps.flags.extent_size_valid)
1857 		return 0;
1858 	if (eps.extent_size.size_1G)
1859 		return 1113;
1860 	if (eps.extent_size.size_16M)
1861 		return 21;
1862 
1863 	return 0;
1864 }
1865 
1866 static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
1867 {
1868 	struct dasd_eckd_private *private = device->private;
1869 
1870 	return private->eps.warn_thrshld;
1871 }
1872 
1873 static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
1874 {
1875 	struct dasd_eckd_private *private = device->private;
1876 
1877 	return private->eps.flags.capacity_at_warnlevel;
1878 }
1879 
1880 /*
1881  * Extent Pool out of space
1882  */
1883 static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
1884 {
1885 	struct dasd_eckd_private *private = device->private;
1886 
1887 	return private->eps.flags.pool_oos;
1888 }
1889 
1890 /*
1891  * Build CP for Perform Subsystem Function - SSC.
1892  */
1893 static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1894 						    int enable_pav)
1895 {
1896 	struct dasd_ccw_req *cqr;
1897 	struct dasd_psf_ssc_data *psf_ssc_data;
1898 	struct ccw1 *ccw;
1899 
1900 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */,
1901 				   sizeof(struct dasd_psf_ssc_data),
1902 				   device, NULL);
1903 
1904 	if (IS_ERR(cqr)) {
1905 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1906 			   "Could not allocate PSF-SSC request");
1907 		return cqr;
1908 	}
1909 	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
1910 	psf_ssc_data->order = PSF_ORDER_SSC;
1911 	psf_ssc_data->suborder = 0xc0;
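	/* the additional suborder and reserved bits request PAV enablement */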
1912 	if (enable_pav) {
1913 		psf_ssc_data->suborder |= 0x08;
1914 		psf_ssc_data->reserved[0] = 0x88;
1915 	}
1916 	ccw = cqr->cpaddr;
1917 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
1918 	ccw->cda = (__u32)(addr_t)psf_ssc_data;
1919 	ccw->count = 66;
1920 
1921 	cqr->startdev = device;
1922 	cqr->memdev = device;
1923 	cqr->block = NULL;
1924 	cqr->retries = 256;
1925 	cqr->expires = 10*HZ;
1926 	cqr->buildclk = get_tod_clock();
1927 	cqr->status = DASD_CQR_FILLED;
1928 	return cqr;
1929 }
1930 
1931 /*
1932  * Perform Subsystem Function.
1933  * It is necessary to trigger CIO for channel revalidation since this
1934  * call might change the behaviour of DASD devices.
1935  */
1936 static int
1937 dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
1938 		  unsigned long flags)
1939 {
1940 	struct dasd_ccw_req *cqr;
1941 	int rc;
1942 
1943 	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
1944 	if (IS_ERR(cqr))
1945 		return PTR_ERR(cqr);
1946 
1947 	/*
1948 	 * Set flags (e.g. turn on failfast) to prevent blocking.
1949 	 * The calling function should handle failed requests.
1950 	 */
1951 	cqr->flags |= flags;
1952 
1953 	rc = dasd_sleep_on(cqr);
1954 	if (!rc)
1955 		/* trigger CIO to reprobe devices */
1956 		css_schedule_reprobe();
1957 	else if (cqr->intrc == -EAGAIN)
1958 		rc = -EAGAIN;
1959 
1960 	dasd_sfree_request(cqr, cqr->memdev);
1961 	return rc;
1962 }
1963 
1964 /*
1965  * Validate the storage server of the current device.
1966  */
1967 static int dasd_eckd_validate_server(struct dasd_device *device,
1968 				     unsigned long flags)
1969 {
1970 	struct dasd_eckd_private *private = device->private;
1971 	int enable_pav, rc;
1972 
1973 	if (private->uid.type == UA_BASE_PAV_ALIAS ||
1974 	    private->uid.type == UA_HYPER_PAV_ALIAS)
1975 		return 0;
1976 	if (dasd_nopav || MACHINE_IS_VM)
1977 		enable_pav = 0;
1978 	else
1979 		enable_pav = 1;
1980 	rc = dasd_eckd_psf_ssc(device, enable_pav, flags);
1981 
1982 	/* The requested feature may not be available on the server;
1983 	 * just report the error and carry on. */
1984 	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
1985 			"returned rc=%d", private->uid.ssid, rc);
1986 	return rc;
1987 }
1988 
1989 /*
1990  * worker to do a validate server in case of a lost pathgroup
1991  */
1992 static void dasd_eckd_do_validate_server(struct work_struct *work)
1993 {
1994 	struct dasd_device *device = container_of(work, struct dasd_device,
1995 						  kick_validate);
1996 	unsigned long flags = 0;
1997 
1998 	set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
1999 	if (dasd_eckd_validate_server(device, flags)
2000 	    == -EAGAIN) {
2001 		/* schedule worker again if failed */
2002 		schedule_work(&device->kick_validate);
2003 		return;
2004 	}
2005 
2006 	dasd_put_device(device);
2007 }
2008 
2009 static void dasd_eckd_kick_validate_server(struct dasd_device *device)
2010 {
2011 	dasd_get_device(device);
2012 	/* exit if device not online or in offline processing */
2013 	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
2014 	    device->state < DASD_STATE_ONLINE) {
2015 		dasd_put_device(device);
2016 		return;
2017 	}
2018 	/* queue call to do_validate_server to the kernel event daemon. */
2019 	if (!schedule_work(&device->kick_validate))
2020 		dasd_put_device(device);
2021 }
2022 
2023 /*
2024  * Check device characteristics.
2025  * If the device is accessible using ECKD discipline, the device is enabled.
2026  */
2027 static int
2028 dasd_eckd_check_characteristics(struct dasd_device *device)
2029 {
2030 	struct dasd_eckd_private *private = device->private;
2031 	struct dasd_block *block;
2032 	struct dasd_uid temp_uid;
2033 	int rc, i;
2034 	int readonly;
2035 	unsigned long value;
2036 
2037 	/* setup work queue for validate server */
2038 	INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
2039 	/* setup work queue for summary unit check */
2040 	INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
2041 
2042 	if (!ccw_device_is_pathgroup(device->cdev)) {
2043 		dev_warn(&device->cdev->dev,
2044 			 "A channel path group could not be established\n");
2045 		return -EIO;
2046 	}
2047 	if (!ccw_device_is_multipath(device->cdev)) {
2048 		dev_info(&device->cdev->dev,
2049 			 "The DASD is not operating in multipath mode\n");
2050 	}
2051 	if (!private) {
2052 		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
2053 		if (!private) {
2054 			dev_warn(&device->cdev->dev,
2055 				 "Allocating memory for private DASD data "
2056 				 "failed\n");
2057 			return -ENOMEM;
2058 		}
2059 		device->private = private;
2060 	} else {
2061 		memset(private, 0, sizeof(*private));
2062 	}
2063 	/* Invalidate status of initial analysis. */
2064 	private->init_cqr_status = -1;
2065 	/* Set default cache operations. */
2066 	private->attrib.operation = DASD_NORMAL_CACHE;
2067 	private->attrib.nr_cyl = 0;
2068 
2069 	/* Read Configuration Data */
2070 	rc = dasd_eckd_read_conf(device);
2071 	if (rc)
2072 		goto out_err1;
2073 
2074 	/* set some default values */
2075 	device->default_expires = DASD_EXPIRES;
2076 	device->default_retries = DASD_RETRIES;
2077 	device->path_thrhld = DASD_ECKD_PATH_THRHLD;
2078 	device->path_interval = DASD_ECKD_PATH_INTERVAL;
2079 
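	/* the GNEQ timeout is encoded as number * 10^value */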
2080 	if (private->gneq) {
2081 		value = 1;
2082 		for (i = 0; i < private->gneq->timeout.value; i++)
2083 			value = 10 * value;
2084 		value = value * private->gneq->timeout.number;
2085 		/* do not accept useless values */
2086 		if (value != 0 && value <= DASD_EXPIRES_MAX)
2087 			device->default_expires = value;
2088 	}
2089 
2090 	dasd_eckd_get_uid(device, &temp_uid);
2091 	if (temp_uid.type == UA_BASE_DEVICE) {
2092 		block = dasd_alloc_block();
2093 		if (IS_ERR(block)) {
2094 			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
2095 					"could not allocate dasd "
2096 					"block structure");
2097 			rc = PTR_ERR(block);
2098 			goto out_err1;
2099 		}
2100 		device->block = block;
2101 		block->base = device;
2102 	}
2103 
2104 	/* register lcu with alias handling, enable PAV */
2105 	rc = dasd_alias_make_device_known_to_lcu(device);
2106 	if (rc)
2107 		goto out_err2;
2108 
2109 	dasd_eckd_validate_server(device, 0);
2110 
2111 	/* device may report different configuration data after LCU setup */
2112 	rc = dasd_eckd_read_conf(device);
2113 	if (rc)
2114 		goto out_err3;
2115 
2116 	dasd_path_create_kobjects(device);
2117 
2118 	/* Read Feature Codes */
2119 	dasd_eckd_read_features(device);
2120 
2121 	/* Read Volume Information */
2122 	dasd_eckd_read_vol_info(device);
2123 
2124 	/* Read Extent Pool Information */
2125 	dasd_eckd_read_ext_pool_info(device);
2126 
2127 	/* Read Device Characteristics */
2128 	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
2129 					 &private->rdc_data, 64);
2130 	if (rc) {
2131 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
2132 				"Read device characteristic failed, rc=%d", rc);
2133 		goto out_err3;
2134 	}
2135 
2136 	if ((device->features & DASD_FEATURE_USERAW) &&
2137 	    !(private->rdc_data.facilities.RT_in_LR)) {
2138 		dev_err(&device->cdev->dev, "The storage server does not "
2139 			"support raw-track access\n");
2140 		rc = -EINVAL;
2141 		goto out_err3;
2142 	}
2143 
2144 	/* find the valid number of cylinders */
2145 	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
2146 	    private->rdc_data.long_no_cyl)
2147 		private->real_cyl = private->rdc_data.long_no_cyl;
2148 	else
2149 		private->real_cyl = private->rdc_data.no_cyl;
2150 
2151 	private->fcx_max_data = get_fcx_max_data(device);
2152 
2153 	readonly = dasd_device_is_ro(device);
2154 	if (readonly)
2155 		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
2156 
2157 	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
2158 		 "with %d cylinders, %d heads, %d sectors%s\n",
2159 		 private->rdc_data.dev_type,
2160 		 private->rdc_data.dev_model,
2161 		 private->rdc_data.cu_type,
2162 		 private->rdc_data.cu_model.model,
2163 		 private->real_cyl,
2164 		 private->rdc_data.trk_per_cyl,
2165 		 private->rdc_data.sec_per_trk,
2166 		 readonly ? ", read-only device" : "");
2167 	return 0;
2168 
2169 out_err3:
2170 	dasd_alias_disconnect_device_from_lcu(device);
2171 out_err2:
2172 	dasd_free_block(device->block);
2173 	device->block = NULL;
2174 out_err1:
2175 	dasd_eckd_clear_conf_data(device);
2176 	kfree(device->private);
2177 	device->private = NULL;
2178 	return rc;
2179 }
2180 
2181 static void dasd_eckd_uncheck_device(struct dasd_device *device)
2182 {
2183 	struct dasd_eckd_private *private = device->private;
2184 
2185 	if (!private)
2186 		return;
2187 
2188 	dasd_alias_disconnect_device_from_lcu(device);
2189 	private->ned = NULL;
2190 	private->sneq = NULL;
2191 	private->vdsneq = NULL;
2192 	private->gneq = NULL;
2193 	dasd_eckd_clear_conf_data(device);
2194 }
2195 
2196 static struct dasd_ccw_req *
2197 dasd_eckd_analysis_ccw(struct dasd_device *device)
2198 {
2199 	struct dasd_eckd_private *private = device->private;
2200 	struct eckd_count *count_data;
2201 	struct LO_eckd_data *LO_data;
2202 	struct dasd_ccw_req *cqr;
2203 	struct ccw1 *ccw;
2204 	int cplength, datasize;
2205 	int i;
2206 
2207 	cplength = 8;
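	/* 1 define extent, 2 locate record and 5 read count CCWs */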
2208 	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
2209 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
2210 				   NULL);
2211 	if (IS_ERR(cqr))
2212 		return cqr;
2213 	ccw = cqr->cpaddr;
2214 	/* Define extent for the first 2 tracks. */
2215 	define_extent(ccw++, cqr->data, 0, 1,
2216 		      DASD_ECKD_CCW_READ_COUNT, device, 0);
2217 	LO_data = cqr->data + sizeof(struct DE_eckd_data);
2218 	/* Locate record for the first 4 records on track 0. */
2219 	ccw[-1].flags |= CCW_FLAG_CC;
2220 	locate_record(ccw++, LO_data++, 0, 0, 4,
2221 		      DASD_ECKD_CCW_READ_COUNT, device, 0);
2222 
2223 	count_data = private->count_area;
2224 	for (i = 0; i < 4; i++) {
2225 		ccw[-1].flags |= CCW_FLAG_CC;
2226 		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2227 		ccw->flags = 0;
2228 		ccw->count = 8;
2229 		ccw->cda = (__u32)(addr_t) count_data;
2230 		ccw++;
2231 		count_data++;
2232 	}
2233 
2234 	/* Locate record for the first record on track 1. */
2235 	ccw[-1].flags |= CCW_FLAG_CC;
2236 	locate_record(ccw++, LO_data++, 1, 0, 1,
2237 		      DASD_ECKD_CCW_READ_COUNT, device, 0);
2238 	/* Read count ccw. */
2239 	ccw[-1].flags |= CCW_FLAG_CC;
2240 	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2241 	ccw->flags = 0;
2242 	ccw->count = 8;
2243 	ccw->cda = (__u32)(addr_t) count_data;
2244 
2245 	cqr->block = NULL;
2246 	cqr->startdev = device;
2247 	cqr->memdev = device;
2248 	cqr->retries = 255;
2249 	cqr->buildclk = get_tod_clock();
2250 	cqr->status = DASD_CQR_FILLED;
2251 	/* Set flags to suppress output for expected errors */
2252 	set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2253 
2254 	return cqr;
2255 }
2256 
2257 /* differentiate between 'no record found' and any other error */
2258 static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
2259 {
2260 	char *sense;
2261 	if (init_cqr->status == DASD_CQR_DONE)
2262 		return INIT_CQR_OK;
2263 	else if (init_cqr->status == DASD_CQR_NEED_ERP ||
2264 		 init_cqr->status == DASD_CQR_FAILED) {
2265 		sense = dasd_get_sense(&init_cqr->irb);
2266 		if (sense && (sense[1] & SNS1_NO_REC_FOUND))
2267 			return INIT_CQR_UNFORMATTED;
2268 		else
2269 			return INIT_CQR_ERROR;
2270 	} else
2271 		return INIT_CQR_ERROR;
2272 }
2273 
2274 /*
2275  * This is the callback function for the init_analysis cqr. It saves
2276  * the status of the initial analysis ccw before it frees it and kicks
2277  * the device to continue the startup sequence. This will call
2278  * dasd_eckd_do_analysis again (if the device has not been marked
2279  * for deletion in the meantime).
2280  */
2281 static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
2282 					void *data)
2283 {
2284 	struct dasd_device *device = init_cqr->startdev;
2285 	struct dasd_eckd_private *private = device->private;
2286 
2287 	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
2288 	dasd_sfree_request(init_cqr, device);
2289 	dasd_kick_device(device);
2290 }
2291 
2292 static int dasd_eckd_start_analysis(struct dasd_block *block)
2293 {
2294 	struct dasd_ccw_req *init_cqr;
2295 
2296 	init_cqr = dasd_eckd_analysis_ccw(block->base);
2297 	if (IS_ERR(init_cqr))
2298 		return PTR_ERR(init_cqr);
2299 	init_cqr->callback = dasd_eckd_analysis_callback;
2300 	init_cqr->callback_data = NULL;
2301 	init_cqr->expires = 5*HZ;
2302 	/* first try without ERP, so we can later handle unformatted
2303 	 * devices as a special case
2304 	 */
2305 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
2306 	init_cqr->retries = 0;
2307 	dasd_add_request_head(init_cqr);
2308 	return -EAGAIN;
2309 }
2310 
2311 static int dasd_eckd_end_analysis(struct dasd_block *block)
2312 {
2313 	struct dasd_device *device = block->base;
2314 	struct dasd_eckd_private *private = device->private;
2315 	struct eckd_count *count_area;
2316 	unsigned int sb, blk_per_trk;
2317 	int status, i;
2318 	struct dasd_ccw_req *init_cqr;
2319 
2320 	status = private->init_cqr_status;
2321 	private->init_cqr_status = -1;
2322 	if (status == INIT_CQR_ERROR) {
2323 		/* try again, this time with full ERP */
2324 		init_cqr = dasd_eckd_analysis_ccw(device);
2325 		dasd_sleep_on(init_cqr);
2326 		status = dasd_eckd_analysis_evaluation(init_cqr);
2327 		dasd_sfree_request(init_cqr, device);
2328 	}
2329 
2330 	if (device->features & DASD_FEATURE_USERAW) {
2331 		block->bp_block = DASD_RAW_BLOCKSIZE;
2332 		blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
2333 		block->s2b_shift = 3;
2334 		goto raw;
2335 	}
2336 
2337 	if (status == INIT_CQR_UNFORMATTED) {
2338 		dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
2339 		return -EMEDIUMTYPE;
2340 	} else if (status == INIT_CQR_ERROR) {
2341 		dev_err(&device->cdev->dev,
2342 			"Detecting the DASD disk layout failed because "
2343 			"of an I/O error\n");
2344 		return -EIO;
2345 	}
2346 
2347 	private->uses_cdl = 1;
2348 	/* Check Track 0 for Compatible Disk Layout */
2349 	count_area = NULL;
2350 	for (i = 0; i < 3; i++) {
2351 		if (private->count_area[i].kl != 4 ||
2352 		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
2353 		    private->count_area[i].cyl != 0 ||
2354 		    private->count_area[i].head != count_area_head[i] ||
2355 		    private->count_area[i].record != count_area_rec[i]) {
2356 			private->uses_cdl = 0;
2357 			break;
2358 		}
2359 	}
2360 	if (i == 3)
2361 		count_area = &private->count_area[3];
2362 
2363 	if (private->uses_cdl == 0) {
2364 		for (i = 0; i < 5; i++) {
2365 			if ((private->count_area[i].kl != 0) ||
2366 			    (private->count_area[i].dl !=
2367 			     private->count_area[0].dl) ||
2368 			    private->count_area[i].cyl !=  0 ||
2369 			    private->count_area[i].head != count_area_head[i] ||
2370 			    private->count_area[i].record != count_area_rec[i])
2371 				break;
2372 		}
2373 		if (i == 5)
2374 			count_area = &private->count_area[0];
2375 	} else {
2376 		if (private->count_area[3].record == 1)
2377 			dev_warn(&device->cdev->dev,
2378 				 "Track 0 has no records following the VTOC\n");
2379 	}
2380 
2381 	if (count_area != NULL && count_area->kl == 0) {
2382 		/* we found nothing violating our disk layout */
2383 		if (dasd_check_blocksize(count_area->dl) == 0)
2384 			block->bp_block = count_area->dl;
2385 	}
2386 	if (block->bp_block == 0) {
2387 		dev_warn(&device->cdev->dev,
2388 			 "The disk layout of the DASD is not supported\n");
2389 		return -EMEDIUMTYPE;
2390 	}
2391 	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
2392 	for (sb = 512; sb < block->bp_block; sb = sb << 1)
2393 		block->s2b_shift++;
2394 
2395 	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
2396 
2397 raw:
2398 	block->blocks = ((unsigned long) private->real_cyl *
2399 			  private->rdc_data.trk_per_cyl *
2400 			  blk_per_trk);
2401 
2402 	dev_info(&device->cdev->dev,
2403 		 "DASD with %u KB/block, %lu KB total size, %u KB/track, "
2404 		 "%s\n", (block->bp_block >> 10),
2405 		 (((unsigned long) private->real_cyl *
2406 		   private->rdc_data.trk_per_cyl *
2407 		   blk_per_trk * (block->bp_block >> 9)) >> 1),
2408 		 ((blk_per_trk * block->bp_block) >> 10),
2409 		 private->uses_cdl ?
2410 		 "compatible disk layout" : "linux disk layout");
2411 
2412 	return 0;
2413 }
2414 
2415 static int dasd_eckd_do_analysis(struct dasd_block *block)
2416 {
2417 	struct dasd_eckd_private *private = block->base->private;
2418 
2419 	if (private->init_cqr_status < 0)
2420 		return dasd_eckd_start_analysis(block);
2421 	else
2422 		return dasd_eckd_end_analysis(block);
2423 }
2424 
2425 static int dasd_eckd_basic_to_ready(struct dasd_device *device)
2426 {
2427 	return dasd_alias_add_device(device);
2428 }
2429 
2430 static int dasd_eckd_online_to_ready(struct dasd_device *device)
2431 {
2432 	if (cancel_work_sync(&device->reload_device))
2433 		dasd_put_device(device);
2434 	if (cancel_work_sync(&device->kick_validate))
2435 		dasd_put_device(device);
2436 
2437 	return 0;
2438 }
2439 
2440 static int dasd_eckd_basic_to_known(struct dasd_device *device)
2441 {
2442 	return dasd_alias_remove_device(device);
2443 }
2444 
2445 static int
2446 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
2447 {
2448 	struct dasd_eckd_private *private = block->base->private;
2449 
2450 	if (dasd_check_blocksize(block->bp_block) == 0) {
2451 		geo->sectors = recs_per_track(&private->rdc_data,
2452 					      0, block->bp_block);
2453 	}
2454 	geo->cylinders = private->rdc_data.no_cyl;
2455 	geo->heads = private->rdc_data.trk_per_cyl;
2456 	return 0;
2457 }
2458 
2459 /*
2460  * Build the TCW request for the format check
2461  */
2462 static struct dasd_ccw_req *
2463 dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
2464 			  int enable_pav, struct eckd_count *fmt_buffer,
2465 			  int rpt)
2466 {
2467 	struct dasd_eckd_private *start_priv;
2468 	struct dasd_device *startdev = NULL;
2469 	struct tidaw *last_tidaw = NULL;
2470 	struct dasd_ccw_req *cqr;
2471 	struct itcw *itcw;
2472 	int itcw_size;
2473 	int count;
2474 	int rc;
2475 	int i;
2476 
2477 	if (enable_pav)
2478 		startdev = dasd_alias_get_start_dev(base);
2479 
2480 	if (!startdev)
2481 		startdev = base;
2482 
2483 	start_priv = startdev->private;
2484 
2485 	count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2486 
2487 	/*
2488 	 * We add 'count' tidaws to the itcw;
2489 	 * calculate the corresponding itcw_size.
2490 	 */
2491 	itcw_size = itcw_calc_size(0, count, 0);
2492 
2493 	cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2494 	if (IS_ERR(cqr))
2495 		return cqr;
2496 
2497 	start_priv->count++;
2498 
2499 	itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
2500 	if (IS_ERR(itcw)) {
2501 		rc = -EINVAL;
2502 		goto out_err;
2503 	}
2504 
2505 	cqr->cpaddr = itcw_get_tcw(itcw);
2506 	rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
2507 			  DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
2508 			  sizeof(struct eckd_count),
2509 			  count * sizeof(struct eckd_count), 0, rpt);
2510 	if (rc)
2511 		goto out_err;
2512 
2513 	for (i = 0; i < count; i++) {
2514 		last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
2515 					    sizeof(struct eckd_count));
2516 		if (IS_ERR(last_tidaw)) {
2517 			rc = -EINVAL;
2518 			goto out_err;
2519 		}
2520 	}
2521 
2522 	last_tidaw->flags |= TIDAW_FLAGS_LAST;
2523 	itcw_finalize(itcw);
2524 
2525 	cqr->cpmode = 1;
2526 	cqr->startdev = startdev;
2527 	cqr->memdev = startdev;
2528 	cqr->basedev = base;
2529 	cqr->retries = startdev->default_retries;
2530 	cqr->expires = startdev->default_expires * HZ;
2531 	cqr->buildclk = get_tod_clock();
2532 	cqr->status = DASD_CQR_FILLED;
2533 	/* Set flags to suppress output for expected errors */
2534 	set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
2535 	set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
2536 
2537 	return cqr;
2538 
2539 out_err:
2540 	dasd_sfree_request(cqr, startdev);
2541 
2542 	return ERR_PTR(rc);
2543 }
2544 
2545 /*
2546  * Build the CCW request for the format check
2547  */
2548 static struct dasd_ccw_req *
2549 dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
2550 		      int enable_pav, struct eckd_count *fmt_buffer, int rpt)
2551 {
2552 	struct dasd_eckd_private *start_priv;
2553 	struct dasd_eckd_private *base_priv;
2554 	struct dasd_device *startdev = NULL;
2555 	struct dasd_ccw_req *cqr;
2556 	struct ccw1 *ccw;
2557 	void *data;
2558 	int cplength, datasize;
2559 	int use_prefix;
2560 	int count;
2561 	int i;
2562 
2563 	if (enable_pav)
2564 		startdev = dasd_alias_get_start_dev(base);
2565 
2566 	if (!startdev)
2567 		startdev = base;
2568 
2569 	start_priv = startdev->private;
2570 	base_priv = base->private;
2571 
2572 	count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2573 
2574 	use_prefix = base_priv->features.feature[8] & 0x01;
2575 
2576 	if (use_prefix) {
2577 		cplength = 1;
2578 		datasize = sizeof(struct PFX_eckd_data);
2579 	} else {
2580 		cplength = 2;
2581 		datasize = sizeof(struct DE_eckd_data) +
2582 			sizeof(struct LO_eckd_data);
2583 	}
2584 	cplength += count;
2585 
2586 	cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2587 	if (IS_ERR(cqr))
2588 		return cqr;
2589 
2590 	start_priv->count++;
2591 	data = cqr->data;
2592 	ccw = cqr->cpaddr;
2593 
2594 	if (use_prefix) {
2595 		prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
2596 			   DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
2597 			   count, 0, 0);
2598 	} else {
2599 		define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
2600 			      DASD_ECKD_CCW_READ_COUNT, startdev, 0);
2601 
2602 		data += sizeof(struct DE_eckd_data);
2603 		ccw[-1].flags |= CCW_FLAG_CC;
2604 
2605 		locate_record(ccw++, data, fdata->start_unit, 0, count,
2606 			      DASD_ECKD_CCW_READ_COUNT, base, 0);
2607 	}
2608 
2609 	for (i = 0; i < count; i++) {
2610 		ccw[-1].flags |= CCW_FLAG_CC;
2611 		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2612 		ccw->flags = CCW_FLAG_SLI;
2613 		ccw->count = 8;
2614 		ccw->cda = (__u32)(addr_t) fmt_buffer;
2615 		ccw++;
2616 		fmt_buffer++;
2617 	}
2618 
2619 	cqr->startdev = startdev;
2620 	cqr->memdev = startdev;
2621 	cqr->basedev = base;
2622 	cqr->retries = DASD_RETRIES;
2623 	cqr->expires = startdev->default_expires * HZ;
2624 	cqr->buildclk = get_tod_clock();
2625 	cqr->status = DASD_CQR_FILLED;
2626 	/* Set flags to suppress output for expected errors */
2627 	set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2628 
2629 	return cqr;
2630 }
2631 
2632 static struct dasd_ccw_req *
2633 dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
2634 		       struct format_data_t *fdata, int enable_pav)
2635 {
2636 	struct dasd_eckd_private *base_priv;
2637 	struct dasd_eckd_private *start_priv;
2638 	struct dasd_ccw_req *fcp;
2639 	struct eckd_count *ect;
2640 	struct ch_t address;
2641 	struct ccw1 *ccw;
2642 	void *data;
2643 	int rpt;
2644 	int cplength, datasize;
2645 	int i, j;
2646 	int intensity = 0;
2647 	int r0_perm;
2648 	int nr_tracks;
2649 	int use_prefix;
2650 
2651 	if (enable_pav)
2652 		startdev = dasd_alias_get_start_dev(base);
2653 
2654 	if (!startdev)
2655 		startdev = base;
2656 
2657 	start_priv = startdev->private;
2658 	base_priv = base->private;
2659 
2660 	rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
2661 
2662 	nr_tracks = fdata->stop_unit - fdata->start_unit + 1;
2663 
2664 	/*
2665 	 * fdata->intensity is a bit string that tells us what to do:
2666 	 *   Bit 0: write record zero
2667 	 *   Bit 1: write home address, currently not supported
2668 	 *   Bit 2: invalidate tracks
2669 	 *   Bit 3: use OS/390 compatible disk layout (cdl)
2670 	 *   Bit 4: do not allow storage subsystem to modify record zero
2671 	 * Only some bit combinations make sense.
2672 	 */
2673 	if (fdata->intensity & 0x10) {
2674 		r0_perm = 0;
2675 		intensity = fdata->intensity & ~0x10;
2676 	} else {
2677 		r0_perm = 1;
2678 		intensity = fdata->intensity;
2679 	}
2680 
2681 	use_prefix = base_priv->features.feature[8] & 0x01;
2682 
2683 	switch (intensity) {
2684 	case 0x00:	/* Normal format */
2685 	case 0x08:	/* Normal format, use cdl. */
2686 		cplength = 2 + (rpt*nr_tracks);
2687 		if (use_prefix)
2688 			datasize = sizeof(struct PFX_eckd_data) +
2689 				sizeof(struct LO_eckd_data) +
2690 				rpt * nr_tracks * sizeof(struct eckd_count);
2691 		else
2692 			datasize = sizeof(struct DE_eckd_data) +
2693 				sizeof(struct LO_eckd_data) +
2694 				rpt * nr_tracks * sizeof(struct eckd_count);
2695 		break;
2696 	case 0x01:	/* Write record zero and format track. */
2697 	case 0x09:	/* Write record zero and format track, use cdl. */
2698 		cplength = 2 + rpt * nr_tracks;
2699 		if (use_prefix)
2700 			datasize = sizeof(struct PFX_eckd_data) +
2701 				sizeof(struct LO_eckd_data) +
2702 				sizeof(struct eckd_count) +
2703 				rpt * nr_tracks * sizeof(struct eckd_count);
2704 		else
2705 			datasize = sizeof(struct DE_eckd_data) +
2706 				sizeof(struct LO_eckd_data) +
2707 				sizeof(struct eckd_count) +
2708 				rpt * nr_tracks * sizeof(struct eckd_count);
2709 		break;
2710 	case 0x04:	/* Invalidate track. */
2711 	case 0x0c:	/* Invalidate track, use cdl. */
2712 		cplength = 3;
2713 		if (use_prefix)
2714 			datasize = sizeof(struct PFX_eckd_data) +
2715 				sizeof(struct LO_eckd_data) +
2716 				sizeof(struct eckd_count);
2717 		else
2718 			datasize = sizeof(struct DE_eckd_data) +
2719 				sizeof(struct LO_eckd_data) +
2720 				sizeof(struct eckd_count);
2721 		break;
2722 	default:
2723 		dev_warn(&startdev->cdev->dev,
2724 			 "An I/O control call used incorrect flags 0x%x\n",
2725 			 fdata->intensity);
2726 		return ERR_PTR(-EINVAL);
2727 	}
2728 
2729 	fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2730 	if (IS_ERR(fcp))
2731 		return fcp;
2732 
2733 	start_priv->count++;
2734 	data = fcp->data;
2735 	ccw = fcp->cpaddr;
2736 
2737 	switch (intensity & ~0x08) {
2738 	case 0x00: /* Normal format. */
2739 		if (use_prefix) {
2740 			prefix(ccw++, (struct PFX_eckd_data *) data,
2741 			       fdata->start_unit, fdata->stop_unit,
2742 			       DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2743 			/* grant subsystem permission to format R0 */
2744 			if (r0_perm)
2745 				((struct PFX_eckd_data *)data)
2746 					->define_extent.ga_extended |= 0x04;
2747 			data += sizeof(struct PFX_eckd_data);
2748 		} else {
2749 			define_extent(ccw++, (struct DE_eckd_data *) data,
2750 				      fdata->start_unit, fdata->stop_unit,
2751 				      DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2752 			/* grant subsystem permission to format R0 */
2753 			if (r0_perm)
2754 				((struct DE_eckd_data *) data)
2755 					->ga_extended |= 0x04;
2756 			data += sizeof(struct DE_eckd_data);
2757 		}
2758 		ccw[-1].flags |= CCW_FLAG_CC;
2759 		locate_record(ccw++, (struct LO_eckd_data *) data,
2760 			      fdata->start_unit, 0, rpt*nr_tracks,
2761 			      DASD_ECKD_CCW_WRITE_CKD, base,
2762 			      fdata->blksize);
2763 		data += sizeof(struct LO_eckd_data);
2764 		break;
2765 	case 0x01: /* Write record zero + format track. */
2766 		if (use_prefix) {
2767 			prefix(ccw++, (struct PFX_eckd_data *) data,
2768 			       fdata->start_unit, fdata->stop_unit,
2769 			       DASD_ECKD_CCW_WRITE_RECORD_ZERO,
2770 			       base, startdev);
2771 			data += sizeof(struct PFX_eckd_data);
2772 		} else {
2773 			define_extent(ccw++, (struct DE_eckd_data *) data,
2774 			       fdata->start_unit, fdata->stop_unit,
2775 			       DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
2776 			data += sizeof(struct DE_eckd_data);
2777 		}
2778 		ccw[-1].flags |= CCW_FLAG_CC;
2779 		locate_record(ccw++, (struct LO_eckd_data *) data,
2780 			      fdata->start_unit, 0, rpt * nr_tracks + 1,
2781 			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
2782 			      base->block->bp_block);
2783 		data += sizeof(struct LO_eckd_data);
2784 		break;
2785 	case 0x04: /* Invalidate track. */
2786 		if (use_prefix) {
2787 			prefix(ccw++, (struct PFX_eckd_data *) data,
2788 			       fdata->start_unit, fdata->stop_unit,
2789 			       DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2790 			data += sizeof(struct PFX_eckd_data);
2791 		} else {
2792 			define_extent(ccw++, (struct DE_eckd_data *) data,
2793 			       fdata->start_unit, fdata->stop_unit,
2794 			       DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2795 			data += sizeof(struct DE_eckd_data);
2796 		}
2797 		ccw[-1].flags |= CCW_FLAG_CC;
2798 		locate_record(ccw++, (struct LO_eckd_data *) data,
2799 			      fdata->start_unit, 0, 1,
2800 			      DASD_ECKD_CCW_WRITE_CKD, base, 8);
2801 		data += sizeof(struct LO_eckd_data);
2802 		break;
2803 	}
2804 
2805 	for (j = 0; j < nr_tracks; j++) {
2806 		/* calculate cylinder and head for the current track */
2807 		set_ch_t(&address,
2808 			 (fdata->start_unit + j) /
2809 			 base_priv->rdc_data.trk_per_cyl,
2810 			 (fdata->start_unit + j) %
2811 			 base_priv->rdc_data.trk_per_cyl);
2812 		if (intensity & 0x01) {	/* write record zero */
2813 			ect = (struct eckd_count *) data;
2814 			data += sizeof(struct eckd_count);
2815 			ect->cyl = address.cyl;
2816 			ect->head = address.head;
2817 			ect->record = 0;
2818 			ect->kl = 0;
2819 			ect->dl = 8;
2820 			ccw[-1].flags |= CCW_FLAG_CC;
2821 			ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
2822 			ccw->flags = CCW_FLAG_SLI;
2823 			ccw->count = 8;
2824 			ccw->cda = (__u32)(addr_t) ect;
2825 			ccw++;
2826 		}
2827 		if ((intensity & ~0x08) & 0x04) {	/* erase track */
2828 			ect = (struct eckd_count *) data;
2829 			data += sizeof(struct eckd_count);
2830 			ect->cyl = address.cyl;
2831 			ect->head = address.head;
2832 			ect->record = 1;
2833 			ect->kl = 0;
2834 			ect->dl = 0;
2835 			ccw[-1].flags |= CCW_FLAG_CC;
2836 			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
2837 			ccw->flags = CCW_FLAG_SLI;
2838 			ccw->count = 8;
2839 			ccw->cda = (__u32)(addr_t) ect;
2840 		} else {		/* write remaining records */
2841 			for (i = 0; i < rpt; i++) {
2842 				ect = (struct eckd_count *) data;
2843 				data += sizeof(struct eckd_count);
2844 				ect->cyl = address.cyl;
2845 				ect->head = address.head;
2846 				ect->record = i + 1;
2847 				ect->kl = 0;
2848 				ect->dl = fdata->blksize;
2849 				/*
2850 				 * Check for special tracks 0-1
2851 				 * when formatting CDL
2852 				 */
2853 				if ((intensity & 0x08) &&
2854 				    address.cyl == 0 && address.head == 0) {
2855 					if (i < 3) {
2856 						ect->kl = 4;
2857 						ect->dl = sizes_trk0[i] - 4;
2858 					}
2859 				}
2860 				if ((intensity & 0x08) &&
2861 				    address.cyl == 0 && address.head == 1) {
2862 					ect->kl = 44;
2863 					ect->dl = LABEL_SIZE - 44;
2864 				}
2865 				ccw[-1].flags |= CCW_FLAG_CC;
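				/* only the first record of each track after
				 * the first uses the multi-track opcode */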
2866 				if (i != 0 || j == 0)
2867 					ccw->cmd_code =
2868 						DASD_ECKD_CCW_WRITE_CKD;
2869 				else
2870 					ccw->cmd_code =
2871 						DASD_ECKD_CCW_WRITE_CKD_MT;
2872 				ccw->flags = CCW_FLAG_SLI;
2873 				ccw->count = 8;
2874 				ccw->cda = (__u32)(addr_t) ect;
2875 				ccw++;
2876 			}
2877 		}
2878 	}
2879 
2880 	fcp->startdev = startdev;
2881 	fcp->memdev = startdev;
2882 	fcp->basedev = base;
2883 	fcp->retries = 256;
2884 	fcp->expires = startdev->default_expires * HZ;
2885 	fcp->buildclk = get_tod_clock();
2886 	fcp->status = DASD_CQR_FILLED;
2887 
2888 	return fcp;
2889 }
2890 
2891 /*
2892  * Wrapper function to build a CCW request depending on input data
2893  */
2894 static struct dasd_ccw_req *
2895 dasd_eckd_format_build_ccw_req(struct dasd_device *base,
2896 			       struct format_data_t *fdata, int enable_pav,
2897 			       int tpm, struct eckd_count *fmt_buffer, int rpt)
2898 {
2899 	struct dasd_ccw_req *ccw_req;
2900 
2901 	if (!fmt_buffer) {
2902 		ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav);
2903 	} else {
2904 		if (tpm)
2905 			ccw_req = dasd_eckd_build_check_tcw(base, fdata,
2906 							    enable_pav,
2907 							    fmt_buffer, rpt);
2908 		else
2909 			ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
2910 							fmt_buffer, rpt);
2911 	}
2912 
2913 	return ccw_req;
2914 }
2915 
2916 /*
2917  * Sanity checks on format_data
2918  */
2919 static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
2920 					  struct format_data_t *fdata)
2921 {
2922 	struct dasd_eckd_private *private = base->private;
2923 
2924 	if (fdata->start_unit >=
2925 	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2926 		dev_warn(&base->cdev->dev,
2927 			 "Start track number %u used in formatting is too big\n",
2928 			 fdata->start_unit);
2929 		return -EINVAL;
2930 	}
2931 	if (fdata->stop_unit >=
2932 	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2933 		dev_warn(&base->cdev->dev,
2934 			 "Stop track number %u used in formatting is too big\n",
2935 			 fdata->stop_unit);
2936 		return -EINVAL;
2937 	}
2938 	if (fdata->start_unit > fdata->stop_unit) {
2939 		dev_warn(&base->cdev->dev,
2940 			 "Start track %u used in formatting exceeds end track\n",
2941 			 fdata->start_unit);
2942 		return -EINVAL;
2943 	}
2944 	if (dasd_check_blocksize(fdata->blksize) != 0) {
2945 		dev_warn(&base->cdev->dev,
2946 			 "The DASD cannot be formatted with block size %u\n",
2947 			 fdata->blksize);
2948 		return -EINVAL;
2949 	}
2950 	return 0;
2951 }
2952 
2953 /*
2954  * This function will process format_data originally coming from an IOCTL
2955  */
2956 static int dasd_eckd_format_process_data(struct dasd_device *base,
2957 					 struct format_data_t *fdata,
2958 					 int enable_pav, int tpm,
2959 					 struct eckd_count *fmt_buffer, int rpt,
2960 					 struct irb *irb)
2961 {
2962 	struct dasd_eckd_private *private = base->private;
2963 	struct dasd_ccw_req *cqr, *n;
2964 	struct list_head format_queue;
2965 	struct dasd_device *device;
2966 	char *sense = NULL;
2967 	int old_start, old_stop, format_step;
2968 	int step, retry;
2969 	int rc;
2970 
2971 	rc = dasd_eckd_format_sanity_checks(base, fdata);
2972 	if (rc)
2973 		return rc;
2974 
2975 	INIT_LIST_HEAD(&format_queue);
2976 
2977 	old_start = fdata->start_unit;
2978 	old_stop = fdata->stop_unit;
2979 
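	/*
	 * Determine how many tracks each request may cover so that a single
	 * request does not exceed DASD_CQR_MAX_CCW CCWs.
	 */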
2980 	if (!tpm && fmt_buffer != NULL) {
2981 		/* Command Mode / Format Check */
2982 		format_step = 1;
2983 	} else if (tpm && fmt_buffer != NULL) {
2984 		/* Transport Mode / Format Check */
2985 		format_step = DASD_CQR_MAX_CCW / rpt;
2986 	} else {
2987 		/* Normal Formatting */
2988 		format_step = DASD_CQR_MAX_CCW /
2989 			recs_per_track(&private->rdc_data, 0, fdata->blksize);
2990 	}
2991 
2992 	do {
2993 		retry = 0;
2994 		while (fdata->start_unit <= old_stop) {
2995 			step = fdata->stop_unit - fdata->start_unit + 1;
2996 			if (step > format_step) {
2997 				fdata->stop_unit =
2998 					fdata->start_unit + format_step - 1;
2999 			}
3000 
3001 			cqr = dasd_eckd_format_build_ccw_req(base, fdata,
3002 							     enable_pav, tpm,
3003 							     fmt_buffer, rpt);
3004 			if (IS_ERR(cqr)) {
3005 				rc = PTR_ERR(cqr);
3006 				if (rc == -ENOMEM) {
3007 					if (list_empty(&format_queue))
3008 						goto out;
3009 					/*
3010 					 * Not enough memory available: start
3011 					 * the requests already queued and
3012 					 * retry once they have finished.
3013 					 */
3014 					retry = 1;
3015 					break;
3016 				}
3017 				goto out_err;
3018 			}
3019 			list_add_tail(&cqr->blocklist, &format_queue);
3020 
3021 			if (fmt_buffer) {
3022 				step = fdata->stop_unit - fdata->start_unit + 1;
3023 				fmt_buffer += rpt * step;
3024 			}
3025 			fdata->start_unit = fdata->stop_unit + 1;
3026 			fdata->stop_unit = old_stop;
3027 		}
3028 
3029 		rc = dasd_sleep_on_queue(&format_queue);
3030 
3031 out_err:
3032 		list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
3033 			device = cqr->startdev;
3034 			private = device->private;
3035 
3036 			if (cqr->status == DASD_CQR_FAILED) {
3037 				/*
3038 				 * Only get sense data if called by format
3039 				 * check
3040 				 */
3041 				if (fmt_buffer && irb) {
3042 					sense = dasd_get_sense(&cqr->irb);
3043 					memcpy(irb, &cqr->irb, sizeof(*irb));
3044 				}
3045 				rc = -EIO;
3046 			}
3047 			list_del_init(&cqr->blocklist);
3048 			dasd_ffree_request(cqr, device);
3049 			private->count--;
3050 		}
3051 
3052 		if (rc && rc != -EIO)
3053 			goto out;
3054 		if (rc == -EIO) {
3055 			/*
3056 			 * In case fewer than the expected records are on the
3057 			 * track, we will most likely get a 'No Record Found'
3058 			 * error (in command mode) or a 'File Protected' error
3059 			 * (in transport mode). Those particular cases shouldn't
3060 			 * pass the -EIO to the IOCTL; therefore reset the rc
3061 			 * and continue.
3062 			 */
3063 			if (sense &&
3064 			    (sense[1] & SNS1_NO_REC_FOUND ||
3065 			     sense[1] & SNS1_FILE_PROTECTED))
3066 				retry = 1;
3067 			else
3068 				goto out;
3069 		}
3070 
3071 	} while (retry);
3072 
3073 out:
3074 	fdata->start_unit = old_start;
3075 	fdata->stop_unit = old_stop;
3076 
3077 	return rc;
3078 }
3079 
3080 static int dasd_eckd_format_device(struct dasd_device *base,
3081 				   struct format_data_t *fdata, int enable_pav)
3082 {
3083 	return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
3084 					     0, NULL);
3085 }
3086 
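/*
 * Test whether a track is already being formatted by another thread and,
 * if not, claim it by adding the entry to the block's format list.
 */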
3087 static bool test_and_set_format_track(struct dasd_format_entry *to_format,
3088 				      struct dasd_block *block)
3089 {
3090 	struct dasd_format_entry *format;
3091 	unsigned long flags;
3092 	bool rc = false;
3093 
3094 	spin_lock_irqsave(&block->format_lock, flags);
3095 	list_for_each_entry(format, &block->format_list, list) {
3096 		if (format->track == to_format->track) {
3097 			rc = true;
3098 			goto out;
3099 		}
3100 	}
3101 	list_add_tail(&to_format->list, &block->format_list);
3102 
3103 out:
3104 	spin_unlock_irqrestore(&block->format_lock, flags);
3105 	return rc;
3106 }
3107 
3108 static void clear_format_track(struct dasd_format_entry *format,
3109 			      struct dasd_block *block)
3110 {
3111 	unsigned long flags;
3112 
3113 	spin_lock_irqsave(&block->format_lock, flags);
3114 	list_del_init(&format->list);
3115 	spin_unlock_irqrestore(&block->format_lock, flags);
3116 }
3117 
3118 /*
3119  * Callback function to free ESE format requests.
3120  */
3121 static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
3122 {
3123 	struct dasd_device *device = cqr->startdev;
3124 	struct dasd_eckd_private *private = device->private;
3125 	struct dasd_format_entry *format = data;
3126 
3127 	clear_format_track(format, cqr->basedev->block);
3128 	private->count--;
3129 	dasd_ffree_request(cqr, device);
3130 }
3131 
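/*
 * Build a format request for the track that raised the error; the track
 * address is taken from the sense data of the failed request.
 */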
3132 static struct dasd_ccw_req *
3133 dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
3134 		     struct irb *irb)
3135 {
3136 	struct dasd_eckd_private *private;
3137 	struct dasd_format_entry *format;
3138 	struct format_data_t fdata;
3139 	unsigned int recs_per_trk;
3140 	struct dasd_ccw_req *fcqr;
3141 	struct dasd_device *base;
3142 	struct dasd_block *block;
3143 	unsigned int blksize;
3144 	struct request *req;
3145 	sector_t first_trk;
3146 	sector_t last_trk;
3147 	sector_t curr_trk;
3148 	int rc;
3149 
3150 	req = cqr->callback_data;
3151 	block = cqr->block;
3152 	base = block->base;
3153 	private = base->private;
3154 	blksize = block->bp_block;
3155 	recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
3156 	format = &startdev->format_entry;
3157 
3158 	first_trk = blk_rq_pos(req) >> block->s2b_shift;
3159 	sector_div(first_trk, recs_per_trk);
3160 	last_trk =
3161 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
3162 	sector_div(last_trk, recs_per_trk);
3163 	rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
3164 	if (rc)
3165 		return ERR_PTR(rc);
3166 
3167 	if (curr_trk < first_trk || curr_trk > last_trk) {
3168 		DBF_DEV_EVENT(DBF_WARNING, startdev,
3169 			      "ESE error track %llu not within range %llu - %llu\n",
3170 			      curr_trk, first_trk, last_trk);
3171 		return ERR_PTR(-EINVAL);
3172 	}
3173 	format->track = curr_trk;
3174 	/* test if the track is already being formatted by another thread */
3175 	if (test_and_set_format_track(format, block))
3176 		return ERR_PTR(-EEXIST);
3177 
3178 	fdata.start_unit = curr_trk;
3179 	fdata.stop_unit = curr_trk;
3180 	fdata.blksize = blksize;
3181 	fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
3182 
3183 	rc = dasd_eckd_format_sanity_checks(base, &fdata);
3184 	if (rc)
3185 		return ERR_PTR(-EINVAL);
3186 
3187 	/*
3188 	 * We're building the request with PAV disabled as we're reusing
3189 	 * the former startdev.
3190 	 */
3191 	fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
3192 	if (IS_ERR(fcqr))
3193 		return fcqr;
3194 
3195 	fcqr->callback = dasd_eckd_ese_format_cb;
3196 	fcqr->callback_data = (void *) format;
3197 
3198 	return fcqr;
3199 }
3200 
3201 /*
3202  * When data is read from an unformatted area of an ESE volume, this function
3203  * returns zeroed data and thereby mimics a read of zero data.
3204  *
3205  * The first unformatted track is the one that got the NRF error; its
3206  * address is encoded in the sense data.
3207  *
3208  * All tracks before have returned valid data and should not be touched.
3209  * All tracks after the unformatted track might be formatted or not. This is
3210  * currently not known; remember the processed data and return the remainder
3211  * of the request to the block layer in __dasd_cleanup_cqr().
3212  */
3213 static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
3214 {
3215 	struct dasd_eckd_private *private;
3216 	sector_t first_trk, last_trk;
3217 	sector_t first_blk, last_blk;
3218 	unsigned int blksize, off;
3219 	unsigned int recs_per_trk;
3220 	struct dasd_device *base;
3221 	struct req_iterator iter;
3222 	struct dasd_block *block;
3223 	unsigned int skip_block;
3224 	unsigned int blk_count;
3225 	struct request *req;
3226 	struct bio_vec bv;
3227 	sector_t curr_trk;
3228 	sector_t end_blk;
3229 	char *dst;
3230 	int rc;
3231 
3232 	req = (struct request *) cqr->callback_data;
3233 	base = cqr->block->base;
3234 	blksize = base->block->bp_block;
3235 	block = cqr->block;
3236 	private = base->private;
3237 	skip_block = 0;
3238 	blk_count = 0;
3239 
3240 	recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
3241 	first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
3242 	sector_div(first_trk, recs_per_trk);
3243 	last_trk = last_blk =
3244 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
3245 	sector_div(last_trk, recs_per_trk);
3246 	rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
3247 	if (rc)
3248 		return rc;
3249 
3250 	/* sanity check if the current track from sense data is valid */
3251 	if (curr_trk < first_trk || curr_trk > last_trk) {
3252 		DBF_DEV_EVENT(DBF_WARNING, base,
3253 			      "ESE error track %llu not within range %llu - %llu\n",
3254 			      curr_trk, first_trk, last_trk);
3255 		return -EINVAL;
3256 	}
3257 
3258 	/*
3259 	 * If a track other than the first one got the NRF error, we have
3260 	 * to skip over the valid blocks that precede it.
3261 	 */
3262 	if (curr_trk != first_trk)
3263 		skip_block = curr_trk * recs_per_trk - first_blk;
3264 
3265 	/* we have no information beyond the current track */
3266 	end_blk = (curr_trk + 1) * recs_per_trk;
3267 
3268 	rq_for_each_segment(bv, req, iter) {
3269 		dst = page_address(bv.bv_page) + bv.bv_offset;
3270 		for (off = 0; off < bv.bv_len; off += blksize) {
3271 			if (first_blk + blk_count >= end_blk) {
3272 				cqr->proc_bytes = blk_count * blksize;
3273 				return 0;
3274 			}
3275 			if (dst && !skip_block) {
3276 				dst += off;
3277 				memset(dst, 0, blksize);
3278 			} else {
3279 				skip_block--;
3280 			}
3281 			blk_count++;
3282 		}
3283 	}
3284 	return 0;
3285 }
3286 
3287 /*
3288  * Helper function to count consecutive records of a single track.
3289  */
3290 static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
3291 				   int max)
3292 {
3293 	int head;
3294 	int i;
3295 
3296 	head = fmt_buffer[start].head;
3297 
3298 	/*
3299 	 * There are 3 conditions where we stop counting:
3300 	 * - if data reoccurs (same head and record may reoccur), which may
3301 	 *   happen due to the way DASD_ECKD_CCW_READ_COUNT works
3302 	 * - when the head changes, because we're iterating over several tracks
3303 	 *   then (DASD_ECKD_CCW_READ_COUNT_MT)
3304 	 * - when we've reached the end of sensible data in the buffer (the
3305 	 *   record will be 0 then)
3306 	 */
3307 	for (i = start; i < max; i++) {
3308 		if (i > start) {
3309 			if ((fmt_buffer[i].head == head &&
3310 			    fmt_buffer[i].record == 1) ||
3311 			    fmt_buffer[i].head != head ||
3312 			    fmt_buffer[i].record == 0)
3313 				break;
3314 		}
3315 	}
3316 
3317 	return i - start;
3318 }
3319 
3320 /*
3321  * Evaluate a given range of tracks. Data like number of records, blocksize,
3322  * record ids, and key length are compared with expected data.
3323  *
3324  * If a mismatch occurs, the corresponding error bit is set, as well as
3325  * additional information, depending on the error.
3326  */
3327 static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
3328 					     struct format_check_t *cdata,
3329 					     int rpt_max, int rpt_exp,
3330 					     int trk_per_cyl, int tpm)
3331 {
3332 	struct ch_t geo;
3333 	int max_entries;
3334 	int count = 0;
3335 	int trkcount;
3336 	int blksize;
3337 	int pos = 0;
3338 	int i, j;
3339 	int kl;
3340 
3341 	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3342 	max_entries = trkcount * rpt_max;
3343 
3344 	for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
3345 		/* Calculate the correct next starting position in the buffer */
3346 		if (tpm) {
3347 			while (fmt_buffer[pos].record == 0 &&
3348 			       fmt_buffer[pos].dl == 0) {
3349 				if (pos++ > max_entries)
3350 					break;
3351 			}
3352 		} else {
3353 			if (i != cdata->expect.start_unit)
3354 				pos += rpt_max - count;
3355 		}
3356 
3357 		/* Calculate the expected geo values for the current track */
3358 		set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);
3359 
3360 		/* Count and check number of records */
3361 		count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);
3362 
3363 		if (count < rpt_exp) {
3364 			cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
3365 			break;
3366 		}
3367 		if (count > rpt_exp) {
3368 			cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
3369 			break;
3370 		}
3371 
3372 		for (j = 0; j < count; j++, pos++) {
3373 			blksize = cdata->expect.blksize;
3374 			kl = 0;
3375 
3376 			/*
3377 			 * Set special values when checking CDL formatted
3378 			 * devices.
3379 			 */
3380 			if ((cdata->expect.intensity & 0x08) &&
3381 			    geo.cyl == 0 && geo.head == 0) {
3382 				if (j < 3) {
3383 					blksize = sizes_trk0[j] - 4;
3384 					kl = 4;
3385 				}
3386 			}
3387 			if ((cdata->expect.intensity & 0x08) &&
3388 			    geo.cyl == 0 && geo.head == 1) {
3389 				blksize = LABEL_SIZE - 44;
3390 				kl = 44;
3391 			}
3392 
3393 			/* Check blocksize */
3394 			if (fmt_buffer[pos].dl != blksize) {
3395 				cdata->result = DASD_FMT_ERR_BLKSIZE;
3396 				goto out;
3397 			}
3398 			/* Check if the key length matches */
3399 			if (fmt_buffer[pos].kl != kl) {
3400 				cdata->result = DASD_FMT_ERR_KEY_LENGTH;
3401 				goto out;
3402 			}
3403 			/* Check if record_id is correct */
3404 			if (fmt_buffer[pos].cyl != geo.cyl ||
3405 			    fmt_buffer[pos].head != geo.head ||
3406 			    fmt_buffer[pos].record != (j + 1)) {
3407 				cdata->result = DASD_FMT_ERR_RECORD_ID;
3408 				goto out;
3409 			}
3410 		}
3411 	}
3412 
3413 out:
3414 	/*
3415 	 * In case of no errors, we need to decrease by one
3416 	 * to get the correct positions.
3417 	 */
3418 	if (!cdata->result) {
3419 		i--;
3420 		pos--;
3421 	}
3422 
3423 	cdata->unit = i;
3424 	cdata->num_records = count;
3425 	cdata->rec = fmt_buffer[pos].record;
3426 	cdata->blksize = fmt_buffer[pos].dl;
3427 	cdata->key_length = fmt_buffer[pos].kl;
3428 }
3429 
3430 /*
3431  * Check the format of a range of tracks of a DASD.
3432  */
3433 static int dasd_eckd_check_device_format(struct dasd_device *base,
3434 					 struct format_check_t *cdata,
3435 					 int enable_pav)
3436 {
3437 	struct dasd_eckd_private *private = base->private;
3438 	struct eckd_count *fmt_buffer;
3439 	struct irb irb;
3440 	int rpt_max, rpt_exp;
3441 	int fmt_buffer_size;
3442 	int trk_per_cyl;
3443 	int trkcount;
3444 	int tpm = 0;
3445 	int rc;
3446 
3447 	trk_per_cyl = private->rdc_data.trk_per_cyl;
3448 
3449 	/* Get the maximum and the expected number of records per track */
3450 	rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
3451 	rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
3452 
3453 	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3454 	fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
3455 
3456 	fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
3457 	if (!fmt_buffer)
3458 		return -ENOMEM;
3459 
3460 	/*
3461 	 * A certain FICON feature subset is needed to operate in transport
3462 	 * mode. Additionally, the support for transport mode is implicitly
3463 	 * checked by comparing the buffer size with fcx_max_data. As long as
3464 	 * the buffer size is smaller we can operate in transport mode and
3465 	 * process multiple tracks. If not, only one track at once is being
3466 	 * processed using command mode.
3467 	 */
3468 	if ((private->features.feature[40] & 0x04) &&
3469 	    fmt_buffer_size <= private->fcx_max_data)
3470 		tpm = 1;
3471 
3472 	rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
3473 					   tpm, fmt_buffer, rpt_max, &irb);
3474 	if (rc && rc != -EIO)
3475 		goto out;
3476 	if (rc == -EIO) {
3477 		/*
3478 		 * If our first attempt with transport mode enabled comes back
3479 		 * with an incorrect length error, we're going to retry the
3480 		 * check with command mode.
3481 		 */
3482 		if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
3483 			tpm = 0;
3484 			rc = dasd_eckd_format_process_data(base, &cdata->expect,
3485 							   enable_pav, tpm,
3486 							   fmt_buffer, rpt_max,
3487 							   &irb);
3488 			if (rc)
3489 				goto out;
3490 		} else {
3491 			goto out;
3492 		}
3493 	}
3494 
3495 	dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
3496 					 trk_per_cyl, tpm);
3497 
3498 out:
3499 	kfree(fmt_buffer);
3500 
3501 	return rc;
3502 }
3503 
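/*
 * Prepare a terminated request for a restart: fail it once the retry
 * counter is exhausted, otherwise refill it and route requests that ran
 * on an alias device back to the base device.
 */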
3504 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
3505 {
3506 	if (cqr->retries < 0) {
3507 		cqr->status = DASD_CQR_FAILED;
3508 		return;
3509 	}
3510 	cqr->status = DASD_CQR_FILLED;
3511 	if (cqr->block && (cqr->startdev != cqr->block->base)) {
3512 		dasd_eckd_reset_ccw_to_base_io(cqr);
3513 		cqr->startdev = cqr->block->base;
3514 		cqr->lpm = dasd_path_get_opm(cqr->block->base);
3515 	}
3516 }
3517 
3518 static dasd_erp_fn_t
3519 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
3520 {
3521 	struct dasd_device *device = (struct dasd_device *) cqr->startdev;
3522 	struct ccw_device *cdev = device->cdev;
3523 
3524 	switch (cdev->id.cu_type) {
3525 	case 0x3990:
3526 	case 0x2105:
3527 	case 0x2107:
3528 	case 0x1750:
3529 		return dasd_3990_erp_action;
3530 	case 0x9343:
3531 	case 0x3880:
3532 	default:
3533 		return dasd_default_erp_action;
3534 	}
3535 }
3536 
3537 static dasd_erp_fn_t
3538 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
3539 {
3540 	return dasd_default_erp_postaction;
3541 }
3542 
3543 static void dasd_eckd_check_for_device_change(struct dasd_device *device,
3544 					      struct dasd_ccw_req *cqr,
3545 					      struct irb *irb)
3546 {
3547 	char mask;
3548 	char *sense = NULL;
3549 	struct dasd_eckd_private *private = device->private;
3550 
3551 	/* first of all check for state change pending interrupt */
3552 	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
3553 	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
3554 		/*
3555 		 * for alias only, not in offline processing
3556 		 * and only if not suspended
3557 		 */
3558 		if (!device->block && private->lcu &&
3559 		    device->state == DASD_STATE_ONLINE &&
3560 		    !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3561 		    !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
3562 			/* schedule worker to reload device */
3563 			dasd_reload_device(device);
3564 		}
3565 		dasd_generic_handle_state_change(device);
3566 		return;
3567 	}
3568 
3569 	sense = dasd_get_sense(irb);
3570 	if (!sense)
3571 		return;
3572 
3573 	/* summary unit check */
3574 	if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
3575 	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
3576 		if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
3577 			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3578 				      "eckd suc: device already notified");
3579 			return;
3580 		}
3581 		sense = dasd_get_sense(irb);
3582 		if (!sense) {
3583 			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3584 				      "eckd suc: no reason code available");
3585 			clear_bit(DASD_FLAG_SUC, &device->flags);
3586 			return;
3587 
3588 		}
3589 		private->suc_reason = sense[8];
3590 		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
3591 			      "eckd handle summary unit check: reason",
3592 			      private->suc_reason);
3593 		dasd_get_device(device);
3594 		if (!schedule_work(&device->suc_work))
3595 			dasd_put_device(device);
3596 
3597 		return;
3598 	}
3599 
3600 	/* service information message SIM */
3601 	if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
3602 	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
3603 		dasd_3990_erp_handle_sim(device, sense);
3604 		return;
3605 	}
3606 
3607 	/* loss of device reservation is handled via base devices only
3608 	 * as alias devices may be used with several bases
3609 	 */
3610 	if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
3611 	    (sense[7] == 0x3F) &&
3612 	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
3613 	    test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
3614 		if (device->features & DASD_FEATURE_FAILONSLCK)
3615 			set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
3616 		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
3617 		dev_err(&device->cdev->dev,
3618 			"The device reservation was lost\n");
3619 	}
3620 }
3621 
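/*
 * Perform sanity checks on a release-space request: the start and stop
 * tracks must lie within the volume and the range must not be inverted.
 */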
3622 static int dasd_eckd_ras_sanity_checks(struct dasd_device *device,
3623 				       unsigned int first_trk,
3624 				       unsigned int last_trk)
3625 {
3626 	struct dasd_eckd_private *private = device->private;
3627 	unsigned int trks_per_vol;
3628 	int rc = 0;
3629 
3630 	trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl;
3631 
3632 	if (first_trk >= trks_per_vol) {
3633 		dev_warn(&device->cdev->dev,
3634 			 "Start track number %u used in the space release command is too big\n",
3635 			 first_trk);
3636 		rc = -EINVAL;
3637 	} else if (last_trk >= trks_per_vol) {
3638 		dev_warn(&device->cdev->dev,
3639 			 "Stop track number %u used in the space release command is too big\n",
3640 			 last_trk);
3641 		rc = -EINVAL;
3642 	} else if (first_trk > last_trk) {
3643 		dev_warn(&device->cdev->dev,
3644 			 "Start track %u used in the space release command exceeds the end track\n",
3645 			 first_trk);
3646 		rc = -EINVAL;
3647 	}
3648 	return rc;
3649 }
3650 
3651 /*
3652  * Helper function to count the number of extents involved in a given
3653  * range, taking extent alignment into account.
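 * For example, with trks_per_ext = 16, from = 10 and to = 40 the range
 * covers three extents: the partial extent 10-15, the full extent 16-31
 * and the partial extent 32-40.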
3654  */
3655 static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
3656 {
3657 	int cur_pos = 0;
3658 	int count = 0;
3659 	int tmp;
3660 
3661 	if (from == to)
3662 		return 1;
3663 
3664 	/* Count first partial extent */
3665 	if (from % trks_per_ext != 0) {
3666 		tmp = from + trks_per_ext - (from % trks_per_ext) - 1;
3667 		if (tmp > to)
3668 			tmp = to;
3669 		cur_pos = tmp - from + 1;
3670 		count++;
3671 	}
3672 	/* Count full extents */
3673 	if (to - (from + cur_pos) + 1 >= trks_per_ext) {
3674 		tmp = to - ((to - trks_per_ext + 1) % trks_per_ext);
3675 		count += (tmp - (from + cur_pos) + 1) / trks_per_ext;
3676 		cur_pos = tmp;
3677 	}
3678 	/* Count last partial extent */
3679 	if (cur_pos < to)
3680 		count++;
3681 
3682 	return count;
3683 }
3684 
3685 /*
3686  * Release allocated space for a given range or an entire volume.
3687  */
3688 static struct dasd_ccw_req *
3689 dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
3690 		  struct request *req, unsigned int first_trk,
3691 		  unsigned int last_trk, int by_extent)
3692 {
3693 	struct dasd_eckd_private *private = device->private;
3694 	struct dasd_dso_ras_ext_range *ras_range;
3695 	struct dasd_rssd_features *features;
3696 	struct dasd_dso_ras_data *ras_data;
3697 	u16 heads, beg_head, end_head;
3698 	int cur_to_trk, cur_from_trk;
3699 	struct dasd_ccw_req *cqr;
3700 	u32 beg_cyl, end_cyl;
3701 	struct ccw1 *ccw;
3702 	int trks_per_ext;
3703 	size_t ras_size;
3704 	size_t size;
3705 	int nr_exts;
3706 	void *rq;
3707 	int i;
3708 
3709 	if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
3710 		return ERR_PTR(-EINVAL);
3711 
3712 	rq = req ? blk_mq_rq_to_pdu(req) : NULL;
3713 
3714 	features = &private->features;
3715 
3716 	trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
3717 	nr_exts = 0;
3718 	if (by_extent)
3719 		nr_exts = count_exts(first_trk, last_trk, trks_per_ext);
3720 	ras_size = sizeof(*ras_data);
3721 	size = ras_size + (nr_exts * sizeof(*ras_range));
3722 
3723 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
3724 	if (IS_ERR(cqr)) {
3725 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
3726 				"Could not allocate RAS request");
3727 		return cqr;
3728 	}
3729 
3730 	ras_data = cqr->data;
3731 	memset(ras_data, 0, size);
3732 
3733 	ras_data->order = DSO_ORDER_RAS;
3734 	ras_data->flags.vol_type = 0; /* CKD volume */
3735 	/* Release specified extents or entire volume */
3736 	ras_data->op_flags.by_extent = by_extent;
3737 	/*
3738 	 * This bit guarantees initialisation of tracks within an extent that is
3739 	 * not fully specified, but is only supported with a certain feature
3740 	 * subset.
3741 	 */
3742 	ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01);
3743 	ras_data->lss = private->ned->ID;
3744 	ras_data->dev_addr = private->ned->unit_addr;
3745 	ras_data->nr_exts = nr_exts;
3746 
3747 	if (by_extent) {
3748 		heads = private->rdc_data.trk_per_cyl;
3749 		cur_from_trk = first_trk;
3750 		cur_to_trk = first_trk + trks_per_ext -
3751 			(first_trk % trks_per_ext) - 1;
3752 		if (cur_to_trk > last_trk)
3753 			cur_to_trk = last_trk;
3754 		ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);
3755 
3756 		for (i = 0; i < nr_exts; i++) {
3757 			beg_cyl = cur_from_trk / heads;
3758 			beg_head = cur_from_trk % heads;
3759 			end_cyl = cur_to_trk / heads;
3760 			end_head = cur_to_trk % heads;
3761 
3762 			set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head);
3763 			set_ch_t(&ras_range->end_ext, end_cyl, end_head);
3764 
3765 			cur_from_trk = cur_to_trk + 1;
3766 			cur_to_trk = cur_from_trk + trks_per_ext - 1;
3767 			if (cur_to_trk > last_trk)
3768 				cur_to_trk = last_trk;
3769 			ras_range++;
3770 		}
3771 	}
3772 
3773 	ccw = cqr->cpaddr;
3774 	ccw->cda = (__u32)(addr_t)cqr->data;
3775 	ccw->cmd_code = DASD_ECKD_CCW_DSO;
3776 	ccw->count = size;
3777 
3778 	cqr->startdev = device;
3779 	cqr->memdev = device;
3780 	cqr->block = block;
3781 	cqr->retries = 256;
3782 	cqr->expires = device->default_expires * HZ;
3783 	cqr->buildclk = get_tod_clock();
3784 	cqr->status = DASD_CQR_FILLED;
3785 
3786 	return cqr;
3787 }
3788 
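/*
 * Release all allocated space on the volume with a single RAS request
 * and wait for it to complete.
 */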
3789 static int dasd_eckd_release_space_full(struct dasd_device *device)
3790 {
3791 	struct dasd_ccw_req *cqr;
3792 	int rc;
3793 
3794 	cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
3795 	if (IS_ERR(cqr))
3796 		return PTR_ERR(cqr);
3797 
3798 	rc = dasd_sleep_on_interruptible(cqr);
3799 
3800 	dasd_sfree_request(cqr, cqr->memdev);
3801 
3802 	return rc;
3803 }
3804 
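/*
 * Release the space for a given track range. The range is split into
 * chunks that respect the device limit of DASD_ECKD_RAS_EXTS_MAX extents
 * per request; if memory runs out while building requests, the chunks
 * queued so far are processed before the remainder is retried.
 */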
3805 static int dasd_eckd_release_space_trks(struct dasd_device *device,
3806 					unsigned int from, unsigned int to)
3807 {
3808 	struct dasd_eckd_private *private = device->private;
3809 	struct dasd_block *block = device->block;
3810 	struct dasd_ccw_req *cqr, *n;
3811 	struct list_head ras_queue;
3812 	unsigned int device_exts;
3813 	int trks_per_ext;
3814 	int stop, step;
3815 	int cur_pos;
3816 	int rc = 0;
3817 	int retry;
3818 
3819 	INIT_LIST_HEAD(&ras_queue);
3820 
3821 	device_exts = private->real_cyl / dasd_eckd_ext_size(device);
3822 	trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
3823 
3824 	/* Make sure device limits are not exceeded */
3825 	step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX);
3826 	cur_pos = from;
3827 
3828 	do {
3829 		retry = 0;
3830 		while (cur_pos < to) {
3831 			stop = cur_pos + step -
3832 				((cur_pos + step) % trks_per_ext) - 1;
3833 			if (stop > to)
3834 				stop = to;
3835 
3836 			cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
3837 			if (IS_ERR(cqr)) {
3838 				rc = PTR_ERR(cqr);
3839 				if (rc == -ENOMEM) {
3840 					if (list_empty(&ras_queue))
3841 						goto out;
3842 					retry = 1;
3843 					break;
3844 				}
3845 				goto err_out;
3846 			}
3847 
3848 			spin_lock_irq(&block->queue_lock);
3849 			list_add_tail(&cqr->blocklist, &ras_queue);
3850 			spin_unlock_irq(&block->queue_lock);
3851 			cur_pos = stop + 1;
3852 		}
3853 
3854 		rc = dasd_sleep_on_queue_interruptible(&ras_queue);
3855 
3856 err_out:
3857 		list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
3858 			device = cqr->startdev;
3859 			private = device->private;
3860 
3861 			spin_lock_irq(&block->queue_lock);
3862 			list_del_init(&cqr->blocklist);
3863 			spin_unlock_irq(&block->queue_lock);
3864 			dasd_sfree_request(cqr, device);
3865 			private->count--;
3866 		}
3867 	} while (retry);
3868 
3869 out:
3870 	return rc;
3871 }
3872 
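/*
 * Release allocated space, either for the entire volume or for the track
 * range described by the format data.
 */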
3873 static int dasd_eckd_release_space(struct dasd_device *device,
3874 				   struct format_data_t *rdata)
3875 {
3876 	if (rdata->intensity & DASD_FMT_INT_ESE_FULL)
3877 		return dasd_eckd_release_space_full(device);
3878 	else if (rdata->intensity == 0)
3879 		return dasd_eckd_release_space_trks(device, rdata->start_unit,
3880 						    rdata->stop_unit);
3881 	else
3882 		return -EINVAL;
3883 }
3884 
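/*
 * Build a command mode channel program that transfers each block of the
 * request with its own read/write CCW. The program starts with a define
 * extent (or prefix) CCW followed by one or more locate record CCWs;
 * CDL special blocks get an extra locate record each.
 */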
3885 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
3886 					       struct dasd_device *startdev,
3887 					       struct dasd_block *block,
3888 					       struct request *req,
3889 					       sector_t first_rec,
3890 					       sector_t last_rec,
3891 					       sector_t first_trk,
3892 					       sector_t last_trk,
3893 					       unsigned int first_offs,
3894 					       unsigned int last_offs,
3895 					       unsigned int blk_per_trk,
3896 					       unsigned int blksize)
3897 {
3898 	struct dasd_eckd_private *private;
3899 	unsigned long *idaws;
3900 	struct LO_eckd_data *LO_data;
3901 	struct dasd_ccw_req *cqr;
3902 	struct ccw1 *ccw;
3903 	struct req_iterator iter;
3904 	struct bio_vec bv;
3905 	char *dst;
3906 	unsigned int off;
3907 	int count, cidaw, cplength, datasize;
3908 	sector_t recid;
3909 	unsigned char cmd, rcmd;
3910 	int use_prefix;
3911 	struct dasd_device *basedev;
3912 
3913 	basedev = block->base;
3914 	private = basedev->private;
3915 	if (rq_data_dir(req) == READ)
3916 		cmd = DASD_ECKD_CCW_READ_MT;
3917 	else if (rq_data_dir(req) == WRITE)
3918 		cmd = DASD_ECKD_CCW_WRITE_MT;
3919 	else
3920 		return ERR_PTR(-EINVAL);
3921 
3922 	/* Check struct bio and count the number of blocks for the request. */
3923 	count = 0;
3924 	cidaw = 0;
3925 	rq_for_each_segment(bv, req, iter) {
3926 		if (bv.bv_len & (blksize - 1))
3927 			/* Eckd can only do full blocks. */
3928 			return ERR_PTR(-EINVAL);
3929 		count += bv.bv_len >> (block->s2b_shift + 9);
3930 		if (idal_is_needed(page_address(bv.bv_page), bv.bv_len))
3931 			cidaw += bv.bv_len >> (block->s2b_shift + 9);
3932 	}
3933 	/* Paranoia. */
3934 	if (count != last_rec - first_rec + 1)
3935 		return ERR_PTR(-EINVAL);
3936 
3937 	/* use the prefix command if available */
3938 	use_prefix = private->features.feature[8] & 0x01;
3939 	if (use_prefix) {
3940 		/* 1x prefix + number of blocks */
3941 		cplength = 2 + count;
3942 		/* 1x prefix + cidaws*sizeof(long) */
3943 		datasize = sizeof(struct PFX_eckd_data) +
3944 			sizeof(struct LO_eckd_data) +
3945 			cidaw * sizeof(unsigned long);
3946 	} else {
3947 		/* 1x define extent + 1x locate record + number of blocks */
3948 		cplength = 2 + count;
3949 		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
3950 		datasize = sizeof(struct DE_eckd_data) +
3951 			sizeof(struct LO_eckd_data) +
3952 			cidaw * sizeof(unsigned long);
3953 	}
3954 	/* Find out the number of additional locate record ccws for cdl. */
3955 	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
3956 		if (last_rec >= 2*blk_per_trk)
3957 			count = 2*blk_per_trk - first_rec;
3958 		cplength += count;
3959 		datasize += count*sizeof(struct LO_eckd_data);
3960 	}
3961 	/* Allocate the ccw request. */
3962 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
3963 				   startdev, blk_mq_rq_to_pdu(req));
3964 	if (IS_ERR(cqr))
3965 		return cqr;
3966 	ccw = cqr->cpaddr;
3967 	/* First ccw is define extent or prefix. */
3968 	if (use_prefix) {
3969 		if (prefix(ccw++, cqr->data, first_trk,
3970 			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
3971 			/* Clock not in sync and XRC is enabled.
3972 			 * Try again later.
3973 			 */
3974 			dasd_sfree_request(cqr, startdev);
3975 			return ERR_PTR(-EAGAIN);
3976 		}
3977 		idaws = (unsigned long *) (cqr->data +
3978 					   sizeof(struct PFX_eckd_data));
3979 	} else {
3980 		if (define_extent(ccw++, cqr->data, first_trk,
3981 				  last_trk, cmd, basedev, 0) == -EAGAIN) {
3982 			/* Clock not in sync and XRC is enabled.
3983 			 * Try again later.
3984 			 */
3985 			dasd_sfree_request(cqr, startdev);
3986 			return ERR_PTR(-EAGAIN);
3987 		}
3988 		idaws = (unsigned long *) (cqr->data +
3989 					   sizeof(struct DE_eckd_data));
3990 	}
3991 	/* Build locate_record + read/write ccws. */
3992 	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
3993 	recid = first_rec;
3994 	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
3995 		/* Only standard blocks so there is just one locate record. */
3996 		ccw[-1].flags |= CCW_FLAG_CC;
3997 		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
3998 			      last_rec - recid + 1, cmd, basedev, blksize);
3999 	}
4000 	rq_for_each_segment(bv, req, iter) {
4001 		dst = page_address(bv.bv_page) + bv.bv_offset;
4002 		if (dasd_page_cache) {
4003 			char *copy = kmem_cache_alloc(dasd_page_cache,
4004 						      GFP_DMA | __GFP_NOWARN);
4005 			if (copy && rq_data_dir(req) == WRITE)
4006 				memcpy(copy + bv.bv_offset, dst, bv.bv_len);
4007 			if (copy)
4008 				dst = copy + bv.bv_offset;
4009 		}
4010 		for (off = 0; off < bv.bv_len; off += blksize) {
4011 			sector_t trkid = recid;
4012 			unsigned int recoffs = sector_div(trkid, blk_per_trk);
4013 			rcmd = cmd;
4014 			count = blksize;
4015 			/* Locate record for cdl special block? */
4016 			if (private->uses_cdl && recid < 2*blk_per_trk) {
4017 				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
4018 					rcmd |= 0x8;
4019 					count = dasd_eckd_cdl_reclen(recid);
4020 					if (count < blksize &&
4021 					    rq_data_dir(req) == READ)
4022 						memset(dst + count, 0xe5,
4023 						       blksize - count);
4024 				}
4025 				ccw[-1].flags |= CCW_FLAG_CC;
4026 				locate_record(ccw++, LO_data++,
4027 					      trkid, recoffs + 1,
4028 					      1, rcmd, basedev, count);
4029 			}
4030 			/* Locate record for standard blocks? */
4031 			if (private->uses_cdl && recid == 2*blk_per_trk) {
4032 				ccw[-1].flags |= CCW_FLAG_CC;
4033 				locate_record(ccw++, LO_data++,
4034 					      trkid, recoffs + 1,
4035 					      last_rec - recid + 1,
4036 					      cmd, basedev, count);
4037 			}
4038 			/* Read/write ccw. */
4039 			ccw[-1].flags |= CCW_FLAG_CC;
4040 			ccw->cmd_code = rcmd;
4041 			ccw->count = count;
4042 			if (idal_is_needed(dst, blksize)) {
4043 				ccw->cda = (__u32)(addr_t) idaws;
4044 				ccw->flags = CCW_FLAG_IDA;
4045 				idaws = idal_create_words(idaws, dst, blksize);
4046 			} else {
4047 				ccw->cda = (__u32)(addr_t) dst;
4048 				ccw->flags = 0;
4049 			}
4050 			ccw++;
4051 			dst += blksize;
4052 			recid++;
4053 		}
4054 	}
4055 	if (blk_noretry_request(req) ||
4056 	    block->base->features & DASD_FEATURE_FAILFAST)
4057 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4058 	cqr->startdev = startdev;
4059 	cqr->memdev = startdev;
4060 	cqr->block = block;
4061 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
4062 	cqr->lpm = dasd_path_get_ppm(startdev);
4063 	cqr->retries = startdev->default_retries;
4064 	cqr->buildclk = get_tod_clock();
4065 	cqr->status = DASD_CQR_FILLED;
4066 
4067 	/* Set flags to suppress output for expected errors */
4068 	if (dasd_eckd_is_ese(basedev)) {
4069 		set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
4070 		set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
4071 		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4072 	}
4073 
4074 	return cqr;
4075 }
4076 
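/*
 * Build a command mode channel program that uses the read/write track
 * data commands to transfer whole tracks with a single CCW each. The
 * data of each CCW is addressed through an IDA list that must end at
 * the track boundary.
 */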
4077 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
4078 					       struct dasd_device *startdev,
4079 					       struct dasd_block *block,
4080 					       struct request *req,
4081 					       sector_t first_rec,
4082 					       sector_t last_rec,
4083 					       sector_t first_trk,
4084 					       sector_t last_trk,
4085 					       unsigned int first_offs,
4086 					       unsigned int last_offs,
4087 					       unsigned int blk_per_trk,
4088 					       unsigned int blksize)
4089 {
4090 	unsigned long *idaws;
4091 	struct dasd_ccw_req *cqr;
4092 	struct ccw1 *ccw;
4093 	struct req_iterator iter;
4094 	struct bio_vec bv;
4095 	char *dst, *idaw_dst;
4096 	unsigned int cidaw, cplength, datasize;
4097 	unsigned int tlf;
4098 	sector_t recid;
4099 	unsigned char cmd;
4100 	struct dasd_device *basedev;
4101 	unsigned int trkcount, count, count_to_trk_end;
4102 	unsigned int idaw_len, seg_len, part_len, len_to_track_end;
4103 	unsigned char new_track, end_idaw;
4104 	sector_t trkid;
4105 	unsigned int recoffs;
4106 
4107 	basedev = block->base;
4108 	if (rq_data_dir(req) == READ)
4109 		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
4110 	else if (rq_data_dir(req) == WRITE)
4111 		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
4112 	else
4113 		return ERR_PTR(-EINVAL);
4114 
4115 	/* Track based I/O needs IDAWs for each page, and not just for
4116 	 * 64 bit addresses. We need additional idals for pages
4117 	 * that get filled from two tracks, so we use the number
4118 	 * of records as upper limit.
4119 	 */
4120 	cidaw = last_rec - first_rec + 1;
4121 	trkcount = last_trk - first_trk + 1;
4122 
4123 	/* 1x prefix + one read/write ccw per track */
4124 	cplength = 1 + trkcount;
4125 
4126 	datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);
4127 
4128 	/* Allocate the ccw request. */
4129 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
4130 				   startdev, blk_mq_rq_to_pdu(req));
4131 	if (IS_ERR(cqr))
4132 		return cqr;
4133 	ccw = cqr->cpaddr;
4134 	/* transfer length factor: how many bytes to read from the last track */
4135 	if (first_trk == last_trk)
4136 		tlf = last_offs - first_offs + 1;
4137 	else
4138 		tlf = last_offs + 1;
4139 	tlf *= blksize;
4140 
4141 	if (prefix_LRE(ccw++, cqr->data, first_trk,
4142 		       last_trk, cmd, basedev, startdev,
4143 		       1 /* format */, first_offs + 1,
4144 		       trkcount, blksize,
4145 		       tlf) == -EAGAIN) {
4146 		/* Clock not in sync and XRC is enabled.
4147 		 * Try again later.
4148 		 */
4149 		dasd_sfree_request(cqr, startdev);
4150 		return ERR_PTR(-EAGAIN);
4151 	}
4152 
4153 	/*
4154 	 * The translation of request into ccw programs must meet the
4155 	 * following conditions:
4156 	 * - all idaws but the first and the last must address full pages
4157 	 *   (or 2K blocks on 31-bit)
4158 	 * - the scope of a ccw and its idal ends at the track boundaries
4159 	 */
4160 	idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
4161 	recid = first_rec;
4162 	new_track = 1;
4163 	end_idaw = 0;
4164 	len_to_track_end = 0;
4165 	idaw_dst = NULL;
4166 	idaw_len = 0;
4167 	rq_for_each_segment(bv, req, iter) {
4168 		dst = page_address(bv.bv_page) + bv.bv_offset;
4169 		seg_len = bv.bv_len;
4170 		while (seg_len) {
4171 			if (new_track) {
4172 				trkid = recid;
4173 				recoffs = sector_div(trkid, blk_per_trk);
4174 				count_to_trk_end = blk_per_trk - recoffs;
4175 				count = min((last_rec - recid + 1),
4176 					    (sector_t)count_to_trk_end);
4177 				len_to_track_end = count * blksize;
4178 				ccw[-1].flags |= CCW_FLAG_CC;
4179 				ccw->cmd_code = cmd;
4180 				ccw->count = len_to_track_end;
4181 				ccw->cda = (__u32)(addr_t)idaws;
4182 				ccw->flags = CCW_FLAG_IDA;
4183 				ccw++;
4184 				recid += count;
4185 				new_track = 0;
4186 				/* first idaw for a ccw may start anywhere */
4187 				if (!idaw_dst)
4188 					idaw_dst = dst;
4189 			}
4190 			/* If we start a new idaw, we must make sure that it
4191 			 * starts on an IDA_BLOCK_SIZE boundary.
4192 			 * If we continue an idaw, we must make sure that the
4193 			 * current segment begins where the idaw accumulated
4194 			 * so far ends.
4195 			 */
4196 			if (!idaw_dst) {
4197 				if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
4198 					dasd_sfree_request(cqr, startdev);
4199 					return ERR_PTR(-ERANGE);
4200 				} else
4201 					idaw_dst = dst;
4202 			}
4203 			if ((idaw_dst + idaw_len) != dst) {
4204 				dasd_sfree_request(cqr, startdev);
4205 				return ERR_PTR(-ERANGE);
4206 			}
4207 			part_len = min(seg_len, len_to_track_end);
4208 			seg_len -= part_len;
4209 			dst += part_len;
4210 			idaw_len += part_len;
4211 			len_to_track_end -= part_len;
4212 			/* collected memory area ends on an IDA_BLOCK border,
4213 			 * -> create an idaw
4214 			 * idal_create_words will handle cases where idaw_len
4215 			 * is larger than IDA_BLOCK_SIZE
4216 			 */
4217 			if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
4218 				end_idaw = 1;
4219 			/* We also need to end the idaw at track end */
4220 			if (!len_to_track_end) {
4221 				new_track = 1;
4222 				end_idaw = 1;
4223 			}
4224 			if (end_idaw) {
4225 				idaws = idal_create_words(idaws, idaw_dst,
4226 							  idaw_len);
4227 				idaw_dst = NULL;
4228 				idaw_len = 0;
4229 				end_idaw = 0;
4230 			}
4231 		}
4232 	}
4233 
4234 	if (blk_noretry_request(req) ||
4235 	    block->base->features & DASD_FEATURE_FAILFAST)
4236 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4237 	cqr->startdev = startdev;
4238 	cqr->memdev = startdev;
4239 	cqr->block = block;
4240 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
4241 	cqr->lpm = dasd_path_get_ppm(startdev);
4242 	cqr->retries = startdev->default_retries;
4243 	cqr->buildclk = get_tod_clock();
4244 	cqr->status = DASD_CQR_FILLED;
4245 
4246 	/* Set flags to suppress output for expected errors */
4247 	if (dasd_eckd_is_ese(basedev))
4248 		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4249 
4250 	return cqr;
4251 }
4252 
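/*
 * Set up the combined define extent / locate record prefix data for a
 * transport mode request and add it as the first DCW to the ITCW.
 */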
4253 static int prepare_itcw(struct itcw *itcw,
4254 			unsigned int trk, unsigned int totrk, int cmd,
4255 			struct dasd_device *basedev,
4256 			struct dasd_device *startdev,
4257 			unsigned int rec_on_trk, int count,
4258 			unsigned int blksize,
4259 			unsigned int total_data_size,
4260 			unsigned int tlf,
4261 			unsigned int blk_per_trk)
4262 {
4263 	struct PFX_eckd_data pfxdata;
4264 	struct dasd_eckd_private *basepriv, *startpriv;
4265 	struct DE_eckd_data *dedata;
4266 	struct LRE_eckd_data *lredata;
4267 	struct dcw *dcw;
4268 
4269 	u32 begcyl, endcyl;
4270 	u16 heads, beghead, endhead;
4271 	u8 pfx_cmd;
4272 
4273 	int rc = 0;
4274 	int sector = 0;
4275 	int dn, d;
4276 
4277 
4278 	/* setup prefix data */
4279 	basepriv = basedev->private;
4280 	startpriv = startdev->private;
4281 	dedata = &pfxdata.define_extent;
4282 	lredata = &pfxdata.locate_record;
4283 
4284 	memset(&pfxdata, 0, sizeof(pfxdata));
4285 	pfxdata.format = 1; /* PFX with LRE */
4286 	pfxdata.base_address = basepriv->ned->unit_addr;
4287 	pfxdata.base_lss = basepriv->ned->ID;
4288 	pfxdata.validity.define_extent = 1;
4289 
4290 	/* private uid is kept up to date, conf_data may be outdated */
4291 	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
4292 		pfxdata.validity.verify_base = 1;
4293 
4294 	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
4295 		pfxdata.validity.verify_base = 1;
4296 		pfxdata.validity.hyper_pav = 1;
4297 	}
4298 
4299 	switch (cmd) {
4300 	case DASD_ECKD_CCW_READ_TRACK_DATA:
4301 		dedata->mask.perm = 0x1;
4302 		dedata->attributes.operation = basepriv->attrib.operation;
4303 		dedata->blk_size = blksize;
4304 		dedata->ga_extended |= 0x42;
4305 		lredata->operation.orientation = 0x0;
4306 		lredata->operation.operation = 0x0C;
4307 		lredata->auxiliary.check_bytes = 0x01;
4308 		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
4309 		break;
4310 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
4311 		dedata->mask.perm = 0x02;
4312 		dedata->attributes.operation = basepriv->attrib.operation;
4313 		dedata->blk_size = blksize;
4314 		rc = set_timestamp(NULL, dedata, basedev);
4315 		dedata->ga_extended |= 0x42;
4316 		lredata->operation.orientation = 0x0;
4317 		lredata->operation.operation = 0x3F;
4318 		lredata->extended_operation = 0x23;
4319 		lredata->auxiliary.check_bytes = 0x2;
4320 		/*
4321 		 * If XRC is supported the System Time Stamp is set. The
4322 		 * validity of the time stamp must be reflected in the prefix
4323 		 * data as well.
4324 		 */
4325 		if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
4326 			pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
4327 		pfx_cmd = DASD_ECKD_CCW_PFX;
4328 		break;
4329 	case DASD_ECKD_CCW_READ_COUNT_MT:
4330 		dedata->mask.perm = 0x1;
4331 		dedata->attributes.operation = DASD_BYPASS_CACHE;
4332 		dedata->ga_extended |= 0x42;
4333 		dedata->blk_size = blksize;
4334 		lredata->operation.orientation = 0x2;
4335 		lredata->operation.operation = 0x16;
4336 		lredata->auxiliary.check_bytes = 0x01;
4337 		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
4338 		break;
4339 	default:
4340 		DBF_DEV_EVENT(DBF_ERR, basedev,
4341 			      "prepare itcw, unknown opcode 0x%x", cmd);
4342 		BUG();
4343 		break;
4344 	}
4345 	if (rc)
4346 		return rc;
4347 
4348 	dedata->attributes.mode = 0x3;	/* ECKD */
4349 
4350 	heads = basepriv->rdc_data.trk_per_cyl;
4351 	begcyl = trk / heads;
4352 	beghead = trk % heads;
4353 	endcyl = totrk / heads;
4354 	endhead = totrk % heads;
4355 
4356 	/* check for sequential prestage - enhance cylinder range */
4357 	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
4358 	    dedata->attributes.operation == DASD_SEQ_ACCESS) {
4359 
4360 		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
4361 			endcyl += basepriv->attrib.nr_cyl;
4362 		else
4363 			endcyl = (basepriv->real_cyl - 1);
4364 	}
4365 
4366 	set_ch_t(&dedata->beg_ext, begcyl, beghead);
4367 	set_ch_t(&dedata->end_ext, endcyl, endhead);
4368 
4369 	dedata->ep_format = 0x20; /* records per track is valid */
4370 	dedata->ep_rec_per_track = blk_per_trk;
4371 
4372 	if (rec_on_trk) {
4373 		switch (basepriv->rdc_data.dev_type) {
4374 		case 0x3390:
4375 			dn = ceil_quot(blksize + 6, 232);
4376 			d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
4377 			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
4378 			break;
4379 		case 0x3380:
4380 			d = 7 + ceil_quot(blksize + 12, 32);
4381 			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
4382 			break;
4383 		}
4384 	}
4385 
4386 	if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
4387 		lredata->auxiliary.length_valid = 0;
4388 		lredata->auxiliary.length_scope = 0;
4389 		lredata->sector = 0xff;
4390 	} else {
4391 		lredata->auxiliary.length_valid = 1;
4392 		lredata->auxiliary.length_scope = 1;
4393 		lredata->sector = sector;
4394 	}
4395 	lredata->auxiliary.imbedded_ccw_valid = 1;
4396 	lredata->length = tlf;
4397 	lredata->imbedded_ccw = cmd;
4398 	lredata->count = count;
4399 	set_ch_t(&lredata->seek_addr, begcyl, beghead);
4400 	lredata->search_arg.cyl = lredata->seek_addr.cyl;
4401 	lredata->search_arg.head = lredata->seek_addr.head;
4402 	lredata->search_arg.record = rec_on_trk;
4403 
4404 	dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
4405 		     &pfxdata, sizeof(pfxdata), total_data_size);
4406 	return PTR_ERR_OR_ZERO(dcw);
4407 }
4408 
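/*
 * Build a transport mode (TCW/TIDAW) channel program. Each request
 * segment maps to one TIDAW; for writes, a TIDAW must not cross a track
 * boundary because the CBC flag has to be set on the last TIDAW of each
 * track.
 */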
4409 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
4410 					       struct dasd_device *startdev,
4411 					       struct dasd_block *block,
4412 					       struct request *req,
4413 					       sector_t first_rec,
4414 					       sector_t last_rec,
4415 					       sector_t first_trk,
4416 					       sector_t last_trk,
4417 					       unsigned int first_offs,
4418 					       unsigned int last_offs,
4419 					       unsigned int blk_per_trk,
4420 					       unsigned int blksize)
4421 {
4422 	struct dasd_ccw_req *cqr;
4423 	struct req_iterator iter;
4424 	struct bio_vec bv;
4425 	char *dst;
4426 	unsigned int trkcount, ctidaw;
4427 	unsigned char cmd;
4428 	struct dasd_device *basedev;
4429 	unsigned int tlf;
4430 	struct itcw *itcw;
4431 	struct tidaw *last_tidaw = NULL;
4432 	int itcw_op;
4433 	size_t itcw_size;
4434 	u8 tidaw_flags;
4435 	unsigned int seg_len, part_len, len_to_track_end;
4436 	unsigned char new_track;
4437 	sector_t recid, trkid;
4438 	unsigned int offs;
4439 	unsigned int count, count_to_trk_end;
4440 	int ret;
4441 
4442 	basedev = block->base;
4443 	if (rq_data_dir(req) == READ) {
4444 		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
4445 		itcw_op = ITCW_OP_READ;
4446 	} else if (rq_data_dir(req) == WRITE) {
4447 		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
4448 		itcw_op = ITCW_OP_WRITE;
4449 	} else
4450 		return ERR_PTR(-EINVAL);
4451 
4452 	/* Track based I/O needs to address all memory via TIDAWs,
4453 	 * not just for 64 bit addresses. This allows us to map
4454 	 * each segment directly to one tidaw.
4455 	 * In the case of write requests, additional tidaws may
4456 	 * be needed when a segment crosses a track boundary.
4457 	 */
4458 	trkcount = last_trk - first_trk + 1;
4459 	ctidaw = 0;
4460 	rq_for_each_segment(bv, req, iter) {
4461 		++ctidaw;
4462 	}
4463 	if (rq_data_dir(req) == WRITE)
4464 		ctidaw += (last_trk - first_trk);
4465 
4466 	/* Allocate the ccw request. */
4467 	itcw_size = itcw_calc_size(0, ctidaw, 0);
4468 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
4469 				   blk_mq_rq_to_pdu(req));
4470 	if (IS_ERR(cqr))
4471 		return cqr;
4472 
4473 	/* transfer length factor: how many bytes to read from the last track */
4474 	if (first_trk == last_trk)
4475 		tlf = last_offs - first_offs + 1;
4476 	else
4477 		tlf = last_offs + 1;
4478 	tlf *= blksize;
4479 
4480 	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
4481 	if (IS_ERR(itcw)) {
4482 		ret = -EINVAL;
4483 		goto out_error;
4484 	}
4485 	cqr->cpaddr = itcw_get_tcw(itcw);
4486 	if (prepare_itcw(itcw, first_trk, last_trk,
4487 			 cmd, basedev, startdev,
4488 			 first_offs + 1,
4489 			 trkcount, blksize,
4490 			 (last_rec - first_rec + 1) * blksize,
4491 			 tlf, blk_per_trk) == -EAGAIN) {
4492 		/* Clock not in sync and XRC is enabled.
4493 		 * Try again later.
4494 		 */
4495 		ret = -EAGAIN;
4496 		goto out_error;
4497 	}
4498 	len_to_track_end = 0;
4499 	/*
4500 	 * A tidaw can address 4k of memory, but must not cross page boundaries.
4501 	 * We can let the block layer handle this by setting
4502 	 * blk_queue_segment_boundary to page boundaries and
4503 	 * blk_max_segment_size to page size when setting up the request queue.
4504 	 * For write requests, a TIDAW must not cross track boundaries, because
4505 	 * we have to set the CBC flag on the last tidaw for each track.
4506 	 */
4507 	if (rq_data_dir(req) == WRITE) {
4508 		new_track = 1;
4509 		recid = first_rec;
4510 		rq_for_each_segment(bv, req, iter) {
4511 			dst = page_address(bv.bv_page) + bv.bv_offset;
4512 			seg_len = bv.bv_len;
4513 			while (seg_len) {
4514 				if (new_track) {
4515 					trkid = recid;
4516 					offs = sector_div(trkid, blk_per_trk);
4517 					count_to_trk_end = blk_per_trk - offs;
4518 					count = min((last_rec - recid + 1),
4519 						    (sector_t)count_to_trk_end);
4520 					len_to_track_end = count * blksize;
4521 					recid += count;
4522 					new_track = 0;
4523 				}
4524 				part_len = min(seg_len, len_to_track_end);
4525 				seg_len -= part_len;
4526 				len_to_track_end -= part_len;
4527 				/* We need to end the tidaw at track end */
4528 				if (!len_to_track_end) {
4529 					new_track = 1;
4530 					tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
4531 				} else
4532 					tidaw_flags = 0;
4533 				last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
4534 							    dst, part_len);
4535 				if (IS_ERR(last_tidaw)) {
4536 					ret = -EINVAL;
4537 					goto out_error;
4538 				}
4539 				dst += part_len;
4540 			}
4541 		}
4542 	} else {
4543 		rq_for_each_segment(bv, req, iter) {
4544 			dst = page_address(bv.bv_page) + bv.bv_offset;
4545 			last_tidaw = itcw_add_tidaw(itcw, 0x00,
4546 						    dst, bv.bv_len);
4547 			if (IS_ERR(last_tidaw)) {
4548 				ret = -EINVAL;
4549 				goto out_error;
4550 			}
4551 		}
4552 	}
4553 	last_tidaw->flags |= TIDAW_FLAGS_LAST;
4554 	last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
4555 	itcw_finalize(itcw);
4556 
4557 	if (blk_noretry_request(req) ||
4558 	    block->base->features & DASD_FEATURE_FAILFAST)
4559 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4560 	cqr->cpmode = 1;
4561 	cqr->startdev = startdev;
4562 	cqr->memdev = startdev;
4563 	cqr->block = block;
4564 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
4565 	cqr->lpm = dasd_path_get_ppm(startdev);
4566 	cqr->retries = startdev->default_retries;
4567 	cqr->buildclk = get_tod_clock();
4568 	cqr->status = DASD_CQR_FILLED;
4569 
4570 	/* Set flags to suppress output for expected errors */
4571 	if (dasd_eckd_is_ese(basedev)) {
4572 		set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
4573 		set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
4574 		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4575 	}
4576 
4577 	return cqr;
4578 out_error:
4579 	dasd_sfree_request(cqr, startdev);
4580 	return ERR_PTR(ret);
4581 }
4582 
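/*
 * Build a channel program for a block layer request. Transport mode is
 * preferred if the amount of data fits the fcx limits; otherwise track
 * based command mode is used where supported, with the single block
 * command mode program as the fallback for everything else.
 */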
4583 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
4584 					       struct dasd_block *block,
4585 					       struct request *req)
4586 {
4587 	int cmdrtd, cmdwtd;
4588 	int use_prefix;
4589 	int fcx_multitrack;
4590 	struct dasd_eckd_private *private;
4591 	struct dasd_device *basedev;
4592 	sector_t first_rec, last_rec;
4593 	sector_t first_trk, last_trk;
4594 	unsigned int first_offs, last_offs;
4595 	unsigned int blk_per_trk, blksize;
4596 	int cdlspecial;
4597 	unsigned int data_size;
4598 	struct dasd_ccw_req *cqr;
4599 
4600 	basedev = block->base;
4601 	private = basedev->private;
4602 
4603 	/* Calculate number of blocks/records per track. */
4604 	blksize = block->bp_block;
4605 	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
4606 	if (blk_per_trk == 0)
4607 		return ERR_PTR(-EINVAL);
4608 	/* Calculate record id of first and last block. */
4609 	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
4610 	first_offs = sector_div(first_trk, blk_per_trk);
4611 	last_rec = last_trk =
4612 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
4613 	last_offs = sector_div(last_trk, blk_per_trk);
4614 	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
4615 
4616 	fcx_multitrack = private->features.feature[40] & 0x20;
4617 	data_size = blk_rq_bytes(req);
4618 	if (data_size % blksize)
4619 		return ERR_PTR(-EINVAL);
4620 	/* tpm write requests add CBC data on each track boundary */
4621 	if (rq_data_dir(req) == WRITE)
4622 		data_size += (last_trk - first_trk) * 4;
4623 
4624 	/* is read track data and write track data in command mode supported? */
4625 	cmdrtd = private->features.feature[9] & 0x20;
4626 	cmdwtd = private->features.feature[12] & 0x40;
4627 	use_prefix = private->features.feature[8] & 0x01;
4628 
4629 	cqr = NULL;
4630 	if (cdlspecial || dasd_page_cache) {
4631 		/* do nothing, just fall through to the cmd mode single case */
4632 	} else if ((data_size <= private->fcx_max_data)
4633 		   && (fcx_multitrack || (first_trk == last_trk))) {
4634 		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
4635 						    first_rec, last_rec,
4636 						    first_trk, last_trk,
4637 						    first_offs, last_offs,
4638 						    blk_per_trk, blksize);
4639 		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4640 		    (PTR_ERR(cqr) != -ENOMEM))
4641 			cqr = NULL;
4642 	} else if (use_prefix &&
4643 		   (((rq_data_dir(req) == READ) && cmdrtd) ||
4644 		    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
4645 		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
4646 						   first_rec, last_rec,
4647 						   first_trk, last_trk,
4648 						   first_offs, last_offs,
4649 						   blk_per_trk, blksize);
4650 		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4651 		    (PTR_ERR(cqr) != -ENOMEM))
4652 			cqr = NULL;
4653 	}
4654 	if (!cqr)
4655 		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
4656 						    first_rec, last_rec,
4657 						    first_trk, last_trk,
4658 						    first_offs, last_offs,
4659 						    blk_per_trk, blksize);
4660 	return cqr;
4661 }
4662 
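/*
 * Build a channel program for raw track access. Unaligned reads are
 * padded to full tracks with dummy pages; unaligned writes are rejected.
 */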
4663 static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
4664 						   struct dasd_block *block,
4665 						   struct request *req)
4666 {
4667 	sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
4668 	unsigned int seg_len, len_to_track_end;
4669 	unsigned int cidaw, cplength, datasize;
4670 	sector_t first_trk, last_trk, sectors;
4671 	struct dasd_eckd_private *base_priv;
4672 	struct dasd_device *basedev;
4673 	struct req_iterator iter;
4674 	struct dasd_ccw_req *cqr;
4675 	unsigned int first_offs;
4676 	unsigned int trkcount;
4677 	unsigned long *idaws;
4678 	unsigned int size;
4679 	unsigned char cmd;
4680 	struct bio_vec bv;
4681 	struct ccw1 *ccw;
4682 	int use_prefix;
4683 	void *data;
4684 	char *dst;
4685 
4686 	/*
4687 	 * Raw track access needs to be a multiple of 64k and on a 64k boundary.
4688 	 * For read requests we can fix an incorrect alignment by padding
4689 	 * the request with dummy pages.
4690 	 */
4691 	start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
4692 	end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
4693 		DASD_RAW_SECTORS_PER_TRACK;
4694 	end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
4695 		DASD_RAW_SECTORS_PER_TRACK;
4696 	basedev = block->base;
4697 	if ((start_padding_sectors || end_padding_sectors) &&
4698 	    (rq_data_dir(req) == WRITE)) {
4699 		DBF_DEV_EVENT(DBF_ERR, basedev,
4700 			      "raw write not track aligned (%llu,%llu) req %p",
4701 			      start_padding_sectors, end_padding_sectors, req);
4702 		return ERR_PTR(-EINVAL);
4703 	}
4704 
4705 	first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
4706 	last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
4707 		DASD_RAW_SECTORS_PER_TRACK;
4708 	trkcount = last_trk - first_trk + 1;
4709 	first_offs = 0;
4710 
4711 	if (rq_data_dir(req) == READ)
4712 		cmd = DASD_ECKD_CCW_READ_TRACK;
4713 	else if (rq_data_dir(req) == WRITE)
4714 		cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
4715 	else
4716 		return ERR_PTR(-EINVAL);
4717 
4718 	/*
4719 	 * Raw track based I/O needs IDAWs for each page,
4720 	 * and not just for 64 bit addresses.
4721 	 */
4722 	cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
4723 
4724 	/*
4725 	 * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
4726 	 * of extended parameter. This is needed for write full track.
4727 	 */
4728 	base_priv = basedev->private;
4729 	use_prefix = base_priv->features.feature[8] & 0x01;
4730 	if (use_prefix) {
4731 		cplength = 1 + trkcount;
4732 		size = sizeof(struct PFX_eckd_data) + 2;
4733 	} else {
4734 		cplength = 2 + trkcount;
4735 		size = sizeof(struct DE_eckd_data) +
4736 			sizeof(struct LRE_eckd_data) + 2;
4737 	}
4738 	size = ALIGN(size, 8);
4739 
4740 	datasize = size + cidaw * sizeof(unsigned long);
4741 
4742 	/* Allocate the ccw request. */
4743 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
4744 				   datasize, startdev, blk_mq_rq_to_pdu(req));
4745 	if (IS_ERR(cqr))
4746 		return cqr;
4747 
4748 	ccw = cqr->cpaddr;
4749 	data = cqr->data;
4750 
4751 	if (use_prefix) {
4752 		prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
4753 			   startdev, 1, first_offs + 1, trkcount, 0, 0);
4754 	} else {
4755 		define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
4756 		ccw[-1].flags |= CCW_FLAG_CC;
4757 
4758 		data += sizeof(struct DE_eckd_data);
4759 		locate_record_ext(ccw++, data, first_trk, first_offs + 1,
4760 				  trkcount, cmd, basedev, 0, 0);
4761 	}
4762 
4763 	idaws = (unsigned long *)(cqr->data + size);
4764 	len_to_track_end = 0;
4765 	if (start_padding_sectors) {
4766 		ccw[-1].flags |= CCW_FLAG_CC;
4767 		ccw->cmd_code = cmd;
4768 		/* maximum 3390 track size */
4769 		ccw->count = 57326;
4770 		/* 64k map to one track */
4771 		/* 64k maps to one track */
4772 		ccw->cda = (__u32)(addr_t)idaws;
4773 		ccw->flags |= CCW_FLAG_IDA;
4774 		ccw->flags |= CCW_FLAG_SLI;
4775 		ccw++;
4776 		for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
4777 			idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
4778 	}
4779 	rq_for_each_segment(bv, req, iter) {
4780 		dst = page_address(bv.bv_page) + bv.bv_offset;
4781 		seg_len = bv.bv_len;
4782 		if (cmd == DASD_ECKD_CCW_READ_TRACK)
4783 			memset(dst, 0, seg_len);
4784 		if (!len_to_track_end) {
4785 			ccw[-1].flags |= CCW_FLAG_CC;
4786 			ccw->cmd_code = cmd;
4787 			/* maximum 3390 track size */
4788 			ccw->count = 57326;
4789 			/* 64k map to one track */
4790 			/* 64k maps to one track */
4791 			ccw->cda = (__u32)(addr_t)idaws;
4792 			ccw->flags |= CCW_FLAG_IDA;
4793 			ccw->flags |= CCW_FLAG_SLI;
4794 			ccw++;
4795 		}
4796 		len_to_track_end -= seg_len;
4797 		idaws = idal_create_words(idaws, dst, seg_len);
4798 	}
4799 	for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
4800 		idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
4801 	if (blk_noretry_request(req) ||
4802 	    block->base->features & DASD_FEATURE_FAILFAST)
4803 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4804 	cqr->startdev = startdev;
4805 	cqr->memdev = startdev;
4806 	cqr->block = block;
4807 	cqr->expires = startdev->default_expires * HZ;
4808 	cqr->lpm = dasd_path_get_ppm(startdev);
4809 	cqr->retries = startdev->default_retries;
4810 	cqr->buildclk = get_tod_clock();
4811 	cqr->status = DASD_CQR_FILLED;
4812 
4813 	return cqr;
4814 }
4815 
4816 
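/*
 * Free a channel program. If the data was transferred through bounce
 * buffers from dasd_page_cache, copy read data back to the request pages
 * and release the buffers first. Returns 1 if the request completed
 * successfully, 0 otherwise.
 */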
4817 static int
4818 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
4819 {
4820 	struct dasd_eckd_private *private;
4821 	struct ccw1 *ccw;
4822 	struct req_iterator iter;
4823 	struct bio_vec bv;
4824 	char *dst, *cda;
4825 	unsigned int blksize, blk_per_trk, off;
4826 	sector_t recid;
4827 	int status;
4828 
4829 	if (!dasd_page_cache)
4830 		goto out;
4831 	private = cqr->block->base->private;
4832 	blksize = cqr->block->bp_block;
4833 	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
4834 	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
4835 	ccw = cqr->cpaddr;
4836 	/* Skip over define extent & locate record. */
4837 	ccw++;
4838 	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
4839 		ccw++;
4840 	rq_for_each_segment(bv, req, iter) {
4841 		dst = page_address(bv.bv_page) + bv.bv_offset;
4842 		for (off = 0; off < bv.bv_len; off += blksize) {
4843 			/* Skip locate record. */
4844 			if (private->uses_cdl && recid <= 2*blk_per_trk)
4845 				ccw++;
4846 			if (dst) {
4847 				if (ccw->flags & CCW_FLAG_IDA)
4848 					cda = *((char **)((addr_t) ccw->cda));
4849 				else
4850 					cda = (char *)((addr_t) ccw->cda);
4851 				if (dst != cda) {
4852 					if (rq_data_dir(req) == READ)
4853 						memcpy(dst, cda, bv.bv_len);
4854 					kmem_cache_free(dasd_page_cache,
4855 					    (void *)((addr_t)cda & PAGE_MASK));
4856 				}
4857 				dst = NULL;
4858 			}
4859 			ccw++;
4860 			recid++;
4861 		}
4862 	}
4863 out:
4864 	status = cqr->status == DASD_CQR_DONE;
4865 	dasd_sfree_request(cqr, cqr->memdev);
4866 	return status;
4867 }
4868 
4869 /*
4870  * Modify ccw/tcw in cqr so it can be started on a base device.
4871  *
4872  * Note that this is not enough to restart the cqr!
4873  * Either reset cqr->startdev as well (summary unit check handling)
4874  * or restart via separate cqr (as in ERP handling).
4875  */
4876 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
4877 {
4878 	struct ccw1 *ccw;
4879 	struct PFX_eckd_data *pfxdata;
4880 	struct tcw *tcw;
4881 	struct tccb *tccb;
4882 	struct dcw *dcw;
4883 
4884 	if (cqr->cpmode == 1) {
4885 		tcw = cqr->cpaddr;
4886 		tccb = tcw_get_tccb(tcw);
4887 		dcw = (struct dcw *)&tccb->tca[0];
4888 		pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
4889 		pfxdata->validity.verify_base = 0;
4890 		pfxdata->validity.hyper_pav = 0;
4891 	} else {
4892 		ccw = cqr->cpaddr;
4893 		pfxdata = cqr->data;
4894 		if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
4895 			pfxdata->validity.verify_base = 0;
4896 			pfxdata->validity.hyper_pav = 0;
4897 		}
4898 	}
4899 }
4900 
4901 #define DASD_ECKD_CHANQ_MAX_SIZE 4
4902 
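/*
 * Build a channel program on an alias device of the base device if one
 * is available, otherwise on the base device itself. The per-device
 * request count limits the channel queue depth.
 */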
4903 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
4904 						     struct dasd_block *block,
4905 						     struct request *req)
4906 {
4907 	struct dasd_eckd_private *private;
4908 	struct dasd_device *startdev;
4909 	unsigned long flags;
4910 	struct dasd_ccw_req *cqr;
4911 
4912 	startdev = dasd_alias_get_start_dev(base);
4913 	if (!startdev)
4914 		startdev = base;
4915 	private = startdev->private;
4916 	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
4917 		return ERR_PTR(-EBUSY);
4918 
4919 	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
4920 	private->count++;
4921 	if ((base->features & DASD_FEATURE_USERAW))
4922 		cqr = dasd_eckd_build_cp_raw(startdev, block, req);
4923 	else
4924 		cqr = dasd_eckd_build_cp(startdev, block, req);
4925 	if (IS_ERR(cqr))
4926 		private->count--;
4927 	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
4928 	return cqr;
4929 }
4930 
4931 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
4932 				   struct request *req)
4933 {
4934 	struct dasd_eckd_private *private;
4935 	unsigned long flags;
4936 
4937 	spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
4938 	private = cqr->memdev->private;
4939 	private->count--;
4940 	spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
4941 	return dasd_eckd_free_cp(cqr, req);
4942 }
4943 
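/*
 * Fill the dasd_information2_t structure with label position, format,
 * device characteristics and configuration data.
 */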
4944 static int
4945 dasd_eckd_fill_info(struct dasd_device * device,
4946 		    struct dasd_information2_t * info)
4947 {
4948 	struct dasd_eckd_private *private = device->private;
4949 
4950 	info->label_block = 2;
4951 	info->FBA_layout = private->uses_cdl ? 0 : 1;
4952 	info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
4953 	info->characteristics_size = sizeof(private->rdc_data);
4954 	memcpy(info->characteristics, &private->rdc_data,
4955 	       sizeof(private->rdc_data));
4956 	info->confdata_size = min((unsigned long)private->conf_len,
4957 				  sizeof(info->configuration_data));
4958 	memcpy(info->configuration_data, private->conf_data,
4959 	       info->confdata_size);
4960 	return 0;
4961 }
4962 
4963 /*
4964  * SECTION: ioctl functions for eckd devices.
4965  */
4966 
4967 /*
4968  * Release device ioctl.
4969  * Builds a channel program to release a device that was previously
4970  * reserved (see dasd_eckd_reserve).
4971  */
4972 static int
4973 dasd_eckd_release(struct dasd_device *device)
4974 {
4975 	struct dasd_ccw_req *cqr;
4976 	int rc;
4977 	struct ccw1 *ccw;
4978 	int useglobal;
4979 
4980 	if (!capable(CAP_SYS_ADMIN))
4981 		return -EACCES;
4982 
4983 	useglobal = 0;
4984 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
4985 	if (IS_ERR(cqr)) {
4986 		mutex_lock(&dasd_reserve_mutex);
4987 		useglobal = 1;
4988 		cqr = &dasd_reserve_req->cqr;
4989 		memset(cqr, 0, sizeof(*cqr));
4990 		memset(&dasd_reserve_req->ccw, 0,
4991 		       sizeof(dasd_reserve_req->ccw));
4992 		cqr->cpaddr = &dasd_reserve_req->ccw;
4993 		cqr->data = &dasd_reserve_req->data;
4994 		cqr->magic = DASD_ECKD_MAGIC;
4995 	}
4996 	ccw = cqr->cpaddr;
4997 	ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
4998 	ccw->flags |= CCW_FLAG_SLI;
4999 	ccw->count = 32;
5000 	ccw->cda = (__u32)(addr_t) cqr->data;
5001 	cqr->startdev = device;
5002 	cqr->memdev = device;
5003 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5004 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5005 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
5006 	cqr->expires = 2 * HZ;
5007 	cqr->buildclk = get_tod_clock();
5008 	cqr->status = DASD_CQR_FILLED;
5009 
5010 	rc = dasd_sleep_on_immediatly(cqr);
5011 	if (!rc)
5012 		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5013 
5014 	if (useglobal)
5015 		mutex_unlock(&dasd_reserve_mutex);
5016 	else
5017 		dasd_sfree_request(cqr, cqr->memdev);
5018 	return rc;
5019 }
5020 
5021 /*
5022  * Reserve device ioctl.
5023  * Options are set to 'synchronous wait for interrupt' and
5024  * 'timeout the request'. This leads to terminating the I/O if
5025  * the interrupt is outstanding for a certain time.
5026  */
5027 static int
5028 dasd_eckd_reserve(struct dasd_device *device)
5029 {
5030 	struct dasd_ccw_req *cqr;
5031 	int rc;
5032 	struct ccw1 *ccw;
5033 	int useglobal;
5034 
5035 	if (!capable(CAP_SYS_ADMIN))
5036 		return -EACCES;
5037 
5038 	useglobal = 0;
5039 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5040 	if (IS_ERR(cqr)) {
5041 		mutex_lock(&dasd_reserve_mutex);
5042 		useglobal = 1;
5043 		cqr = &dasd_reserve_req->cqr;
5044 		memset(cqr, 0, sizeof(*cqr));
5045 		memset(&dasd_reserve_req->ccw, 0,
5046 		       sizeof(dasd_reserve_req->ccw));
5047 		cqr->cpaddr = &dasd_reserve_req->ccw;
5048 		cqr->data = &dasd_reserve_req->data;
5049 		cqr->magic = DASD_ECKD_MAGIC;
5050 	}
5051 	ccw = cqr->cpaddr;
5052 	ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
5053 	ccw->flags |= CCW_FLAG_SLI;
5054 	ccw->count = 32;
5055 	ccw->cda = (__u32)(addr_t) cqr->data;
5056 	cqr->startdev = device;
5057 	cqr->memdev = device;
5058 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5059 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5060 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
5061 	cqr->expires = 2 * HZ;
5062 	cqr->buildclk = get_tod_clock();
5063 	cqr->status = DASD_CQR_FILLED;
5064 
5065 	rc = dasd_sleep_on_immediatly(cqr);
5066 	if (!rc)
5067 		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5068 
5069 	if (useglobal)
5070 		mutex_unlock(&dasd_reserve_mutex);
5071 	else
5072 		dasd_sfree_request(cqr, cqr->memdev);
5073 	return rc;
5074 }
5075 
5076 /*
5077  * Steal lock ioctl - unconditional reserve device.
5078  * Builds a channel program to break a device's reservation.
5079  * (unconditional reserve)
5080  */
5081 static int
5082 dasd_eckd_steal_lock(struct dasd_device *device)
5083 {
5084 	struct dasd_ccw_req *cqr;
5085 	int rc;
5086 	struct ccw1 *ccw;
5087 	int useglobal;
5088 
5089 	if (!capable(CAP_SYS_ADMIN))
5090 		return -EACCES;
5091 
5092 	useglobal = 0;
5093 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5094 	if (IS_ERR(cqr)) {
5095 		mutex_lock(&dasd_reserve_mutex);
5096 		useglobal = 1;
5097 		cqr = &dasd_reserve_req->cqr;
5098 		memset(cqr, 0, sizeof(*cqr));
5099 		memset(&dasd_reserve_req->ccw, 0,
5100 		       sizeof(dasd_reserve_req->ccw));
5101 		cqr->cpaddr = &dasd_reserve_req->ccw;
5102 		cqr->data = &dasd_reserve_req->data;
5103 		cqr->magic = DASD_ECKD_MAGIC;
5104 	}
5105 	ccw = cqr->cpaddr;
5106 	ccw->cmd_code = DASD_ECKD_CCW_SLCK;
5107 	ccw->flags |= CCW_FLAG_SLI;
5108 	ccw->count = 32;
5109 	ccw->cda = (__u32)(addr_t) cqr->data;
5110 	cqr->startdev = device;
5111 	cqr->memdev = device;
5112 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5113 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5114 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
5115 	cqr->expires = 2 * HZ;
5116 	cqr->buildclk = get_tod_clock();
5117 	cqr->status = DASD_CQR_FILLED;
5118 
5119 	rc = dasd_sleep_on_immediatly(cqr);
5120 	if (!rc)
5121 		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5122 
5123 	if (useglobal)
5124 		mutex_unlock(&dasd_reserve_mutex);
5125 	else
5126 		dasd_sfree_request(cqr, cqr->memdev);
5127 	return rc;
5128 }
5129 
5130 /*
5131  * SNID - Sense Path Group ID
5132  * This ioctl may be used in situations where I/O is stalled due to
5133  * a reserve, so if the normal dasd_smalloc_request fails, we use the
5134  * preallocated dasd_reserve_req.
5135  */
5136 static int dasd_eckd_snid(struct dasd_device *device,
5137 			  void __user *argp)
5138 {
5139 	struct dasd_ccw_req *cqr;
5140 	int rc;
5141 	struct ccw1 *ccw;
5142 	int useglobal;
5143 	struct dasd_snid_ioctl_data usrparm;
5144 
5145 	if (!capable(CAP_SYS_ADMIN))
5146 		return -EACCES;
5147 
5148 	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
5149 		return -EFAULT;
5150 
5151 	useglobal = 0;
5152 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
5153 				   sizeof(struct dasd_snid_data), device,
5154 				   NULL);
5155 	if (IS_ERR(cqr)) {
5156 		mutex_lock(&dasd_reserve_mutex);
5157 		useglobal = 1;
5158 		cqr = &dasd_reserve_req->cqr;
5159 		memset(cqr, 0, sizeof(*cqr));
5160 		memset(&dasd_reserve_req->ccw, 0,
5161 		       sizeof(dasd_reserve_req->ccw));
5162 		cqr->cpaddr = &dasd_reserve_req->ccw;
5163 		cqr->data = &dasd_reserve_req->data;
5164 		cqr->magic = DASD_ECKD_MAGIC;
5165 	}
5166 	ccw = cqr->cpaddr;
5167 	ccw->cmd_code = DASD_ECKD_CCW_SNID;
5168 	ccw->flags |= CCW_FLAG_SLI;
5169 	ccw->count = 12;
5170 	ccw->cda = (__u32)(addr_t) cqr->data;
5171 	cqr->startdev = device;
5172 	cqr->memdev = device;
5173 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5174 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5175 	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
5176 	cqr->retries = 5;
5177 	cqr->expires = 10 * HZ;
5178 	cqr->buildclk = get_tod_clock();
5179 	cqr->status = DASD_CQR_FILLED;
5180 	cqr->lpm = usrparm.path_mask;
5181 
5182 	rc = dasd_sleep_on_immediatly(cqr);
5183 	/* verify that I/O processing didn't modify the path mask */
5184 	if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
5185 		rc = -EIO;
5186 	if (!rc) {
5187 		usrparm.data = *((struct dasd_snid_data *)cqr->data);
5188 		if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
5189 			rc = -EFAULT;
5190 	}
5191 
5192 	if (useglobal)
5193 		mutex_unlock(&dasd_reserve_mutex);
5194 	else
5195 		dasd_sfree_request(cqr, cqr->memdev);
5196 	return rc;
5197 }
5198 
5199 /*
5200  * Read performance statistics
5201  */
5202 static int
5203 dasd_eckd_performance(struct dasd_device *device, void __user *argp)
5204 {
5205 	struct dasd_psf_prssd_data *prssdp;
5206 	struct dasd_rssd_perf_stats_t *stats;
5207 	struct dasd_ccw_req *cqr;
5208 	struct ccw1 *ccw;
5209 	int rc;
5210 
5211 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */  + 1 /* RSSD */,
5212 				   (sizeof(struct dasd_psf_prssd_data) +
5213 				    sizeof(struct dasd_rssd_perf_stats_t)),
5214 				   device, NULL);
5215 	if (IS_ERR(cqr)) {
5216 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5217 			    "Could not allocate initialization request");
5218 		return PTR_ERR(cqr);
5219 	}
5220 	cqr->startdev = device;
5221 	cqr->memdev = device;
5222 	cqr->retries = 0;
5223 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5224 	cqr->expires = 10 * HZ;
5225 
5226 	/* Prepare for Read Subsystem Data */
5227 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5228 	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5229 	prssdp->order = PSF_ORDER_PRSSD;
5230 	prssdp->suborder = 0x01;	/* Performance Statistics */
5231 	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */
5232 
5233 	ccw = cqr->cpaddr;
5234 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
5235 	ccw->count = sizeof(struct dasd_psf_prssd_data);
5236 	ccw->flags |= CCW_FLAG_CC;
5237 	ccw->cda = (__u32)(addr_t) prssdp;
5238 
5239 	/* Read Subsystem Data - Performance Statistics */
5240 	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
5241 	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
5242 
5243 	ccw++;
5244 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5245 	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
5246 	ccw->cda = (__u32)(addr_t) stats;
5247 
5248 	cqr->buildclk = get_tod_clock();
5249 	cqr->status = DASD_CQR_FILLED;
5250 	rc = dasd_sleep_on(cqr);
5251 	if (rc == 0) {
5252 		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5253 		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
5254 		if (copy_to_user(argp, stats,
5255 				 sizeof(struct dasd_rssd_perf_stats_t)))
5256 			rc = -EFAULT;
5257 	}
5258 	dasd_sfree_request(cqr, cqr->memdev);
5259 	return rc;
5260 }
5261 
5262 /*
5263  * Get attributes (cache operations)
5264  * Returns the cache attributes used in Define Extent (DE).
5265  */
5266 static int
5267 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
5268 {
5269 	struct dasd_eckd_private *private = device->private;
5270 	struct attrib_data_t attrib = private->attrib;
5271 	int rc;
5272 
5273 	if (!capable(CAP_SYS_ADMIN))
5274 		return -EACCES;
5275 	if (!argp)
5276 		return -EINVAL;
5277 
5278 	rc = 0;
5279 	if (copy_to_user(argp, (long *) &attrib,
5280 			 sizeof(struct attrib_data_t)))
5281 		rc = -EFAULT;
5282 
5283 	return rc;
5284 }
5285 
5286 /*
5287  * Set attributes (cache operations)
5288  * Stores the attributes for cache operations to be used in Define Extent (DE).
5289  */
5290 static int
5291 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
5292 {
5293 	struct dasd_eckd_private *private = device->private;
5294 	struct attrib_data_t attrib;
5295 
5296 	if (!capable(CAP_SYS_ADMIN))
5297 		return -EACCES;
5298 	if (!argp)
5299 		return -EINVAL;
5300 
5301 	if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
5302 		return -EFAULT;
5303 	private->attrib = attrib;
5304 
5305 	dev_info(&device->cdev->dev,
5306 		 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
5307 		 private->attrib.operation, private->attrib.nr_cyl);
5308 	return 0;
5309 }
5310 
5311 /*
5312  * Issue syscall I/O to EMC Symmetrix array.
5313  * CCWs are PSF and RSSD
5314  */
5315 static int dasd_symm_io(struct dasd_device *device, void __user *argp)
5316 {
5317 	struct dasd_symmio_parms usrparm;
5318 	char *psf_data, *rssd_result;
5319 	struct dasd_ccw_req *cqr;
5320 	struct ccw1 *ccw;
5321 	char psf0, psf1;
5322 	int rc;
5323 
5324 	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
5325 		return -EACCES;
5326 	psf0 = psf1 = 0;
5327 
5328 	/* Copy parms from caller */
5329 	rc = -EFAULT;
5330 	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
5331 		goto out;
5332 	if (is_compat_task()) {
5333 		/* Make sure pointers are sane even on 31 bit. */
5334 		rc = -EINVAL;
5335 		if ((usrparm.psf_data >> 32) != 0)
5336 			goto out;
5337 		if ((usrparm.rssd_result >> 32) != 0)
5338 			goto out;
5339 		usrparm.psf_data &= 0x7fffffffULL;
5340 		usrparm.rssd_result &= 0x7fffffffULL;
5341 	}
5342 	/* at least 2 bytes are accessed and should be allocated */
5343 	if (usrparm.psf_data_len < 2) {
5344 		DBF_DEV_EVENT(DBF_WARNING, device,
5345 			      "Symmetrix ioctl invalid data length %d",
5346 			      usrparm.psf_data_len);
5347 		rc = -EINVAL;
5348 		goto out;
5349 	}
5350 	/* alloc I/O data area */
5351 	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
5352 	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
5353 	if (!psf_data || !rssd_result) {
5354 		rc = -ENOMEM;
5355 		goto out_free;
5356 	}
5357 
5358 	/* get syscall header from user space */
5359 	rc = -EFAULT;
5360 	if (copy_from_user(psf_data,
5361 			   (void __user *)(unsigned long) usrparm.psf_data,
5362 			   usrparm.psf_data_len))
5363 		goto out_free;
5364 	psf0 = psf_data[0];
5365 	psf1 = psf_data[1];
5366 
5367 	/* setup CCWs for PSF + RSSD */
5368 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
5369 	if (IS_ERR(cqr)) {
5370 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5371 			"Could not allocate initialization request");
5372 		rc = PTR_ERR(cqr);
5373 		goto out_free;
5374 	}
5375 
5376 	cqr->startdev = device;
5377 	cqr->memdev = device;
5378 	cqr->retries = 3;
5379 	cqr->expires = 10 * HZ;
5380 	cqr->buildclk = get_tod_clock();
5381 	cqr->status = DASD_CQR_FILLED;
5382 
5383 	/* Build the ccws */
5384 	ccw = cqr->cpaddr;
5385 
5386 	/* PSF ccw */
5387 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
5388 	ccw->count = usrparm.psf_data_len;
5389 	ccw->flags |= CCW_FLAG_CC;
5390 	ccw->cda = (__u32)(addr_t) psf_data;
5391 
5392 	ccw++;
5393 
5394 	/* RSSD ccw  */
5395 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5396 	ccw->count = usrparm.rssd_result_len;
5397 	ccw->flags = CCW_FLAG_SLI;
5398 	ccw->cda = (__u32)(addr_t) rssd_result;
5399 
5400 	rc = dasd_sleep_on(cqr);
5401 	if (rc)
5402 		goto out_sfree;
5403 
5404 	rc = -EFAULT;
5405 	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
5406 			   rssd_result, usrparm.rssd_result_len))
5407 		goto out_sfree;
5408 	rc = 0;
5409 
5410 out_sfree:
5411 	dasd_sfree_request(cqr, cqr->memdev);
5412 out_free:
5413 	kfree(rssd_result);
5414 	kfree(psf_data);
5415 out:
5416 	DBF_DEV_EVENT(DBF_WARNING, device,
5417 		      "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
5418 		      (int) psf0, (int) psf1, rc);
5419 	return rc;
5420 }
5421 
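/*
 * Dispatch the ECKD specific ioctls to their handlers; unknown
 * commands are rejected with -ENOTTY.
 */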
5422 static int
5423 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
5424 {
5425 	struct dasd_device *device = block->base;
5426 
5427 	switch (cmd) {
5428 	case BIODASDGATTR:
5429 		return dasd_eckd_get_attrib(device, argp);
5430 	case BIODASDSATTR:
5431 		return dasd_eckd_set_attrib(device, argp);
5432 	case BIODASDPSRD:
5433 		return dasd_eckd_performance(device, argp);
5434 	case BIODASDRLSE:
5435 		return dasd_eckd_release(device);
5436 	case BIODASDRSRV:
5437 		return dasd_eckd_reserve(device);
5438 	case BIODASDSLCK:
5439 		return dasd_eckd_steal_lock(device);
5440 	case BIODASDSNID:
5441 		return dasd_eckd_snid(device, argp);
5442 	case BIODASDSYMMIO:
5443 		return dasd_symm_io(device, argp);
5444 	default:
5445 		return -ENOTTY;
5446 	}
5447 }
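
/*
 * For illustration, a minimal user-space sketch of driving the
 * BIODASDGATTR path above (hypothetical; it assumes the ioctl
 * definitions from <asm/dasd.h>, an existing DASD node such as
 * /dev/dasda, and CAP_SYS_ADMIN):
 *
 *	struct attrib_data_t attrib;
 *	int fd = open("/dev/dasda", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, BIODASDGATTR, &attrib) == 0)
 *		printf("cache mode %x (%i cyl prestage)\n",
 *		       attrib.operation, attrib.nr_cyl);
 */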
5448 
5449 /*
5450  * Dump the range of CCWs into 'page' buffer
5451  * and return number of printed chars.
5452  */
5453 static int
5454 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
5455 {
5456 	int len, count;
5457 	char *datap;
5458 
5459 	len = 0;
5460 	while (from <= to) {
5461 		len += sprintf(page + len, PRINTK_HEADER
5462 			       " CCW %p: %08X %08X DAT:",
5463 			       from, ((int *) from)[0], ((int *) from)[1]);
5464 
5465 		/* get pointer to data (consider IDALs) */
5466 		if (from->flags & CCW_FLAG_IDA)
5467 			datap = (char *) *((addr_t *) (addr_t) from->cda);
5468 		else
5469 			datap = (char *) ((addr_t) from->cda);
5470 
5471 		/* dump data (max 32 bytes) */
5472 		for (count = 0; count < from->count && count < 32; count++) {
5473 			if (count % 8 == 0) len += sprintf(page + len, " ");
5474 			if (count % 4 == 0) len += sprintf(page + len, " ");
5475 			len += sprintf(page + len, "%02x", datap[count]);
5476 		}
5477 		len += sprintf(page + len, "\n");
5478 		from++;
5479 	}
5480 	return len;
5481 }
5482 
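/*
 * Write the interrupt status and, if available, the first 32 bytes
 * of sense data to the device's debug feature.
 */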
5483 static void
5484 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
5485 			 char *reason)
5486 {
5487 	u64 *sense;
5488 	u64 *stat;
5489 
5490 	sense = (u64 *) dasd_get_sense(irb);
5491 	stat = (u64 *) &irb->scsw;
5492 	if (sense) {
5493 		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
5494 			      "%016llx %016llx %016llx %016llx",
5495 			      reason, *stat, *((u32 *) (stat + 1)),
5496 			      sense[0], sense[1], sense[2], sense[3]);
5497 	} else {
5498 		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
5499 			      reason, *stat, *((u32 *) (stat + 1)),
5500 			      "NO VALID SENSE");
5501 	}
5502 }
5503 
5504 /*
5505  * Print sense data and related channel program.
5506  * The output is printed in parts because the printk buffer is only 1024 bytes.
5507  */
5508 static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
5509 				 struct dasd_ccw_req *req, struct irb *irb)
5510 {
5511 	char *page;
5512 	struct ccw1 *first, *last, *fail, *from, *to;
5513 	int len, sl, sct;
5514 
5515 	page = (char *) get_zeroed_page(GFP_ATOMIC);
5516 	if (page == NULL) {
5517 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5518 			      "No memory to dump sense data\n");
5519 		return;
5520 	}
5521 	/* dump the sense data */
5522 	len = sprintf(page, PRINTK_HEADER
5523 		      " I/O status report for device %s:\n",
5524 		      dev_name(&device->cdev->dev));
5525 	len += sprintf(page + len, PRINTK_HEADER
5526 		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
5527 		       "CS:%02X RC:%d\n",
5528 		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
5529 		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
5530 		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
5531 		       req ? req->intrc : 0);
5532 	len += sprintf(page + len, PRINTK_HEADER
5533 		       " device %s: Failing CCW: %p\n",
5534 		       dev_name(&device->cdev->dev),
5535 		       (void *) (addr_t) irb->scsw.cmd.cpa);
5536 	if (irb->esw.esw0.erw.cons) {
5537 		for (sl = 0; sl < 4; sl++) {
5538 			len += sprintf(page + len, PRINTK_HEADER
5539 				       " Sense(hex) %2d-%2d:",
5540 				       (8 * sl), ((8 * sl) + 7));
5541 
5542 			for (sct = 0; sct < 8; sct++) {
5543 				len += sprintf(page + len, " %02x",
5544 					       irb->ecw[8 * sl + sct]);
5545 			}
5546 			len += sprintf(page + len, "\n");
5547 		}
5548 
5549 		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
5550 			/* 24 Byte Sense Data */
5551 			sprintf(page + len, PRINTK_HEADER
5552 				" 24 Byte: %x MSG %x, "
5553 				"%s MSGb to SYSOP\n",
5554 				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
5555 				irb->ecw[1] & 0x10 ? "" : "no");
5556 		} else {
5557 			/* 32 Byte Sense Data */
5558 			sprintf(page + len, PRINTK_HEADER
5559 				" 32 Byte: Format: %x "
5560 				"Exception class %x\n",
5561 				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
5562 		}
5563 	} else {
5564 		sprintf(page + len, PRINTK_HEADER
5565 			" SORRY - NO VALID SENSE AVAILABLE\n");
5566 	}
5567 	printk(KERN_ERR "%s", page);
5568 
5569 	if (req) {
5570 		/* req == NULL for unsolicited interrupts */
5571 		/* dump the Channel Program (max 140 Bytes per line) */
5572 		/* Count CCWs and print first CCWs (maximum 1024 / 140 = 7) */
5573 		first = req->cpaddr;
5574 		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
5575 		to = min(first + 6, last);
5576 		len = sprintf(page, PRINTK_HEADER
5577 			      " Related CP in req: %p\n", req);
5578 		dasd_eckd_dump_ccw_range(first, to, page + len);
5579 		printk(KERN_ERR "%s", page);
5580 
5581 		/* print failing CCW area (maximum 4) */
5582 		/* scsw->cda is either valid or zero  */
5583 		len = 0;
5584 		from = ++to;
5585 		fail = (struct ccw1 *)(addr_t)
5586 				irb->scsw.cmd.cpa; /* failing CCW */
5587 		if (from <  fail - 2) {
5588 			from = fail - 2;     /* there is a gap - print header */
5589 			len += sprintf(page, PRINTK_HEADER "......\n");
5590 		}
5591 		to = min(fail + 1, last);
5592 		len += dasd_eckd_dump_ccw_range(from, to, page + len);
5593 
5594 		/* print last CCWs (maximum 2) */
5595 		from = max(from, ++to);
5596 		if (from < last - 1) {
5597 			from = last - 1;     /* there is a gap - print header */
5598 			len += sprintf(page + len, PRINTK_HEADER "......\n");
5599 		}
5600 		len += dasd_eckd_dump_ccw_range(from, last, page + len);
5601 		if (len > 0)
5602 			printk(KERN_ERR "%s", page);
5603 	}
5604 	free_page((unsigned long) page);
5605 }
5606 
5607 
5608 /*
5609  * Print sense data from a tcw.
5610  */
5611 static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
5612 				 struct dasd_ccw_req *req, struct irb *irb)
5613 {
5614 	char *page;
5615 	int len, sl, sct, residual;
5616 	struct tsb *tsb;
5617 	u8 *sense, *rcq;
5618 
5619 	page = (char *) get_zeroed_page(GFP_ATOMIC);
5620 	if (page == NULL) {
5621 		DBF_DEV_EVENT(DBF_WARNING, device, " %s",
5622 			    "No memory to dump sense data");
5623 		return;
5624 	}
5625 	/* dump the sense data */
5626 	len = sprintf(page, PRINTK_HEADER
5627 		      " I/O status report for device %s:\n",
5628 		      dev_name(&device->cdev->dev));
5629 	len += sprintf(page + len, PRINTK_HEADER
5630 		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
5631 		       "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
5632 		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
5633 		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
5634 		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
5635 		       irb->scsw.tm.fcxs,
5636 		       (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
5637 		       req ? req->intrc : 0);
5638 	len += sprintf(page + len, PRINTK_HEADER
5639 		       " device %s: Failing TCW: %p\n",
5640 		       dev_name(&device->cdev->dev),
5641 		       (void *) (addr_t) irb->scsw.tm.tcw);
5642 
5643 	tsb = NULL;
5644 	sense = NULL;
5645 	if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
5646 		tsb = tcw_get_tsb(
5647 			(struct tcw *)(unsigned long)irb->scsw.tm.tcw);
5648 
5649 	if (tsb) {
5650 		len += sprintf(page + len, PRINTK_HEADER
5651 			       " tsb->length %d\n", tsb->length);
5652 		len += sprintf(page + len, PRINTK_HEADER
5653 			       " tsb->flags %x\n", tsb->flags);
5654 		len += sprintf(page + len, PRINTK_HEADER
5655 			       " tsb->dcw_offset %d\n", tsb->dcw_offset);
5656 		len += sprintf(page + len, PRINTK_HEADER
5657 			       " tsb->count %d\n", tsb->count);
5658 		residual = tsb->count - 28;
5659 		len += sprintf(page + len, PRINTK_HEADER
5660 			       " residual %d\n", residual);
5661 
5662 		switch (tsb->flags & 0x07) {
5663 		case 1:	/* tsa_iostat */
5664 			len += sprintf(page + len, PRINTK_HEADER
5665 			       " tsb->tsa.iostat.dev_time %d\n",
5666 				       tsb->tsa.iostat.dev_time);
5667 			len += sprintf(page + len, PRINTK_HEADER
5668 			       " tsb->tsa.iostat.def_time %d\n",
5669 				       tsb->tsa.iostat.def_time);
5670 			len += sprintf(page + len, PRINTK_HEADER
5671 			       " tsb->tsa.iostat.queue_time %d\n",
5672 				       tsb->tsa.iostat.queue_time);
5673 			len += sprintf(page + len, PRINTK_HEADER
5674 			       " tsb->tsa.iostat.dev_busy_time %d\n",
5675 				       tsb->tsa.iostat.dev_busy_time);
5676 			len += sprintf(page + len, PRINTK_HEADER
5677 			       " tsb->tsa.iostat.dev_act_time %d\n",
5678 				       tsb->tsa.iostat.dev_act_time);
5679 			sense = tsb->tsa.iostat.sense;
5680 			break;
5681 		case 2: /* tsa_ddpc */
5682 			len += sprintf(page + len, PRINTK_HEADER
5683 			       " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
5684 			for (sl = 0; sl < 2; sl++) {
5685 				len += sprintf(page + len, PRINTK_HEADER
5686 					       " tsb->tsa.ddpc.rcq %2d-%2d: ",
5687 					       (8 * sl), ((8 * sl) + 7));
5688 				rcq = tsb->tsa.ddpc.rcq;
5689 				for (sct = 0; sct < 8; sct++) {
5690 					len += sprintf(page + len, " %02x",
5691 						       rcq[8 * sl + sct]);
5692 				}
5693 				len += sprintf(page + len, "\n");
5694 			}
5695 			sense = tsb->tsa.ddpc.sense;
5696 			break;
5697 		case 3: /* tsa_intrg */
5698 			len += sprintf(page + len, PRINTK_HEADER
5699 				      " tsb->tsa.intrg.: not supported yet\n");
5700 			break;
5701 		}
5702 
5703 		if (sense) {
5704 			for (sl = 0; sl < 4; sl++) {
5705 				len += sprintf(page + len, PRINTK_HEADER
5706 					       " Sense(hex) %2d-%2d:",
5707 					       (8 * sl), ((8 * sl) + 7));
5708 				for (sct = 0; sct < 8; sct++) {
5709 					len += sprintf(page + len, " %02x",
5710 						       sense[8 * sl + sct]);
5711 				}
5712 				len += sprintf(page + len, "\n");
5713 			}
5714 
5715 			if (sense[27] & DASD_SENSE_BIT_0) {
5716 				/* 24 Byte Sense Data */
5717 				sprintf(page + len, PRINTK_HEADER
5718 					" 24 Byte: %x MSG %x, "
5719 					"%s MSGb to SYSOP\n",
5720 					sense[7] >> 4, sense[7] & 0x0f,
5721 					sense[1] & 0x10 ? "" : "no");
5722 			} else {
5723 				/* 32 Byte Sense Data */
5724 				sprintf(page + len, PRINTK_HEADER
5725 					" 32 Byte: Format: %x "
5726 					"Exception class %x\n",
5727 					sense[6] & 0x0f, sense[22] >> 4);
5728 			}
5729 		} else {
5730 			sprintf(page + len, PRINTK_HEADER
5731 				" SORRY - NO VALID SENSE AVAILABLE\n");
5732 		}
5733 	} else {
5734 		sprintf(page + len, PRINTK_HEADER
5735 			" SORRY - NO TSB DATA AVAILABLE\n");
5736 	}
5737 	printk(KERN_ERR "%s", page);
5738 	free_page((unsigned long) page);
5739 }
5740 
5741 static void dasd_eckd_dump_sense(struct dasd_device *device,
5742 				 struct dasd_ccw_req *req, struct irb *irb)
5743 {
5744 	u8 *sense = dasd_get_sense(irb);
5745 
5746 	if (scsw_is_tm(&irb->scsw)) {
5747 		/*
5748 		 * In some cases the 'File Protected' or 'Incorrect Length'
5749 		 * error might be expected and log messages shouldn't be written
5750 		 * then. Check if the corresponding suppress bit is set.
5751 		 */
5752 		if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
5753 		    test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
5754 			return;
5755 		if (scsw_cstat(&irb->scsw) == 0x40 &&
5756 		    test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
5757 			return;
5758 
5759 		dasd_eckd_dump_sense_tcw(device, req, irb);
5760 	} else {
5761 		/*
5762 		 * In some cases the 'Command Reject' or 'No Record Found'
5763 		 * error might be expected and log messages shouldn't be
5764 		 * written then. Check if the corresponding suppress bit is set.
5765 		 */
5766 		if (sense && sense[0] & SNS0_CMD_REJECT &&
5767 		    test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
5768 			return;
5769 
5770 		if (sense && sense[1] & SNS1_NO_REC_FOUND &&
5771 		    test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
5772 			return;
5773 
5774 		dasd_eckd_dump_sense_ccw(device, req, irb);
5775 	}
5776 }
5777 
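/*
 * Re-read the configuration data after an alias device has been
 * reassigned and update the alias handling accordingly.
 */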
5778 static int dasd_eckd_reload_device(struct dasd_device *device)
5779 {
5780 	struct dasd_eckd_private *private = device->private;
5781 	int rc, old_base;
5782 	char print_uid[60];
5783 	struct dasd_uid uid;
5784 	unsigned long flags;
5785 
5786 	/*
5787 	 * remove device from alias handling to prevent new requests
5788 	 * from being scheduled on the wrong alias device
5789 	 */
5790 	dasd_alias_remove_device(device);
5791 
5792 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
5793 	old_base = private->uid.base_unit_addr;
5794 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
5795 
5796 	/* Read Configuration Data */
5797 	rc = dasd_eckd_read_conf(device);
5798 	if (rc)
5799 		goto out_err;
5800 
5801 	rc = dasd_eckd_generate_uid(device);
5802 	if (rc)
5803 		goto out_err;
5804 	/*
5805 	 * update unit address configuration and
5806 	 * add device to alias management
5807 	 */
5808 	dasd_alias_update_add_device(device);
5809 
5810 	dasd_eckd_get_uid(device, &uid);
5811 
5812 	if (old_base != uid.base_unit_addr) {
5813 		if (strlen(uid.vduit) > 0)
5814 			snprintf(print_uid, sizeof(print_uid),
5815 				 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
5816 				 uid.ssid, uid.base_unit_addr, uid.vduit);
5817 		else
5818 			snprintf(print_uid, sizeof(print_uid),
5819 				 "%s.%s.%04x.%02x", uid.vendor, uid.serial,
5820 				 uid.ssid, uid.base_unit_addr);
5821 
5822 		dev_info(&device->cdev->dev,
5823 			 "An Alias device was reassigned to a new base device "
5824 			 "with UID: %s\n", print_uid);
5825 	}
5826 	return 0;
5827 
5828 out_err:
5829 	return -1;
5830 }
5831 
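/*
 * Read the subsystem message buffer (PSF/RSSD) on the path given by
 * lpum; if that fails, retry with an open path mask since e.g. z/VM
 * may deliver the data on any path.
 */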
5832 static int dasd_eckd_read_message_buffer(struct dasd_device *device,
5833 					 struct dasd_rssd_messages *messages,
5834 					 __u8 lpum)
5835 {
5836 	struct dasd_rssd_messages *message_buf;
5837 	struct dasd_psf_prssd_data *prssdp;
5838 	struct dasd_ccw_req *cqr;
5839 	struct ccw1 *ccw;
5840 	int rc;
5841 
5842 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
5843 				   (sizeof(struct dasd_psf_prssd_data) +
5844 				    sizeof(struct dasd_rssd_messages)),
5845 				   device, NULL);
5846 	if (IS_ERR(cqr)) {
5847 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5848 				"Could not allocate read message buffer request");
5849 		return PTR_ERR(cqr);
5850 	}
5851 
5852 	cqr->lpm = lpum;
5853 retry:
5854 	cqr->startdev = device;
5855 	cqr->memdev = device;
5856 	cqr->block = NULL;
5857 	cqr->expires = 10 * HZ;
5858 	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
5859 	/* dasd_sleep_on_immediatly does not do complex error
5860 	 * recovery, so clear the ERP flag and set the retry
5861 	 * counter to do basic ERP */
5862 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5863 	cqr->retries = 256;
5864 
5865 	/* Prepare for Read Subsystem Data */
5866 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5867 	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5868 	prssdp->order = PSF_ORDER_PRSSD;
5869 	prssdp->suborder = 0x03;	/* Message Buffer */
5870 	/* all other bytes of prssdp must be zero */
5871 
5872 	ccw = cqr->cpaddr;
5873 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
5874 	ccw->count = sizeof(struct dasd_psf_prssd_data);
5875 	ccw->flags |= CCW_FLAG_CC;
5876 	ccw->flags |= CCW_FLAG_SLI;
5877 	ccw->cda = (__u32)(addr_t) prssdp;
5878 
5879 	/* Read Subsystem Data - message buffer */
5880 	message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
5881 	memset(message_buf, 0, sizeof(struct dasd_rssd_messages));
5882 
5883 	ccw++;
5884 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5885 	ccw->count = sizeof(struct dasd_rssd_messages);
5886 	ccw->flags |= CCW_FLAG_SLI;
5887 	ccw->cda = (__u32)(addr_t) message_buf;
5888 
5889 	cqr->buildclk = get_tod_clock();
5890 	cqr->status = DASD_CQR_FILLED;
5891 	rc = dasd_sleep_on_immediatly(cqr);
5892 	if (rc == 0) {
5893 		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5894 		message_buf = (struct dasd_rssd_messages *)
5895 			(prssdp + 1);
5896 		memcpy(messages, message_buf,
5897 		       sizeof(struct dasd_rssd_messages));
5898 	} else if (cqr->lpm) {
5899 		/*
5900 		 * on z/VM we might not be able to do I/O on the requested path,
5901 		 * but we can get the required information on any path,
5902 		 * so retry with an open path mask
5903 		 */
5904 		cqr->lpm = 0;
5905 		goto retry;
5906 	} else
5907 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5908 				"Reading messages failed with rc=%d\n"
5909 				, rc);
5910 	dasd_sfree_request(cqr, cqr->memdev);
5911 	return rc;
5912 }
5913 
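/*
 * Query the host access information for this volume from the storage
 * server (PSF/RSSD with the Query Host Access suborder).
 */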
5914 static int dasd_eckd_query_host_access(struct dasd_device *device,
5915 				       struct dasd_psf_query_host_access *data)
5916 {
5917 	struct dasd_eckd_private *private = device->private;
5918 	struct dasd_psf_query_host_access *host_access;
5919 	struct dasd_psf_prssd_data *prssdp;
5920 	struct dasd_ccw_req *cqr;
5921 	struct ccw1 *ccw;
5922 	int rc;
5923 
5924 	/* not available for HYPER PAV alias devices */
5925 	if (!device->block && private->lcu->pav == HYPER_PAV)
5926 		return -EOPNOTSUPP;
5927 
5928 	/* may not be supported by the storage server */
5929 	if (!(private->features.feature[14] & 0x80))
5930 		return -EOPNOTSUPP;
5931 
5932 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
5933 				   sizeof(struct dasd_psf_prssd_data) + 1,
5934 				   device, NULL);
5935 	if (IS_ERR(cqr)) {
5936 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5937 				"Could not allocate query host access request");
5938 		return PTR_ERR(cqr);
5939 	}
5940 	host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
5941 	if (!host_access) {
5942 		dasd_sfree_request(cqr, device);
5943 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5944 				"Could not allocate host_access buffer");
5945 		return -ENOMEM;
5946 	}
5947 	cqr->startdev = device;
5948 	cqr->memdev = device;
5949 	cqr->block = NULL;
5950 	cqr->retries = 256;
5951 	cqr->expires = 10 * HZ;
5952 
5953 	/* Prepare for Read Subsystem Data */
5954 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5955 	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5956 	prssdp->order = PSF_ORDER_PRSSD;
5957 	prssdp->suborder = PSF_SUBORDER_QHA;	/* query host access */
5958 	/* LSS and Volume that will be queried */
5959 	prssdp->lss = private->ned->ID;
5960 	prssdp->volume = private->ned->unit_addr;
5961 	/* all other bytes of prssdp must be zero */
5962 
5963 	ccw = cqr->cpaddr;
5964 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
5965 	ccw->count = sizeof(struct dasd_psf_prssd_data);
5966 	ccw->flags |= CCW_FLAG_CC;
5967 	ccw->flags |= CCW_FLAG_SLI;
5968 	ccw->cda = (__u32)(addr_t) prssdp;
5969 
5970 	/* Read Subsystem Data - query host access */
5971 	ccw++;
5972 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5973 	ccw->count = sizeof(struct dasd_psf_query_host_access);
5974 	ccw->flags |= CCW_FLAG_SLI;
5975 	ccw->cda = (__u32)(addr_t) host_access;
5976 
5977 	cqr->buildclk = get_tod_clock();
5978 	cqr->status = DASD_CQR_FILLED;
5979 	/* the command might not be supported, suppress error message */
5980 	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
5981 	rc = dasd_sleep_on_interruptible(cqr);
5982 	if (rc == 0) {
5983 		*data = *host_access;
5984 	} else {
5985 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5986 				"Reading host access data failed with rc=%d\n",
5987 				rc);
5988 		rc = -EOPNOTSUPP;
5989 	}
5990 
5991 	dasd_sfree_request(cqr, cqr->memdev);
5992 	kfree(host_access);
5993 	return rc;
5994 }

5995 /*
5996  * return number of grouped devices
5997  */
5998 static int dasd_eckd_host_access_count(struct dasd_device *device)
5999 {
6000 	struct dasd_psf_query_host_access *access;
6001 	struct dasd_ckd_path_group_entry *entry;
6002 	struct dasd_ckd_host_information *info;
6003 	int count = 0;
6004 	int rc, i;
6005 
6006 	access = kzalloc(sizeof(*access), GFP_NOIO);
6007 	if (!access) {
6008 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6009 				"Could not allocate access buffer");
6010 		return -ENOMEM;
6011 	}
6012 	rc = dasd_eckd_query_host_access(device, access);
6013 	if (rc) {
6014 		kfree(access);
6015 		return rc;
6016 	}
6017 
6018 	info = (struct dasd_ckd_host_information *)
6019 		access->host_access_information;
6020 	for (i = 0; i < info->entry_count; i++) {
6021 		entry = (struct dasd_ckd_path_group_entry *)
6022 			(info->entry + i * info->entry_size);
6023 		if (entry->status_flags & DASD_ECKD_PG_GROUPED)
6024 			count++;
6025 	}
6026 
6027 	kfree(access);
6028 	return count;
6029 }
6030 
6031 /*
6032  * write host access information to a sequential file
6033  */
6034 static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
6035 {
6036 	struct dasd_psf_query_host_access *access;
6037 	struct dasd_ckd_path_group_entry *entry;
6038 	struct dasd_ckd_host_information *info;
6039 	char sysplex[9] = "";
6040 	int rc, i;
6041 
6042 	access = kzalloc(sizeof(*access), GFP_NOIO);
6043 	if (!access) {
6044 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6045 				"Could not allocate access buffer");
6046 		return -ENOMEM;
6047 	}
6048 	rc = dasd_eckd_query_host_access(device, access);
6049 	if (rc) {
6050 		kfree(access);
6051 		return rc;
6052 	}
6053 
6054 	info = (struct dasd_ckd_host_information *)
6055 		access->host_access_information;
6056 	for (i = 0; i < info->entry_count; i++) {
6057 		entry = (struct dasd_ckd_path_group_entry *)
6058 			(info->entry + i * info->entry_size);
6059 		/* PGID */
6060 		seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
6061 		/* FLAGS */
6062 		seq_printf(m, "status_flags %02x\n", entry->status_flags);
6063 		/* SYSPLEX NAME */
6064 		memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
6065 		EBCASC(sysplex, sizeof(sysplex));
6066 		seq_printf(m, "sysplex_name %8s\n", sysplex);
6067 		/* SUPPORTED CYLINDER */
6068 		seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
6069 		/* TIMESTAMP */
6070 		seq_printf(m, "timestamp %lu\n", (unsigned long)
6071 			   entry->timestamp);
6072 	}
6073 	kfree(access);
6074 
6075 	return 0;
6076 }
6077 
6078 /*
6079  * Perform Subsystem Function - CUIR response
6080  */
6081 static int
6082 dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
6083 			    __u32 message_id, __u8 lpum)
6084 {
6085 	struct dasd_psf_cuir_response *psf_cuir;
6086 	int pos = pathmask_to_pos(lpum);
6087 	struct dasd_ccw_req *cqr;
6088 	struct ccw1 *ccw;
6089 	int rc;
6090 
6091 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */,
6092 				   sizeof(struct dasd_psf_cuir_response),
6093 				   device, NULL);
6094 
6095 	if (IS_ERR(cqr)) {
6096 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6097 			   "Could not allocate PSF-CUIR request");
6098 		return PTR_ERR(cqr);
6099 	}
6100 
6101 	psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
6102 	psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
6103 	psf_cuir->cc = response;
6104 	psf_cuir->chpid = device->path[pos].chpid;
6105 	psf_cuir->message_id = message_id;
6106 	psf_cuir->cssid = device->path[pos].cssid;
6107 	psf_cuir->ssid = device->path[pos].ssid;
6108 	ccw = cqr->cpaddr;
6109 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
6110 	ccw->cda = (__u32)(addr_t)psf_cuir;
6111 	ccw->flags = CCW_FLAG_SLI;
6112 	ccw->count = sizeof(struct dasd_psf_cuir_response);
6113 
6114 	cqr->startdev = device;
6115 	cqr->memdev = device;
6116 	cqr->block = NULL;
6117 	cqr->retries = 256;
6118 	cqr->expires = 10 * HZ;
6119 	cqr->buildclk = get_tod_clock();
6120 	cqr->status = DASD_CQR_FILLED;
6121 	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
6122 
6123 	rc = dasd_sleep_on(cqr);
6124 
6125 	dasd_sfree_request(cqr, cqr->memdev);
6126 	return rc;
6127 }
6128 
6129 /*
6130  * return configuration data that is referenced by record selector
6131  * if a record selector is specified or per default return the
6132  * conf_data pointer for the path specified by lpum
6133  */
6134 static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
6135 						     __u8 lpum,
6136 						     struct dasd_cuir_message *cuir)
6137 {
6138 	struct dasd_conf_data *conf_data;
6139 	int path, pos;
6140 
6141 	if (cuir->record_selector == 0)
6142 		goto out;
6143 	for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
6144 		conf_data = device->path[pos].conf_data;
6145 		if (conf_data->gneq.record_selector ==
6146 		    cuir->record_selector)
6147 			return conf_data;
6148 	}
6149 out:
6150 	return device->path[pathmask_to_pos(lpum)].conf_data;
6151 }
6152 
6153 /*
6154  * This function determines the scope of a reconfiguration request by
6155  * analysing the path and device selection data provided in the CUIR request.
6156  * Returns a path mask containing the CUIR affected paths for the given device.
6157  *
6158  * If the CUIR request does not contain the required information, return the
6159  * path mask of the path the attention message for the CUIR request was
6160  * received on.
6161  */
6162 static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
6163 				struct dasd_cuir_message *cuir)
6164 {
6165 	struct dasd_conf_data *ref_conf_data;
6166 	unsigned long bitmask = 0, mask = 0;
6167 	struct dasd_conf_data *conf_data;
6168 	unsigned int pos, path;
6169 	char *ref_gneq, *gneq;
6170 	char *ref_ned, *ned;
6171 	int tbcpm = 0;
6172 
6173 	/* if the CUIR request does not specify the scope, use the path
6174 	   the attention message was presented on */
6175 	if (!cuir->ned_map ||
6176 	    !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
6177 		return lpum;
6178 
6179 	/* get reference conf data */
6180 	ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
6181 	/* reference ned is determined by ned_map field */
6182 	pos = 8 - ffs(cuir->ned_map);
6183 	ref_ned = (char *)&ref_conf_data->neds[pos];
6184 	ref_gneq = (char *)&ref_conf_data->gneq;
6185 	/* transfer 24 bit neq_map to mask */
6186 	mask = cuir->neq_map[2];
6187 	mask |= cuir->neq_map[1] << 8;
6188 	mask |= cuir->neq_map[0] << 16;
6189 
6190 	for (path = 0; path < 8; path++) {
6191 		/* initialise data per path */
6192 		bitmask = mask;
6193 		conf_data = device->path[path].conf_data;
6194 		pos = 8 - ffs(cuir->ned_map);
6195 		ned = (char *) &conf_data->neds[pos];
6196 		/* compare reference ned and per path ned */
6197 		if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
6198 			continue;
6199 		gneq = (char *)&conf_data->gneq;
6200 		/* compare reference gneq and per_path gneq under the
6201 		   24 bit mask, where mask bit 0 equals gneq byte 31
6202 		   and mask bit 23 equals gneq byte 8 */
6203 		while (bitmask) {
6204 			pos = ffs(bitmask) - 1;
6205 			if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
6206 			    != 0)
6207 				break;
6208 			clear_bit(pos, &bitmask);
6209 		}
6210 		if (bitmask)
6211 			continue;
6212 		/* device and path match the reference values
6213 		   add path to CUIR scope */
6214 		tbcpm |= 0x80 >> path;
6215 	}
6216 	return tbcpm;
6217 }
6218 
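/*
 * Tell the user which paths a CUIR action has taken offline or
 * brought back online.
 */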
6219 static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
6220 				       unsigned long paths, int action)
6221 {
6222 	int pos;
6223 
6224 	while (paths) {
6225 		/* get position of bit in mask */
6226 		pos = 8 - ffs(paths);
6227 		/* get channel path descriptor from this position */
6228 		if (action == CUIR_QUIESCE)
6229 			pr_warn("Service on the storage server caused path %x.%02x to go offline\n",
6230 				device->path[pos].cssid,
6231 				device->path[pos].chpid);
6232 		else if (action == CUIR_RESUME)
6233 			pr_info("Path %x.%02x is back online after service on the storage server\n",
6234 				device->path[pos].cssid,
6235 				device->path[pos].chpid);
6236 		clear_bit(7 - pos, &paths);
6237 	}
6238 }
6239 
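/*
 * Remove all CUIR affected paths from the operational path mask of
 * the device; returns -EINVAL if this would remove the last path.
 */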
6240 static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
6241 				      struct dasd_cuir_message *cuir)
6242 {
6243 	unsigned long tbcpm;
6244 
6245 	tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
6246 	/* nothing to do if path is not in use */
6247 	if (!(dasd_path_get_opm(device) & tbcpm))
6248 		return 0;
6249 	if (!(dasd_path_get_opm(device) & ~tbcpm)) {
6250 		/* no path would be left if the CUIR action is taken,
6251 		   return an error */
6252 		return -EINVAL;
6253 	}
6254 	/* remove the affected paths from the operational path mask */
6255 	dasd_path_remove_opm(device, tbcpm);
6256 	dasd_path_add_cuirpm(device, tbcpm);
6257 	return tbcpm;
6258 }
6259 
6260 /*
6261  * walk through all devices and build a path mask to quiesce them;
6262  * return an error if the last path to a device would be removed
6263  *
6264  * if an error occurs after only part of the devices have been
6265  * quiesced, no onlining is necessary; the storage server will
6266  * notify the devices that were already set offline again
6267  */
6268 static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
6269 				  struct dasd_cuir_message *cuir)
6270 {
6271 	struct dasd_eckd_private *private = device->private;
6272 	struct alias_pav_group *pavgroup, *tempgroup;
6273 	struct dasd_device *dev, *n;
6274 	unsigned long paths = 0;
6275 	unsigned long flags;
6276 	int tbcpm;
6277 
6278 	/* active devices */
6279 	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
6280 				 alias_list) {
6281 		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6282 		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6283 		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
6284 		if (tbcpm < 0)
6285 			goto out_err;
6286 		paths |= tbcpm;
6287 	}
6288 	/* inactive devices */
6289 	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
6290 				 alias_list) {
6291 		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6292 		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6293 		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
6294 		if (tbcpm < 0)
6295 			goto out_err;
6296 		paths |= tbcpm;
6297 	}
6298 	/* devices in PAV groups */
6299 	list_for_each_entry_safe(pavgroup, tempgroup,
6300 				 &private->lcu->grouplist, group) {
6301 		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
6302 					 alias_list) {
6303 			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6304 			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6305 			spin_unlock_irqrestore(
6306 				get_ccwdev_lock(dev->cdev), flags);
6307 			if (tbcpm < 0)
6308 				goto out_err;
6309 			paths |= tbcpm;
6310 		}
6311 		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
6312 					 alias_list) {
6313 			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6314 			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6315 			spin_unlock_irqrestore(
6316 				get_ccwdev_lock(dev->cdev), flags);
6317 			if (tbcpm < 0)
6318 				goto out_err;
6319 			paths |= tbcpm;
6320 		}
6321 	}
6322 	/* notify user about all paths affected by CUIR action */
6323 	dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
6324 	return 0;
6325 out_err:
6326 	return tbcpm;
6327 }
6328 
6329 static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
6330 				 struct dasd_cuir_message *cuir)
6331 {
6332 	struct dasd_eckd_private *private = device->private;
6333 	struct alias_pav_group *pavgroup, *tempgroup;
6334 	struct dasd_device *dev, *n;
6335 	unsigned long paths = 0;
6336 	int tbcpm;
6337 
6338 	/*
6339 	 * the path may have been added through a generic path event before;
6340 	 * only trigger path verification if the path is not already in use
6341 	 */
6342 	list_for_each_entry_safe(dev, n,
6343 				 &private->lcu->active_devices,
6344 				 alias_list) {
6345 		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6346 		paths |= tbcpm;
6347 		if (!(dasd_path_get_opm(dev) & tbcpm)) {
6348 			dasd_path_add_tbvpm(dev, tbcpm);
6349 			dasd_schedule_device_bh(dev);
6350 		}
6351 	}
6352 	list_for_each_entry_safe(dev, n,
6353 				 &private->lcu->inactive_devices,
6354 				 alias_list) {
6355 		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6356 		paths |= tbcpm;
6357 		if (!(dasd_path_get_opm(dev) & tbcpm)) {
6358 			dasd_path_add_tbvpm(dev, tbcpm);
6359 			dasd_schedule_device_bh(dev);
6360 		}
6361 	}
6362 	/* devices in PAV groups */
6363 	list_for_each_entry_safe(pavgroup, tempgroup,
6364 				 &private->lcu->grouplist,
6365 				 group) {
6366 		list_for_each_entry_safe(dev, n,
6367 					 &pavgroup->baselist,
6368 					 alias_list) {
6369 			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6370 			paths |= tbcpm;
6371 			if (!(dasd_path_get_opm(dev) & tbcpm)) {
6372 				dasd_path_add_tbvpm(dev, tbcpm);
6373 				dasd_schedule_device_bh(dev);
6374 			}
6375 		}
6376 		list_for_each_entry_safe(dev, n,
6377 					 &pavgroup->aliaslist,
6378 					 alias_list) {
6379 			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6380 			paths |= tbcpm;
6381 			if (!(dasd_path_get_opm(dev) & tbcpm)) {
6382 				dasd_path_add_tbvpm(dev, tbcpm);
6383 				dasd_schedule_device_bh(dev);
6384 			}
6385 		}
6386 	}
6387 	/* notify user about all paths affected by CUIR action */
6388 	dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
6389 	return 0;
6390 }
6391 
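/*
 * Handle a CUIR quiesce or resume request and report the result back
 * to the storage server via a PSF-CUIR response.
 */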
6392 static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
6393 				 __u8 lpum)
6394 {
6395 	struct dasd_cuir_message *cuir = messages;
6396 	int response;
6397 
6398 	DBF_DEV_EVENT(DBF_WARNING, device,
6399 		      "CUIR request: %016llx %016llx %016llx %08x",
6400 		      ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
6401 		      ((u32 *)cuir)[3]);
6402 
6403 	if (cuir->code == CUIR_QUIESCE) {
6404 		/* quiesce */
6405 		if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
6406 			response = PSF_CUIR_LAST_PATH;
6407 		else
6408 			response = PSF_CUIR_COMPLETED;
6409 	} else if (cuir->code == CUIR_RESUME) {
6410 		/* resume */
6411 		dasd_eckd_cuir_resume(device, lpum, cuir);
6412 		response = PSF_CUIR_COMPLETED;
6413 	} else
6414 		response = PSF_CUIR_NOT_SUPPORTED;
6415 
6416 	dasd_eckd_psf_cuir_response(device, response,
6417 				    cuir->message_id, lpum);
6418 	DBF_DEV_EVENT(DBF_WARNING, device,
6419 		      "CUIR response: %d on message ID %08x", response,
6420 		      cuir->message_id);
6421 	/* to make sure there is no attention left, schedule work again */
6422 	device->discipline->check_attention(device, lpum);
6423 }
6424 
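/*
 * Resume all devices of the LCU that have been stopped because the
 * extent pool ran out of space.
 */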
6425 static void dasd_eckd_oos_resume(struct dasd_device *device)
6426 {
6427 	struct dasd_eckd_private *private = device->private;
6428 	struct alias_pav_group *pavgroup, *tempgroup;
6429 	struct dasd_device *dev, *n;
6430 	unsigned long flags;
6431 
6432 	spin_lock_irqsave(&private->lcu->lock, flags);
6433 	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
6434 				 alias_list) {
6435 		if (dev->stopped & DASD_STOPPED_NOSPC)
6436 			dasd_generic_space_avail(dev);
6437 	}
6438 	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
6439 				 alias_list) {
6440 		if (dev->stopped & DASD_STOPPED_NOSPC)
6441 			dasd_generic_space_avail(dev);
6442 	}
6443 	/* devices in PAV groups */
6444 	list_for_each_entry_safe(pavgroup, tempgroup,
6445 				 &private->lcu->grouplist,
6446 				 group) {
6447 		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
6448 					 alias_list) {
6449 			if (dev->stopped & DASD_STOPPED_NOSPC)
6450 				dasd_generic_space_avail(dev);
6451 		}
6452 		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
6453 					 alias_list) {
6454 			if (dev->stopped & DASD_STOPPED_NOSPC)
6455 				dasd_generic_space_avail(dev);
6456 		}
6457 	}
6458 	spin_unlock_irqrestore(&private->lcu->lock, flags);
6459 }
6460 
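/*
 * Handle an out-of-space attention message and refresh the extent
 * pool information afterwards.
 */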
6461 static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
6462 				 __u8 lpum)
6463 {
6464 	struct dasd_oos_message *oos = messages;
6465 
6466 	switch (oos->code) {
6467 	case REPO_WARN:
6468 	case POOL_WARN:
6469 		dev_warn(&device->cdev->dev,
6470 			 "Extent pool usage has reached a critical value\n");
6471 		dasd_eckd_oos_resume(device);
6472 		break;
6473 	case REPO_EXHAUST:
6474 	case POOL_EXHAUST:
6475 		dev_warn(&device->cdev->dev,
6476 			 "Extent pool is exhausted\n");
6477 		break;
6478 	case REPO_RELIEVE:
6479 	case POOL_RELIEVE:
6480 		dev_info(&device->cdev->dev,
6481 			 "Extent pool physical space constraint has been relieved\n");
6482 		break;
6483 	}
6484 
6485 	/* In any case, update related data */
6486 	dasd_eckd_read_ext_pool_info(device);
6487 
6488 	/* to make sure there is no attention left, schedule work again */
6489 	device->discipline->check_attention(device, lpum);
6490 }
6491 
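/*
 * Worker function: read the attention message buffer and dispatch
 * CUIR and out-of-space notifications.
 */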
6492 static void dasd_eckd_check_attention_work(struct work_struct *work)
6493 {
6494 	struct check_attention_work_data *data;
6495 	struct dasd_rssd_messages *messages;
6496 	struct dasd_device *device;
6497 	int rc;
6498 
6499 	data = container_of(work, struct check_attention_work_data, worker);
6500 	device = data->device;
6501 	messages = kzalloc(sizeof(*messages), GFP_KERNEL);
6502 	if (!messages) {
6503 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6504 			      "Could not allocate attention message buffer");
6505 		goto out;
6506 	}
6507 	rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
6508 	if (rc)
6509 		goto out;
6510 
6511 	if (messages->length == ATTENTION_LENGTH_CUIR &&
6512 	    messages->format == ATTENTION_FORMAT_CUIR)
6513 		dasd_eckd_handle_cuir(device, messages, data->lpum);
6514 	if (messages->length == ATTENTION_LENGTH_OOS &&
6515 	    messages->format == ATTENTION_FORMAT_OOS)
6516 		dasd_eckd_handle_oos(device, messages, data->lpum);
6517 
6518 out:
6519 	dasd_put_device(device);
6520 	kfree(messages);
6521 	kfree(data);
6522 }
6523 
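/*
 * Defer the attention message handling to a worker; reading the
 * message buffer requires blocking I/O.
 */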
6524 static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
6525 {
6526 	struct check_attention_work_data *data;
6527 
6528 	data = kzalloc(sizeof(*data), GFP_ATOMIC);
6529 	if (!data)
6530 		return -ENOMEM;
6531 	INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
6532 	dasd_get_device(device);
6533 	data->device = device;
6534 	data->lpum = lpum;
6535 	schedule_work(&data->worker);
6536 	return 0;
6537 }
6538 
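/*
 * Disable High Performance FICON on a single path; returns 1 if the
 * path was taken out of the operational path mask, 0 if it is the
 * only remaining path.
 */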
6539 static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
6540 {
6541 	if (~lpum & dasd_path_get_opm(device)) {
6542 		dasd_path_add_nohpfpm(device, lpum);
6543 		dasd_path_remove_opm(device, lpum);
6544 		dev_err(&device->cdev->dev,
6545 			"Channel path %02X lost HPF functionality and is disabled\n",
6546 			lpum);
6547 		return 1;
6548 	}
6549 	return 0;
6550 }
6551 
6552 static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
6553 {
6554 	struct dasd_eckd_private *private = device->private;
6555 
6556 	dev_err(&device->cdev->dev,
6557 		"High Performance FICON disabled\n");
6558 	private->fcx_max_data = 0;
6559 }
6560 
6561 static int dasd_eckd_hpf_enabled(struct dasd_device *device)
6562 {
6563 	struct dasd_eckd_private *private = device->private;
6564 
6565 	return private->fcx_max_data ? 1 : 0;
6566 }
6567 
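/*
 * React to an HPF error by disabling HPF for the affected path or
 * for the whole device, then stop the device and requeue all
 * outstanding requests.
 */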
6568 static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
6569 				       struct irb *irb)
6570 {
6571 	struct dasd_eckd_private *private = device->private;
6572 
6573 	if (!private->fcx_max_data) {
6574 		/* sanity check - without HPF the error makes no sense */
6575 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6576 			      "Trying to disable HPF for a non HPF device");
6577 		return;
6578 	}
6579 	if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
6580 		dasd_eckd_disable_hpf_device(device);
6581 	} else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
6582 		if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
6583 			return;
6584 		dasd_eckd_disable_hpf_device(device);
6585 		dasd_path_set_tbvpm(device,
6586 				  dasd_path_get_hpfpm(device));
6587 	}
6588 	/*
6589 	 * prevent any new I/O from being started on the device and schedule
6590 	 * a requeue of existing requests
6591 	 */
6592 	dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
6593 	dasd_schedule_requeue(device);
6594 }
6595 
6596 /*
6597  * Initialize the block layer request queue.
6598  */
6599 static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
6600 {
6601 	unsigned int logical_block_size = block->bp_block;
6602 	struct request_queue *q = block->request_queue;
6603 	struct dasd_device *device = block->base;
6604 	int max;
6605 
6606 	if (device->features & DASD_FEATURE_USERAW) {
6607 		/*
6608 		 * the max_blocks value for raw_track access is 256;
6609 		 * it is higher than the native ECKD value because we
6610 		 * only need one CCW per track,
6611 		 * so the max_hw_sectors are
6612 		 * 2048 x 512B = 1024kB = 16 tracks
6613 		 */
6614 		max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
6615 	} else {
6616 		max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
6617 	}
6618 	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
6619 	q->limits.max_dev_sectors = max;
6620 	blk_queue_logical_block_size(q, logical_block_size);
6621 	blk_queue_max_hw_sectors(q, max);
6622 	blk_queue_max_segments(q, USHRT_MAX);
6623 	/* With page sized segments each segment can be translated into one idaw/tidaw */
6624 	blk_queue_max_segment_size(q, PAGE_SIZE);
6625 	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
6626 }
6627 
6628 static struct ccw_driver dasd_eckd_driver = {
6629 	.driver = {
6630 		.name	= "dasd-eckd",
6631 		.owner	= THIS_MODULE,
6632 	},
6633 	.ids	     = dasd_eckd_ids,
6634 	.probe	     = dasd_eckd_probe,
6635 	.remove      = dasd_generic_remove,
6636 	.set_offline = dasd_generic_set_offline,
6637 	.set_online  = dasd_eckd_set_online,
6638 	.notify      = dasd_generic_notify,
6639 	.path_event  = dasd_generic_path_event,
6640 	.shutdown    = dasd_generic_shutdown,
6641 	.uc_handler  = dasd_generic_uc_handler,
6642 	.int_class   = IRQIO_DAS,
6643 };
6644 
6645 static struct dasd_discipline dasd_eckd_discipline = {
6646 	.owner = THIS_MODULE,
6647 	.name = "ECKD",
6648 	.ebcname = "ECKD",
6649 	.check_device = dasd_eckd_check_characteristics,
6650 	.uncheck_device = dasd_eckd_uncheck_device,
6651 	.do_analysis = dasd_eckd_do_analysis,
6652 	.pe_handler = dasd_eckd_pe_handler,
6653 	.basic_to_ready = dasd_eckd_basic_to_ready,
6654 	.online_to_ready = dasd_eckd_online_to_ready,
6655 	.basic_to_known = dasd_eckd_basic_to_known,
6656 	.setup_blk_queue = dasd_eckd_setup_blk_queue,
6657 	.fill_geometry = dasd_eckd_fill_geometry,
6658 	.start_IO = dasd_start_IO,
6659 	.term_IO = dasd_term_IO,
6660 	.handle_terminated_request = dasd_eckd_handle_terminated_request,
6661 	.format_device = dasd_eckd_format_device,
6662 	.check_device_format = dasd_eckd_check_device_format,
6663 	.erp_action = dasd_eckd_erp_action,
6664 	.erp_postaction = dasd_eckd_erp_postaction,
6665 	.check_for_device_change = dasd_eckd_check_for_device_change,
6666 	.build_cp = dasd_eckd_build_alias_cp,
6667 	.free_cp = dasd_eckd_free_alias_cp,
6668 	.dump_sense = dasd_eckd_dump_sense,
6669 	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
6670 	.fill_info = dasd_eckd_fill_info,
6671 	.ioctl = dasd_eckd_ioctl,
6672 	.reload = dasd_eckd_reload_device,
6673 	.get_uid = dasd_eckd_get_uid,
6674 	.kick_validate = dasd_eckd_kick_validate_server,
6675 	.check_attention = dasd_eckd_check_attention,
6676 	.host_access_count = dasd_eckd_host_access_count,
6677 	.hosts_print = dasd_hosts_print,
6678 	.handle_hpf_error = dasd_eckd_handle_hpf_error,
6679 	.disable_hpf = dasd_eckd_disable_hpf_device,
6680 	.hpf_enabled = dasd_eckd_hpf_enabled,
6681 	.reset_path = dasd_eckd_reset_path,
6682 	.is_ese = dasd_eckd_is_ese,
6683 	.space_allocated = dasd_eckd_space_allocated,
6684 	.space_configured = dasd_eckd_space_configured,
6685 	.logical_capacity = dasd_eckd_logical_capacity,
6686 	.release_space = dasd_eckd_release_space,
6687 	.ext_pool_id = dasd_eckd_ext_pool_id,
6688 	.ext_size = dasd_eckd_ext_size,
6689 	.ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
6690 	.ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
6691 	.ext_pool_oos = dasd_eckd_ext_pool_oos,
6692 	.ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
6693 	.ese_format = dasd_eckd_ese_format,
6694 	.ese_read = dasd_eckd_ese_read,
6695 };
6696 
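/*
 * Allocate the statically used request buffers and the raw padding
 * page, then register the CCW driver.
 */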
6697 static int __init
6698 dasd_eckd_init(void)
6699 {
6700 	int ret;
6701 
6702 	ASCEBC(dasd_eckd_discipline.ebcname, 4);
6703 	dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
6704 				   GFP_KERNEL | GFP_DMA);
6705 	if (!dasd_reserve_req)
6706 		return -ENOMEM;
6707 	dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
6708 				    GFP_KERNEL | GFP_DMA);
6709 	if (!dasd_vol_info_req) {
6710 		kfree(dasd_reserve_req);
		return -ENOMEM;
	}
6711 	pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
6712 				    GFP_KERNEL | GFP_DMA);
6713 	if (!pe_handler_worker) {
6714 		kfree(dasd_reserve_req);
6715 		kfree(dasd_vol_info_req);
6716 		return -ENOMEM;
6717 	}
6718 	rawpadpage = (void *)__get_free_page(GFP_KERNEL);
6719 	if (!rawpadpage) {
6720 		kfree(pe_handler_worker);
6721 		kfree(dasd_reserve_req);
6722 		kfree(dasd_vol_info_req);
6723 		return -ENOMEM;
6724 	}
6725 	ret = ccw_driver_register(&dasd_eckd_driver);
6726 	if (!ret)
6727 		wait_for_device_probe();
6728 	else {
6729 		kfree(pe_handler_worker);
6730 		kfree(dasd_reserve_req);
6731 		kfree(dasd_vol_info_req);
6732 		free_page((unsigned long)rawpadpage);
6733 	}
6734 	return ret;
6735 }
6736 
6737 static void __exit
6738 dasd_eckd_cleanup(void)
6739 {
6740 	ccw_driver_unregister(&dasd_eckd_driver);
6741 	kfree(pe_handler_worker);
6742 	kfree(dasd_reserve_req);
6743 	kfree(dasd_reserve_req);
	kfree(dasd_vol_info_req);
6744 }
6745 
6746 module_init(dasd_eckd_init);
6747 module_exit(dasd_eckd_cleanup);
6748