xref: /linux/drivers/s390/cio/device_status.c (revision 0d456bad36d42d16022be045c8a53ddbb59ee478)
/*
 *    Copyright IBM Corp. 2002
 *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Status accumulation and basic sense functions.
 */

#include <linux/module.h>
#include <linux/init.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"

/*
 * Check for any kind of channel or interface control check but don't
 * issue the message for the console device
 */
static void
ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
{
	char dbf_text[15];

	if (!scsw_is_valid_cstat(&irb->scsw) ||
	    !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK |
	      SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)))
		return;
	CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check received"
		      " ... device %04x on subchannel 0.%x.%04x, dev_stat "
		      ": %02X sch_stat : %02X\n",
		      cdev->private->dev_id.devno, cdev->private->schid.ssid,
		      cdev->private->schid.sch_no,
		      scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
	sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
	CIO_TRACE_EVENT(0, dbf_text);
	CIO_HEX_EVENT(0, irb, sizeof(struct irb));
}

/*
 * Some paths have become non-operational (pno bit in scsw is set).
 */
static void
ccw_device_path_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
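	/*
	 * Refresh the schib so that pmcw.pnom reflects the current
	 * path-not-operational mask; if the update fails, just request
	 * path verification.
	 */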
	if (cio_update_schib(sch))
		goto doverify;

	CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
		      "not operational \n", __func__,
		      sch->schid.ssid, sch->schid.sch_no,
		      sch->schib.pmcw.pnom);

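	/* Remove the non-operational paths from the usable path mask. */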
	sch->lpm &= ~sch->schib.pmcw.pnom;
doverify:
	cdev->private->flags.doverify = 1;
}

/*
 * Copy valid bits from the extended control word to device irb.
 */
static void
ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
{
	/*
	 * Copy extended control bit if it is valid... yes there
	 * are conditions that have to be met for the extended control
	 * bit to have meaning. Sick.
	 */
	cdev->private->irb.scsw.cmd.ectl = 0;
	if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
	    !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
		cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
	/* Check if extended control word is valid. */
	if (!cdev->private->irb.scsw.cmd.ectl)
		return;
	/* Copy concurrent sense / model dependent information. */
	memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw));
}

/*
 * Check if extended status word is valid.
 */
static int
ccw_device_accumulate_esw_valid(struct irb *irb)
{
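	/*
	 * Without the esw-format bit, an interrupt that signals nothing
	 * but status pending carries no valid esw.
	 */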
	if (!irb->scsw.cmd.eswf &&
	    (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND))
		return 0;
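	/*
	 * Intermediate status together with status pending only comes
	 * with a meaningful esw if the subchannel is suspended.
	 */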
	if (irb->scsw.cmd.stctl ==
			(SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
	    !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
		return 0;
	return 1;
}

/*
 * Copy valid bits from the extended status word to device irb.
 */
static void
ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
{
	struct irb *cdev_irb;
	struct sublog *cdev_sublog, *sublog;

	if (!ccw_device_accumulate_esw_valid(irb))
		return;

	cdev_irb = &cdev->private->irb;

	/* Copy last path used mask. */
	cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;

	/* Copy subchannel logout information if esw is of format 0. */
	if (irb->scsw.cmd.eswf) {
		cdev_sublog = &cdev_irb->esw.esw0.sublog;
		sublog = &irb->esw.esw0.sublog;
		/* Copy extended status flags. */
		cdev_sublog->esf = sublog->esf;
		/*
		 * Copy fields that have a meaning for channel data check,
		 * channel control check and interface control check.
		 */
		if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK |
				       SCHN_STAT_CHN_CTRL_CHK |
				       SCHN_STAT_INTF_CTRL_CHK)) {
			/* Copy ancillary report bit. */
			cdev_sublog->arep = sublog->arep;
			/* Copy field-validity-flags. */
			cdev_sublog->fvf = sublog->fvf;
			/* Copy storage access code. */
			cdev_sublog->sacc = sublog->sacc;
			/* Copy termination code. */
			cdev_sublog->termc = sublog->termc;
			/* Copy sequence code. */
			cdev_sublog->seqc = sublog->seqc;
		}
		/* Copy device status check. */
		cdev_sublog->devsc = sublog->devsc;
		/* Copy secondary error. */
		cdev_sublog->serr = sublog->serr;
		/* Copy i/o-error alert. */
		cdev_sublog->ioerr = sublog->ioerr;
		/* Copy channel path timeout bit. */
		if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK)
			cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
		/* Copy failing storage address validity flag. */
		cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
		if (cdev_irb->esw.esw0.erw.fsavf) {
			/* ... and copy the failing storage address. */
			memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr,
			       sizeof (irb->esw.esw0.faddr));
			/* ... and copy the failing storage address format. */
			cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf;
		}
		/* Copy secondary ccw address validity bit. */
		cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf;
		if (irb->esw.esw0.erw.scavf)
			/* ... and copy the secondary ccw address. */
			cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr;

	}
	/* FIXME: DCTI for format 2? */

	/* Copy authorization bit. */
	cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;
	/* Copy path verification required flag. */
	cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;
	if (irb->esw.esw0.erw.pvrf)
		cdev->private->flags.doverify = 1;
	/* Copy concurrent sense bit. */
	cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;
	if (irb->esw.esw0.erw.cons)
		cdev_irb->esw.esw0.erw.scnt = irb->esw.esw0.erw.scnt;
}

/*
 * Accumulate status from irb to devstat.
 */
void
ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
{
	struct irb *cdev_irb;

	/*
	 * Check if the status pending bit is set in stctl.
	 * If not, the remaining bits have no meaning and we must ignore them.
	 * The esw is not meaningful either...
	 */
	if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
		return;

	/* Check for channel checks and interface control checks. */
	ccw_device_msg_control_check(cdev, irb);

	/* Check for path not operational. */
	if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
		ccw_device_path_notoper(cdev);
	/* No irb accumulation for transport mode irbs. */
	if (scsw_is_tm(&irb->scsw)) {
		memcpy(&cdev->private->irb, irb, sizeof(struct irb));
		return;
	}
	/*
	 * Don't accumulate unsolicited interrupts.
	 */
	if (!scsw_is_solicited(&irb->scsw))
		return;

	cdev_irb = &cdev->private->irb;

	/*
	 * If the clear function has been performed, all formerly pending
	 * status at the subchannel has been cleared and we must not pass
	 * intermediate accumulated status to the device driver.
	 */
	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
		memset(&cdev->private->irb, 0, sizeof(struct irb));

	/* Copy bits which are valid only for the start function. */
	if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
		/* Copy key. */
		cdev_irb->scsw.cmd.key = irb->scsw.cmd.key;
		/* Copy suspend control bit. */
		cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl;
		/* Accumulate deferred condition code. */
		cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc;
		/* Copy ccw format bit. */
		cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt;
		/* Copy prefetch bit. */
		cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch;
		/* Copy initial-status-interruption-control. */
		cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic;
		/* Copy address limit checking control. */
		cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc;
		/* Copy suppress suspend bit. */
		cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi;
	}

	/* Take care of the extended control bit and extended control word. */
	ccw_device_accumulate_ecw(cdev, irb);

	/* Accumulate function control. */
	cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl;
	/* Copy activity control. */
	cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl;
	/* Accumulate status control. */
	cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl;
	/*
	 * Copy ccw address if it is valid. This is a bit simplified
	 * but should be close enough for all practical purposes.
	 */
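	/*
	 * The cpa is considered valid when primary status is present, when
	 * device and subchannel are both still active at intermediate
	 * status time, or when the subchannel is suspended.
	 */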
	if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) ||
	    ((irb->scsw.cmd.stctl ==
	      (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
	     (irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) &&
	     (irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) ||
	    (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
		cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa;
	/* Accumulate device status, but not the device busy flag. */
	cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY;
	/* dstat is not always valid. */
	if (irb->scsw.cmd.stctl &
	    (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS
	     | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS))
		cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat;
	/* Accumulate subchannel status. */
	cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat;
	/* Copy residual count if it is valid. */
	if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
	    (irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN))
	     == 0)
		cdev_irb->scsw.cmd.count = irb->scsw.cmd.count;

	/* Take care of bits in the extended status word. */
	ccw_device_accumulate_esw(cdev, irb);

	/*
	 * Check whether we must issue a SENSE CCW ourselves if there is no
	 * concurrent sense facility installed for the subchannel.
	 * No sense is required if no delayed sense is pending
	 * and we did not get a unit check without sense information.
	 *
	 * Note: We should check for ioinfo[irq]->flags.consns but VM
	 *	 violates the ESA/390 architecture and doesn't present an
	 *	 operand exception for virtual devices without concurrent
	 *	 sense facility available/supported when enabling the
	 *	 concurrent sense facility.
	 */
	if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
	    !(cdev_irb->esw.esw0.erw.cons))
		cdev->private->flags.dosense = 1;
}

/*
 * Do a basic sense.
 */
int
ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
{
	struct subchannel *sch;
	struct ccw1 *sense_ccw;
	int rc;

	sch = to_subchannel(cdev->dev.parent);

	/* A sense is required, can we do it now? */
	if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT))
		/*
		 * We received a Unit Check but we have no final
		 * status yet, therefore we must delay the SENSE
		 * processing. We must not report this intermediate
		 * status to the device interrupt handler.
		 */
		return -EBUSY;

	/*
	 * We have ending status but no sense information. Do a basic sense.
	 */
	sense_ccw = &to_io_private(sch)->sense_ccw;
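	/*
	 * The channel stores the sense bytes into the ecw area of the
	 * per-device irb, so cda has to hold the physical address of that
	 * buffer.
	 */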
	sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
	sense_ccw->cda = (__u32) __pa(cdev->private->irb.ecw);
	sense_ccw->count = SENSE_MAX_COUNT;
	sense_ccw->flags = CCW_FLAG_SLI;

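	/* Start the sense channel program; lpm 0xff allows any channel path. */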
	rc = cio_start(sch, sense_ccw, 0xff);
	if (rc == -ENODEV || rc == -EACCES)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return rc;
}

/*
 * Add information from basic sense to devstat.
 */
void
ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
{
	/*
	 * Check if the status pending bit is set in stctl.
	 * If not, the remaining bits have no meaning and we must ignore them.
	 * The esw is not meaningful either...
	 */
	if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
		return;

	/* Check for channel checks and interface control checks. */
	ccw_device_msg_control_check(cdev, irb);

	/* Check for path not operational. */
	if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
		ccw_device_path_notoper(cdev);

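	/*
	 * If the basic sense completed with channel end and without unit
	 * check, the sense data has been stored; flag it via erw.cons and
	 * clear the pending-sense flag.
	 */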
	if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
	    (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
		cdev->private->irb.esw.esw0.erw.cons = 1;
		cdev->private->flags.dosense = 0;
	}
	/* Check if path verification is required. */
	if (ccw_device_accumulate_esw_valid(irb) &&
	    irb->esw.esw0.erw.pvrf)
		cdev->private->flags.doverify = 1;
}

/*
 * This function accumulates the status into the private devstat and
 * starts a basic sense if one is needed.
 */
int
ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
{
	ccw_device_accumulate_irb(cdev, irb);
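	/* No final status yet? Then the sense decision has to wait. */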
	if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
		return -EBUSY;
	/* Check for basic sense. */
	if (cdev->private->flags.dosense &&
	    !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
		cdev->private->irb.esw.esw0.erw.cons = 1;
		cdev->private->flags.dosense = 0;
		return 0;
	}
	if (cdev->private->flags.dosense) {
		ccw_device_do_sense(cdev, irb);
		return -EBUSY;
	}
	return 0;
}