// SPDX-License-Identifier: GPL-2.0
/*
 *  CCW device PGID and path verification I/O handling.
 *
 *    Copyright IBM Corp. 2002, 2009
 *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		 Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "cio.h"
#include "cio_debug.h"
#include "device.h"
#include "io_sch.h"

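/*
 * All requests issued from this file share the same budget: up to
 * PGID_RETRIES retries per request, with a PGID_TIMEOUT limit on each
 * started I/O.
 */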
#define PGID_RETRIES	256
#define PGID_TIMEOUT	(10 * HZ)

static void verify_start(struct ccw_device *cdev);

/*
 * Process path verification data and report result.
 */
static void verify_done(struct ccw_device *cdev, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	int mpath = cdev->private->flags.mpath;
	int pgroup = cdev->private->flags.pgroup;

	if (rc)
		goto out;
	/* Ensure consistent multipathing state at device and channel. */
	if (sch->config.mp != mpath) {
		sch->config.mp = mpath;
		rc = cio_commit_config(sch);
	}
out:
	CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
			 "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
			 sch->vpm);
	ccw_device_verify_done(cdev, rc);
}

/*
 * Create channel program to perform a NOOP.
 */
static void nop_build_cp(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;

	cp->cmd_code	= CCW_CMD_NOOP;
	cp->cda		= 0;
	cp->count	= 0;
	cp->flags	= CCW_FLAG_SLI;
	req->cp		= cp;
}

/*
 * Perform NOOP on a single path.
 */
static void nop_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

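	/*
	 * lpm_adjust() returns the next path-mask bit, at or to the right
	 * of req->lpm, whose path is installed (PAM), operational (OPM)
	 * and has not previously timed out.
	 */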
	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
			      ~cdev->private->path_noirq_mask);
	if (!req->lpm)
		goto out_nopath;
	nop_build_cp(cdev);
	ccw_request_start(cdev);
	return;

out_nopath:
	verify_done(cdev, sch->vpm ? 0 : -EACCES);
}

/*
 * Adjust NOOP I/O status.
 */
static enum io_status nop_filter(struct ccw_device *cdev, void *data,
				 struct irb *irb, enum io_status status)
{
	/* Only subchannel status might indicate a path error. */
	if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
		return IO_DONE;
	return status;
}

/*
 * Process NOOP request result for a single path.
 */
static void nop_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		sch->vpm |= req->lpm;
		break;
	case -ETIME:
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	default:
		goto err;
	}
	/* Continue on the next path. */
	req->lpm >>= 1;
	nop_do(cdev);
	return;

err:
	verify_done(cdev, rc);
}

/*
 * Create channel program to perform SET PGID on a single path.
 */
static void spid_build_cp(struct ccw_device *cdev, u8 fn)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;
	int i = pathmask_to_pos(req->lpm);
	struct pgid *pgid = &cdev->private->dma_area->pgid[i];

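	/* The SPID function code occupies the first byte of the PGID. */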
	pgid->inf.fc	= fn;
	cp->cmd_code	= CCW_CMD_SET_PGID;
	cp->cda		= (u32)virt_to_phys(pgid);
	cp->count	= sizeof(*pgid);
	cp->flags	= CCW_FLAG_SLI;
	req->cp		= cp;
}

static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
{
	if (rc) {
		/* We don't know the path groups' state. Abort. */
		verify_done(cdev, rc);
		return;
	}
	/*
	 * Path groups have been reset. Restart path verification but
	 * leave paths in path_noirq_mask out.
	 */
	cdev->private->flags.pgid_unknown = 0;
	verify_start(cdev);
}

/*
 * Reset pathgroups and restart path verification, leaving unusable paths out.
 */
static void pgid_wipeout_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
		      id->ssid, id->devno, cdev->private->pgid_valid_mask,
		      cdev->private->path_noirq_mask);

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout	= PGID_TIMEOUT;
	req->maxretries	= PGID_RETRIES;
	req->lpm	= sch->schib.pmcw.pam;
	req->callback	= pgid_wipeout_callback;
	fn = SPID_FUNC_DISBAND;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
}

/*
 * Perform establish/resign SET PGID on a single path.
 */
static void spid_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	/* Use the next available path not already in the correct state. */
	req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
	if (!req->lpm)
		goto out_nopath;
	/* Channel program setup. */
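	/*
	 * Operational paths join the path group; paths that are installed
	 * but not operational resign from it.
	 */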
	if (req->lpm & sch->opm)
		fn = SPID_FUNC_ESTABLISH;
	else
		fn = SPID_FUNC_RESIGN;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
	return;

out_nopath:
	if (cdev->private->flags.pgid_unknown) {
		/* At least one SPID could be partially done. */
		pgid_wipeout_start(cdev);
		return;
	}
	verify_done(cdev, sch->vpm ? 0 : -EACCES);
}

/*
 * Process SET PGID request result for a single path.
 */
static void spid_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		sch->vpm |= req->lpm & sch->opm;
		break;
	case -ETIME:
		cdev->private->flags.pgid_unknown = 1;
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	case -EOPNOTSUPP:
		if (cdev->private->flags.mpath) {
			/* Try without multipathing. */
			cdev->private->flags.mpath = 0;
			goto out_restart;
		}
		/* Try without pathgrouping. */
		cdev->private->flags.pgroup = 0;
		goto out_restart;
	default:
		goto err;
	}
	req->lpm >>= 1;
	spid_do(cdev);
	return;

out_restart:
	verify_start(cdev);
	return;
err:
	verify_done(cdev, rc);
}

static void spid_start(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout	= PGID_TIMEOUT;
	req->maxretries	= PGID_RETRIES;
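	/* Start at the leftmost path; the callback shifts req->lpm right. */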
	req->lpm	= 0x80;
	req->singlepath	= 1;
	req->callback	= spid_callback;
	spid_do(cdev);
}

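/*
 * Byte 0 of a sensed PGID carries per-path state information, so the
 * reset check and the comparison below start at byte 1.
 */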
static int pgid_is_reset(struct pgid *p)
{
	char *c;

	for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
		if (*c != 0)
			return 0;
	}
	return 1;
}

static int pgid_cmp(struct pgid *p1, struct pgid *p2)
{
	return memcmp((char *) p1 + 1, (char *) p2 + 1,
		      sizeof(struct pgid) - 1);
}

/*
 * Determine pathgroup state from PGID data.
 */
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
			 int *mismatch, u8 *reserved, u8 *reset)
{
	struct pgid *pgid = &cdev->private->dma_area->pgid[0];
	struct pgid *first = NULL;
	int lpm;
	int i;

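	/*
	 * Walk all eight possible channel paths; bit 0x80 of a path mask
	 * corresponds to the PGID slot at index 0.
	 */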
	*mismatch = 0;
	*reserved = 0;
	*reset = 0;
	for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
		if ((cdev->private->pgid_valid_mask & lpm) == 0)
			continue;
		if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
			*reserved |= lpm;
		if (pgid_is_reset(pgid)) {
			*reset |= lpm;
			continue;
		}
		if (!first) {
			first = pgid;
			continue;
		}
		if (pgid_cmp(pgid, first) != 0)
			*mismatch = 1;
	}
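	/* No usable PGID sensed on any path: fall back to the css-wide PGID. */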
	if (!first)
		first = &channel_subsystems[0]->global_pgid;
	*p = first;
}

static u8 pgid_to_donepm(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct pgid *pgid;
	int i;
	int lpm;
	u8 donepm = 0;

	/* Set bits for paths which are already in the target state. */
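	/*
	 * Target state: operational paths should be grouped, all other
	 * installed paths ungrouped (resigned), and state3 must match the
	 * requested single-/multi-path mode.
	 */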
	for (i = 0; i < 8; i++) {
		lpm = 0x80 >> i;
		if ((cdev->private->pgid_valid_mask & lpm) == 0)
			continue;
		pgid = &cdev->private->dma_area->pgid[i];
		if (sch->opm & lpm) {
			if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
				continue;
		} else {
			if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
				continue;
		}
		if (cdev->private->flags.mpath) {
			if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
				continue;
		} else {
			if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
				continue;
		}
		donepm |= lpm;
	}

	return donepm;
}

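/* Seed all eight PGID slots with the PGID that SET PGID will establish. */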
static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
{
	int i;

	for (i = 0; i < 8; i++)
		memcpy(&cdev->private->dma_area->pgid[i], pgid,
		       sizeof(struct pgid));
}

/*
 * Process SENSE PGID data and report result.
 */
static void snid_done(struct ccw_device *cdev, int rc)
{
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct pgid *pgid;
	int mismatch = 0;
	u8 reserved = 0;
	u8 reset = 0;
	u8 donepm;

	if (rc)
		goto out;
	pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
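	/*
	 * If every valid path is reserved by another system the device is
	 * in use elsewhere; inconsistent PGIDs across paths rule out path
	 * grouping altogether.
	 */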
	if (reserved == cdev->private->pgid_valid_mask)
		rc = -EUSERS;
	else if (mismatch)
		rc = -EOPNOTSUPP;
	else {
		donepm = pgid_to_donepm(cdev);
		sch->vpm = donepm & sch->opm;
		cdev->private->pgid_reset_mask |= reset;
		cdev->private->pgid_todo_mask &=
			~(donepm | cdev->private->path_noirq_mask);
		pgid_fill(cdev, pgid);
	}
out:
	CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
		      "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
		      id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
		      cdev->private->pgid_todo_mask, mismatch, reserved, reset);
	switch (rc) {
	case 0:
		if (cdev->private->flags.pgid_unknown) {
			pgid_wipeout_start(cdev);
			return;
		}
		/* Anything left to do? */
		if (cdev->private->pgid_todo_mask == 0) {
			verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
			return;
		}
		/* Perform path-grouping. */
		spid_start(cdev);
		break;
	case -EOPNOTSUPP:
		/* Path-grouping not supported. */
		cdev->private->flags.pgroup = 0;
		cdev->private->flags.mpath = 0;
		verify_start(cdev);
		break;
	default:
		verify_done(cdev, rc);
	}
}

/*
 * Create channel program to perform a SENSE PGID on a single path.
 */
static void snid_build_cp(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;
	int i = pathmask_to_pos(req->lpm);

	/* Channel program setup. */
	cp->cmd_code	= CCW_CMD_SENSE_PGID;
	cp->cda		= (u32)virt_to_phys(&cdev->private->dma_area->pgid[i]);
	cp->count	= sizeof(struct pgid);
	cp->flags	= CCW_FLAG_SLI;
	req->cp		= cp;
}

/*
 * Perform SENSE PGID on a single path.
 */
static void snid_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int ret;

	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
			      ~cdev->private->path_noirq_mask);
	if (!req->lpm)
		goto out_nopath;
	snid_build_cp(cdev);
	ccw_request_start(cdev);
	return;

out_nopath:
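	/*
	 * No paths left to sense: report success if at least one path
	 * delivered PGID data, timeout if some path never answered, and
	 * no-access otherwise.
	 */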
	if (cdev->private->pgid_valid_mask)
		ret = 0;
	else if (cdev->private->path_noirq_mask)
		ret = -ETIME;
	else
		ret = -EACCES;
	snid_done(cdev, ret);
}

/*
 * Process SENSE PGID request result for a single path.
 */
static void snid_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		cdev->private->pgid_valid_mask |= req->lpm;
		break;
	case -ETIME:
		cdev->private->flags.pgid_unknown = 1;
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	default:
		goto err;
	}
	/* Continue on the next path. */
	req->lpm >>= 1;
	snid_do(cdev);
	return;

err:
	snid_done(cdev, rc);
}

/*
 * Perform path verification.
 */
static void verify_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	struct ccw_dev_id *devid = &cdev->private->dev_id;

	sch->vpm = 0;
	sch->lpm = sch->schib.pmcw.pam;

	/* Initialize PGID data. */
	memset(cdev->private->dma_area->pgid, 0,
	       sizeof(cdev->private->dma_area->pgid));
	cdev->private->pgid_valid_mask = 0;
	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
	cdev->private->path_notoper_mask = 0;

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout	= PGID_TIMEOUT;
	req->maxretries	= PGID_RETRIES;
	req->lpm	= 0x80;
	req->singlepath	= 1;
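	/*
	 * With pathgrouping enabled, verification starts by sensing the
	 * PGID on each path; otherwise a NOOP per path is enough to find
	 * the usable ones.
	 */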
	if (cdev->private->flags.pgroup) {
		CIO_TRACE_EVENT(4, "snid");
		CIO_HEX_EVENT(4, devid, sizeof(*devid));
		req->callback	= snid_callback;
		snid_do(cdev);
	} else {
		CIO_TRACE_EVENT(4, "nop");
		CIO_HEX_EVENT(4, devid, sizeof(*devid));
		req->filter	= nop_filter;
		req->callback	= nop_callback;
		nop_do(cdev);
	}
}

/**
 * ccw_device_verify_start - perform path verification
 * @cdev: ccw device
 *
 * Perform an I/O on each available channel path to @cdev to determine which
 * paths are operational. The resulting path mask is stored in sch->vpm.
 * If device options specify pathgrouping, establish a pathgroup for the
 * operational paths. When finished, call ccw_device_verify_done with a
 * return code specifying the result.
 */
void ccw_device_verify_start(struct ccw_device *cdev)
{
	CIO_TRACE_EVENT(4, "vrfy");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/*
	 * Initialize pathgroup and multipath state with target values.
	 * They may change in the course of path verification.
	 */
	cdev->private->flags.pgroup = cdev->private->options.pgroup;
	cdev->private->flags.mpath = cdev->private->options.mpath;
	cdev->private->flags.doverify = 0;
	cdev->private->path_noirq_mask = 0;
	verify_start(cdev);
}

/*
 * Process disband SET PGID request result.
 */
static void disband_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	if (rc)
		goto out;
	/* Ensure consistent multipathing state at device and channel. */
	cdev->private->flags.mpath = 0;
	if (sch->config.mp) {
		sch->config.mp = 0;
		rc = cio_commit_config(sch);
	}
out:
	CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
		      rc);
	ccw_device_disband_done(cdev, rc);
}

/**
 * ccw_device_disband_start - disband pathgroup
 * @cdev: ccw device
 *
 * Execute a SET PGID channel program on @cdev to disband a previously
 * established pathgroup. When finished, call ccw_device_disband_done with
 * a return code specifying the result.
 */
void ccw_device_disband_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	CIO_TRACE_EVENT(4, "disb");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->timeout	= PGID_TIMEOUT;
	req->maxretries	= PGID_RETRIES;
	req->lpm	= sch->schib.pmcw.pam & sch->opm;
	req->singlepath	= 1;
	req->callback	= disband_callback;
	fn = SPID_FUNC_DISBAND;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
}

struct stlck_data {
	struct completion done;
	int rc;
};

static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;

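	/*
	 * Chain an unconditional reserve (steal lock) CCW to a release CCW
	 * so that the reservation is given up again within the same channel
	 * program.
	 */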
	cp[0].cmd_code = CCW_CMD_STLCK;
	cp[0].cda = (u32)virt_to_phys(buf1);
	cp[0].count = 32;
	cp[0].flags = CCW_FLAG_CC;
	cp[1].cmd_code = CCW_CMD_RELEASE;
	cp[1].cda = (u32)virt_to_phys(buf2);
	cp[1].count = 32;
	cp[1].flags = 0;
	req->cp = cp;
}

static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct stlck_data *sdata = data;

	sdata->rc = rc;
	complete(&sdata->done);
}

/**
 * ccw_device_stlck_start - perform unconditional release
 * @cdev: ccw device
 * @data: data pointer to be passed to ccw_device_stlck_done
 * @buf1: data pointer used in channel program
 * @buf2: data pointer used in channel program
 *
 * Execute a channel program on @cdev to release an existing PGID reservation.
 */
static void ccw_device_stlck_start(struct ccw_device *cdev, void *data,
				   void *buf1, void *buf2)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	CIO_TRACE_EVENT(4, "stlck");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->timeout	= PGID_TIMEOUT;
	req->maxretries	= PGID_RETRIES;
	req->lpm	= sch->schib.pmcw.pam & sch->opm;
	req->data	= data;
	req->callback	= stlck_callback;
	stlck_build_cp(cdev, buf1, buf2);
	ccw_request_start(cdev);
}

/*
 * Perform unconditional reserve + release.
 */
int ccw_device_stlck(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct stlck_data data;
	u8 *buffer;
	int rc;

	/* Check if steal lock operation is valid for this device. */
	if (cdev->drv) {
		if (!cdev->private->options.force)
			return -EINVAL;
	}
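	/* One 32-byte buffer each for the STLCK and the RELEASE CCW. */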
	buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;
	init_completion(&data.done);
	data.rc = -EIO;
	spin_lock_irq(sch->lock);
	rc = cio_enable_subchannel(sch, (u32)virt_to_phys(sch));
	if (rc)
		goto out_unlock;
	/* Perform operation. */
	cdev->private->state = DEV_STATE_STEAL_LOCK;
	ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
	spin_unlock_irq(sch->lock);
	/* Wait for operation to finish. */
	if (wait_for_completion_interruptible(&data.done)) {
		/* Got a signal. */
		spin_lock_irq(sch->lock);
		ccw_request_cancel(cdev);
		spin_unlock_irq(sch->lock);
		wait_for_completion(&data.done);
	}
	rc = data.rc;
	/* Check results. */
	spin_lock_irq(sch->lock);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_BOXED;
out_unlock:
	spin_unlock_irq(sch->lock);
	kfree(buffer);

	return rc;
}