xref: /linux/drivers/s390/cio/device_ops.c (revision a36e9f5cfe9eb3a1dce8769c7058251c42705357)
1 // SPDX-License-Identifier: GPL-1.0+
2 /*
3  * Copyright IBM Corp. 2002, 2009
4  *
5  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
6  *	      Cornelia Huck (cornelia.huck@de.ibm.com)
7  */
8 #include <linux/export.h>
9 #include <linux/init.h>
10 #include <linux/errno.h>
11 #include <linux/slab.h>
12 #include <linux/list.h>
13 #include <linux/device.h>
14 #include <linux/delay.h>
15 #include <linux/completion.h>
16 
17 #include <asm/ccwdev.h>
18 #include <asm/idals.h>
19 #include <asm/chpid.h>
20 #include <asm/fcx.h>
21 
22 #include "cio.h"
23 #include "cio_debug.h"
24 #include "css.h"
25 #include "chsc.h"
26 #include "device.h"
27 #include "chp.h"
28 
29 /**
30  * ccw_device_set_options_mask() - set some options and unset the rest
31  * @cdev: device for which the options are to be set
32  * @flags: options to be set
33  *
34  * All flags specified in @flags are set, all flags not specified in @flags
35  * are cleared.
36  * Returns:
37  *   %0 on success, -%EINVAL on an invalid flag combination.
38  */
39 int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
40 {
41 	/*
42 	 * The flag usage is mutually exclusive ...
43 	 */
44 	if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
45 	    (flags & CCWDEV_REPORT_ALL))
46 		return -EINVAL;
47 	cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
48 	cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
49 	cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
50 	cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
51 	cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0;
52 	return 0;
53 }
54 
55 /**
56  * ccw_device_set_options() - set some options
57  * @cdev: device for which the options are to be set
58  * @flags: options to be set
59  *
60  * All flags specified in @flags are set, the remainder is left untouched.
61  * Returns:
62  *   %0 on success, -%EINVAL if an invalid flag combination would ensue.
63  */
64 int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
65 {
66 	/*
67 	 * The flag usage is mutually exclusive ...
68 	 */
69 	if (((flags & CCWDEV_EARLY_NOTIFICATION) &&
70 	    (flags & CCWDEV_REPORT_ALL)) ||
71 	    ((flags & CCWDEV_EARLY_NOTIFICATION) &&
72 	     cdev->private->options.repall) ||
73 	    ((flags & CCWDEV_REPORT_ALL) &&
74 	     cdev->private->options.fast))
75 		return -EINVAL;
76 	cdev->private->options.fast |= (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
77 	cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
78 	cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
79 	cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
80 	cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0;
81 	return 0;
82 }
83 
84 /**
85  * ccw_device_clear_options() - clear some options
86  * @cdev: device for which the options are to be cleared
87  * @flags: options to be cleared
88  *
89  * All flags specified in @flags are cleared, the remainder is left untouched.
90  */
91 void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
92 {
93 	cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
94 	cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
95 	cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
96 	cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
97 	cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0;
98 }
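
/*
 * Illustrative sketch, not part of this file: a driver would typically pick
 * its options once, for example in its set_online callback, before starting
 * any I/O. "my_cdev" is a hypothetical ccw_device owned by that driver.
 *
 *	rc = ccw_device_set_options(my_cdev, CCWDEV_DO_PATHGROUP |
 *					     CCWDEV_DO_MULTIPATH);
 *	if (rc)
 *		return rc;
 */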
99 
100 /**
101  * ccw_device_is_pathgroup() - determine if paths to this device are grouped
102  * @cdev: ccw device
103  *
104  * Return non-zero if there is a path group, zero otherwise.
105  */
106 int ccw_device_is_pathgroup(struct ccw_device *cdev)
107 {
108 	return cdev->private->flags.pgroup;
109 }
110 EXPORT_SYMBOL(ccw_device_is_pathgroup);
111 
112 /**
113  * ccw_device_is_multipath() - determine if device is operating in multipath mode
114  * @cdev: ccw device
115  *
116  * Return non-zero if device is operating in multipath mode, zero otherwise.
117  */
118 int ccw_device_is_multipath(struct ccw_device *cdev)
119 {
120 	return cdev->private->flags.mpath;
121 }
122 EXPORT_SYMBOL(ccw_device_is_multipath);
123 
124 /**
125  * ccw_device_clear() - terminate I/O request processing
126  * @cdev: target ccw device
127  * @intparm: interruption parameter to be returned upon conclusion of csch
128  *
129  * ccw_device_clear() calls csch on @cdev's subchannel.
130  * Returns:
131  *  %0 on success,
132  *  -%ENODEV on device not operational,
133  *  -%EINVAL on invalid device state.
134  * Context:
135  *  Interrupts disabled, ccw device lock held
136  */
137 int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
138 {
139 	struct subchannel *sch;
140 	int ret;
141 
142 	if (!cdev || !cdev->dev.parent)
143 		return -ENODEV;
144 	sch = to_subchannel(cdev->dev.parent);
145 	if (!sch->schib.pmcw.ena)
146 		return -EINVAL;
147 	if (cdev->private->state == DEV_STATE_NOT_OPER)
148 		return -ENODEV;
149 	if (cdev->private->state != DEV_STATE_ONLINE &&
150 	    cdev->private->state != DEV_STATE_W4SENSE)
151 		return -EINVAL;
152 
153 	ret = cio_clear(sch);
154 	if (ret == 0)
155 		cdev->private->intparm = intparm;
156 	return ret;
157 }
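
/*
 * Illustrative sketch, not part of this file: terminating an outstanding
 * request. The ccw device lock must be held with interrupts disabled;
 * "my_cdev" and "MY_CLEAR_INTPARM" are hypothetical.
 *
 *	spin_lock_irqsave(get_ccwdev_lock(my_cdev), flags);
 *	rc = ccw_device_clear(my_cdev, MY_CLEAR_INTPARM);
 *	spin_unlock_irqrestore(get_ccwdev_lock(my_cdev), flags);
 */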
158 
159 /**
160  * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
161  * @cdev: target ccw device
162  * @cpa: logical start address of channel program
163  * @intparm: user specific interruption parameter; will be presented back to
164  *	     @cdev's interrupt handler. Allows a device driver to associate
165  *	     the interrupt with a particular I/O request.
166  * @lpm: defines the channel path to be used for a specific I/O request. A
167  *	 value of 0 will make cio use the opm.
168  * @key: storage key to be used for the I/O
169  * @flags: additional flags; defines the action to be performed for I/O
170  *	   processing.
171  * @expires: timeout value in jiffies
172  *
173  * Start an S/390 channel program. When the interrupt arrives, the
174  * IRQ handler is called, either immediately, delayed (dev-end missing or
175  * sense required) or never (no IRQ handler registered).
176  * This function notifies the device driver if the channel program has not
177  * completed during the time specified by @expires. If a timeout occurs, the
178  * channel program is terminated via xsch, hsch or csch, and the device's
179  * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
180  * The interruption handler will echo back the @intparm specified here, unless
181  * another interruption parameter is specified by a subsequent invocation of
182  * ccw_device_halt() or ccw_device_clear().
183  * Returns:
184  *  %0, if the operation was successful;
185  *  -%EBUSY, if the device is busy, or status pending;
186  *  -%EACCES, if no path specified in @lpm is operational;
187  *  -%ENODEV, if the device is not operational.
188  * Context:
189  *  Interrupts disabled, ccw device lock held
190  */
191 int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
192 				 unsigned long intparm, __u8 lpm, __u8 key,
193 				 unsigned long flags, int expires)
194 {
195 	struct subchannel *sch;
196 	int ret;
197 
198 	if (!cdev || !cdev->dev.parent)
199 		return -ENODEV;
200 	sch = to_subchannel(cdev->dev.parent);
201 	if (!sch->schib.pmcw.ena)
202 		return -EINVAL;
203 	if (cdev->private->state == DEV_STATE_NOT_OPER)
204 		return -ENODEV;
205 	if (cdev->private->state == DEV_STATE_VERIFY ||
206 	    cdev->private->flags.doverify) {
207 		/* Remember to fake irb when finished. */
208 		if (!cdev->private->flags.fake_irb) {
209 			cdev->private->flags.fake_irb = FAKE_CMD_IRB;
210 			cdev->private->intparm = intparm;
211 			CIO_MSG_EVENT(2, "fakeirb: queue device 0.%x.%04x intparm %lx type=%d\n",
212 				      cdev->private->dev_id.ssid,
213 				      cdev->private->dev_id.devno, intparm,
214 				      cdev->private->flags.fake_irb);
215 			return 0;
216 		} else
217 			/* There's already a fake I/O around. */
218 			return -EBUSY;
219 	}
220 	if (cdev->private->state != DEV_STATE_ONLINE ||
221 	    ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
222 	     !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)))
223 		return -EBUSY;
224 	ret = cio_set_options(sch, flags);
225 	if (ret)
226 		return ret;
227 	/* Adjust requested path mask to exclude unusable paths. */
228 	if (lpm) {
229 		lpm &= sch->lpm;
230 		if (lpm == 0)
231 			return -EACCES;
232 	}
233 	ret = cio_start_key(sch, cpa, lpm, key);
234 	switch (ret) {
235 	case 0:
236 		cdev->private->intparm = intparm;
237 		if (expires)
238 			ccw_device_set_timeout(cdev, expires);
239 		break;
240 	case -EACCES:
241 	case -ENODEV:
242 		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
243 		break;
244 	}
245 	return ret;
246 }
247 
248 /**
249  * ccw_device_start_key() - start a s390 channel program with key
250  * @cdev: target ccw device
251  * @cpa: logical start address of channel program
252  * @intparm: user specific interruption parameter; will be presented back to
253  *	     @cdev's interrupt handler. Allows a device driver to associate
254  *	     the interrupt with a particular I/O request.
255  * @lpm: defines the channel path to be used for a specific I/O request. A
256  *	 value of 0 will make cio use the opm.
257  * @key: storage key to be used for the I/O
258  * @flags: additional flags; defines the action to be performed for I/O
259  *	   processing.
260  *
261  * Start an S/390 channel program. When the interrupt arrives, the
262  * IRQ handler is called, either immediately, delayed (dev-end missing or
263  * sense required) or never (no IRQ handler registered).
264  * The interruption handler will echo back the @intparm specified here, unless
265  * another interruption parameter is specified by a subsequent invocation of
266  * ccw_device_halt() or ccw_device_clear().
267  * Returns:
268  *  %0, if the operation was successful;
269  *  -%EBUSY, if the device is busy, or status pending;
270  *  -%EACCES, if no path specified in @lpm is operational;
271  *  -%ENODEV, if the device is not operational.
272  * Context:
273  *  Interrupts disabled, ccw device lock held
274  */
275 int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
276 			 unsigned long intparm, __u8 lpm, __u8 key,
277 			 unsigned long flags)
278 {
279 	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key,
280 					    flags, 0);
281 }
282 
283 /**
284  * ccw_device_start() - start a s390 channel program
285  * @cdev: target ccw device
286  * @cpa: logical start address of channel program
287  * @intparm: user specific interruption parameter; will be presented back to
288  *	     @cdev's interrupt handler. Allows a device driver to associate
289  *	     the interrupt with a particular I/O request.
290  * @lpm: defines the channel path to be used for a specific I/O request. A
291  *	 value of 0 will make cio use the opm.
292  * @flags: additional flags; defines the action to be performed for I/O
293  *	   processing.
294  *
295  * Start an S/390 channel program. When the interrupt arrives, the
296  * IRQ handler is called, either immediately, delayed (dev-end missing or
297  * sense required) or never (no IRQ handler registered).
298  * The interruption handler will echo back the @intparm specified here, unless
299  * another interruption parameter is specified by a subsequent invocation of
300  * ccw_device_halt() or ccw_device_clear().
301  * Returns:
302  *  %0, if the operation was successful;
303  *  -%EBUSY, if the device is busy, or status pending;
304  *  -%EACCES, if no path specified in @lpm is operational;
305  *  -%ENODEV, if the device is not operational.
306  * Context:
307  *  Interrupts disabled, ccw device lock held
308  */
309 int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
310 		     unsigned long intparm, __u8 lpm, unsigned long flags)
311 {
312 	return ccw_device_start_key(cdev, cpa, intparm, lpm,
313 				    PAGE_DEFAULT_KEY, flags);
314 }
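
/*
 * Illustrative sketch, not part of this file: starting a minimal one-CCW
 * channel program. "my_cdev", "my_ccw", "my_buf_dma", "my_len", "my_req" and
 * "MY_CCW_CMD" are hypothetical; the ccw device lock is held with interrupts
 * disabled and the data buffer must be 31-bit addressable.
 *
 *	my_ccw->cmd_code = MY_CCW_CMD;
 *	my_ccw->flags = CCW_FLAG_SLI;
 *	my_ccw->count = my_len;
 *	my_ccw->cda = my_buf_dma;
 *	rc = ccw_device_start(my_cdev, my_ccw, (unsigned long)my_req, 0, 0);
 */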
315 
316 /**
317  * ccw_device_start_timeout() - start a s390 channel program with timeout
318  * @cdev: target ccw device
319  * @cpa: logical start address of channel program
320  * @intparm: user specific interruption parameter; will be presented back to
321  *	     @cdev's interrupt handler. Allows a device driver to associate
322  *	     the interrupt with a particular I/O request.
323  * @lpm: defines the channel path to be used for a specific I/O request. A
324  *	 value of 0 will make cio use the opm.
325  * @flags: additional flags; defines the action to be performed for I/O
326  *	   processing.
327  * @expires: timeout value in jiffies
328  *
329  * Start an S/390 channel program. When the interrupt arrives, the
330  * IRQ handler is called, either immediately, delayed (dev-end missing or
331  * sense required) or never (no IRQ handler registered).
332  * This function notifies the device driver if the channel program has not
333  * completed during the time specified by @expires. If a timeout occurs, the
334  * channel program is terminated via xsch, hsch or csch, and the device's
335  * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
336  * The interruption handler will echo back the @intparm specified here, unless
337  * another interruption parameter is specified by a subsequent invocation of
338  * ccw_device_halt() or ccw_device_clear().
339  * Returns:
340  *  %0, if the operation was successful;
341  *  -%EBUSY, if the device is busy, or status pending;
342  *  -%EACCES, if no path specified in @lpm is operational;
343  *  -%ENODEV, if the device is not operational.
344  * Context:
345  *  Interrupts disabled, ccw device lock held
346  */
347 int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
348 			     unsigned long intparm, __u8 lpm,
349 			     unsigned long flags, int expires)
350 {
351 	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
352 					    PAGE_DEFAULT_KEY, flags,
353 					    expires);
354 }
355 
356 
357 /**
358  * ccw_device_halt() - halt I/O request processing
359  * @cdev: target ccw device
360  * @intparm: interruption parameter to be returned upon conclusion of hsch
361  *
362  * ccw_device_halt() calls hsch on @cdev's subchannel.
363  * The interruption handler will echo back the @intparm specified here, unless
364  * another interruption parameter is specified by a subsequent invocation of
365  * ccw_device_clear().
366  * Returns:
367  *  %0 on success,
368  *  -%ENODEV on device not operational,
369  *  -%EINVAL on invalid device state,
370  *  -%EBUSY on device busy or interrupt pending.
371  * Context:
372  *  Interrupts disabled, ccw device lock held
373  */
374 int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
375 {
376 	struct subchannel *sch;
377 	int ret;
378 
379 	if (!cdev || !cdev->dev.parent)
380 		return -ENODEV;
381 	sch = to_subchannel(cdev->dev.parent);
382 	if (!sch->schib.pmcw.ena)
383 		return -EINVAL;
384 	if (cdev->private->state == DEV_STATE_NOT_OPER)
385 		return -ENODEV;
386 	if (cdev->private->state != DEV_STATE_ONLINE &&
387 	    cdev->private->state != DEV_STATE_W4SENSE)
388 		return -EINVAL;
389 
390 	ret = cio_halt(sch);
391 	if (ret == 0)
392 		cdev->private->intparm = intparm;
393 	return ret;
394 }
395 
396 /**
397  * ccw_device_resume() - resume channel program execution
398  * @cdev: target ccw device
399  *
400  * ccw_device_resume() calls rsch on @cdev's subchannel.
401  * Returns:
402  *  %0 on success,
403  *  -%ENODEV on device not operational,
404  *  -%EINVAL on invalid device state,
405  *  -%EBUSY on device busy or interrupt pending.
406  * Context:
407  *  Interrupts disabled, ccw device lock held
408  */
409 int ccw_device_resume(struct ccw_device *cdev)
410 {
411 	struct subchannel *sch;
412 
413 	if (!cdev || !cdev->dev.parent)
414 		return -ENODEV;
415 	sch = to_subchannel(cdev->dev.parent);
416 	if (!sch->schib.pmcw.ena)
417 		return -EINVAL;
418 	if (cdev->private->state == DEV_STATE_NOT_OPER)
419 		return -ENODEV;
420 	if (cdev->private->state != DEV_STATE_ONLINE ||
421 	    !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
422 		return -EINVAL;
423 	return cio_resume(sch);
424 }
425 
426 /**
427  * ccw_device_get_ciw() - Search for CIW command in extended sense data.
428  * @cdev: ccw device to inspect
429  * @ct: command type to look for
430  *
431  * During SenseID, command information words (CIWs) describing special
432  * commands available to the device may have been stored in the extended
433  * sense data. This function searches for CIWs of a specified command
434  * type in the extended sense data.
435  * Returns:
436  *  %NULL if no extended sense data has been stored or if no CIW of the
437  *  specified command type could be found,
438  *  else a pointer to the CIW of the specified command type.
439  */
440 struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
441 {
442 	int ciw_cnt;
443 
444 	if (cdev->private->flags.esid == 0)
445 		return NULL;
446 	for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
447 		if (cdev->private->dma_area->senseid.ciw[ciw_cnt].ct == ct)
448 			return cdev->private->dma_area->senseid.ciw + ciw_cnt;
449 	return NULL;
450 }
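
/*
 * Illustrative sketch, not part of this file: looking up the read
 * configuration data (RCD) command, if the device reported one during
 * SenseID. "my_cdev" and "my_build_rcd_request()" are hypothetical.
 *
 *	struct ciw *ciw = ccw_device_get_ciw(my_cdev, CIW_TYPE_RCD);
 *
 *	if (ciw)
 *		my_build_rcd_request(ciw->cmd, ciw->count);
 */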
451 
452 /**
453  * ccw_device_get_path_mask() - get currently available paths
454  * @cdev: ccw device to be queried
455  * Returns:
456  *  %0 if no subchannel for the device is available,
457  *  else the mask of currently available paths for the ccw device's subchannel.
458  */
459 __u8 ccw_device_get_path_mask(struct ccw_device *cdev)
460 {
461 	struct subchannel *sch;
462 
463 	if (!cdev->dev.parent)
464 		return 0;
465 
466 	sch = to_subchannel(cdev->dev.parent);
467 	return sch->lpm;
468 }
469 
470 /**
471  * ccw_device_get_chp_desc() - return newly allocated channel-path descriptor
472  * @cdev: device to obtain the descriptor for
473  * @chp_idx: index of the channel path
474  *
475  * On success return a newly allocated copy of the channel-path description
476  * data associated with the given channel path. Return %NULL on error.
477  */
478 struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *cdev,
479 						       int chp_idx)
480 {
481 	struct subchannel *sch;
482 	struct chp_id chpid;
483 
484 	sch = to_subchannel(cdev->dev.parent);
485 	chp_id_init(&chpid);
486 	chpid.id = sch->schib.pmcw.chpid[chp_idx];
487 	return chp_get_chp_desc(chpid);
488 }
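
/*
 * Illustrative sketch, not part of this file: the returned descriptor is a
 * freshly allocated copy that the caller must free again. "my_cdev" and
 * "my_inspect_desc()" are hypothetical.
 *
 *	struct channel_path_desc_fmt0 *desc;
 *
 *	desc = ccw_device_get_chp_desc(my_cdev, 0);
 *	if (desc) {
 *		my_inspect_desc(desc);
 *		kfree(desc);
 *	}
 */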
489 
490 /**
491  * ccw_device_get_util_str() - return newly allocated utility strings
492  * @cdev: device to obtain the utility strings for
493  * @chp_idx: index of the channel path
494  *
495  * On success return a newly allocated copy of the utility strings
496  * associated with the given channel path. Return %NULL on error.
497  */
498 u8 *ccw_device_get_util_str(struct ccw_device *cdev, int chp_idx)
499 {
500 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
501 	struct channel_path *chp;
502 	struct chp_id chpid;
503 	u8 *util_str;
504 
505 	chp_id_init(&chpid);
506 	chpid.id = sch->schib.pmcw.chpid[chp_idx];
507 	chp = chpid_to_chp(chpid);
508 
509 	util_str = kmalloc(sizeof(chp->desc_fmt3.util_str), GFP_KERNEL);
510 	if (!util_str)
511 		return NULL;
512 
513 	mutex_lock(&chp->lock);
514 	memcpy(util_str, chp->desc_fmt3.util_str, sizeof(chp->desc_fmt3.util_str));
515 	mutex_unlock(&chp->lock);
516 
517 	return util_str;
518 }
519 
520 /**
521  * ccw_device_get_id() - obtain a ccw device id
522  * @cdev: device to obtain the id for
523  * @dev_id: where to fill in the values
524  */
525 void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
526 {
527 	*dev_id = cdev->private->dev_id;
528 }
529 EXPORT_SYMBOL(ccw_device_get_id);
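
/*
 * Illustrative sketch, not part of this file: the id identifies the device
 * within its channel subsystem. "my_cdev" is hypothetical.
 *
 *	struct ccw_dev_id dev_id;
 *
 *	ccw_device_get_id(my_cdev, &dev_id);
 *	pr_info("device 0.%x.%04x\n", dev_id.ssid, dev_id.devno);
 */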
530 
531 /**
532  * ccw_device_tm_start_timeout_key() - perform start function
533  * @cdev: ccw device on which to perform the start function
534  * @tcw: transport-command word to be started
535  * @intparm: user defined parameter to be passed to the interrupt handler
536  * @lpm: mask of paths to use
537  * @key: storage key to use for storage access
538  * @expires: time span in jiffies after which to abort request
539  *
540  * Start the tcw on the given ccw device. Return zero on success, non-zero
541  * otherwise.
542  */
543 int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
544 				    unsigned long intparm, u8 lpm, u8 key,
545 				    int expires)
546 {
547 	struct subchannel *sch;
548 	int rc;
549 
550 	sch = to_subchannel(cdev->dev.parent);
551 	if (!sch->schib.pmcw.ena)
552 		return -EINVAL;
553 	if (cdev->private->state == DEV_STATE_VERIFY) {
554 		/* Remember to fake irb when finished. */
555 		if (!cdev->private->flags.fake_irb) {
556 			cdev->private->flags.fake_irb = FAKE_TM_IRB;
557 			cdev->private->intparm = intparm;
558 			CIO_MSG_EVENT(2, "fakeirb: queue device 0.%x.%04x intparm %lx type=%d\n",
559 				      cdev->private->dev_id.ssid,
560 				      cdev->private->dev_id.devno, intparm,
561 				      cdev->private->flags.fake_irb);
562 			return 0;
563 		} else
564 			/* There's already a fake I/O around. */
565 			return -EBUSY;
566 	}
567 	if (cdev->private->state != DEV_STATE_ONLINE)
568 		return -EIO;
569 	/* Adjust requested path mask to exclude unusable paths. */
570 	if (lpm) {
571 		lpm &= sch->lpm;
572 		if (lpm == 0)
573 			return -EACCES;
574 	}
575 	rc = cio_tm_start_key(sch, tcw, lpm, key);
576 	if (rc == 0) {
577 		cdev->private->intparm = intparm;
578 		if (expires)
579 			ccw_device_set_timeout(cdev, expires);
580 	}
581 	return rc;
582 }
583 EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
584 
585 /**
586  * ccw_device_tm_start_key() - perform start function
587  * @cdev: ccw device on which to perform the start function
588  * @tcw: transport-command word to be started
589  * @intparm: user defined parameter to be passed to the interrupt handler
590  * @lpm: mask of paths to use
591  * @key: storage key to use for storage access
592  *
593  * Start the tcw on the given ccw device. Return zero on success, non-zero
594  * otherwise.
595  */
596 int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
597 			    unsigned long intparm, u8 lpm, u8 key)
598 {
599 	return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0);
600 }
601 EXPORT_SYMBOL(ccw_device_tm_start_key);
602 
603 /**
604  * ccw_device_tm_start() - perform start function
605  * @cdev: ccw device on which to perform the start function
606  * @tcw: transport-command word to be started
607  * @intparm: user defined parameter to be passed to the interrupt handler
608  * @lpm: mask of paths to use
609  *
610  * Start the tcw on the given ccw device. Return zero on success, non-zero
611  * otherwise.
612  */
613 int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
614 			unsigned long intparm, u8 lpm)
615 {
616 	return ccw_device_tm_start_key(cdev, tcw, intparm, lpm,
617 				       PAGE_DEFAULT_KEY);
618 }
619 EXPORT_SYMBOL(ccw_device_tm_start);
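
/*
 * Illustrative sketch, not part of this file: starting a transport-mode
 * request with a tcw the driver has already built (for instance with the
 * itcw helpers from asm/itcw.h). "my_cdev", "my_tcw", "my_req" and
 * "my_handle_start_error()" are hypothetical; the ccw device lock is held
 * with interrupts disabled.
 *
 *	rc = ccw_device_tm_start(my_cdev, my_tcw, (unsigned long)my_req, 0);
 *	if (rc)
 *		my_handle_start_error(rc);
 */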
620 
621 /**
622  * ccw_device_tm_start_timeout() - perform start function
623  * @cdev: ccw device on which to perform the start function
624  * @tcw: transport-command word to be started
625  * @intparm: user defined parameter to be passed to the interrupt handler
626  * @lpm: mask of paths to use
627  * @expires: time span in jiffies after which to abort request
628  *
629  * Start the tcw on the given ccw device. Return zero on success, non-zero
630  * otherwise.
631  */
632 int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
633 			       unsigned long intparm, u8 lpm, int expires)
634 {
635 	return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm,
636 					       PAGE_DEFAULT_KEY, expires);
637 }
638 EXPORT_SYMBOL(ccw_device_tm_start_timeout);
639 
640 /**
641  * ccw_device_get_mdc() - accumulate max data count
642  * @cdev: ccw device for which the max data count is accumulated
643  * @mask: mask of paths to use
644  *
645  * Return the number of 64K-byte blocks that is supported by all of the
646  * given paths for a transport command. A return value of 0 indicates failure.
647  */
648 int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
649 {
650 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
651 	struct channel_path *chp;
652 	struct chp_id chpid;
653 	int mdc = 0, i;
654 
655 	/* Adjust requested path mask to exclude varied-off paths. */
656 	if (mask)
657 		mask &= sch->lpm;
658 	else
659 		mask = sch->lpm;
660 
661 	chp_id_init(&chpid);
662 	for (i = 0; i < 8; i++) {
663 		if (!(mask & (0x80 >> i)))
664 			continue;
665 		chpid.id = sch->schib.pmcw.chpid[i];
666 		chp = chpid_to_chp(chpid);
667 		if (!chp)
668 			continue;
669 
670 		mutex_lock(&chp->lock);
671 		if (!chp->desc_fmt1.f) {
672 			mutex_unlock(&chp->lock);
673 			return 0;
674 		}
675 		if (!chp->desc_fmt1.r)
676 			mdc = 1;
677 		mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) :
678 			    chp->desc_fmt1.mdc;
679 		mutex_unlock(&chp->lock);
680 	}
681 
682 	return mdc;
683 }
684 EXPORT_SYMBOL(ccw_device_get_mdc);
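
/*
 * Illustrative sketch, not part of this file: the returned count is in units
 * of 64K bytes, so a driver can derive the maximum data size per transport
 * command from it. "my_cdev" and "my_max_bytes" are hypothetical.
 *
 *	mdc = ccw_device_get_mdc(my_cdev, 0);
 *	if (!mdc)
 *		return -EOPNOTSUPP;
 *	my_max_bytes = mdc * 64 * 1024;
 */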
685 
686 /**
687  * ccw_device_tm_intrg() - perform interrogate function
688  * @cdev: ccw device on which to perform the interrogate function
689  *
690  * Perform an interrogate function on the given ccw device. Return zero on
691  * success, non-zero otherwise.
692  */
693 int ccw_device_tm_intrg(struct ccw_device *cdev)
694 {
695 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
696 
697 	if (!sch->schib.pmcw.ena)
698 		return -EINVAL;
699 	if (cdev->private->state != DEV_STATE_ONLINE)
700 		return -EIO;
701 	if (!scsw_is_tm(&sch->schib.scsw) ||
702 	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
703 		return -EINVAL;
704 	return cio_tm_intrg(sch);
705 }
706 EXPORT_SYMBOL(ccw_device_tm_intrg);
707 
708 /**
709  * ccw_device_get_schid() - obtain a subchannel id
710  * @cdev: device to obtain the id for
711  * @schid: where to fill in the values
712  */
713 void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
714 {
715 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
716 
717 	*schid = sch->schid;
718 }
719 EXPORT_SYMBOL_GPL(ccw_device_get_schid);
720 
721 /**
722  * ccw_device_pnso() - Perform Network-Subchannel Operation
723  * @cdev:		device on which PNSO is performed
724  * @pnso_area:		request and response block for the operation
725  * @oc:			Operation Code
726  * @resume_token:	resume token for multiblock response
727  * @cnc:		Boolean change-notification control
728  *
729  * pnso_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL).
730  *
731  * Returns 0 on success.
732  */
733 int ccw_device_pnso(struct ccw_device *cdev,
734 		    struct chsc_pnso_area *pnso_area, u8 oc,
735 		    struct chsc_pnso_resume_token resume_token, int cnc)
736 {
737 	struct subchannel_id schid;
738 
739 	ccw_device_get_schid(cdev, &schid);
740 	return chsc_pnso(schid, pnso_area, oc, resume_token, cnc);
741 }
742 EXPORT_SYMBOL_GPL(ccw_device_pnso);
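
/*
 * Illustrative sketch, not part of this file: the request/response block is
 * one page and, as noted above, must come from get_zeroed_page(). "my_cdev",
 * "my_oc" and "my_resume_token" are hypothetical.
 *
 *	struct chsc_pnso_area *pnso_area;
 *
 *	pnso_area = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
 *	if (!pnso_area)
 *		return -ENOMEM;
 *	rc = ccw_device_pnso(my_cdev, pnso_area, my_oc, my_resume_token, 0);
 *	free_page((unsigned long)pnso_area);
 */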
743 
744 /**
745  * ccw_device_get_cssid() - obtain Channel Subsystem ID
746  * @cdev: device to obtain the CSSID for
747  * @cssid: The resulting Channel Subsystem ID
748  */
749 int ccw_device_get_cssid(struct ccw_device *cdev, u8 *cssid)
750 {
751 	struct device *sch_dev = cdev->dev.parent;
752 	struct channel_subsystem *css = to_css(sch_dev->parent);
753 
754 	if (css->id_valid)
755 		*cssid = css->cssid;
756 	return css->id_valid ? 0 : -ENODEV;
757 }
758 EXPORT_SYMBOL_GPL(ccw_device_get_cssid);
759 
760 /**
761  * ccw_device_get_iid() - obtain MIF-image ID
762  * @cdev: device to obtain the MIF-image ID for
763  * @iid: The resulting MIF-image ID
764  */
765 int ccw_device_get_iid(struct ccw_device *cdev, u8 *iid)
766 {
767 	struct device *sch_dev = cdev->dev.parent;
768 	struct channel_subsystem *css = to_css(sch_dev->parent);
769 
770 	if (css->id_valid)
771 		*iid = css->iid;
772 	return css->id_valid ? 0 : -ENODEV;
773 }
774 EXPORT_SYMBOL_GPL(ccw_device_get_iid);
775 
776 /**
777  * ccw_device_get_chpid() - obtain Channel Path ID
778  * @cdev: device to obtain the Channel Path ID for
779  * @chp_idx: Index of the channel path
780  * @chpid: The resulting Channel Path ID
781  */
782 int ccw_device_get_chpid(struct ccw_device *cdev, int chp_idx, u8 *chpid)
783 {
784 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
785 	int mask;
786 
787 	if ((chp_idx < 0) || (chp_idx > 7))
788 		return -EINVAL;
789 	mask = 0x80 >> chp_idx;
790 	if (!(sch->schib.pmcw.pim & mask))
791 		return -ENODEV;
792 
793 	*chpid = sch->schib.pmcw.chpid[chp_idx];
794 	return 0;
795 }
796 EXPORT_SYMBOL_GPL(ccw_device_get_chpid);
797 
798 /**
799  * ccw_device_get_chid() - obtain Channel ID associated with specified CHPID
800  * @cdev: device to obtain the Channel ID for
801  * @chp_idx: Index of the channel path
802  * @chid: The resulting Channel ID
803  */
804 int ccw_device_get_chid(struct ccw_device *cdev, int chp_idx, u16 *chid)
805 {
806 	struct chp_id cssid_chpid;
807 	struct channel_path *chp;
808 	int rc;
809 
810 	chp_id_init(&cssid_chpid);
811 	rc = ccw_device_get_chpid(cdev, chp_idx, &cssid_chpid.id);
812 	if (rc)
813 		return rc;
814 	chp = chpid_to_chp(cssid_chpid);
815 	if (!chp)
816 		return -ENODEV;
817 
818 	mutex_lock(&chp->lock);
819 	if (chp->desc_fmt1.flags & 0x10)
820 		*chid = chp->desc_fmt1.chid;
821 	else
822 		rc = -ENODEV;
823 	mutex_unlock(&chp->lock);
824 
825 	return rc;
826 }
827 EXPORT_SYMBOL_GPL(ccw_device_get_chid);
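
/*
 * Illustrative sketch, not part of this file: resolving the physical channel
 * ID behind the first configured channel path of a device. "my_cdev" and
 * "my_report_path()" are hypothetical.
 *
 *	u8 chpid;
 *	u16 chid;
 *
 *	if (!ccw_device_get_chpid(my_cdev, 0, &chpid) &&
 *	    !ccw_device_get_chid(my_cdev, 0, &chid))
 *		my_report_path(chpid, chid);
 */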
828 
829 /*
830  * Allocate zeroed, DMA-coherent, 31-bit addressable memory from
831  * the subchannel's DMA pool. The maximum supported allocation size
832  * is PAGE_SIZE.
833  */
834 void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size,
835 			    dma32_t *dma_handle)
836 {
837 	void *addr;
838 
839 	if (!get_device(&cdev->dev))
840 		return NULL;
841 	addr = __cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size, dma_handle);
842 	if (IS_ERR_OR_NULL(addr))
843 		put_device(&cdev->dev);
844 	return addr;
845 }
846 EXPORT_SYMBOL(ccw_device_dma_zalloc);
847 
848 void ccw_device_dma_free(struct ccw_device *cdev, void *cpu_addr, size_t size)
849 {
850 	if (!cpu_addr)
851 		return;
852 	cio_gp_dma_free(cdev->private->dma_pool, cpu_addr, size);
853 	put_device(&cdev->dev);
854 }
855 EXPORT_SYMBOL(ccw_device_dma_free);
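
/*
 * Illustrative sketch, not part of this file: allocating a small, 31-bit
 * addressable control block for channel I/O and releasing it again. The
 * allocation pins the device via get_device() until it is freed. "my_cdev",
 * "my_cb" and "my_cb_dma" (a dma32_t) are hypothetical.
 *
 *	my_cb = ccw_device_dma_zalloc(my_cdev, sizeof(*my_cb), &my_cb_dma);
 *	if (!my_cb)
 *		return -ENOMEM;
 *	...
 *	ccw_device_dma_free(my_cdev, my_cb, sizeof(*my_cb));
 */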
856 
857 EXPORT_SYMBOL(ccw_device_set_options_mask);
858 EXPORT_SYMBOL(ccw_device_set_options);
859 EXPORT_SYMBOL(ccw_device_clear_options);
860 EXPORT_SYMBOL(ccw_device_clear);
861 EXPORT_SYMBOL(ccw_device_halt);
862 EXPORT_SYMBOL(ccw_device_resume);
863 EXPORT_SYMBOL(ccw_device_start_timeout);
864 EXPORT_SYMBOL(ccw_device_start);
865 EXPORT_SYMBOL(ccw_device_start_timeout_key);
866 EXPORT_SYMBOL(ccw_device_start_key);
867 EXPORT_SYMBOL(ccw_device_get_ciw);
868 EXPORT_SYMBOL(ccw_device_get_path_mask);
869 EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
870 EXPORT_SYMBOL_GPL(ccw_device_get_util_str);
871