// SPDX-License-Identifier: GPL-1.0+
/*
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/completion.h>

#include <asm/ccwdev.h>
#include <asm/idals.h>
#include <asm/chpid.h>
#include <asm/fcx.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc.h"
#include "device.h"
#include "chp.h"

/**
 * ccw_device_set_options_mask() - set some options and unset the rest
 * @cdev: device for which the options are to be set
 * @flags: options to be set
 *
 * All flags specified in @flags are set, all flags not specified in @flags
 * are cleared.
 * Returns:
 *   %0 on success, -%EINVAL on an invalid flag combination.
 */
int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
{
	/*
	 * The flag usage is mutually exclusive ...
	 */
	if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	    (flags & CCWDEV_REPORT_ALL))
		return -EINVAL;
	cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
	cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0;
	return 0;
}

/**
 * ccw_device_set_options() - set some options
 * @cdev: device for which the options are to be set
 * @flags: options to be set
 *
 * All flags specified in @flags are set, the remainder is left untouched.
 * Returns:
 *   %0 on success, -%EINVAL if an invalid flag combination would ensue.
 */
int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
	/*
	 * The flag usage is mutually exclusive ...
	 */
	if (((flags & CCWDEV_EARLY_NOTIFICATION) &&
	    (flags & CCWDEV_REPORT_ALL)) ||
	    ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	     cdev->private->options.repall) ||
	    ((flags & CCWDEV_REPORT_ALL) &&
	     cdev->private->options.fast))
		return -EINVAL;
	cdev->private->options.fast |= (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
	cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0;
	return 0;
}

/**
 * ccw_device_clear_options() - clear some options
 * @cdev: device for which the options are to be cleared
 * @flags: options to be cleared
 *
 * All flags specified in @flags are cleared, the remainder is left untouched.
 */
void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
{
	cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
	cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
	cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
	cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
	cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0;
}
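
/*
 * Usage sketch (not part of this driver): a CCW device driver typically
 * selects its options from its probe or set_online callback, before the
 * first I/O request is started. The mydrv_probe() name below is
 * hypothetical; the flags are the CCWDEV_* constants handled above.
 *
 *	static int mydrv_probe(struct ccw_device *cdev)
 *	{
 *		// Request path grouping and multipath I/O for this device.
 *		return ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
 *						    CCWDEV_DO_MULTIPATH);
 *	}
 */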

/**
 * ccw_device_is_pathgroup() - determine if paths to this device are grouped
 * @cdev: ccw device
 *
 * Return non-zero if there is a path group, zero otherwise.
 */
int ccw_device_is_pathgroup(struct ccw_device *cdev)
{
	return cdev->private->flags.pgroup;
}
EXPORT_SYMBOL(ccw_device_is_pathgroup);

/**
 * ccw_device_is_multipath() - determine if device is operating in multipath mode
 * @cdev: ccw device
 *
 * Return non-zero if device is operating in multipath mode, zero otherwise.
 */
int ccw_device_is_multipath(struct ccw_device *cdev)
{
	return cdev->private->flags.mpath;
}
EXPORT_SYMBOL(ccw_device_is_multipath);
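
/*
 * Usage sketch (not part of this driver): once the device is online, a
 * driver can check whether the path grouping and multipathing it asked
 * for actually took effect and pick its I/O strategy accordingly.
 *
 *	if (ccw_device_is_pathgroup(cdev) && ccw_device_is_multipath(cdev))
 *		;	// requests may be started with lpm 0 (any usable path)
 *	else
 *		;	// restrict each request to one explicitly chosen path
 */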

/**
 * ccw_device_clear() - terminate I/O request processing
 * @cdev: target ccw device
 * @intparm: interruption parameter to be returned upon conclusion of csch
 *
 * ccw_device_clear() calls csch on @cdev's subchannel.
 * Returns:
 *  %0 on success,
 *  -%ENODEV on device not operational,
 *  -%EINVAL on invalid device state.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;

	ret = cio_clear(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}
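
/*
 * Usage sketch (not part of this driver): ccw_device_clear() is called
 * with interrupts disabled and the ccw device lock held, e.g. from a
 * driver's abort path. The mydrv_terminate_io() name and the intparm
 * value are hypothetical.
 *
 *	static void mydrv_terminate_io(struct ccw_device *cdev)
 *	{
 *		unsigned long flags;
 *		int rc;
 *
 *		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 *		rc = ccw_device_clear(cdev, (unsigned long)cdev);
 *		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 *		if (rc)
 *			dev_warn(&cdev->dev, "csch failed: %d\n", rc);
 *	}
 */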

/**
 * ccw_device_start_timeout_key() - start an s390 channel program with timeout and key
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @key: storage key to be used for the I/O
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 * @expires: timeout value in jiffies
 *
 * Start an S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * This function notifies the device driver if the channel program has not
 * completed during the time specified by @expires. If a timeout occurs, the
 * channel program is terminated via xsch, hsch or csch, and the device's
 * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
 * The interruption handler will echo back the @intparm specified here, unless
 * another interruption parameter is specified by a subsequent invocation of
 * ccw_device_halt() or ccw_device_clear().
 * Returns:
 *  %0, if the operation was successful;
 *  -%EBUSY, if the device is busy, or status pending;
 *  -%EACCES, if no path specified in @lpm is operational;
 *  -%ENODEV, if the device is not operational.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
				 unsigned long intparm, __u8 lpm, __u8 key,
				 unsigned long flags, int expires)
{
	struct subchannel *sch;
	int ret;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_VERIFY) {
		/* Remember to fake irb when finished. */
		if (!cdev->private->flags.fake_irb) {
			cdev->private->flags.fake_irb = FAKE_CMD_IRB;
			cdev->private->intparm = intparm;
			return 0;
		} else
			/* There's already a fake I/O around. */
			return -EBUSY;
	}
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
	     !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
	    cdev->private->flags.doverify)
		return -EBUSY;
	ret = cio_set_options(sch, flags);
	if (ret)
		return ret;
	/* Adjust requested path mask to exclude unusable paths. */
	if (lpm) {
		lpm &= sch->lpm;
		if (lpm == 0)
			return -EACCES;
	}
	ret = cio_start_key(sch, cpa, lpm, key);
	switch (ret) {
	case 0:
		cdev->private->intparm = intparm;
		if (expires)
			ccw_device_set_timeout(cdev, expires);
		break;
	case -EACCES:
	case -ENODEV:
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		break;
	}
	return ret;
}
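
/*
 * Usage sketch (not part of this driver): a single-CCW read started via
 * ccw_device_start_timeout_key(). The ccw device lock must be held with
 * interrupts disabled, and both the CCW and the data buffer must be
 * 31-bit addressable (e.g. obtained with ccw_device_dma_zalloc()). The
 * command code 0x02, MYDRV_BUF_SIZE, and all mydrv_* / req / buf names
 * are hypothetical and only illustrate the calling convention.
 *
 *	struct ccw1 *ccw;	// e.g. from ccw_device_dma_zalloc()
 *	unsigned long flags;
 *	int rc;
 *
 *	ccw->cmd_code = 0x02;			// device-specific read command
 *	ccw->flags = CCW_FLAG_SLI;		// tolerate incorrect length
 *	ccw->count = MYDRV_BUF_SIZE;
 *	ccw->cda = (__u32)(unsigned long)buf;	// data buffer below 2G
 *
 *	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 *	// lpm 0 selects all usable paths (opm), no additional flags,
 *	// 10 second timeout
 *	rc = ccw_device_start_timeout_key(cdev, ccw, (unsigned long)req,
 *					  0, PAGE_DEFAULT_KEY, 0, 10 * HZ);
 *	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 *
 * Completion (or the ERR_PTR(-ETIMEDOUT) irb after the timeout fires) is
 * reported to the driver's interrupt handler together with @intparm:
 *
 *	static void mydrv_irq(struct ccw_device *cdev, unsigned long intparm,
 *			      struct irb *irb)
 *	{
 *		struct mydrv_req *req = (struct mydrv_req *)intparm;
 *
 *		if (IS_ERR(irb))
 *			return;		// e.g. -ETIMEDOUT after @expires
 *		// inspect irb->scsw to determine the outcome of req
 *	}
 */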

/**
 * ccw_device_start_key() - start an s390 channel program with key
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @key: storage key to be used for the I/O
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 *
 * Start an S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * The interruption handler will echo back the @intparm specified here, unless
 * another interruption parameter is specified by a subsequent invocation of
 * ccw_device_halt() or ccw_device_clear().
 * Returns:
 *  %0, if the operation was successful;
 *  -%EBUSY, if the device is busy, or status pending;
 *  -%EACCES, if no path specified in @lpm is operational;
 *  -%ENODEV, if the device is not operational.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
			 unsigned long intparm, __u8 lpm, __u8 key,
			 unsigned long flags)
{
	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key,
					    flags, 0);
}

/**
 * ccw_device_start() - start an s390 channel program
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 *
 * Start an S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * The interruption handler will echo back the @intparm specified here, unless
 * another interruption parameter is specified by a subsequent invocation of
 * ccw_device_halt() or ccw_device_clear().
 * Returns:
 *  %0, if the operation was successful;
 *  -%EBUSY, if the device is busy, or status pending;
 *  -%EACCES, if no path specified in @lpm is operational;
 *  -%ENODEV, if the device is not operational.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
		     unsigned long intparm, __u8 lpm, unsigned long flags)
{
	return ccw_device_start_key(cdev, cpa, intparm, lpm,
				    PAGE_DEFAULT_KEY, flags);
}

/**
 * ccw_device_start_timeout() - start an s390 channel program with timeout
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 * @expires: timeout value in jiffies
 *
 * Start an S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * This function notifies the device driver if the channel program has not
 * completed during the time specified by @expires. If a timeout occurs, the
 * channel program is terminated via xsch, hsch or csch, and the device's
 * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
 * The interruption handler will echo back the @intparm specified here, unless
 * another interruption parameter is specified by a subsequent invocation of
 * ccw_device_halt() or ccw_device_clear().
 * Returns:
 *  %0, if the operation was successful;
 *  -%EBUSY, if the device is busy, or status pending;
 *  -%EACCES, if no path specified in @lpm is operational;
 *  -%ENODEV, if the device is not operational.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
			     unsigned long intparm, __u8 lpm,
			     unsigned long flags, int expires)
{
	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
					    PAGE_DEFAULT_KEY, flags,
					    expires);
}

/**
 * ccw_device_halt() - halt I/O request processing
 * @cdev: target ccw device
 * @intparm: interruption parameter to be returned upon conclusion of hsch
 *
 * ccw_device_halt() calls hsch on @cdev's subchannel.
 * The interruption handler will echo back the @intparm specified here, unless
 * another interruption parameter is specified by a subsequent invocation of
 * ccw_device_clear().
 * Returns:
 *  %0 on success,
 *  -%ENODEV on device not operational,
 *  -%EINVAL on invalid device state,
 *  -%EBUSY on device busy or interrupt pending.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;

	ret = cio_halt(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}
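
/*
 * Usage sketch (not part of this driver): halting a long-running channel
 * program, falling back to ccw_device_clear() if the subchannel is busy.
 * Runs with interrupts disabled and the ccw device lock held; the
 * escalation policy shown is an assumption, not a requirement.
 *
 *	rc = ccw_device_halt(cdev, (unsigned long)req);
 *	if (rc == -EBUSY)
 *		rc = ccw_device_clear(cdev, (unsigned long)req);
 */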

/**
 * ccw_device_resume() - resume channel program execution
 * @cdev: target ccw device
 *
 * ccw_device_resume() calls rsch on @cdev's subchannel.
 * Returns:
 *  %0 on success,
 *  -%ENODEV on device not operational,
 *  -%EINVAL on invalid device state,
 *  -%EBUSY on device busy or interrupt pending.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_resume(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
		return -EINVAL;
	return cio_resume(sch);
}

/**
 * ccw_device_get_ciw() - Search for CIW command in extended sense data.
 * @cdev: ccw device to inspect
 * @ct: command type to look for
 *
 * During SenseID, command information words (CIWs) describing special
 * commands available to the device may have been stored in the extended
 * sense data. This function searches for CIWs of a specified command
 * type in the extended sense data.
 * Returns:
 *  %NULL if no extended sense data has been stored or if no CIW of the
 *  specified command type could be found,
 *  else a pointer to the CIW of the specified command type.
 */
struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
	int ciw_cnt;

	if (cdev->private->flags.esid == 0)
		return NULL;
	for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
		if (cdev->private->dma_area->senseid.ciw[ciw_cnt].ct == ct)
			return cdev->private->dma_area->senseid.ciw + ciw_cnt;
	return NULL;
}
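
/*
 * Usage sketch (not part of this driver): looking up the CIW for the
 * read-configuration-data command and using it to set up a CCW. The
 * CIW_TYPE_RCD constant comes from <asm/ccwdev.h>; the surrounding
 * request setup is hypothetical.
 *
 *	struct ciw *ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
 *
 *	if (!ciw)
 *		return -EOPNOTSUPP;	// device offers no RCD command
 *	ccw->cmd_code = ciw->cmd;	// device-specific command code
 *	ccw->count = ciw->count;	// size of the configuration data
 */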

/**
 * ccw_device_get_path_mask() - get currently available paths
 * @cdev: ccw device to be queried
 * Returns:
 *  %0 if no subchannel for the device is available,
 *  else the mask of currently available paths for the ccw device's subchannel.
 */
__u8 ccw_device_get_path_mask(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (!cdev->dev.parent)
		return 0;

	sch = to_subchannel(cdev->dev.parent);
	return sch->lpm;
}

/**
 * ccw_device_get_chp_desc() - return newly allocated channel-path descriptor
 * @cdev: device to obtain the descriptor for
 * @chp_idx: index of the channel path
 *
 * On success return a newly allocated copy of the channel-path description
 * data associated with the given channel path. Return %NULL on error.
 */
struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *cdev,
						       int chp_idx)
{
	struct subchannel *sch;
	struct chp_id chpid;

	sch = to_subchannel(cdev->dev.parent);
	chp_id_init(&chpid);
	chpid.id = sch->schib.pmcw.chpid[chp_idx];
	return chp_get_chp_desc(chpid);
}
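
/*
 * Usage sketch (not part of this driver): the returned descriptor is a
 * copy owned by the caller and must be freed with kfree().
 *
 *	struct channel_path_desc_fmt0 *desc;
 *
 *	desc = ccw_device_get_chp_desc(cdev, 0);	// first channel path
 *	if (!desc)
 *		return -ENODEV;
 *	// ... evaluate the descriptor, e.g. the channel-path type ...
 *	kfree(desc);
 */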

/**
 * ccw_device_get_util_str() - return newly allocated utility strings
 * @cdev: device to obtain the utility strings for
 * @chp_idx: index of the channel path
 *
 * On success return a newly allocated copy of the utility strings
 * associated with the given channel path. Return %NULL on error.
 */
u8 *ccw_device_get_util_str(struct ccw_device *cdev, int chp_idx)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *util_str;

	chp_id_init(&chpid);
	chpid.id = sch->schib.pmcw.chpid[chp_idx];
	chp = chpid_to_chp(chpid);

	util_str = kmalloc(sizeof(chp->desc_fmt3.util_str), GFP_KERNEL);
	if (!util_str)
		return NULL;

	mutex_lock(&chp->lock);
	memcpy(util_str, chp->desc_fmt3.util_str, sizeof(chp->desc_fmt3.util_str));
	mutex_unlock(&chp->lock);

	return util_str;
}

/**
 * ccw_device_get_id() - obtain a ccw device id
 * @cdev: device to obtain the id for
 * @dev_id: where to fill in the values
 */
void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
{
	*dev_id = cdev->private->dev_id;
}
EXPORT_SYMBOL(ccw_device_get_id);
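
/*
 * Usage sketch (not part of this driver): the device id identifies the
 * device by subchannel-set id and device number, e.g. for logging.
 *
 *	struct ccw_dev_id dev_id;
 *
 *	ccw_device_get_id(cdev, &dev_id);
 *	dev_info(&cdev->dev, "ssid %x, devno %04x\n",
 *		 dev_id.ssid, dev_id.devno);
 */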

/**
 * ccw_device_tm_start_timeout_key() - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 * @key: storage key to use for storage access
 * @expires: time span in jiffies after which to abort request
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
				    unsigned long intparm, u8 lpm, u8 key,
				    int expires)
{
	struct subchannel *sch;
	int rc;

	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_VERIFY) {
		/* Remember to fake irb when finished. */
		if (!cdev->private->flags.fake_irb) {
			cdev->private->flags.fake_irb = FAKE_TM_IRB;
			cdev->private->intparm = intparm;
			return 0;
		} else
			/* There's already a fake I/O around. */
			return -EBUSY;
	}
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EIO;
	/* Adjust requested path mask to exclude unusable paths. */
	if (lpm) {
		lpm &= sch->lpm;
		if (lpm == 0)
			return -EACCES;
	}
	rc = cio_tm_start_key(sch, tcw, lpm, key);
	if (rc == 0) {
		cdev->private->intparm = intparm;
		if (expires)
			ccw_device_set_timeout(cdev, expires);
	}
	return rc;
}
EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);

/**
 * ccw_device_tm_start_key() - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 * @key: storage key to use for storage access
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
			    unsigned long intparm, u8 lpm, u8 key)
{
	return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0);
}
EXPORT_SYMBOL(ccw_device_tm_start_key);

/**
 * ccw_device_tm_start() - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
			unsigned long intparm, u8 lpm)
{
	return ccw_device_tm_start_key(cdev, tcw, intparm, lpm,
				       PAGE_DEFAULT_KEY);
}
EXPORT_SYMBOL(ccw_device_tm_start);
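
/*
 * Usage sketch (not part of this driver): starting a transport-mode
 * request. The tcw is assumed to have been built beforehand (for example
 * with the itcw helpers from <asm/itcw.h>); as with command mode, the
 * call runs with interrupts disabled and the ccw device lock held, and
 * @intparm is echoed back to the interrupt handler.
 *
 *	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 *	rc = ccw_device_tm_start(cdev, tcw, (unsigned long)req, 0);
 *	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 */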

/**
 * ccw_device_tm_start_timeout() - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 * @expires: time span in jiffies after which to abort request
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
				unsigned long intparm, u8 lpm, int expires)
{
	return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm,
					       PAGE_DEFAULT_KEY, expires);
}
EXPORT_SYMBOL(ccw_device_tm_start_timeout);

/**
 * ccw_device_get_mdc() - accumulate max data count
 * @cdev: ccw device for which the max data count is accumulated
 * @mask: mask of paths to use
 *
 * Return the number of 64K-byte blocks that all paths at least support
 * for a transport command. A return value of 0 indicates failure.
 */
int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_path *chp;
	struct chp_id chpid;
	int mdc = 0, i;

	/* Adjust requested path mask to exclude varied-off paths. */
	if (mask)
		mask &= sch->lpm;
	else
		mask = sch->lpm;

	chp_id_init(&chpid);
	for (i = 0; i < 8; i++) {
		if (!(mask & (0x80 >> i)))
			continue;
		chpid.id = sch->schib.pmcw.chpid[i];
		chp = chpid_to_chp(chpid);
		if (!chp)
			continue;

		mutex_lock(&chp->lock);
		if (!chp->desc_fmt1.f) {
			mutex_unlock(&chp->lock);
			return 0;
		}
		if (!chp->desc_fmt1.r)
			mdc = 1;
		mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) :
			    chp->desc_fmt1.mdc;
		mutex_unlock(&chp->lock);
	}

	return mdc;
}
EXPORT_SYMBOL(ccw_device_get_mdc);
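
/*
 * Usage sketch (not part of this driver): per the description above, the
 * returned value counts 64K-byte blocks, so a driver can derive the
 * largest transport-command data transfer supported on all (or the
 * masked) paths. The error handling shown is an assumption.
 *
 *	size_t max_bytes;
 *	int mdc;
 *
 *	mdc = ccw_device_get_mdc(cdev, 0);	// 0: consider all usable paths
 *	if (!mdc)
 *		return -EOPNOTSUPP;		// no common limit determined
 *	max_bytes = (size_t)mdc * 0x10000;	// 64K-byte units
 */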

/**
 * ccw_device_tm_intrg() - perform interrogate function
 * @cdev: ccw device on which to perform the interrogate function
 *
 * Perform an interrogate function on the given ccw device. Return zero on
 * success, non-zero otherwise.
 */
int ccw_device_tm_intrg(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EIO;
	if (!scsw_is_tm(&sch->schib.scsw) ||
	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
		return -EINVAL;
	return cio_tm_intrg(sch);
}
EXPORT_SYMBOL(ccw_device_tm_intrg);

/**
 * ccw_device_get_schid() - obtain a subchannel id
 * @cdev: device to obtain the id for
 * @schid: where to fill in the values
 */
void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	*schid = sch->schid;
}
EXPORT_SYMBOL_GPL(ccw_device_get_schid);

/**
 * ccw_device_pnso() - Perform Network-Subchannel Operation
 * @cdev:		device on which PNSO is performed
 * @pnso_area:		request and response block for the operation
 * @resume_token:	resume token for multiblock response
 * @cnc:		Boolean change-notification control
 *
 * pnso_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL).
 *
 * Returns 0 on success.
 */
int ccw_device_pnso(struct ccw_device *cdev,
		    struct chsc_pnso_area *pnso_area,
		    struct chsc_pnso_resume_token resume_token,
		    int cnc)
{
	struct subchannel_id schid;

	ccw_device_get_schid(cdev, &schid);
	return chsc_pnso(schid, pnso_area, resume_token, cnc);
}
EXPORT_SYMBOL_GPL(ccw_device_pnso);
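
/*
 * Usage sketch (not part of this driver): issuing a PNSO request. The
 * request/response block comes from get_zeroed_page() as required above;
 * passing an all-zero resume token for the initial request and cnc=0 is
 * an assumption for illustration.
 *
 *	struct chsc_pnso_resume_token token;
 *	struct chsc_pnso_area *pnso_area;
 *	int rc;
 *
 *	pnso_area = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
 *	if (!pnso_area)
 *		return -ENOMEM;
 *	memset(&token, 0, sizeof(token));
 *	rc = ccw_device_pnso(cdev, pnso_area, token, 0);
 *	if (!rc)
 *		;	// evaluate the response block in pnso_area
 *	free_page((unsigned long)pnso_area);
 */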

/*
 * Allocate zeroed, DMA-coherent, 31-bit addressable memory from the
 * subchannel's DMA pool. The maximum supported allocation size is
 * PAGE_SIZE.
 */
void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size)
{
	return cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size);
}
EXPORT_SYMBOL(ccw_device_dma_zalloc);

void ccw_device_dma_free(struct ccw_device *cdev, void *cpu_addr, size_t size)
{
	cio_gp_dma_free(cdev->private->dma_pool, cpu_addr, size);
}
EXPORT_SYMBOL(ccw_device_dma_free);
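
/*
 * Usage sketch (not part of this driver): channel-program memory must be
 * 31-bit addressable, so drivers can draw it from the device's DMA pool
 * and release it with the matching free call.
 *
 *	struct ccw1 *ccw;
 *
 *	ccw = ccw_device_dma_zalloc(cdev, sizeof(*ccw));
 *	if (!ccw)
 *		return -ENOMEM;
 *	// ... build the channel program and start it ...
 *	ccw_device_dma_free(cdev, ccw, sizeof(*ccw));
 */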

EXPORT_SYMBOL(ccw_device_set_options_mask);
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
EXPORT_SYMBOL_GPL(ccw_device_get_util_str);
767