xref: /linux/drivers/s390/cio/vfio_ccw_fsm.c (revision dd5b2498d845f925904cb2afabb6ba11bfc317c5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Finite state machine for vfio-ccw device handling
4  *
5  * Copyright IBM Corp. 2017
6  *
7  * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
8  */
9 
10 #include <linux/vfio.h>
11 #include <linux/mdev.h>
12 
13 #include "ioasm.h"
14 #include "vfio_ccw_private.h"
15 
16 #define CREATE_TRACE_POINTS
17 #include "vfio_ccw_trace.h"
18 
/*
 * fsm_io_helper - issue "Start Subchannel" for the channel program
 * previously built in private->cp and map the condition code to an
 * errno-style result.
 *
 * Returns 0 on success (start pending), -EBUSY when the subchannel is
 * busy or has status pending, -EACCES/-ENODEV when the used path(s)
 * turned out to be not operational, or the raw condition code for any
 * unexpected value.
 *
 * Takes and releases sch->lock itself; also moves the FSM to BUSY
 * under that lock.
 */
static int fsm_io_helper(struct vfio_ccw_private *private)
{
	struct subchannel *sch;
	union orb *orb;
	int ccode;
	__u8 lpm;
	unsigned long flags;
	int ret;

	sch = private->sch;

	spin_lock_irqsave(sch->lock, flags);
	private->state = VFIO_CCW_STATE_BUSY;

	orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);

	/* Issue "Start Subchannel" */
	ccode = ssch(sch->schid, orb);

	switch (ccode) {
	case 0:
		/*
		 * Initialize device status information
		 */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
		ret = 0;
		break;
	case 1:		/* Status pending */
	case 2:		/* Busy */
		ret = -EBUSY;
		break;
	case 3:		/* Device/path not operational */
	{
		/*
		 * Retire the path mask the start was attempted on; an
		 * all-zero mask in the ORB means every path failed.
		 */
		lpm = orb->cmd.lpm;
		if (lpm != 0)
			sch->lpm &= ~lpm;
		else
			sch->lpm = 0;

		if (cio_update_schib(sch))
			ret = -ENODEV;
		else
			/* Paths left to try? Then access was denied on this one. */
			ret = sch->lpm ? -EACCES : -ENODEV;
		break;
	}
	default:
		/* Unexpected condition code; pass it through as-is. */
		ret = ccode;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	return ret;
}
70 
71 static void fsm_notoper(struct vfio_ccw_private *private,
72 			enum vfio_ccw_event event)
73 {
74 	struct subchannel *sch = private->sch;
75 
76 	/*
77 	 * TODO:
78 	 * Probably we should send the machine check to the guest.
79 	 */
80 	css_sched_sch_todo(sch, SCH_TODO_UNREG);
81 	private->state = VFIO_CCW_STATE_NOT_OPER;
82 }
83 
/*
 * No operation action. Used for events that require no handling in the
 * current state.
 */
static void fsm_nop(struct vfio_ccw_private *private,
		    enum vfio_ccw_event event)
{
}
91 
92 static void fsm_io_error(struct vfio_ccw_private *private,
93 			 enum vfio_ccw_event event)
94 {
95 	pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
96 	private->io_region->ret_code = -EIO;
97 }
98 
/*
 * Reject an I/O request that arrives while a previous one is still in
 * flight (state busy).
 */
static void fsm_io_busy(struct vfio_ccw_private *private,
			enum vfio_ccw_event event)
{
	private->io_region->ret_code = -EBUSY;
}
104 
105 static void fsm_disabled_irq(struct vfio_ccw_private *private,
106 			     enum vfio_ccw_event event)
107 {
108 	struct subchannel *sch = private->sch;
109 
110 	/*
111 	 * An interrupt in a disabled state means a previous disable was not
112 	 * successful - should not happen, but we try to disable again.
113 	 */
114 	cio_disable_subchannel(sch);
115 }
116 inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
117 {
118 	return p->sch->schid;
119 }
120 
/*
 * Deal with the ccw command request from the userspace.
 *
 * Interpret the SCSW the user copied into the I/O region: a start
 * function triggers translation (cp_init/cp_prefetch) and execution
 * (fsm_io_helper) of the channel program; halt and clear are not
 * supported yet. The outcome is reported via io_region->ret_code and a
 * tracepoint.
 */
static void fsm_io_request(struct vfio_ccw_private *private,
			   enum vfio_ccw_event event)
{
	union orb *orb;
	union scsw *scsw = &private->scsw;
	struct ccw_io_region *io_region = private->io_region;
	struct mdev_device *mdev = private->mdev;
	char *errstr = "request";

	/* Block further I/O requests while this one is processed. */
	private->state = VFIO_CCW_STATE_BUSY;

	memcpy(scsw, io_region->scsw_area, sizeof(*scsw));

	if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
		orb = (union orb *)io_region->orb_area;

		/* Don't try to build a cp if transport mode is specified. */
		if (orb->tm.b) {
			io_region->ret_code = -EOPNOTSUPP;
			errstr = "transport mode";
			goto err_out;
		}
		/* Build the host view of the guest channel program. */
		io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
					      orb);
		if (io_region->ret_code) {
			errstr = "cp init";
			goto err_out;
		}

		io_region->ret_code = cp_prefetch(&private->cp);
		if (io_region->ret_code) {
			errstr = "cp prefetch";
			cp_free(&private->cp);
			goto err_out;
		}

		/* Start channel program and wait for I/O interrupt. */
		io_region->ret_code = fsm_io_helper(private);
		if (io_region->ret_code) {
			errstr = "cp fsm_io_helper";
			cp_free(&private->cp);
			goto err_out;
		}
		/* Success: stay BUSY until the interrupt arrives. */
		return;
	} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		/* XXX: Handle halt. */
		io_region->ret_code = -EOPNOTSUPP;
		goto err_out;
	} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		/* XXX: Handle clear. */
		io_region->ret_code = -EOPNOTSUPP;
		goto err_out;
	}
	/*
	 * NOTE(review): if no function bit is set we fall through to
	 * err_out with io_region->ret_code left untouched - confirm
	 * whether an explicit error code should be set for that case.
	 */

err_out:
	private->state = VFIO_CCW_STATE_IDLE;
	trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
			       io_region->ret_code, errstr);
}
183 
184 /*
185  * Got an interrupt for a normal io (state busy).
186  */
187 static void fsm_irq(struct vfio_ccw_private *private,
188 		    enum vfio_ccw_event event)
189 {
190 	struct irb *irb = this_cpu_ptr(&cio_irb);
191 
192 	memcpy(&private->irb, irb, sizeof(*irb));
193 
194 	queue_work(vfio_ccw_work_q, &private->io_work);
195 
196 	if (private->completion)
197 		complete(private->completion);
198 }
199 
/*
 * Device statemachine
 *
 * Indexed by [current state][event]; each entry is the action invoked
 * when that event arrives in that state.
 */
fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
	/* Not operational: ignore/refuse everything, retry disable on irq. */
	[VFIO_CCW_STATE_NOT_OPER] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_nop,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_disabled_irq,
	},
	/* Standby: I/O requests are an error here. */
	[VFIO_CCW_STATE_STANDBY] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	/* Idle: ready to accept an I/O request. */
	[VFIO_CCW_STATE_IDLE] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_request,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	/* Busy: an I/O is in flight; new requests get -EBUSY. */
	[VFIO_CCW_STATE_BUSY] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
};
225