xref: /linux/drivers/spi/spi-virtio.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * SPI bus driver for the Virtio SPI controller
 * Copyright (C) 2023 OpenSynergy GmbH
 * Copyright (C) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/stddef.h>
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_spi.h>

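/* Linux SPI mode bits that map 1:1 onto the virtio SPI mode field */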
#define VIRTIO_SPI_MODE_MASK \
	(SPI_MODE_X_MASK | SPI_CS_HIGH | SPI_LSB_FIRST)

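/*
 * One request as exchanged with the device: driver-side bookkeeping
 * (completion, payload pointers) followed by the transfer head and the
 * result area that are handed to the virtqueue.
 */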
struct virtio_spi_req {
	struct completion completion;
	const u8 *tx_buf;
	u8 *rx_buf;
	struct spi_transfer_head transfer_head	____cacheline_aligned;
	struct spi_transfer_result result;
};

struct virtio_spi_priv {
	/* The virtio device we're associated with */
	struct virtio_device *vdev;
	/* Pointer to the virtqueue */
	struct virtqueue *vq;
	/* Copy of config space mode_func_supported */
	u32 mode_func_supported;
	/* Copy of config space max_freq_hz */
	u32 max_freq_hz;
};

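/* Virtqueue callback: complete every request the device has returned */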
static void virtio_spi_msg_done(struct virtqueue *vq)
{
	struct virtio_spi_req *req;
	unsigned int len;

	while ((req = virtqueue_get_buf(vq, &len)))
		complete(&req->completion);
}

/*
 * virtio_spi_set_delays - Set delay parameters for an SPI transfer
 *
 * This function sets the various delay parameters of an SPI transfer:
 * the delay after CS is asserted, the interval between adjacent words
 * within a transfer, and the delays before and after CS is deasserted.
 * It converts each delay to nanoseconds using spi_delay_to_ns() and
 * stores the results in the spi_transfer_head structure.
 * If a conversion fails, the function logs a warning and returns an
 * error code.
 *       .   .      .    .    .   .   .   .   .   .
 * Delay + A +      + B  +    + C + D + E + F + A +
 *       .   .      .    .    .   .   .   .   .   .
 *    ___.   .      .    .    .   .   .___.___.   .
 * CS#   |___.______.____.____.___.___|   .   |___._____________
 *       .   .      .    .    .   .   .   .   .   .
 *       .   .      .    .    .   .   .   .   .   .
 * SCLK__.___.___NNN_____NNN__.___.___.___.___.___.___NNN_______
 *
 * NOTE: the first transfer has two words; the delay between these two
 * words is 'B' in the diagram.
 *
 * A => struct spi_device -> cs_setup
 * B => max{struct spi_transfer -> word_delay, struct spi_device -> word_delay}
 *   Note: both spi_device and spi_transfer have a word_delay; Linux
 *         picks the larger one, see _spi_xfer_word_delay_update()
 * C => struct spi_transfer -> delay
 * D => struct spi_device -> cs_hold
 * E => struct spi_device -> cs_inactive
 * F => struct spi_transfer -> cs_change_delay
 *
 * This gives the following mapping:
 * A   <===> cs_setup_ns (after CS asserted)
 * B   <===> word_delay_ns (delay between adjacent words within a transfer)
 * C+D <===> cs_delay_hold_ns (before CS deasserted)
 * E+F <===> cs_change_delay_inactive_ns (after CS deasserted; the Linux
 * SPI core likewise recommends adding these two values together)
 */
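/*
 * Worked example with illustrative (made-up) values: for spi->cs_setup =
 * 5 us, xfer->delay = 10 us, spi->cs_hold = 2 us, spi->cs_inactive = 1 us
 * and xfer->cs_change_delay = 3 us, the head below is filled with
 * cs_setup_ns = 5000, cs_delay_hold_ns = 10000 + 2000 = 12000 and
 * cs_change_delay_inactive_ns = 1000 + 3000 = 4000, each stored
 * little-endian.
 */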
static int virtio_spi_set_delays(struct spi_transfer_head *th,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	int cs_setup;
	int cs_word_delay_xfer;
	int cs_word_delay_spi;
	int delay;
	int cs_hold;
	int cs_inactive;
	int cs_change_delay;

	cs_setup = spi_delay_to_ns(&spi->cs_setup, xfer);
	if (cs_setup < 0) {
		dev_warn(&spi->dev, "Cannot convert cs_setup\n");
		return cs_setup;
	}
	th->cs_setup_ns = cpu_to_le32(cs_setup);

	cs_word_delay_xfer = spi_delay_to_ns(&xfer->word_delay, xfer);
	if (cs_word_delay_xfer < 0) {
		dev_warn(&spi->dev, "Cannot convert cs_word_delay_xfer\n");
		return cs_word_delay_xfer;
	}
	cs_word_delay_spi = spi_delay_to_ns(&spi->word_delay, xfer);
	if (cs_word_delay_spi < 0) {
		dev_warn(&spi->dev, "Cannot convert cs_word_delay_spi\n");
		return cs_word_delay_spi;
	}

	th->word_delay_ns = cpu_to_le32(max(cs_word_delay_spi, cs_word_delay_xfer));

	delay = spi_delay_to_ns(&xfer->delay, xfer);
	if (delay < 0) {
		dev_warn(&spi->dev, "Cannot convert delay\n");
		return delay;
	}
	cs_hold = spi_delay_to_ns(&spi->cs_hold, xfer);
	if (cs_hold < 0) {
		dev_warn(&spi->dev, "Cannot convert cs_hold\n");
		return cs_hold;
	}
	th->cs_delay_hold_ns = cpu_to_le32(delay + cs_hold);

	cs_inactive = spi_delay_to_ns(&spi->cs_inactive, xfer);
	if (cs_inactive < 0) {
		dev_warn(&spi->dev, "Cannot convert cs_inactive\n");
		return cs_inactive;
	}
	cs_change_delay = spi_delay_to_ns(&xfer->cs_change_delay, xfer);
	if (cs_change_delay < 0) {
		dev_warn(&spi->dev, "Cannot convert cs_change_delay\n");
		return cs_change_delay;
	}
	th->cs_change_delay_inactive_ns =
		cpu_to_le32(cs_inactive + cs_change_delay);

	return 0;
}

static int virtio_spi_transfer_one(struct spi_controller *ctrl,
				   struct spi_device *spi,
				   struct spi_transfer *xfer)
{
	struct virtio_spi_priv *priv = spi_controller_get_devdata(ctrl);
	struct virtio_spi_req *spi_req __free(kfree) = NULL;
	struct spi_transfer_head *th;
	struct scatterlist sg_out_head, sg_out_payload;
	struct scatterlist sg_in_result, sg_in_payload;
	struct scatterlist *sgs[4];
	unsigned int outcnt = 0;
	unsigned int incnt = 0;
	int ret;

	spi_req = kzalloc(sizeof(*spi_req), GFP_KERNEL);
	if (!spi_req)
		return -ENOMEM;

	init_completion(&spi_req->completion);

	th = &spi_req->transfer_head;

	/* Fill struct spi_transfer_head */
	th->chip_select_id = spi_get_chipselect(spi, 0);
	th->bits_per_word = spi->bits_per_word;
	th->cs_change = xfer->cs_change;
	th->tx_nbits = xfer->tx_nbits;
	th->rx_nbits = xfer->rx_nbits;
	th->reserved[0] = 0;
	th->reserved[1] = 0;
	th->reserved[2] = 0;

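	/*
	 * The virtio SPI mode bits are defined with the same values as the
	 * Linux SPI_* mode bits, so spi->mode can be forwarded to the device
	 * verbatim; enforce that assumption at build time.
	 */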
	static_assert(VIRTIO_SPI_CPHA == SPI_CPHA,
		      "VIRTIO_SPI_CPHA must match SPI_CPHA");
	static_assert(VIRTIO_SPI_CPOL == SPI_CPOL,
		      "VIRTIO_SPI_CPOL must match SPI_CPOL");
	static_assert(VIRTIO_SPI_CS_HIGH == SPI_CS_HIGH,
		      "VIRTIO_SPI_CS_HIGH must match SPI_CS_HIGH");
	static_assert(VIRTIO_SPI_MODE_LSB_FIRST == SPI_LSB_FIRST,
		      "VIRTIO_SPI_MODE_LSB_FIRST must match SPI_LSB_FIRST");

	th->mode = cpu_to_le32(spi->mode & VIRTIO_SPI_MODE_MASK);
	if (spi->mode & SPI_LOOP)
		th->mode |= cpu_to_le32(VIRTIO_SPI_MODE_LOOP);

	th->freq = cpu_to_le32(xfer->speed_hz);

	ret = virtio_spi_set_delays(th, spi, xfer);
	if (ret)
		goto msg_done;

	/* Set buffers */
	spi_req->tx_buf = xfer->tx_buf;
	spi_req->rx_buf = xfer->rx_buf;

	/* Prepare sending of virtio message */
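	/*
	 * The request is built from up to four descriptors: the
	 * device-readable transfer head, an optional device-readable tx
	 * payload, an optional device-writable rx payload and, finally,
	 * the device-writable transfer result.
	 */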
	sg_init_one(&sg_out_head, th, sizeof(*th));
	sgs[outcnt] = &sg_out_head;
	outcnt++;

	if (spi_req->tx_buf) {
		sg_init_one(&sg_out_payload, spi_req->tx_buf, xfer->len);
		sgs[outcnt] = &sg_out_payload;
		outcnt++;
	}

	if (spi_req->rx_buf) {
		sg_init_one(&sg_in_payload, spi_req->rx_buf, xfer->len);
		sgs[outcnt] = &sg_in_payload;
		incnt++;
	}

	sg_init_one(&sg_in_result, &spi_req->result,
		    sizeof(struct spi_transfer_result));
	sgs[outcnt + incnt] = &sg_in_result;
	incnt++;

	ret = virtqueue_add_sgs(priv->vq, sgs, outcnt, incnt, spi_req,
				GFP_KERNEL);
	if (ret)
		goto msg_done;

	/* Simple implementation: There can be only one transfer in flight */
	virtqueue_kick(priv->vq);

	wait_for_completion(&spi_req->completion);

	/* Read result from message and translate return code */
	switch (spi_req->result.result) {
	case VIRTIO_SPI_TRANS_OK:
		break;
	case VIRTIO_SPI_PARAM_ERR:
		ret = -EINVAL;
		break;
	case VIRTIO_SPI_TRANS_ERR:
		ret = -EIO;
		break;
	default:
		ret = -EIO;
		break;
	}

msg_done:
	if (ret)
		ctrl->cur_msg->status = ret;

	return ret;
}

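/*
 * Translate the device's config space into spi_controller capabilities:
 * number of chip selects, supported SPI mode bits, per-word bit widths
 * and the maximum supported clock frequency.
 */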
static void virtio_spi_read_config(struct virtio_device *vdev)
{
	struct spi_controller *ctrl = dev_get_drvdata(&vdev->dev);
	struct virtio_spi_priv *priv = vdev->priv;
	u8 cs_max_number;
	u8 tx_nbits_supported;
	u8 rx_nbits_supported;

	cs_max_number = virtio_cread8(vdev, offsetof(struct virtio_spi_config,
						     cs_max_number));
	ctrl->num_chipselect = cs_max_number;

	/* Set the mode bits which are understood by this driver */
	priv->mode_func_supported =
		virtio_cread32(vdev, offsetof(struct virtio_spi_config,
					      mode_func_supported));
	ctrl->mode_bits = priv->mode_func_supported &
			  (VIRTIO_SPI_CS_HIGH | VIRTIO_SPI_MODE_LSB_FIRST);
	if (priv->mode_func_supported & VIRTIO_SPI_MF_SUPPORT_CPHA_1)
		ctrl->mode_bits |= VIRTIO_SPI_CPHA;
	if (priv->mode_func_supported & VIRTIO_SPI_MF_SUPPORT_CPOL_1)
		ctrl->mode_bits |= VIRTIO_SPI_CPOL;
	if (priv->mode_func_supported & VIRTIO_SPI_MF_SUPPORT_LSB_FIRST)
		ctrl->mode_bits |= SPI_LSB_FIRST;
	if (priv->mode_func_supported & VIRTIO_SPI_MF_SUPPORT_LOOPBACK)
		ctrl->mode_bits |= SPI_LOOP;
	tx_nbits_supported =
		virtio_cread8(vdev, offsetof(struct virtio_spi_config,
					     tx_nbits_supported));
	if (tx_nbits_supported & VIRTIO_SPI_RX_TX_SUPPORT_DUAL)
		ctrl->mode_bits |= SPI_TX_DUAL;
	if (tx_nbits_supported & VIRTIO_SPI_RX_TX_SUPPORT_QUAD)
		ctrl->mode_bits |= SPI_TX_QUAD;
	if (tx_nbits_supported & VIRTIO_SPI_RX_TX_SUPPORT_OCTAL)
		ctrl->mode_bits |= SPI_TX_OCTAL;
	rx_nbits_supported =
		virtio_cread8(vdev, offsetof(struct virtio_spi_config,
					     rx_nbits_supported));
	if (rx_nbits_supported & VIRTIO_SPI_RX_TX_SUPPORT_DUAL)
		ctrl->mode_bits |= SPI_RX_DUAL;
	if (rx_nbits_supported & VIRTIO_SPI_RX_TX_SUPPORT_QUAD)
		ctrl->mode_bits |= SPI_RX_QUAD;
	if (rx_nbits_supported & VIRTIO_SPI_RX_TX_SUPPORT_OCTAL)
		ctrl->mode_bits |= SPI_RX_OCTAL;

	ctrl->bits_per_word_mask =
		virtio_cread32(vdev, offsetof(struct virtio_spi_config,
					      bits_per_word_mask));

	priv->max_freq_hz =
		virtio_cread32(vdev, offsetof(struct virtio_spi_config,
					      max_freq_hz));
}

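/* The virtio SPI device provides a single request virtqueue ("spi-rq"). */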
static int virtio_spi_find_vqs(struct virtio_spi_priv *priv)
{
	struct virtqueue *vq;

	vq = virtio_find_single_vq(priv->vdev, virtio_spi_msg_done, "spi-rq");
	if (IS_ERR(vq))
		return PTR_ERR(vq);
	priv->vq = vq;
	return 0;
}

/* Function must not be called before virtio_spi_find_vqs() has been run */
static void virtio_spi_del_vq(void *data)
{
	struct virtio_device *vdev = data;

	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
}

static int virtio_spi_probe(struct virtio_device *vdev)
{
	struct virtio_spi_priv *priv;
	struct spi_controller *ctrl;
	int ret;

	ctrl = devm_spi_alloc_host(&vdev->dev, sizeof(*priv));
	if (!ctrl)
		return -ENOMEM;

	priv = spi_controller_get_devdata(ctrl);
	priv->vdev = vdev;
	vdev->priv = priv;

	device_set_node(&ctrl->dev, dev_fwnode(&vdev->dev));

	dev_set_drvdata(&vdev->dev, ctrl);

	virtio_spi_read_config(vdev);

	ctrl->transfer_one = virtio_spi_transfer_one;

	ret = virtio_spi_find_vqs(priv);
	if (ret)
		return dev_err_probe(&vdev->dev, ret, "Cannot setup virtqueues\n");

	/* Register cleanup for virtqueues using devm */
	ret = devm_add_action_or_reset(&vdev->dev, virtio_spi_del_vq, vdev);
	if (ret)
		return dev_err_probe(&vdev->dev, ret, "Cannot register virtqueue cleanup\n");

	/* Use devm version to register controller */
	ret = devm_spi_register_controller(&vdev->dev, ctrl);
	if (ret)
		return dev_err_probe(&vdev->dev, ret, "Cannot register controller\n");

	return 0;
}

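/*
 * Hibernation support: quiesce the SPI core and tear down the virtqueue
 * on freeze, then recreate the virtqueue and resume the controller on
 * restore.
 */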
static int virtio_spi_freeze(struct device *dev)
{
	struct spi_controller *ctrl = dev_get_drvdata(dev);
	struct virtio_device *vdev = dev_to_virtio(dev);
	int ret;

	ret = spi_controller_suspend(ctrl);
	if (ret) {
		dev_warn(dev, "cannot suspend controller (%d)\n", ret);
		return ret;
	}

	virtio_spi_del_vq(vdev);
	return 0;
}

static int virtio_spi_restore(struct device *dev)
{
	struct spi_controller *ctrl = dev_get_drvdata(dev);
	struct virtio_device *vdev = dev_to_virtio(dev);
	int ret;

	ret = virtio_spi_find_vqs(vdev->priv);
	if (ret) {
		dev_err(dev, "problem starting virtqueue (%d)\n", ret);
		return ret;
	}

	ret = spi_controller_resume(ctrl);
	if (ret)
		dev_err(dev, "problem resuming controller (%d)\n", ret);

	return ret;
}

406 
407 static struct virtio_device_id virtio_spi_id_table[] = {
408 	{ VIRTIO_ID_SPI, VIRTIO_DEV_ANY_ID },
409 	{}
410 };
411 MODULE_DEVICE_TABLE(virtio, virtio_spi_id_table);
412 
413 static const struct dev_pm_ops virtio_spi_pm_ops = {
414 	.freeze = pm_sleep_ptr(virtio_spi_freeze),
415 	.restore = pm_sleep_ptr(virtio_spi_restore),
416 };
417 
418 static struct virtio_driver virtio_spi_driver = {
419 	.driver = {
420 		.name = KBUILD_MODNAME,
421 		.pm = &virtio_spi_pm_ops,
422 	},
423 	.id_table = virtio_spi_id_table,
424 	.probe = virtio_spi_probe,
425 };
426 module_virtio_driver(virtio_spi_driver);
427 
428 MODULE_AUTHOR("OpenSynergy GmbH");
429 MODULE_AUTHOR("Haixu Cui <quic_haixcui@quicinc.com>");
430 MODULE_LICENSE("GPL");
431 MODULE_DESCRIPTION("Virtio SPI bus driver");
432