xref: /freebsd/sys/dev/virtio/scmi/virtio_scmi.c (revision 7fdf597e96a02165cfe22ff357b857d5fa15ed8a)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2023 Arm Ltd
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice unmodified, this list of conditions, and the following
11  *    disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 /* Driver for VirtIO SCMI device. */
29 
30 #include <sys/cdefs.h>
31 #include <sys/param.h>
32 #include <sys/types.h>
33 #include <sys/eventhandler.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/module.h>
37 #include <sys/queue.h>
38 #include <sys/sglist.h>
39 
40 #include <machine/bus.h>
41 #include <machine/resource.h>
42 #include <sys/bus.h>
43 
44 #include <dev/virtio/virtio.h>
45 #include <dev/virtio/virtqueue.h>
46 #include <dev/virtio/scmi/virtio_scmi.h>
47 
/*
 * Per-message transfer descriptor: wraps one caller-owned buffer in a
 * scatter/gather list suitable for virtqueue_enqueue(), and links into
 * the owning queue's free list when idle.
 */
struct vtscmi_pdu {
	enum vtscmi_chan	chan;	/* Owning channel (A2P or P2A) */
	struct sglist		sg;	/* SG list built over 'buf' */
	struct sglist_seg	segs[2];	/* Backing segs: TX + RX max */
	void			*buf;	/* Caller-owned message buffer */
	SLIST_ENTRY(vtscmi_pdu)	next;	/* Free-list linkage */
};
55 
/*
 * Per-channel state: the underlying virtqueue, a preallocated pool of
 * PDUs with its free list, and the RX callback delivered on dequeue.
 * Both locks are spin mutexes since they are taken from interrupt context.
 */
struct vtscmi_queue {
	device_t				dev;	/* Owning device */
	int					vq_id;	/* Channel index */
	unsigned int				vq_sz;	/* Number of PDUs */
	struct virtqueue			*vq;
	struct mtx				vq_mtx;	/* Serializes VQ ops */
	struct vtscmi_pdu			*pdus;	/* PDU pool backing store */
	SLIST_HEAD(pdus_head, vtscmi_pdu)	p_head;	/* Free PDU list */
	struct mtx				p_mtx;	/* Protects p_head */
	virtio_scmi_rx_callback_t		*rx_callback;
	void					*priv;	/* Callback argument */
};
68 
/*
 * Driver softc: negotiated features and one queue per supported channel.
 * The P2A queue is only populated when the platform advertises
 * VIRTIO_SCMI_F_P2A_CHANNELS.
 */
struct vtscmi_softc {
	device_t	vtscmi_dev;
	uint64_t	vtscmi_features;	/* Negotiated feature bits */
	uint8_t		vtscmi_vqs_cnt;		/* 1 (A2P) or 2 (A2P + P2A) */
	struct vtscmi_queue	vtscmi_queues[VIRTIO_SCMI_CHAN_MAX];
	bool		has_p2a;	/* Platform-to-agent channel present */
	bool		has_shared;	/* Shared-memory feature (unused yet) */
};
77 
/* Singleton: only one VirtIO SCMI device is supported per agent. */
static device_t vtscmi_dev;

static int vtscmi_modevent(module_t, int, void *);

/* Newbus device methods. */
static int	vtscmi_probe(device_t);
static int	vtscmi_attach(device_t);
static int	vtscmi_detach(device_t);
static int	vtscmi_shutdown(device_t);
/* Feature negotiation and resource setup helpers. */
static int	vtscmi_negotiate_features(struct vtscmi_softc *);
static int	vtscmi_setup_features(struct vtscmi_softc *);
static void	vtscmi_vq_intr(void *);
static int	vtscmi_alloc_virtqueues(struct vtscmi_softc *);
static int	vtscmi_alloc_queues(struct vtscmi_softc *);
static void	vtscmi_free_queues(struct vtscmi_softc *);
/* PDU free-list management. */
static void	*virtio_scmi_pdu_get(struct vtscmi_queue *, void *,
    unsigned int, unsigned int);
static void	virtio_scmi_pdu_put(device_t, struct vtscmi_pdu *);
95 
/* Human-readable names for feature bits, used by virtio(4) diagnostics. */
static struct virtio_feature_desc vtscmi_feature_desc[] = {
	{ VIRTIO_SCMI_F_P2A_CHANNELS, "P2AChannel" },
	{ VIRTIO_SCMI_F_SHARED_MEMORY, "SharedMem" },
	{ 0, NULL }
};
101 
/* Newbus method table for the vtscmi driver. */
static device_method_t vtscmi_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtscmi_probe),
	DEVMETHOD(device_attach,	vtscmi_attach),
	DEVMETHOD(device_detach,	vtscmi_detach),
	DEVMETHOD(device_shutdown,	vtscmi_shutdown),

	DEVMETHOD_END
};
111 
static driver_t vtscmi_driver = {
	"vtscmi",
	vtscmi_methods,
	sizeof(struct vtscmi_softc)
};

/* Register with the virtio bus and declare module metadata. */
VIRTIO_DRIVER_MODULE(virtio_scmi, vtscmi_driver, vtscmi_modevent, NULL);
MODULE_VERSION(virtio_scmi, 1);
MODULE_DEPEND(virtio_scmi, virtio, 1, 1, 1);

/* PNP match on the VirtIO SCMI device ID. */
VIRTIO_SIMPLE_PNPINFO(virtio_scmi, VIRTIO_ID_SCMI, "VirtIO SCMI Adapter");
123 
124 static int
125 vtscmi_modevent(module_t mod, int type, void *unused)
126 {
127 	int error;
128 
129 	switch (type) {
130 	case MOD_LOAD:
131 	case MOD_QUIESCE:
132 	case MOD_UNLOAD:
133 	case MOD_SHUTDOWN:
134 		error = 0;
135 		break;
136 	default:
137 		error = EOPNOTSUPP;
138 		break;
139 	}
140 
141 	return (error);
142 }
143 
/* Probe: match against the VirtIO SCMI PNP table declared above. */
static int
vtscmi_probe(device_t dev)
{
	return (VIRTIO_SIMPLE_PROBE(dev, virtio_scmi));
}
149 
/*
 * Attach: negotiate features, allocate virtqueues and PDU pools, hook up
 * interrupts and record the singleton device handle.
 *
 * NOTE(review): the 'fail' label performs no explicit teardown of
 * resources acquired by earlier successful steps (e.g. virtqueues);
 * presumably the virtio bus layer reclaims them on failed attach —
 * confirm against virtio_alloc_virtqueues() ownership semantics.
 */
static int
vtscmi_attach(device_t dev)
{
	struct vtscmi_softc *sc;
	int error;

	/* Only one SCMI device per-agent */
	if (vtscmi_dev != NULL)
		return (EEXIST);

	sc = device_get_softc(dev);
	sc->vtscmi_dev = dev;

	virtio_set_feature_desc(dev, vtscmi_feature_desc);
	error = vtscmi_setup_features(sc);
	if (error) {
		device_printf(dev, "cannot setup features\n");
		goto fail;
	}

	error = vtscmi_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	error = vtscmi_alloc_queues(sc);
	if (error) {
		device_printf(dev, "cannot allocate queues\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_MISC);
	if (error) {
		device_printf(dev, "cannot setup intr\n");
		vtscmi_free_queues(sc);
		goto fail;
	}

	/* Save unique device */
	vtscmi_dev = sc->vtscmi_dev;

fail:
	/* On success 'error' is 0 and this is the normal return path. */
	return (error);
}
196 
/*
 * Detach: quiesce both channels, stop the device and release the PDU
 * pools. Teardown order matters: callbacks are cleared (which also
 * disables VQ interrupts) before the device is stopped and memory freed.
 */
static int
vtscmi_detach(device_t dev)
{
	struct vtscmi_softc *sc;

	sc = device_get_softc(dev);

	/* These also disable related interrupts */
	virtio_scmi_channel_callback_set(dev, VIRTIO_SCMI_CHAN_A2P, NULL, NULL);
	virtio_scmi_channel_callback_set(dev, VIRTIO_SCMI_CHAN_P2A, NULL, NULL);

	virtio_stop(dev);

	vtscmi_free_queues(sc);

	return (0);
}
214 
/* Shutdown: nothing to do; the device requires no quiescing at shutdown. */
static int
vtscmi_shutdown(device_t dev)
{

	return (0);
}
221 
222 static int
223 vtscmi_negotiate_features(struct vtscmi_softc *sc)
224 {
225 	device_t dev;
226 	uint64_t features;
227 
228 	dev = sc->vtscmi_dev;
229 	/* We still don't support shared mem (stats)...so don't advertise it */
230 	features = VIRTIO_SCMI_F_P2A_CHANNELS;
231 
232 	sc->vtscmi_features = virtio_negotiate_features(dev, features);
233 	return (virtio_finalize_features(dev));
234 }
235 
236 static int
237 vtscmi_setup_features(struct vtscmi_softc *sc)
238 {
239 	device_t dev;
240 	int error;
241 
242 	dev = sc->vtscmi_dev;
243 	error = vtscmi_negotiate_features(sc);
244 	if (error)
245 		return (error);
246 
247 	if (virtio_with_feature(dev, VIRTIO_SCMI_F_P2A_CHANNELS))
248 		sc->has_p2a = true;
249 	if (virtio_with_feature(dev, VIRTIO_SCMI_F_SHARED_MEMORY))
250 		sc->has_shared = true;
251 
252 	device_printf(dev, "Platform %s P2A channel.\n",
253 	    sc->has_p2a ? "supports" : "does NOT support");
254 
255 	return (0);
256 }
257 
258 static int
259 vtscmi_alloc_queues(struct vtscmi_softc *sc)
260 {
261 	int idx;
262 
263 	for (idx = VIRTIO_SCMI_CHAN_A2P; idx < VIRTIO_SCMI_CHAN_MAX; idx++) {
264 		int i, vq_sz;
265 		struct vtscmi_queue *q;
266 		struct vtscmi_pdu *pdu;
267 
268 		if (idx == VIRTIO_SCMI_CHAN_P2A && !sc->has_p2a)
269 			continue;
270 
271 		q = &sc->vtscmi_queues[idx];
272 		q->dev = sc->vtscmi_dev;
273 		q->vq_id = idx;
274 		vq_sz = virtqueue_size(q->vq);
275 		q->vq_sz = idx != VIRTIO_SCMI_CHAN_A2P ? vq_sz : vq_sz / 2;
276 
277 		q->pdus = mallocarray(q->vq_sz, sizeof(*pdu), M_DEVBUF,
278 		    M_ZERO | M_WAITOK);
279 
280 		SLIST_INIT(&q->p_head);
281 		for (i = 0, pdu = q->pdus; i < q->vq_sz; i++, pdu++) {
282 			pdu->chan = idx;
283 			//XXX Maybe one seg redndant for P2A
284 			sglist_init(&pdu->sg,
285 			    idx == VIRTIO_SCMI_CHAN_A2P ? 2 : 1, pdu->segs);
286 			SLIST_INSERT_HEAD(&q->p_head, pdu, next);
287 		}
288 
289 		mtx_init(&q->p_mtx, "vtscmi_pdus", "VTSCMI", MTX_SPIN);
290 		mtx_init(&q->vq_mtx, "vtscmi_vq", "VTSCMI", MTX_SPIN);
291 	}
292 
293 	return (0);
294 }
295 
296 static void
297 vtscmi_free_queues(struct vtscmi_softc *sc)
298 {
299 	int idx;
300 
301 	for (idx = VIRTIO_SCMI_CHAN_A2P; idx < VIRTIO_SCMI_CHAN_MAX; idx++) {
302 		struct vtscmi_queue *q;
303 
304 		if (idx == VIRTIO_SCMI_CHAN_P2A && !sc->has_p2a)
305 			continue;
306 
307 		q = &sc->vtscmi_queues[idx];
308 		if (q->vq_sz == 0)
309 			continue;
310 
311 		free(q->pdus, M_DEVBUF);
312 		mtx_destroy(&q->p_mtx);
313 		mtx_destroy(&q->vq_mtx);
314 	}
315 }
316 
317 static void
318 vtscmi_vq_intr(void *arg)
319 {
320 	struct vtscmi_queue *q = arg;
321 
322 	/*
323 	 * TODO
324 	 * - consider pressure on RX by msg floods
325 	 *   + Does it need a taskqueue_ like virtio/net to postpone processing
326 	 *     under pressure ? (SCMI is low_freq compared to network though)
327 	 */
328 	for (;;) {
329 		struct vtscmi_pdu *pdu;
330 		uint32_t rx_len;
331 
332 		mtx_lock_spin(&q->vq_mtx);
333 		pdu = virtqueue_dequeue(q->vq, &rx_len);
334 		mtx_unlock_spin(&q->vq_mtx);
335 		if (!pdu)
336 			return;
337 
338 		if (q->rx_callback)
339 			q->rx_callback(pdu->buf, rx_len, q->priv);
340 
341 		/* Note that this only frees the PDU, NOT the buffer itself */
342 		virtio_scmi_pdu_put(q->dev, pdu);
343 	}
344 }
345 
346 static int
347 vtscmi_alloc_virtqueues(struct vtscmi_softc *sc)
348 {
349 	device_t dev;
350 	struct vq_alloc_info vq_info[VIRTIO_SCMI_CHAN_MAX];
351 
352 	dev = sc->vtscmi_dev;
353 	sc->vtscmi_vqs_cnt = sc->has_p2a ? 2 : 1;
354 
355 	VQ_ALLOC_INFO_INIT(&vq_info[VIRTIO_SCMI_CHAN_A2P], 0,
356 			   vtscmi_vq_intr,
357 			   &sc->vtscmi_queues[VIRTIO_SCMI_CHAN_A2P],
358 			   &sc->vtscmi_queues[VIRTIO_SCMI_CHAN_A2P].vq,
359 			   "%s cmdq", device_get_nameunit(dev));
360 
361 	if (sc->has_p2a) {
362 		VQ_ALLOC_INFO_INIT(&vq_info[VIRTIO_SCMI_CHAN_P2A], 0,
363 				   vtscmi_vq_intr,
364 				   &sc->vtscmi_queues[VIRTIO_SCMI_CHAN_P2A],
365 				   &sc->vtscmi_queues[VIRTIO_SCMI_CHAN_P2A].vq,
366 				   "%s evtq", device_get_nameunit(dev));
367 	}
368 
369 	return (virtio_alloc_virtqueues(dev, sc->vtscmi_vqs_cnt, vq_info));
370 }
371 
372 static void *
373 virtio_scmi_pdu_get(struct vtscmi_queue *q, void *buf, unsigned int tx_len,
374     unsigned int rx_len)
375 {
376 	struct vtscmi_pdu *pdu = NULL;
377 
378 	if (rx_len == 0)
379 		return (NULL);
380 
381 	mtx_lock_spin(&q->p_mtx);
382 	if (!SLIST_EMPTY(&q->p_head)) {
383 		pdu = SLIST_FIRST(&q->p_head);
384 		SLIST_REMOVE_HEAD(&q->p_head, next);
385 	}
386 	mtx_unlock_spin(&q->p_mtx);
387 
388 	if (pdu == NULL) {
389 		device_printf(q->dev, "Cannnot allocate PDU.\n");
390 		return (NULL);
391 	}
392 
393 	/*Save msg buffer for easy access */
394 	pdu->buf = buf;
395 	if (tx_len != 0)
396 		sglist_append(&pdu->sg, pdu->buf, tx_len);
397 	sglist_append(&pdu->sg, pdu->buf, rx_len);
398 
399 	return (pdu);
400 }
401 
/*
 * Return a PDU to its channel's free list, after dropping any buffer
 * mappings held by its scatter/gather list. NULL is accepted as a no-op.
 * Note that this does NOT free the message buffer (pdu->buf), which
 * remains owned by the caller.
 */
static void
virtio_scmi_pdu_put(device_t dev, struct vtscmi_pdu *pdu)
{
	struct vtscmi_softc *sc;
	struct vtscmi_queue *q;

	if (pdu == NULL)
		return;

	sc = device_get_softc(dev);
	q = &sc->vtscmi_queues[pdu->chan];

	/* Drop stale buffer mappings before recycling. */
	sglist_reset(&pdu->sg);

	mtx_lock_spin(&q->p_mtx);
	SLIST_INSERT_HEAD(&q->p_head, pdu, next);
	mtx_unlock_spin(&q->p_mtx);
}
420 
/*
 * Return the singleton VirtIO SCMI device, or NULL if none has attached.
 */
device_t
virtio_scmi_transport_get(void)
{
	return (vtscmi_dev);
}
426 
427 int
428 virtio_scmi_channel_size_get(device_t dev, enum vtscmi_chan chan)
429 {
430 	struct vtscmi_softc *sc;
431 
432 	sc = device_get_softc(dev);
433 	if (chan >= sc->vtscmi_vqs_cnt)
434 		return (0);
435 
436 	return (sc->vtscmi_queues[chan].vq_sz);
437 }
438 
439 int
440 virtio_scmi_channel_callback_set(device_t dev, enum vtscmi_chan chan,
441     virtio_scmi_rx_callback_t *cb, void *priv)
442 {
443 	struct vtscmi_softc *sc;
444 
445 	sc = device_get_softc(dev);
446 	if (chan >= sc->vtscmi_vqs_cnt)
447 		return (1);
448 
449 	if (cb == NULL)
450 		virtqueue_disable_intr(sc->vtscmi_queues[chan].vq);
451 
452 	sc->vtscmi_queues[chan].rx_callback = cb;
453 	sc->vtscmi_queues[chan].priv = priv;
454 
455 	/* Enable Interrupt on VQ once the callback is set */
456 	if (cb != NULL)
457 		/*
458 		 * TODO
459 		 * Does this need a taskqueue_ task to process already pending
460 		 * messages ?
461 		 */
462 		virtqueue_enable_intr(sc->vtscmi_queues[chan].vq);
463 
464 	device_printf(dev, "%sabled interrupts on VQ[%d].\n",
465 	    cb ? "En" : "Dis", chan);
466 
467 	return (0);
468 }
469 
470 int
471 virtio_scmi_message_enqueue(device_t dev, enum vtscmi_chan chan,
472     void *buf, unsigned int tx_len, unsigned int rx_len)
473 {
474 	struct vtscmi_softc *sc;
475 	struct vtscmi_pdu *pdu;
476 	struct vtscmi_queue *q;
477 	int ret;
478 
479 	sc = device_get_softc(dev);
480 	if (chan >= sc->vtscmi_vqs_cnt)
481 		return (1);
482 
483 	q = &sc->vtscmi_queues[chan];
484 	pdu = virtio_scmi_pdu_get(q, buf, tx_len, rx_len);
485 	if (pdu == NULL)
486 		return (ENXIO);
487 
488 	mtx_lock_spin(&q->vq_mtx);
489 	ret = virtqueue_enqueue(q->vq, pdu, &pdu->sg,
490 	    chan == VIRTIO_SCMI_CHAN_A2P ? 1 : 0, 1);
491 	if (ret == 0)
492 		virtqueue_notify(q->vq);
493 	mtx_unlock_spin(&q->vq_mtx);
494 
495 	return (ret);
496 }
497 
498 void *
499 virtio_scmi_message_poll(device_t dev, uint32_t *rx_len)
500 {
501 	struct vtscmi_softc *sc;
502 	struct vtscmi_queue *q;
503 	struct vtscmi_pdu *pdu;
504 	void *buf = NULL;
505 
506 	sc = device_get_softc(dev);
507 
508 	q = &sc->vtscmi_queues[VIRTIO_SCMI_CHAN_A2P];
509 
510 	mtx_lock_spin(&q->vq_mtx);
511 	/* Not using virtqueue_poll since has no configurable timeout */
512 	pdu = virtqueue_dequeue(q->vq, rx_len);
513 	mtx_unlock_spin(&q->vq_mtx);
514 	if (pdu != NULL) {
515 		buf = pdu->buf;
516 		virtio_scmi_pdu_put(dev, pdu);
517 	}
518 
519 	return (buf);
520 }
521