xref: /freebsd/sys/dev/firmware/arm/scmi.c (revision f700da1c20f1f103d2ab2899d7060d3e331f082e)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2022 Ruslan Bukin <br@bsdpad.com>
5  * Copyright (c) 2023 Arm Ltd
6  *
7  * This work was supported by Innovate UK project 105694, "Digital Security
8  * by Design (DSbD) Technology Platform Prototype".
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/_bitset.h>
35 #include <sys/bitset.h>
36 #include <sys/bus.h>
37 #include <sys/cpu.h>
38 #include <sys/endian.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/malloc.h>
42 #include <sys/module.h>
43 #include <sys/mutex.h>
44 #include <sys/queue.h>
45 #include <sys/refcount.h>
46 
47 #include <dev/clk/clk.h>
48 #include <dev/fdt/simplebus.h>
49 #include <dev/fdt/fdt_common.h>
50 #include <dev/ofw/ofw_bus_subr.h>
51 
52 #include "scmi.h"
53 #include "scmi_protocols.h"
54 
55 #define SCMI_MAX_TOKEN		1024
56 
57 #define	SCMI_HDR_TOKEN_S		18
58 #define SCMI_HDR_TOKEN_BF		(0x3ff)
59 #define	SCMI_HDR_TOKEN_M		(SCMI_HDR_TOKEN_BF << SCMI_HDR_TOKEN_S)
60 
61 #define	SCMI_HDR_PROTOCOL_ID_S		10
62 #define	SCMI_HDR_PROTOCOL_ID_BF		(0xff)
63 #define	SCMI_HDR_PROTOCOL_ID_M		\
64     (SCMI_HDR_PROTOCOL_ID_BF << SCMI_HDR_PROTOCOL_ID_S)
65 
66 #define	SCMI_HDR_MESSAGE_TYPE_S		8
67 #define	SCMI_HDR_MESSAGE_TYPE_BF	(0x3)
68 #define	SCMI_HDR_MESSAGE_TYPE_M		\
69     (SCMI_HDR_MESSAGE_TYPE_BF << SCMI_HDR_MESSAGE_TYPE_S)
70 
71 #define	SCMI_HDR_MESSAGE_ID_S		0
72 #define	SCMI_HDR_MESSAGE_ID_BF		(0xff)
73 #define	SCMI_HDR_MESSAGE_ID_M		\
74     (SCMI_HDR_MESSAGE_ID_BF << SCMI_HDR_MESSAGE_ID_S)
75 
76 #define SCMI_MSG_TYPE_CMD	0
77 #define SCMI_MSG_TYPE_DRESP	2
78 #define SCMI_MSG_TYPE_NOTIF	3
79 
80 #define SCMI_MSG_TYPE_CHECK(_h, _t)					\
81     ((((_h) & SCMI_HDR_MESSAGE_TYPE_M) >> SCMI_HDR_MESSAGE_TYPE_S) == (_t))
82 
83 #define SCMI_IS_MSG_TYPE_NOTIF(h)					\
84     SCMI_MSG_TYPE_CHECK((h), SCMI_MSG_TYPE_NOTIF)
85 #define SCMI_IS_MSG_TYPE_DRESP(h)					\
86     SCMI_MSG_TYPE_CHECK((h), SCMI_MSG_TYPE_DRESP)
87 
88 #define SCMI_MSG_TOKEN(_hdr)		\
89     (((_hdr) & SCMI_HDR_TOKEN_M) >> SCMI_HDR_TOKEN_S)
90 
/*
 * An SCMI request descriptor: carries the transport message buffer
 * together with the tracking state needed while the request is in-flight.
 */
struct scmi_req {
	int		cnt;		/* Refcount (see scmi_req_get/put) */
	bool		timed_out;	/* Waiter gave up on the reply */
	bool		use_polling;	/* Caller asked for polled delivery */
	bool		done;		/* A reply was received */
	struct mtx	mtx;		/* Protects flags and refcount */
	LIST_ENTRY(scmi_req)	next;	/* Free-pool / inflight-hash linkage */
	int		protocol_id;	/* SCMI protocol ID for the header */
	int		message_id;	/* SCMI message ID for the header */
	int		token;		/* Sequence token while in-flight */
	uint32_t	header;		/* Pre-built LE header, kept for reply checks */
	struct scmi_msg msg;		/* Transport message (payload follows) */
};
104 
105 #define buf_to_msg(b)	__containerof((b), struct scmi_msg, payld)
106 #define msg_to_req(m)	__containerof((m), struct scmi_req, msg)
107 #define buf_to_req(b)	msg_to_req(buf_to_msg(b))
108 
109 LIST_HEAD(reqs_head, scmi_req);
110 
/* A spin-mutex-protected free-list of pre-allocated scmi_req entries. */
struct scmi_reqs_pool {
	struct mtx		mtx;	/* Guards the free-list below */
	struct reqs_head	head;	/* LIST of free scmi_req entries */
};
115 
116 BITSET_DEFINE(_scmi_tokens, SCMI_MAX_TOKEN);
117 LIST_HEAD(inflight_head, scmi_req);
118 #define	REQHASH(_sc, _tk)		\
119     (&((_sc)->trs->inflight_ht[(_tk) & (_sc)->trs->inflight_mask]))
120 
/*
 * Per-device transport state: token accounting, the in-flight request
 * hash table and one request pool per channel direction.
 */
struct scmi_transport {
	unsigned long		next_id;	/* Monotonic message sequence */
	struct _scmi_tokens	avail_tokens;	/* Bitmap of free tokens */
	struct inflight_head	*inflight_ht;	/* Token-hashed in-flight reqs */
	unsigned long		inflight_mask;	/* hashinit() mask for the HT */
	struct scmi_reqs_pool	*chans[SCMI_CHAN_MAX];	/* Per-channel free pools */
	struct mtx		mtx;		/* Guards tokens and the HT */
};
129 
130 static void		scmi_transport_configure(struct scmi_transport_desc *, phandle_t);
131 static int		scmi_transport_init(struct scmi_softc *, phandle_t);
132 static void		scmi_transport_cleanup(struct scmi_softc *);
133 static struct scmi_reqs_pool *scmi_reqs_pool_allocate(const int, const int);
134 static void		scmi_reqs_pool_free(struct scmi_reqs_pool *);
135 static struct scmi_req	*scmi_req_alloc(struct scmi_softc *, enum scmi_chan);
136 static struct scmi_req	*scmi_req_initialized_alloc(device_t, int, int);
137 static void		scmi_req_free_unlocked(struct scmi_softc *,
138 			    enum scmi_chan, struct scmi_req *);
139 static void		scmi_req_get(struct scmi_softc *, struct scmi_req *);
140 static void		scmi_req_put(struct scmi_softc *, struct scmi_req *);
141 static int		scmi_token_pick(struct scmi_softc *);
142 static void		scmi_token_release_unlocked(struct scmi_softc *, int);
143 static int		scmi_req_track_inflight(struct scmi_softc *,
144 			    struct scmi_req *);
145 static int		scmi_req_drop_inflight(struct scmi_softc *,
146 			    struct scmi_req *);
147 static struct scmi_req *scmi_req_lookup_inflight(struct scmi_softc *, uint32_t);
148 
149 static int		scmi_wait_for_response(struct scmi_softc *,
150 			    struct scmi_req *, void **);
151 static void		scmi_process_response(struct scmi_softc *, uint32_t,
152 			    unsigned int);
153 
/*
 * Newbus attach: bring up the simplebus glue and the SCMI transport,
 * then enumerate and attach child devices from the OFW tree.
 *
 * Returns 0 on success or an errno on failure.
 */
int
scmi_attach(device_t dev)
{
	struct scmi_softc *sc;
	phandle_t node;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	node = ofw_bus_get_node(dev);
	if (node == -1)
		return (ENXIO);

	simplebus_init(dev, node);

	/* Transport must be up before children can issue SCMI requests. */
	error = scmi_transport_init(sc, node);
	if (error != 0)
		return (error);

	device_printf(dev, "Transport - max_msg:%d  max_payld_sz:%lu  reply_timo_ms:%d\n",
	    SCMI_MAX_MSG(sc), SCMI_MAX_MSG_PAYLD_SIZE(sc), SCMI_MAX_MSG_TIMEOUT_MS(sc));

	/*
	 * Allow devices to identify.
	 */
	bus_identify_children(dev);

	/*
	 * Now walk the OFW tree and attach top-level devices.
	 */
	for (node = OF_child(node); node > 0; node = OF_peer(node))
		simplebus_add_device(dev, node, 0, NULL, -1, NULL);

	bus_attach_children(dev);

	return (0);
}
192 
193 static int
194 scmi_detach(device_t dev)
195 {
196 	struct scmi_softc *sc;
197 
198 	sc = device_get_softc(dev);
199 	scmi_transport_cleanup(sc);
200 
201 	return (0);
202 }
203 
/* Device interface methods; bus methods are inherited from simplebus. */
static device_method_t scmi_methods[] = {
	DEVMETHOD(device_attach,	scmi_attach),
	DEVMETHOD(device_detach,	scmi_detach),

	DEVMETHOD_END
};

/* scmi extends the simplebus driver class so children enumerate via OFW. */
DEFINE_CLASS_1(scmi, scmi_driver, scmi_methods, sizeof(struct scmi_softc),
    simplebus_driver);

DRIVER_MODULE(scmi, simplebus, scmi_driver, 0, 0);
MODULE_VERSION(scmi, 1);
216 
217 static struct scmi_reqs_pool *
218 scmi_reqs_pool_allocate(const int max_msg, const int max_payld_sz)
219 {
220 	struct scmi_reqs_pool *rp;
221 	struct scmi_req *req;
222 
223 	rp = malloc(sizeof(*rp), M_DEVBUF, M_ZERO | M_WAITOK);
224 
225 	LIST_INIT(&rp->head);
226 	for (int i = 0; i < max_msg; i++) {
227 		req = malloc(sizeof(*req) + max_payld_sz,
228 		    M_DEVBUF, M_ZERO | M_WAITOK);
229 
230 		mtx_init(&req->mtx, "req", "SCMI", MTX_SPIN);
231 		LIST_INSERT_HEAD(&rp->head, req, next);
232 	}
233 
234 	mtx_init(&rp->mtx, "reqs_pool", "SCMI", MTX_SPIN);
235 
236 	return (rp);
237 }
238 
239 static void
240 scmi_reqs_pool_free(struct scmi_reqs_pool *rp)
241 {
242 	struct scmi_req *req;
243 
244 	LIST_FOREACH(req, &rp->head, next) {
245 		mtx_destroy(&req->mtx);
246 		free(req, M_DEVBUF);
247 	}
248 
249 	mtx_destroy(&rp->mtx);
250 	free(rp, M_DEVBUF);
251 }
252 
/*
 * Read transport tunables from the device tree node, falling back to the
 * SCMI defaults when a property is absent.  OF_getencprop() returns -1
 * on failure and leaves the output buffer untouched in that case.
 */
static void
scmi_transport_configure(struct scmi_transport_desc *td, phandle_t node)
{
	if (OF_getencprop(node, "arm,max-msg", &td->max_msg, sizeof(td->max_msg)) == -1)
		td->max_msg = SCMI_DEF_MAX_MSG;

	if (OF_getencprop(node, "arm,max-msg-size", &td->max_payld_sz,
	    sizeof(td->max_payld_sz)) == -1)
		td->max_payld_sz = SCMI_DEF_MAX_MSG_PAYLD_SIZE;
}
263 
264 static int
265 scmi_transport_init(struct scmi_softc *sc, phandle_t node)
266 {
267 	struct scmi_transport_desc *td = &sc->trs_desc;
268 	struct scmi_transport *trs;
269 	int ret;
270 
271 	trs = malloc(sizeof(*trs), M_DEVBUF, M_ZERO | M_WAITOK);
272 
273 	scmi_transport_configure(td, node);
274 
275 	BIT_FILL(SCMI_MAX_TOKEN, &trs->avail_tokens);
276 	mtx_init(&trs->mtx, "tokens", "SCMI", MTX_SPIN);
277 
278 	trs->inflight_ht = hashinit(td->max_msg, M_DEVBUF, &trs->inflight_mask);
279 
280 	trs->chans[SCMI_CHAN_A2P] =
281 	    scmi_reqs_pool_allocate(td->max_msg, td->max_payld_sz);
282 	if (trs->chans[SCMI_CHAN_A2P] == NULL) {
283 		free(trs, M_DEVBUF);
284 		return (ENOMEM);
285 	}
286 
287 	trs->chans[SCMI_CHAN_P2A] =
288 	    scmi_reqs_pool_allocate(td->max_msg, td->max_payld_sz);
289 	if (trs->chans[SCMI_CHAN_P2A] == NULL) {
290 		scmi_reqs_pool_free(trs->chans[SCMI_CHAN_A2P]);
291 		free(trs, M_DEVBUF);
292 		return (ENOMEM);
293 	}
294 
295 	sc->trs = trs;
296 	ret = SCMI_TRANSPORT_INIT(sc->dev);
297 	if (ret != 0) {
298 		scmi_reqs_pool_free(trs->chans[SCMI_CHAN_A2P]);
299 		scmi_reqs_pool_free(trs->chans[SCMI_CHAN_P2A]);
300 		free(trs, M_DEVBUF);
301 		return (ret);
302 	}
303 
304 	/* Use default transport timeout if not overridden by OF */
305 	OF_getencprop(node, "arm,max-rx-timeout-ms", &td->reply_timo_ms,
306 	    sizeof(td->reply_timo_ms));
307 
308 	return (0);
309 }
310 
/*
 * Release every transport resource allocated by scmi_transport_init().
 * The transport-specific cleanup runs first so no new traffic arrives
 * while the hash table and request pools are being torn down.
 */
static void
scmi_transport_cleanup(struct scmi_softc *sc)
{

	SCMI_TRANSPORT_CLEANUP(sc->dev);
	mtx_destroy(&sc->trs->mtx);
	hashdestroy(sc->trs->inflight_ht, M_DEVBUF, sc->trs->inflight_mask);
	scmi_reqs_pool_free(sc->trs->chans[SCMI_CHAN_A2P]);
	scmi_reqs_pool_free(sc->trs->chans[SCMI_CHAN_P2A]);
	free(sc->trs, M_DEVBUF);
}
322 
323 static struct scmi_req *
324 scmi_req_initialized_alloc(device_t dev, int tx_payld_sz, int rx_payld_sz)
325 {
326 	struct scmi_softc *sc;
327 	struct scmi_req *req;
328 
329 	sc = device_get_softc(dev);
330 
331 	if (tx_payld_sz > SCMI_MAX_MSG_PAYLD_SIZE(sc) ||
332 	    rx_payld_sz > SCMI_MAX_MSG_REPLY_SIZE(sc)) {
333 		device_printf(dev, "Unsupported payload size. Drop.\n");
334 		return (NULL);
335 	}
336 
337 	/* Pick one from free list */
338 	req = scmi_req_alloc(sc, SCMI_CHAN_A2P);
339 	if (req == NULL)
340 		return (NULL);
341 
342 	req->msg.tx_len = sizeof(req->msg.hdr) + tx_payld_sz;
343 	req->msg.rx_len = rx_payld_sz ?
344 	    rx_payld_sz + 2 * sizeof(uint32_t) : SCMI_MAX_MSG_SIZE(sc);
345 
346 	return (req);
347 }
348 
349 static struct scmi_req *
350 scmi_req_alloc(struct scmi_softc *sc, enum scmi_chan ch_idx)
351 {
352 	struct scmi_reqs_pool *rp;
353 	struct scmi_req *req = NULL;
354 
355 	rp = sc->trs->chans[ch_idx];
356 	mtx_lock_spin(&rp->mtx);
357 	if (!LIST_EMPTY(&rp->head)) {
358 		req = LIST_FIRST(&rp->head);
359 		LIST_REMOVE_HEAD(&rp->head, next);
360 	}
361 	mtx_unlock_spin(&rp->mtx);
362 
363 	if (req != NULL)
364 		refcount_init(&req->cnt, 1);
365 
366 	return (req);
367 }
368 
369 static void
370 scmi_req_free_unlocked(struct scmi_softc *sc, enum scmi_chan ch_idx,
371     struct scmi_req *req)
372 {
373 	struct scmi_reqs_pool *rp;
374 
375 	rp = sc->trs->chans[ch_idx];
376 	mtx_lock_spin(&rp->mtx);
377 	req->timed_out = false;
378 	req->done = false;
379 	refcount_init(&req->cnt, 0);
380 	LIST_INSERT_HEAD(&rp->head, req, next);
381 	mtx_unlock_spin(&rp->mtx);
382 }
383 
384 static void
385 scmi_req_get(struct scmi_softc *sc, struct scmi_req *req)
386 {
387 	bool ok;
388 
389 	mtx_lock_spin(&req->mtx);
390 	ok = refcount_acquire_if_not_zero(&req->cnt);
391 	mtx_unlock_spin(&req->mtx);
392 
393 	if (!ok)
394 		device_printf(sc->dev, "%s() -- BAD REFCOUNT\n", __func__);
395 
396 	return;
397 }
398 
/*
 * Drop a reference to @req; when this is the last one, scrub the
 * transaction state and return the descriptor to the A2P free pool.
 *
 * refcount_release_if_not_last() returns false exactly when cnt == 1,
 * i.e. when we hold the final reference (without decrementing it), so
 * the recycle path below runs only for the last holder.
 */
static void
scmi_req_put(struct scmi_softc *sc, struct scmi_req *req)
{
	mtx_lock_spin(&req->mtx);
	if (!refcount_release_if_not_last(&req->cnt)) {
		req->protocol_id = 0;
		req->message_id = 0;
		req->token = 0;
		req->header = 0;
		/* Scrub the message header plus the trailing payload area. */
		bzero(&req->msg, sizeof(req->msg) + SCMI_MAX_MSG_PAYLD_SIZE(sc));
		scmi_req_free_unlocked(sc, SCMI_CHAN_A2P, req);
	}
	mtx_unlock_spin(&req->mtx);
}
413 
/*
 * Pick a free 10-bit sequence token for an outgoing message, searching
 * the availability bitmap from the next sequence number onward.
 *
 * Returns the 0-based token on success, or -EBUSY when every token is
 * currently in-flight.
 */
static int
scmi_token_pick(struct scmi_softc *sc)
{
	unsigned long next_msg_id, token;

	mtx_lock_spin(&sc->trs->mtx);
	/*
	 * next_id is a monotonically increasing unsigned long that can be used
	 * for tracing purposes; next_msg_id is a 10-bit sequence number derived
	 * from it.
	 */
	next_msg_id = sc->trs->next_id++ & SCMI_HDR_TOKEN_BF;
	token = BIT_FFS_AT(SCMI_MAX_TOKEN, &sc->trs->avail_tokens, next_msg_id);
	/* TODO Account for wrap-arounds and holes */
	if (token != 0)
		BIT_CLR(SCMI_MAX_TOKEN, token - 1, &sc->trs->avail_tokens);
	mtx_unlock_spin(&sc->trs->mtx);

	/*
	 * BIT_FFS_AT returns 1-indexed values, so 0 means failure to find a
	 * free slot: all possible SCMI messages are in-flight using all of the
	 * SCMI_MAX_TOKEN sequence numbers.
	 */
	if (!token)
		return (-EBUSY);

	return ((int)(token - 1));
}
442 
/*
 * Mark @token as available again in the bitmap.
 * Caller must hold sc->trs->mtx.
 */
static void
scmi_token_release_unlocked(struct scmi_softc *sc, int token)
{

	BIT_SET(SCMI_MAX_TOKEN, token, &sc->trs->avail_tokens);
}
449 
450 static int
451 scmi_finalize_req(struct scmi_softc *sc, struct scmi_req *req)
452 {
453 	uint32_t header = 0;
454 
455 	req->token = scmi_token_pick(sc);
456 	if (req->token < 0)
457 		return (EBUSY);
458 
459 	header = req->message_id;
460 	header |= SCMI_MSG_TYPE_CMD << SCMI_HDR_MESSAGE_TYPE_S;
461 	header |= req->protocol_id << SCMI_HDR_PROTOCOL_ID_S;
462 	header |= req->token << SCMI_HDR_TOKEN_S;
463 
464 	req->header = htole32(header);
465 	req->msg.hdr = htole32(header);
466 
467 	return (0);
468 }
469 
470 static int
471 scmi_req_track_inflight(struct scmi_softc *sc, struct scmi_req *req)
472 {
473 	int error;
474 
475 	/* build hdr, pick token */
476 	error = scmi_finalize_req(sc, req);
477 	if (error != 0)
478 		return (error);
479 
480 	/* Bump refcount to get hold of this in-flight transaction */
481 	scmi_req_get(sc, req);
482 	/* Register in the inflight hashtable */
483 	mtx_lock_spin(&sc->trs->mtx);
484 	LIST_INSERT_HEAD(REQHASH(sc, req->token), req, next);
485 	mtx_unlock_spin(&sc->trs->mtx);
486 
487 	return (0);
488 }
489 
490 static int
491 scmi_req_drop_inflight(struct scmi_softc *sc, struct scmi_req *req)
492 {
493 
494 	/* Remove from inflight hashtable at first ... */
495 	mtx_lock_spin(&sc->trs->mtx);
496 	LIST_REMOVE(req, next);
497 	scmi_token_release_unlocked(sc, req->token);
498 	mtx_unlock_spin(&sc->trs->mtx);
499 	/* ...and drop refcount..potentially releasing *req */
500 	scmi_req_put(sc, req);
501 
502 	return (0);
503 }
504 
505 static struct scmi_req *
506 scmi_req_lookup_inflight(struct scmi_softc *sc, uint32_t hdr)
507 {
508 	struct scmi_req *req = NULL;
509 	unsigned int token;
510 
511 	token = SCMI_MSG_TOKEN(hdr);
512 	mtx_lock_spin(&sc->trs->mtx);
513 	LIST_FOREACH(req, REQHASH(sc, token), next) {
514 		if (req->token == token)
515 			break;
516 	}
517 	mtx_unlock_spin(&sc->trs->mtx);
518 
519 	return (req);
520 }
521 
/*
 * Match an incoming reply (hdr/rx_len) against its in-flight request and
 * hand it to the waiter, accounting for the race between the IRQ and
 * polling delivery paths and for replies arriving after a timeout.
 */
static void
scmi_process_response(struct scmi_softc *sc, uint32_t hdr, uint32_t rx_len)
{
	bool timed_out = false;
	struct scmi_req *req;

	req = scmi_req_lookup_inflight(sc, hdr);
	if (req == NULL) {
		device_printf(sc->dev,
		    "Unexpected reply with header |%X| - token: 0x%X Drop.\n",
		    hdr, SCMI_MSG_TOKEN(hdr));
		return;
	}

	mtx_lock_spin(&req->mtx);
	req->done = true;
	req->msg.rx_len = rx_len;
	if (!req->timed_out) {
		/*
		 * Consider the case in which a polled message is picked
		 * by chance on the IRQ path on another CPU: setting poll_done
		 * will terminate the other poll loop.
		 */
		if (!req->msg.polling)
			wakeup(req);
		else
			atomic_store_rel_int(&req->msg.poll_done, 1);
	} else {
		/* The waiter already gave up; only note it for the log. */
		timed_out = true;
	}
	mtx_unlock_spin(&req->mtx);

	if (timed_out)
		device_printf(sc->dev,
		    "Late reply for timed-out request - token: 0x%X. Ignore.\n",
		    req->token);

	/*
	 * In case of a late reply to a timed-out transaction this will
	 * finally free the pending scmi_req
	 */
	scmi_req_drop_inflight(sc, req);
}
565 
566 void
567 scmi_rx_irq_callback(device_t dev, void *chan, uint32_t hdr, uint32_t rx_len)
568 {
569 	struct scmi_softc *sc;
570 
571 	sc = device_get_softc(dev);
572 
573 	if (SCMI_IS_MSG_TYPE_NOTIF(hdr) || SCMI_IS_MSG_TYPE_DRESP(hdr)) {
574 		device_printf(dev, "DRESP/NOTIF unsupported. Drop.\n");
575 		SCMI_CLEAR_CHANNEL(dev, chan);
576 		return;
577 	}
578 
579 	scmi_process_response(sc, hdr, rx_len);
580 }
581 
/*
 * Wait for the reply to @req, either by polling the transport or by
 * sleeping until the IRQ path wakes us, bounded by the transport reply
 * timeout.  On success *out is pointed at the reply payload past the
 * status word.  Returns 0 on success, the SCMI status from the reply,
 * or a timeout error.
 */
static int
scmi_wait_for_response(struct scmi_softc *sc, struct scmi_req *req, void **out)
{
	unsigned int reply_timo_ms = SCMI_MAX_MSG_TIMEOUT_MS(sc);
	int ret;

	if (req->msg.polling) {
		bool needs_drop;

		ret = SCMI_POLL_MSG(sc->dev, &req->msg, reply_timo_ms);
		/*
		 * Drop reference to successfully polled req unless it had
		 * already also been processed on the IRQ path.
		 * Addresses a possible race-condition between polling and
		 * interrupt reception paths.
		 */
		mtx_lock_spin(&req->mtx);
		needs_drop = (ret == 0) && !req->done;
		req->timed_out = ret != 0;
		mtx_unlock_spin(&req->mtx);
		if (needs_drop)
			scmi_req_drop_inflight(sc, req);
		/* Both sides are little-endian, so compare them raw. */
		if (ret == 0 && req->msg.hdr != req->header) {
			device_printf(sc->dev,
			    "Malformed reply with header |%08X|. Expected: |%08X|Drop.\n",
			    le32toh(req->msg.hdr), le32toh(req->header));
		}
	} else {
		ret = tsleep(req, 0, "scmi_wait4", (reply_timo_ms * hz) / 1000);
		/* Check for lost wakeups since there is no associated lock */
		mtx_lock_spin(&req->mtx);
		if (ret != 0 && req->done)
			ret = 0;
		req->timed_out = ret != 0;
		mtx_unlock_spin(&req->mtx);
	}

	if (ret == 0) {
		SCMI_COLLECT_REPLY(sc->dev, &req->msg);
		/* The first payload word carries the SCMI status code. */
		if (req->msg.payld[0] != 0)
			ret = req->msg.payld[0];
		*out = &req->msg.payld[SCMI_MSG_HDR_SIZE];
	} else {
		device_printf(sc->dev,
		    "Request for token 0x%X timed-out.\n", req->token);
	}

	SCMI_TX_COMPLETE(sc->dev, NULL);

	return (ret);
}
633 
634 void *
635 scmi_buf_get(device_t dev, uint8_t protocol_id, uint8_t message_id,
636     int tx_payld_sz, int rx_payld_sz)
637 {
638 	struct scmi_req *req;
639 
640 	/* Pick a pre-built req */
641 	req = scmi_req_initialized_alloc(dev, tx_payld_sz, rx_payld_sz);
642 	if (req == NULL)
643 		return (NULL);
644 
645 	req->protocol_id = protocol_id & SCMI_HDR_PROTOCOL_ID_BF;
646 	req->message_id = message_id & SCMI_HDR_MESSAGE_ID_BF;
647 
648 	return (&req->msg.payld[0]);
649 }
650 
651 void
652 scmi_buf_put(device_t dev, void *buf)
653 {
654 	struct scmi_softc *sc;
655 	struct scmi_req *req;
656 
657 	sc = device_get_softc(dev);
658 
659 	req = buf_to_req(buf);
660 	scmi_req_put(sc, req);
661 }
662 
663 struct scmi_msg *
664 scmi_msg_get(device_t dev, int tx_payld_sz, int rx_payld_sz)
665 {
666 	struct scmi_req *req;
667 
668 	/* Pick a pre-built req */
669 	req = scmi_req_initialized_alloc(dev, tx_payld_sz, rx_payld_sz);
670 	if (req == NULL)
671 		return (NULL);
672 
673 	return (&req->msg);
674 }
675 
676 void
677 scmi_msg_put(device_t dev, struct scmi_msg *msg)
678 {
679 	struct scmi_softc *sc;
680 	struct scmi_req *req;
681 
682 	sc = device_get_softc(dev);
683 
684 	req = msg_to_req(msg);
685 
686 	scmi_req_put(sc, req);
687 }
688 
/*
 * Send a pre-built SCMI command (@in as returned by scmi_buf_get()) and
 * wait for its reply; on success *out points at the reply payload.
 *
 * Polling is forced while the system is cold, when the transport lacks a
 * completion interrupt, or when the caller requested it explicitly.
 */
int
scmi_request(device_t dev, void *in, void **out)
{
	struct scmi_softc *sc;
	struct scmi_req *req;
	int error;

	sc = device_get_softc(dev);

	req = buf_to_req(in);

	req->msg.polling =
	    (cold || sc->trs_desc.no_completion_irq || req->use_polling);

	/* Set inflight and send using transport specific method - refc-2 */
	error = scmi_req_track_inflight(sc, req);
	if (error != 0)
		return (error);

	error = SCMI_XFER_MSG(sc->dev, &req->msg);
	if (error != 0) {
		scmi_req_drop_inflight(sc, req);
		return (error);
	}

	return (scmi_wait_for_response(sc, req, out));
}
716