xref: /freebsd/sys/dev/firmware/arm/scmi.c (revision c57c26179033f64c2011a2d2a904ee3fa62e826a)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2022 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2023 Arm Ltd
 *
 * This work was supported by Innovate UK project 105694, "Digital Security
 * by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/refcount.h>

#include <dev/clk/clk.h>
#include <dev/fdt/simplebus.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus_subr.h>

#include "scmi.h"
#include "scmi_protocols.h"

#define SCMI_MAX_TOKEN		1024

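/*
 * Layout of the 32-bit SCMI message header encoded by the macros below
 * (bits [31:28] are reserved):
 *
 *   [27:18]  token, a 10-bit sequence number (hence SCMI_MAX_TOKEN = 1024)
 *   [17:10]  protocol ID
 *   [9:8]    message type
 *   [7:0]    message ID
 */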
#define	SCMI_HDR_TOKEN_S		18
#define	SCMI_HDR_TOKEN_BF		(0x3ff)
#define	SCMI_HDR_TOKEN_M		(SCMI_HDR_TOKEN_BF << SCMI_HDR_TOKEN_S)

#define	SCMI_HDR_PROTOCOL_ID_S		10
#define	SCMI_HDR_PROTOCOL_ID_BF		(0xff)
#define	SCMI_HDR_PROTOCOL_ID_M		\
    (SCMI_HDR_PROTOCOL_ID_BF << SCMI_HDR_PROTOCOL_ID_S)

#define	SCMI_HDR_MESSAGE_TYPE_S		8
#define	SCMI_HDR_MESSAGE_TYPE_BF	(0x3)
#define	SCMI_HDR_MESSAGE_TYPE_M		\
    (SCMI_HDR_MESSAGE_TYPE_BF << SCMI_HDR_MESSAGE_TYPE_S)

#define	SCMI_HDR_MESSAGE_ID_S		0
#define	SCMI_HDR_MESSAGE_ID_BF		(0xff)
#define	SCMI_HDR_MESSAGE_ID_M		\
    (SCMI_HDR_MESSAGE_ID_BF << SCMI_HDR_MESSAGE_ID_S)

#define SCMI_MSG_TYPE_CMD	0
#define SCMI_MSG_TYPE_DRESP	2
#define SCMI_MSG_TYPE_NOTIF	3

#define SCMI_MSG_TYPE_CHECK(_h, _t)					\
    ((((_h) & SCMI_HDR_MESSAGE_TYPE_M) >> SCMI_HDR_MESSAGE_TYPE_S) == (_t))

#define SCMI_IS_MSG_TYPE_NOTIF(h)					\
    SCMI_MSG_TYPE_CHECK((h), SCMI_MSG_TYPE_NOTIF)
#define SCMI_IS_MSG_TYPE_DRESP(h)					\
    SCMI_MSG_TYPE_CHECK((h), SCMI_MSG_TYPE_DRESP)

#define SCMI_MSG_TOKEN(_hdr)		\
    (((_hdr) & SCMI_HDR_TOKEN_M) >> SCMI_HDR_TOKEN_S)

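/*
 * A pre-allocated transaction descriptor: carries the IDs and token used
 * to build the message header, the flags shared between the polling,
 * interrupt and timeout paths, and the message itself, whose payload is
 * allocated immediately past the end of the structure.
 */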
struct scmi_req {
	int		cnt;
	bool		timed_out;
	bool		use_polling;
	bool		done;
	struct mtx	mtx;
	LIST_ENTRY(scmi_req)	next;
	int		protocol_id;
	int		message_id;
	int		token;
	uint32_t	header;
	struct scmi_msg msg;
};

#define buf_to_msg(b)	__containerof((b), struct scmi_msg, payld)
#define msg_to_req(m)	__containerof((m), struct scmi_req, msg)
#define buf_to_req(b)	msg_to_req(buf_to_msg(b))

LIST_HEAD(reqs_head, scmi_req);

struct scmi_reqs_pool {
	struct mtx		mtx;
	struct reqs_head	head;
};

BITSET_DEFINE(_scmi_tokens, SCMI_MAX_TOKEN);
LIST_HEAD(inflight_head, scmi_req);
#define	REQHASH(_sc, _tk)		\
    (&((_sc)->trs->inflight_ht[(_tk) & (_sc)->trs->inflight_mask]))

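/*
 * Per-device transport state: a monotonic message ID, the bitmap of
 * available tokens and the hashtable of in-flight requests (both
 * protected by the embedded spin mutex), and one pool of pre-allocated
 * requests per channel.
 */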
struct scmi_transport {
	unsigned long		next_id;
	struct _scmi_tokens	avail_tokens;
	struct inflight_head	*inflight_ht;
	unsigned long		inflight_mask;
	struct scmi_reqs_pool	*chans[SCMI_CHAN_MAX];
	struct mtx		mtx;
};

static int		scmi_transport_init(struct scmi_softc *);
static void		scmi_transport_cleanup(struct scmi_softc *);
static struct scmi_reqs_pool *scmi_reqs_pool_allocate(const int, const int);
static void		scmi_reqs_pool_free(struct scmi_reqs_pool *);
static struct scmi_req *scmi_req_alloc(struct scmi_softc *, enum scmi_chan);
static void		scmi_req_free_unlocked(struct scmi_softc *,
    enum scmi_chan, struct scmi_req *);
static void		scmi_req_get(struct scmi_softc *, struct scmi_req *);
static void		scmi_req_put(struct scmi_softc *, struct scmi_req *);
static int		scmi_token_pick(struct scmi_softc *);
static void		scmi_token_release_unlocked(struct scmi_softc *, int);
static int		scmi_req_track_inflight(struct scmi_softc *,
			    struct scmi_req *);
static int		scmi_req_drop_inflight(struct scmi_softc *,
			    struct scmi_req *);
static struct scmi_req *scmi_req_lookup_inflight(struct scmi_softc *, uint32_t);

static int		scmi_wait_for_response(struct scmi_softc *,
			    struct scmi_req *, void **);
static void		scmi_process_response(struct scmi_softc *, uint32_t);

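/*
 * Initialize the transport layer and attach the SCMI protocol drivers
 * found as children of this node in the device tree.
 */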
int
scmi_attach(device_t dev)
{
	struct scmi_softc *sc;
	phandle_t node;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	node = ofw_bus_get_node(dev);
	if (node == -1)
		return (ENXIO);

	simplebus_init(dev, node);

	error = scmi_transport_init(sc);
	if (error != 0)
		return (error);

	device_printf(dev, "Transport reply timeout initialized to %dms\n",
	    sc->trs_desc.reply_timo_ms);

	/*
	 * Allow devices to identify.
	 */
	bus_generic_probe(dev);

	/*
	 * Now walk the OFW tree and attach top-level devices.
	 */
	for (node = OF_child(node); node > 0; node = OF_peer(node))
		simplebus_add_device(dev, node, 0, NULL, -1, NULL);

	error = bus_generic_attach(dev);

	return (error);
}

static int
scmi_detach(device_t dev)
{
	struct scmi_softc *sc;

	sc = device_get_softc(dev);
	scmi_transport_cleanup(sc);

	return (0);
}

static device_method_t scmi_methods[] = {
	DEVMETHOD(device_attach,	scmi_attach),
	DEVMETHOD(device_detach,	scmi_detach),

	DEVMETHOD_END
};

DEFINE_CLASS_1(scmi, scmi_driver, scmi_methods, sizeof(struct scmi_softc),
    simplebus_driver);

DRIVER_MODULE(scmi, simplebus, scmi_driver, 0, 0);
MODULE_VERSION(scmi, 1);

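/*
 * Pre-allocate a pool of max_msg requests, each with room for a payload
 * of up to max_payld_sz bytes past the message header.
 */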
static struct scmi_reqs_pool *
scmi_reqs_pool_allocate(const int max_msg, const int max_payld_sz)
{
	struct scmi_reqs_pool *rp;
	struct scmi_req *req;

	rp = malloc(sizeof(*rp), M_DEVBUF, M_ZERO | M_WAITOK);

	LIST_INIT(&rp->head);
	for (int i = 0; i < max_msg; i++) {
		req = malloc(sizeof(*req) + max_payld_sz,
		    M_DEVBUF, M_ZERO | M_WAITOK);

		mtx_init(&req->mtx, "req", "SCMI", MTX_SPIN);
		LIST_INSERT_HEAD(&rp->head, req, next);
	}

	mtx_init(&rp->mtx, "reqs_pool", "SCMI", MTX_SPIN);

	return (rp);
}

static void
scmi_reqs_pool_free(struct scmi_reqs_pool *rp)
{
	struct scmi_req *req, *tmp;

	/* A _SAFE walk is required here: each element is freed mid-loop. */
	LIST_FOREACH_SAFE(req, &rp->head, next, tmp) {
		mtx_destroy(&req->mtx);
		free(req, M_DEVBUF);
	}

	mtx_destroy(&rp->mtx);
	free(rp, M_DEVBUF);
}

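/*
 * Set up the transport state: token bitmap, in-flight hashtable and
 * per-channel request pools, then let the transport-specific driver
 * initialize its channels.
 */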
static int
scmi_transport_init(struct scmi_softc *sc)
{
	struct scmi_transport *trs;
	int ret;

	trs = malloc(sizeof(*trs), M_DEVBUF, M_ZERO | M_WAITOK);

	BIT_FILL(SCMI_MAX_TOKEN, &trs->avail_tokens);
	mtx_init(&trs->mtx, "tokens", "SCMI", MTX_SPIN);

	trs->inflight_ht = hashinit(SCMI_MAX_MSG, M_DEVBUF,
	    &trs->inflight_mask);

	trs->chans[SCMI_CHAN_A2P] =
	    scmi_reqs_pool_allocate(SCMI_MAX_MSG, SCMI_MAX_MSG_PAYLD_SIZE);
	if (trs->chans[SCMI_CHAN_A2P] == NULL) {
		hashdestroy(trs->inflight_ht, M_DEVBUF, trs->inflight_mask);
		mtx_destroy(&trs->mtx);
		free(trs, M_DEVBUF);
		return (ENOMEM);
	}

	trs->chans[SCMI_CHAN_P2A] =
	    scmi_reqs_pool_allocate(SCMI_MAX_MSG, SCMI_MAX_MSG_PAYLD_SIZE);
	if (trs->chans[SCMI_CHAN_P2A] == NULL) {
		scmi_reqs_pool_free(trs->chans[SCMI_CHAN_A2P]);
		hashdestroy(trs->inflight_ht, M_DEVBUF, trs->inflight_mask);
		mtx_destroy(&trs->mtx);
		free(trs, M_DEVBUF);
		return (ENOMEM);
	}

	sc->trs = trs;
	ret = SCMI_TRANSPORT_INIT(sc->dev);
	if (ret != 0) {
		scmi_reqs_pool_free(trs->chans[SCMI_CHAN_A2P]);
		scmi_reqs_pool_free(trs->chans[SCMI_CHAN_P2A]);
		hashdestroy(trs->inflight_ht, M_DEVBUF, trs->inflight_mask);
		mtx_destroy(&trs->mtx);
		sc->trs = NULL;
		free(trs, M_DEVBUF);
		return (ret);
	}

	return (0);
}

static void
scmi_transport_cleanup(struct scmi_softc *sc)
{

	SCMI_TRANSPORT_CLEANUP(sc->dev);
	mtx_destroy(&sc->trs->mtx);
	hashdestroy(sc->trs->inflight_ht, M_DEVBUF, sc->trs->inflight_mask);
	scmi_reqs_pool_free(sc->trs->chans[SCMI_CHAN_A2P]);
	scmi_reqs_pool_free(sc->trs->chans[SCMI_CHAN_P2A]);
	free(sc->trs, M_DEVBUF);
}

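/* Pull a free request off the given channel's pool, refcount set to 1. */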
static struct scmi_req *
scmi_req_alloc(struct scmi_softc *sc, enum scmi_chan ch_idx)
{
	struct scmi_reqs_pool *rp;
	struct scmi_req *req = NULL;

	rp = sc->trs->chans[ch_idx];
	mtx_lock_spin(&rp->mtx);
	if (!LIST_EMPTY(&rp->head)) {
		req = LIST_FIRST(&rp->head);
		LIST_REMOVE_HEAD(&rp->head, next);
	}
	mtx_unlock_spin(&rp->mtx);

	if (req != NULL)
		refcount_init(&req->cnt, 1);

	return (req);
}

static void
scmi_req_free_unlocked(struct scmi_softc *sc, enum scmi_chan ch_idx,
    struct scmi_req *req)
{
	struct scmi_reqs_pool *rp;

	rp = sc->trs->chans[ch_idx];
	mtx_lock_spin(&rp->mtx);
	req->timed_out = false;
	req->done = false;
	refcount_init(&req->cnt, 0);
	LIST_INSERT_HEAD(&rp->head, req, next);
	mtx_unlock_spin(&rp->mtx);
}

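/*
 * Request reference counting: a request leaves the free pool with a
 * refcount of 1 and takes an extra reference while tracked in-flight.
 * When scmi_req_put() drops the last reference, the request is scrubbed
 * and returned to the A2P free pool.
 */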
static void
scmi_req_get(struct scmi_softc *sc, struct scmi_req *req)
{
	bool ok;

	mtx_lock_spin(&req->mtx);
	ok = refcount_acquire_if_not_zero(&req->cnt);
	mtx_unlock_spin(&req->mtx);

	if (!ok)
		device_printf(sc->dev, "%s() -- BAD REFCOUNT\n", __func__);
}

static void
scmi_req_put(struct scmi_softc *sc, struct scmi_req *req)
{
	mtx_lock_spin(&req->mtx);
	if (!refcount_release_if_not_last(&req->cnt)) {
		bzero(&req->msg, sizeof(req->msg) + SCMI_MAX_MSG_PAYLD_SIZE);
		scmi_req_free_unlocked(sc, SCMI_CHAN_A2P, req);
	}
	mtx_unlock_spin(&req->mtx);
}

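/*
 * Pick a free token for a new message: the bitmap search starts at the
 * 10-bit sequence number derived from next_id, so consecutive messages
 * tend to pick consecutive tokens. Returns -EBUSY when all of the
 * SCMI_MAX_TOKEN tokens are in use.
 */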
static int
scmi_token_pick(struct scmi_softc *sc)
{
	unsigned long next_msg_id, token;

	mtx_lock_spin(&sc->trs->mtx);
	/*
	 * next_id is a monotonically increasing unsigned long that can be used
	 * for tracing purposes; next_msg_id is a 10-bit sequence number derived
	 * from it.
	 */
	next_msg_id = sc->trs->next_id++ & SCMI_HDR_TOKEN_BF;
	token = BIT_FFS_AT(SCMI_MAX_TOKEN, &sc->trs->avail_tokens, next_msg_id);
	/* TODO Account for wrap-arounds and holes */
	if (token != 0)
		BIT_CLR(SCMI_MAX_TOKEN, token - 1, &sc->trs->avail_tokens);
	mtx_unlock_spin(&sc->trs->mtx);

	/*
	 * BIT_FFS_AT returns 1-indexed values, so 0 means failure to find a
	 * free slot: all possible SCMI messages are in-flight using all of the
	 * SCMI_MAX_TOKEN sequence numbers.
	 */
	if (token == 0)
		return (-EBUSY);

	return ((int)(token - 1));
}

static void
scmi_token_release_unlocked(struct scmi_softc *sc, int token)
{

	BIT_SET(SCMI_MAX_TOKEN, token, &sc->trs->avail_tokens);
}

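/* Compose the little-endian message header from the IDs and a fresh token. */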
static int
scmi_finalize_req(struct scmi_softc *sc, struct scmi_req *req)
{
	uint32_t header = 0;

	req->token = scmi_token_pick(sc);
	if (req->token < 0)
		return (EBUSY);

	header = req->message_id;
	header |= SCMI_MSG_TYPE_CMD << SCMI_HDR_MESSAGE_TYPE_S;
	header |= req->protocol_id << SCMI_HDR_PROTOCOL_ID_S;
	header |= req->token << SCMI_HDR_TOKEN_S;

	req->header = htole32(header);
	req->msg.hdr = htole32(header);

	return (0);
}

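/*
 * Finalize the header and publish the request in the in-flight
 * hashtable, taking an extra reference on it.
 */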
static int
scmi_req_track_inflight(struct scmi_softc *sc, struct scmi_req *req)
{
	int error;

	/* build hdr, pick token */
	error = scmi_finalize_req(sc, req);
	if (error != 0)
		return (error);

	/* Bump refcount to get hold of this in-flight transaction */
	scmi_req_get(sc, req);
	/* Register in the inflight hashtable */
	mtx_lock_spin(&sc->trs->mtx);
	LIST_INSERT_HEAD(REQHASH(sc, req->token), req, next);
	mtx_unlock_spin(&sc->trs->mtx);

	return (0);
}

static int
scmi_req_drop_inflight(struct scmi_softc *sc, struct scmi_req *req)
{

	/* First remove from the inflight hashtable and release the token... */
	mtx_lock_spin(&sc->trs->mtx);
	LIST_REMOVE(req, next);
	scmi_token_release_unlocked(sc, req->token);
	mtx_unlock_spin(&sc->trs->mtx);
	/* ...and drop the refcount, potentially releasing *req. */
	scmi_req_put(sc, req);

	return (0);
}

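/* Find the in-flight request matching the token carried by hdr, if any. */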
static struct scmi_req *
scmi_req_lookup_inflight(struct scmi_softc *sc, uint32_t hdr)
{
	struct scmi_req *req = NULL;
	unsigned int token;

	token = SCMI_MSG_TOKEN(hdr);
	mtx_lock_spin(&sc->trs->mtx);
	LIST_FOREACH(req, REQHASH(sc, token), next) {
		if (req->token == token)
			break;
	}
	mtx_unlock_spin(&sc->trs->mtx);

	return (req);
}

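/*
 * Match an incoming reply to its in-flight request and wake up (or
 * signal) the waiter, unless the request had already timed out; in any
 * case drop the in-flight reference and release the token.
 */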
static void
scmi_process_response(struct scmi_softc *sc, uint32_t hdr)
{
	bool timed_out = false;
	struct scmi_req *req;

	req = scmi_req_lookup_inflight(sc, hdr);
	if (req == NULL) {
		device_printf(sc->dev,
		    "Unexpected reply with header |%X| - token: 0x%X. Drop.\n",
		    hdr, SCMI_MSG_TOKEN(hdr));
		return;
	}

	mtx_lock_spin(&req->mtx);
	req->done = true;
	if (!req->timed_out) {
		/*
		 * Consider the case in which a polled message is picked
		 * by chance on the IRQ path on another CPU: setting poll_done
		 * will terminate the other poll loop.
		 */
		if (!req->msg.polling)
			wakeup(req);
		else
			atomic_store_rel_int(&req->msg.poll_done, 1);
	} else {
		timed_out = true;
	}
	mtx_unlock_spin(&req->mtx);

	if (timed_out)
		device_printf(sc->dev,
		    "Late reply for timed-out request - token: 0x%X. Ignore.\n",
		    req->token);

	/*
	 * In case of a late reply to a timed-out transaction this will
	 * finally free the pending scmi_req.
	 */
	scmi_req_drop_inflight(sc, req);
}

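/*
 * Transport drivers call this from their RX interrupt path with the
 * header of a freshly received message.
 */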
void
scmi_rx_irq_callback(device_t dev, void *chan, uint32_t hdr)
{
	struct scmi_softc *sc;

	sc = device_get_softc(dev);

	if (SCMI_IS_MSG_TYPE_NOTIF(hdr) || SCMI_IS_MSG_TYPE_DRESP(hdr)) {
		device_printf(dev, "DRESP/NOTIF unsupported. Drop.\n");
		SCMI_CLEAR_CHANNEL(dev, chan);
		return;
	}

	scmi_process_response(sc, hdr);
}

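/*
 * Wait for the reply to req, either by polling the channel or by
 * sleeping until the RX path wakes us, and extract the reply payload on
 * success; the reply's leading status is propagated as the return value
 * when non-zero.
 */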
static int
scmi_wait_for_response(struct scmi_softc *sc, struct scmi_req *req, void **out)
{
	int ret;

	if (req->msg.polling) {
		bool needs_drop;

		ret = SCMI_POLL_MSG(sc->dev, &req->msg,
		    sc->trs_desc.reply_timo_ms);
		/*
		 * Drop reference to successfully polled req unless it had
		 * already also been processed on the IRQ path.
		 * Addresses a possible race-condition between polling and
		 * interrupt reception paths.
		 */
		mtx_lock_spin(&req->mtx);
		needs_drop = (ret == 0) && !req->done;
		mtx_unlock_spin(&req->mtx);
		if (needs_drop)
			scmi_req_drop_inflight(sc, req);
		if (ret == 0 && req->msg.hdr != req->header) {
			device_printf(sc->dev,
			    "Malformed reply with header |%08X|. Expected: |%08X|. Drop.\n",
			    le32toh(req->msg.hdr), le32toh(req->header));
		}
	} else {
		ret = tsleep(req, 0, "scmi_wait4",
		    (sc->trs_desc.reply_timo_ms * hz) / 1000);
		/* Check for lost wakeups since there is no associated lock */
		mtx_lock_spin(&req->mtx);
		if (ret != 0 && req->done)
			ret = 0;
		mtx_unlock_spin(&req->mtx);
	}

	if (ret == 0) {
		SCMI_COLLECT_REPLY(sc->dev, &req->msg);
		if (req->msg.payld[0] != 0)
			ret = req->msg.payld[0];
		*out = &req->msg.payld[SCMI_MSG_HDR_SIZE];
	} else {
		mtx_lock_spin(&req->mtx);
		req->timed_out = true;
		mtx_unlock_spin(&req->mtx);
		device_printf(sc->dev,
		    "Request for token 0x%X timed-out.\n", req->token);
	}

	SCMI_TX_COMPLETE(sc->dev, NULL);

	return (ret);
}

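/*
 * Hand out a message buffer for the given protocol/message pair, sized
 * for the requested TX/RX payloads. A protocol driver is expected to use
 * it roughly as follows (a sketch; on success *out points into the
 * request's reply payload and stays valid until scmi_buf_put()):
 *
 *	buf = scmi_buf_get(dev, protocol_id, message_id, tx_sz, rx_sz);
 *	if (buf == NULL)
 *		return (ENXIO);
 *	...fill buf with the command payload...
 *	error = scmi_request(dev, buf, &out);
 *	if (error == 0)
 *		...parse the reply at out...
 *	scmi_buf_put(dev, buf);
 */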
void *
scmi_buf_get(device_t dev, uint8_t protocol_id, uint8_t message_id,
    int tx_payld_sz, int rx_payld_sz)
{
	struct scmi_softc *sc;
	struct scmi_req *req;

	sc = device_get_softc(dev);

	if (tx_payld_sz > SCMI_MAX_MSG_PAYLD_SIZE ||
	    rx_payld_sz > SCMI_MAX_MSG_REPLY_SIZE) {
		device_printf(dev, "Unsupported payload size. Drop.\n");
		return (NULL);
	}

	/* Pick one from free list */
	req = scmi_req_alloc(sc, SCMI_CHAN_A2P);
	if (req == NULL)
		return (NULL);

	req->protocol_id = protocol_id & SCMI_HDR_PROTOCOL_ID_BF;
	req->message_id = message_id & SCMI_HDR_MESSAGE_ID_BF;
	req->msg.tx_len = sizeof(req->msg.hdr) + tx_payld_sz;
	req->msg.rx_len = rx_payld_sz ?
	    rx_payld_sz + 2 * sizeof(uint32_t) : SCMI_MAX_MSG_SIZE;

	return (&req->msg.payld[0]);
}

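/* Return a buffer obtained from scmi_buf_get() to its free pool. */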
void
scmi_buf_put(device_t dev, void *buf)
{
	struct scmi_softc *sc;
	struct scmi_req *req;

	sc = device_get_softc(dev);

	req = buf_to_req(buf);
	scmi_req_put(sc, req);
}

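/*
 * Send a command built in a buffer obtained from scmi_buf_get() and wait
 * for its reply: polling is used when interrupts are not yet available
 * (cold boot), when the transport lacks a completion interrupt, or upon
 * explicit request.
 */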
int
scmi_request(device_t dev, void *in, void **out)
{
	struct scmi_softc *sc;
	struct scmi_req *req;
	int error;

	sc = device_get_softc(dev);

	req = buf_to_req(in);

	req->msg.polling =
	    (cold || sc->trs_desc.no_completion_irq || req->use_polling);

	/* Mark in-flight and send via the transport-specific method: refcount goes to 2 */
	error = scmi_req_track_inflight(sc, req);
	if (error != 0)
		return (error);

	error = SCMI_XFER_MSG(sc->dev, &req->msg);
	if (error != 0) {
		scmi_req_drop_inflight(sc, req);
		return (error);
	}

	return (scmi_wait_for_response(sc, req, out));
}