scmi.c: diff between revisions 3595f18fc78b9f799010a1a45fb890e3b087394d (old) and 35f93203963f83161012cd731e858a56548c2ef9 (new)
1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2022 Ruslan Bukin <br@bsdpad.com>
5 * Copyright (c) 2023 Arm Ltd
6 *
7 * This work was supported by Innovate UK project 105694, "Digital Security
8 * by Design (DSbD) Technology Platform Prototype".

--- 28 unchanged lines hidden ---

37#include <sys/cpu.h>
38#include <sys/endian.h>
39#include <sys/kernel.h>
40#include <sys/lock.h>
41#include <sys/malloc.h>
42#include <sys/module.h>
43#include <sys/mutex.h>
44#include <sys/queue.h>
1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2022 Ruslan Bukin <br@bsdpad.com>
5 * Copyright (c) 2023 Arm Ltd
6 *
7 * This work was supported by Innovate UK project 105694, "Digital Security
8 * by Design (DSbD) Technology Platform Prototype".

--- 28 unchanged lines hidden ---

37#include <sys/cpu.h>
38#include <sys/endian.h>
39#include <sys/kernel.h>
40#include <sys/lock.h>
41#include <sys/malloc.h>
42#include <sys/module.h>
43#include <sys/mutex.h>
44#include <sys/queue.h>
45#include <sys/refcount.h>
45
46#include <dev/clk/clk.h>
47#include <dev/fdt/simplebus.h>
48#include <dev/fdt/fdt_common.h>
49#include <dev/ofw/ofw_bus_subr.h>
50
51#include "scmi.h"
52#include "scmi_protocols.h"

--- 29 unchanged lines hidden ---

82#define SCMI_IS_MSG_TYPE_NOTIF(h) \
83 SCMI_MSG_TYPE_CHECK((h), SCMI_MSG_TYPE_NOTIF)
84#define SCMI_IS_MSG_TYPE_DRESP(h) \
85 SCMI_MSG_TYPE_CHECK((h), SCMI_MSG_TYPE_DRESP)
86
87#define SCMI_MSG_TOKEN(_hdr) \
88 (((_hdr) & SCMI_HDR_TOKEN_M) >> SCMI_HDR_TOKEN_S)
89
46
47#include <dev/clk/clk.h>
48#include <dev/fdt/simplebus.h>
49#include <dev/fdt/fdt_common.h>
50#include <dev/ofw/ofw_bus_subr.h>
51
52#include "scmi.h"
53#include "scmi_protocols.h"

--- 29 unchanged lines hidden ---

83#define SCMI_IS_MSG_TYPE_NOTIF(h) \
84 SCMI_MSG_TYPE_CHECK((h), SCMI_MSG_TYPE_NOTIF)
85#define SCMI_IS_MSG_TYPE_DRESP(h) \
86 SCMI_MSG_TYPE_CHECK((h), SCMI_MSG_TYPE_DRESP)
87
88#define SCMI_MSG_TOKEN(_hdr) \
89 (((_hdr) & SCMI_HDR_TOKEN_M) >> SCMI_HDR_TOKEN_S)
90
91struct scmi_req {
92 int cnt;
93 bool timed_out;
94 bool use_polling;
95 bool done;
96 struct mtx mtx;
97 LIST_ENTRY(scmi_req) next;
98 int protocol_id;
99 int message_id;
100 int token;
101 uint32_t header;
102 struct scmi_msg msg;
103};
104
105#define buf_to_msg(b) __containerof((b), struct scmi_msg, payld)
106#define msg_to_req(m) __containerof((m), struct scmi_req, msg)
107#define buf_to_req(b) msg_to_req(buf_to_msg(b))
108
109LIST_HEAD(reqs_head, scmi_req);
110
111struct scmi_reqs_pool {
112 struct mtx mtx;
113 struct reqs_head head;
114};
115
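
The buf_to_msg()/msg_to_req()/buf_to_req() macros above are what let the driver hand callers a bare payload pointer and later recover the request that owns it. A minimal, self-contained illustration of the same __containerof() pattern; the demo_* names and the containerof() fallback below are made up for illustration and are not part of the driver:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's __containerof(): parent from member pointer. */
#define containerof(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_msg {
        unsigned hdr;
        unsigned char payld[16];
};

struct demo_req {
        int token;
        struct demo_msg msg;
};

int
main(void)
{
        struct demo_req req = { .token = 42 };
        unsigned char *buf = req.msg.payld;     /* what a caller would see */
        struct demo_req *back;

        /* buf -> enclosing msg -> enclosing req, as buf_to_req() does. */
        back = containerof(containerof(buf, struct demo_msg, payld),
            struct demo_req, msg);
        printf("token = %d\n", back->token);    /* prints 42 */
        return (0);
}
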
90BITSET_DEFINE(_scmi_tokens, SCMI_MAX_TOKEN);
91LIST_HEAD(inflight_head, scmi_req);
92#define REQHASH(_sc, _tk) \
93 (&((_sc)->trs->inflight_ht[(_tk) & (_sc)->trs->inflight_mask]))
94
95struct scmi_transport {
96 unsigned long next_id;
97 struct _scmi_tokens avail_tokens;
98 struct inflight_head *inflight_ht;
99 unsigned long inflight_mask;
116BITSET_DEFINE(_scmi_tokens, SCMI_MAX_TOKEN);
117LIST_HEAD(inflight_head, scmi_req);
118#define REQHASH(_sc, _tk) \
119 (&((_sc)->trs->inflight_ht[(_tk) & (_sc)->trs->inflight_mask]))
120
121struct scmi_transport {
122 unsigned long next_id;
123 struct _scmi_tokens avail_tokens;
124 struct inflight_head *inflight_ht;
125 unsigned long inflight_mask;
126 struct scmi_reqs_pool *chans[SCMI_CHAN_MAX];
100 struct mtx mtx;
101};
102
103static int scmi_transport_init(struct scmi_softc *);
104static void scmi_transport_cleanup(struct scmi_softc *);
127 struct mtx mtx;
128};
129
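
The in-flight table above is a plain hashinit(9) hash: hashinit() returns a power-of-two array of list heads and stores (size - 1) in inflight_mask, which is why REQHASH() can index it with a simple token & mask. As a sketch of how such a table is searched, assuming the hidden scmi_req_lookup_inflight() walks its bucket in the obvious way (this is not a copy of it):

/*
 * Illustrative bucket walk; written against the definitions above but not
 * part of the driver.
 */
static struct scmi_req *
example_lookup_by_token(struct inflight_head *ht, u_long mask, uint32_t hdr)
{
        struct scmi_req *r;

        LIST_FOREACH(r, &ht[SCMI_MSG_TOKEN(hdr) & mask], next) {
                if (r->token == (int)SCMI_MSG_TOKEN(hdr))
                        return (r);
        }
        return (NULL);
}
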
130static int scmi_transport_init(struct scmi_softc *);
131static void scmi_transport_cleanup(struct scmi_softc *);
132static struct scmi_reqs_pool *scmi_reqs_pool_allocate(const int, const int);
133static void scmi_reqs_pool_free(struct scmi_reqs_pool *);
134static struct scmi_req *scmi_req_alloc(struct scmi_softc *, enum scmi_chan);
135static void scmi_req_free_unlocked(struct scmi_softc *,
136 enum scmi_chan, struct scmi_req *);
137static void scmi_req_get(struct scmi_softc *, struct scmi_req *);
138static void scmi_req_put(struct scmi_softc *, struct scmi_req *);
105static int scmi_token_pick(struct scmi_softc *);
106static void scmi_token_release_unlocked(struct scmi_softc *, int);
107static int scmi_req_track_inflight(struct scmi_softc *,
108 struct scmi_req *);
109static int scmi_req_drop_inflight(struct scmi_softc *,
110 struct scmi_req *);
111static struct scmi_req *scmi_req_lookup_inflight(struct scmi_softc *, uint32_t);
112
113static int scmi_wait_for_response(struct scmi_softc *,
139static int scmi_token_pick(struct scmi_softc *);
140static void scmi_token_release_unlocked(struct scmi_softc *, int);
141static int scmi_req_track_inflight(struct scmi_softc *,
142 struct scmi_req *);
143static int scmi_req_drop_inflight(struct scmi_softc *,
144 struct scmi_req *);
145static struct scmi_req *scmi_req_lookup_inflight(struct scmi_softc *, uint32_t);
146
147static int scmi_wait_for_response(struct scmi_softc *,
114 struct scmi_req *);
148 struct scmi_req *, void **);
115static void scmi_process_response(struct scmi_softc *, uint32_t);
116
117int
118scmi_attach(device_t dev)
119{
120 struct scmi_softc *sc;
121 phandle_t node;
122 int error;

--- 49 unchanged lines hidden ---

172};
173
174DEFINE_CLASS_1(scmi, scmi_driver, scmi_methods, sizeof(struct scmi_softc),
175 simplebus_driver);
176
177DRIVER_MODULE(scmi, simplebus, scmi_driver, 0, 0);
178MODULE_VERSION(scmi, 1);
179
149static void scmi_process_response(struct scmi_softc *, uint32_t);
150
151int
152scmi_attach(device_t dev)
153{
154 struct scmi_softc *sc;
155 phandle_t node;
156 int error;

--- 49 unchanged lines hidden ---

206};
207
208DEFINE_CLASS_1(scmi, scmi_driver, scmi_methods, sizeof(struct scmi_softc),
209 simplebus_driver);
210
211DRIVER_MODULE(scmi, simplebus, scmi_driver, 0, 0);
212MODULE_VERSION(scmi, 1);
213
214static struct scmi_reqs_pool *
215scmi_reqs_pool_allocate(const int max_msg, const int max_payld_sz)
216{
217 struct scmi_reqs_pool *rp;
218 struct scmi_req *req;
219
220 rp = malloc(sizeof(*rp), M_DEVBUF, M_ZERO | M_WAITOK);
221
222 LIST_INIT(&rp->head);
223 for (int i = 0; i < max_msg; i++) {
224 req = malloc(sizeof(*req) + max_payld_sz,
225 M_DEVBUF, M_ZERO | M_WAITOK);
226
227 mtx_init(&req->mtx, "req", "SCMI", MTX_SPIN);
228 LIST_INSERT_HEAD(&rp->head, req, next);
229 }
230
231 mtx_init(&rp->mtx, "reqs_pool", "SCMI", MTX_SPIN);
232
233 return (rp);
234}
235
236static void
237scmi_reqs_pool_free(struct scmi_reqs_pool *rp)
238{
239 struct scmi_req *req, *tmp;
240
241 LIST_FOREACH_SAFE(req, &rp->head, next, tmp) {
242 mtx_destroy(&req->mtx);
243 free(req, M_DEVBUF);
244 }
245
246 mtx_destroy(&rp->mtx);
247 free(rp, M_DEVBUF);
248}
249
180static int
181scmi_transport_init(struct scmi_softc *sc)
182{
183 struct scmi_transport *trs;
184 int ret;
185
186 trs = malloc(sizeof(*trs), M_DEVBUF, M_ZERO | M_WAITOK);
187
188 BIT_FILL(SCMI_MAX_TOKEN, &trs->avail_tokens);
189 mtx_init(&trs->mtx, "tokens", "SCMI", MTX_SPIN);
190
191 trs->inflight_ht = hashinit(SCMI_MAX_MSG, M_DEVBUF,
192 &trs->inflight_mask);
193
250static int
251scmi_transport_init(struct scmi_softc *sc)
252{
253 struct scmi_transport *trs;
254 int ret;
255
256 trs = malloc(sizeof(*trs), M_DEVBUF, M_ZERO | M_WAITOK);
257
258 BIT_FILL(SCMI_MAX_TOKEN, &trs->avail_tokens);
259 mtx_init(&trs->mtx, "tokens", "SCMI", MTX_SPIN);
260
261 trs->inflight_ht = hashinit(SCMI_MAX_MSG, M_DEVBUF,
262 &trs->inflight_mask);
263
264 trs->chans[SCMI_CHAN_A2P] =
265 scmi_reqs_pool_allocate(SCMI_MAX_MSG, SCMI_MAX_MSG_PAYLD_SIZE);
266 if (trs->chans[SCMI_CHAN_A2P] == NULL) {
267 free(trs, M_DEVBUF);
268 return (ENOMEM);
269 }
270
271 trs->chans[SCMI_CHAN_P2A] =
272 scmi_reqs_pool_allocate(SCMI_MAX_MSG, SCMI_MAX_MSG_PAYLD_SIZE);
273 if (trs->chans[SCMI_CHAN_P2A] == NULL) {
274 scmi_reqs_pool_free(trs->chans[SCMI_CHAN_A2P]);
275 free(trs, M_DEVBUF);
276 return (ENOMEM);
277 }
278
194 sc->trs = trs;
195 ret = SCMI_TRANSPORT_INIT(sc->dev);
196 if (ret != 0) {
279 sc->trs = trs;
280 ret = SCMI_TRANSPORT_INIT(sc->dev);
281 if (ret != 0) {
282 scmi_reqs_pool_free(trs->chans[SCMI_CHAN_A2P]);
283 scmi_reqs_pool_free(trs->chans[SCMI_CHAN_P2A]);
197 free(trs, M_DEVBUF);
198 return (ret);
199 }
200
201 return (0);
202}
203static void
204scmi_transport_cleanup(struct scmi_softc *sc)
205{
206
207 SCMI_TRANSPORT_CLEANUP(sc->dev);
208 mtx_destroy(&sc->trs->mtx);
209 hashdestroy(sc->trs->inflight_ht, M_DEVBUF, sc->trs->inflight_mask);
284 free(trs, M_DEVBUF);
285 return (ret);
286 }
287
288 return (0);
289}
290static void
291scmi_transport_cleanup(struct scmi_softc *sc)
292{
293
294 SCMI_TRANSPORT_CLEANUP(sc->dev);
295 mtx_destroy(&sc->trs->mtx);
296 hashdestroy(sc->trs->inflight_ht, M_DEVBUF, sc->trs->inflight_mask);
297 scmi_reqs_pool_free(sc->trs->chans[SCMI_CHAN_A2P]);
298 scmi_reqs_pool_free(sc->trs->chans[SCMI_CHAN_P2A]);
210 free(sc->trs, M_DEVBUF);
211}
212
299 free(sc->trs, M_DEVBUF);
300}
301
302static struct scmi_req *
303scmi_req_alloc(struct scmi_softc *sc, enum scmi_chan ch_idx)
304{
305 struct scmi_reqs_pool *rp;
306 struct scmi_req *req = NULL;
307
308 rp = sc->trs->chans[ch_idx];
309 mtx_lock_spin(&rp->mtx);
310 if (!LIST_EMPTY(&rp->head)) {
311 req = LIST_FIRST(&rp->head);
312 LIST_REMOVE_HEAD(&rp->head, next);
313 }
314 mtx_unlock_spin(&rp->mtx);
315
316 if (req != NULL)
317 refcount_init(&req->cnt, 1);
318
319 return (req);
320}
321
322static void
323scmi_req_free_unlocked(struct scmi_softc *sc, enum scmi_chan ch_idx,
324 struct scmi_req *req)
325{
326 struct scmi_reqs_pool *rp;
327
328 rp = sc->trs->chans[ch_idx];
329 mtx_lock_spin(&rp->mtx);
330 req->timed_out = false;
331 req->done = false;
332 refcount_init(&req->cnt, 0);
333 LIST_INSERT_HEAD(&rp->head, req, next);
334 mtx_unlock_spin(&rp->mtx);
335}
336
337static void
338scmi_req_get(struct scmi_softc *sc, struct scmi_req *req)
339{
340 bool ok;
341
342 mtx_lock_spin(&req->mtx);
343 ok = refcount_acquire_if_not_zero(&req->cnt);
344 mtx_unlock_spin(&req->mtx);
345
346 if (!ok)
347 device_printf(sc->dev, "%s() -- BAD REFCOUNT\n", __func__);
348
349 return;
350}
351
352static void
353scmi_req_put(struct scmi_softc *sc, struct scmi_req *req)
354{
355 mtx_lock_spin(&req->mtx);
356 if (!refcount_release_if_not_last(&req->cnt)) {
357 bzero(&req->msg, sizeof(req->msg) + SCMI_MAX_MSG_PAYLD_SIZE);
358 scmi_req_free_unlocked(sc, SCMI_CHAN_A2P, req);
359 }
360 mtx_unlock_spin(&req->mtx);
361}
362
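
Together with the per-channel pool above, these two helpers give every request a small reference-counted lifecycle. A condensed sketch of the intended flow, phrased in terms of the public scmi_buf_get()/scmi_request()/scmi_buf_put() entry points introduced later in this diff:

/*
 * Illustrative lifecycle, not driver source:
 *
 *   buf = scmi_buf_get(dev, ...)     scmi_req_alloc():            cnt = 1
 *   scmi_request(dev, buf, &out)     scmi_req_track_inflight():   cnt = 2
 *   reply processed (or a late       scmi_req_drop_inflight():    cnt = 1
 *   reply after a timeout)
 *   scmi_buf_put(dev, buf)           last scmi_req_put(): message zeroed,
 *                                    request returned to the A2P free list
 *
 * refcount_release_if_not_last(9) returns false only for the final holder,
 * so scmi_req_put() recycles the request exactly once; a timed-out request
 * keeps its in-flight reference until a late reply, if any, lets
 * scmi_process_response() drop it.
 */
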
213static int
214scmi_token_pick(struct scmi_softc *sc)
215{
216 unsigned long next_msg_id, token;
217
218 mtx_lock_spin(&sc->trs->mtx);
219 /*
220 * next_id is a monotonically increasing unsigned long that can be used

--- 34 unchanged lines hidden ---

255 if (req->token < 0)
256 return (EBUSY);
257
258 header = req->message_id;
259 header |= SCMI_MSG_TYPE_CMD << SCMI_HDR_MESSAGE_TYPE_S;
260 header |= req->protocol_id << SCMI_HDR_PROTOCOL_ID_S;
261 header |= req->token << SCMI_HDR_TOKEN_S;
262
363static int
364scmi_token_pick(struct scmi_softc *sc)
365{
366 unsigned long next_msg_id, token;
367
368 mtx_lock_spin(&sc->trs->mtx);
369 /*
370 * next_id is a monotonically increasing unsigned long that can be used

--- 34 unchanged lines hidden ---

405 if (req->token < 0)
406 return (EBUSY);
407
408 header = req->message_id;
409 header |= SCMI_MSG_TYPE_CMD << SCMI_HDR_MESSAGE_TYPE_S;
410 header |= req->protocol_id << SCMI_HDR_PROTOCOL_ID_S;
411 header |= req->token << SCMI_HDR_TOKEN_S;
412
263 req->msg_header = htole32(header);
413 req->header = htole32(header);
414 req->msg.hdr = htole32(header);
264
265 return (0);
266}
267
268static int
269scmi_req_track_inflight(struct scmi_softc *sc, struct scmi_req *req)
270{
271 int error;
272
273 /* build hdr, pick token */
274 error = scmi_finalize_req(sc, req);
275 if (error != 0)
276 return (error);
277
415
416 return (0);
417}
418
419static int
420scmi_req_track_inflight(struct scmi_softc *sc, struct scmi_req *req)
421{
422 int error;
423
424 /* build hdr, pick token */
425 error = scmi_finalize_req(sc, req);
426 if (error != 0)
427 return (error);
428
278 /* TODO Review/simplify locking around inflight ?*/
429 /* Bump refcount to get hold of this in-flight transaction */
430 scmi_req_get(sc, req);
431 /* Register in the inflight hashtable */
279 mtx_lock_spin(&sc->trs->mtx);
280 LIST_INSERT_HEAD(REQHASH(sc, req->token), req, next);
281 mtx_unlock_spin(&sc->trs->mtx);
282
283 return (0);
284}
285
286static int
287scmi_req_drop_inflight(struct scmi_softc *sc, struct scmi_req *req)
288{
289
432 mtx_lock_spin(&sc->trs->mtx);
433 LIST_INSERT_HEAD(REQHASH(sc, req->token), req, next);
434 mtx_unlock_spin(&sc->trs->mtx);
435
436 return (0);
437}
438
439static int
440scmi_req_drop_inflight(struct scmi_softc *sc, struct scmi_req *req)
441{
442
443 /* Remove from the inflight hashtable first ... */
290 mtx_lock_spin(&sc->trs->mtx);
291 LIST_REMOVE(req, next);
292 scmi_token_release_unlocked(sc, req->token);
293 mtx_unlock_spin(&sc->trs->mtx);
444 mtx_lock_spin(&sc->trs->mtx);
445 LIST_REMOVE(req, next);
446 scmi_token_release_unlocked(sc, req->token);
447 mtx_unlock_spin(&sc->trs->mtx);
448 /* ... and drop the refcount, potentially releasing *req */
449 scmi_req_put(sc, req);
294
295 return (0);
296}
297
298static struct scmi_req *
299scmi_req_lookup_inflight(struct scmi_softc *sc, uint32_t hdr)
300{
301 struct scmi_req *req = NULL;

--- 8 unchanged lines hidden ---

310 mtx_unlock_spin(&sc->trs->mtx);
311
312 return (req);
313}
314
315static void
316scmi_process_response(struct scmi_softc *sc, uint32_t hdr)
317{
450
451 return (0);
452}
453
454static struct scmi_req *
455scmi_req_lookup_inflight(struct scmi_softc *sc, uint32_t hdr)
456{
457 struct scmi_req *req = NULL;

--- 8 unchanged lines hidden ---

466 mtx_unlock_spin(&sc->trs->mtx);
467
468 return (req);
469}
470
471static void
472scmi_process_response(struct scmi_softc *sc, uint32_t hdr)
473{
474 bool timed_out = false;
318 struct scmi_req *req;
319
320 req = scmi_req_lookup_inflight(sc, hdr);
321 if (req == NULL) {
322 device_printf(sc->dev,
323 "Unexpected reply with header |%X| - token: 0x%X Drop.\n",
324 hdr, SCMI_MSG_TOKEN(hdr));
325 return;
326 }
327
475 struct scmi_req *req;
476
477 req = scmi_req_lookup_inflight(sc, hdr);
478 if (req == NULL) {
479 device_printf(sc->dev,
480 "Unexpected reply with header |%X| - token: 0x%X Drop.\n",
481 hdr, SCMI_MSG_TOKEN(hdr));
482 return;
483 }
484
485 mtx_lock_spin(&req->mtx);
328 req->done = true;
486 req->done = true;
329 wakeup(req);
487 if (!req->timed_out)
488 wakeup(req);
489 else
490 timed_out = true;
491 mtx_unlock_spin(&req->mtx);
492
493 if (timed_out)
494 device_printf(sc->dev,
495 "Late reply for timed-out request - token: 0x%X. Ignore.\n",
496 req->token);
497
498 /*
499 * In case of a late reply to a timed-out transaction this will
500 * finally free the pending scmi_req
501 */
502 scmi_req_drop_inflight(sc, req);
330}
331
332void
333scmi_rx_irq_callback(device_t dev, void *chan, uint32_t hdr)
334{
335 struct scmi_softc *sc;
336
337 sc = device_get_softc(dev);
338
339 if (SCMI_IS_MSG_TYPE_NOTIF(hdr) || SCMI_IS_MSG_TYPE_DRESP(hdr)) {
340 device_printf(dev, "DRESP/NOTIF unsupported. Drop.\n");
341 SCMI_CLEAR_CHANNEL(dev, chan);
342 return;
343 }
344
345 scmi_process_response(sc, hdr);
346}
347
348static int
503}
504
505void
506scmi_rx_irq_callback(device_t dev, void *chan, uint32_t hdr)
507{
508 struct scmi_softc *sc;
509
510 sc = device_get_softc(dev);
511
512 if (SCMI_IS_MSG_TYPE_NOTIF(hdr) || SCMI_IS_MSG_TYPE_DRESP(hdr)) {
513 device_printf(dev, "DRESP/NOTIF unsupported. Drop.\n");
514 SCMI_CLEAR_CHANNEL(dev, chan);
515 return;
516 }
517
518 scmi_process_response(sc, hdr);
519}
520
521static int
349scmi_wait_for_response(struct scmi_softc *sc, struct scmi_req *req)
522scmi_wait_for_response(struct scmi_softc *sc, struct scmi_req *req, void **out)
350{
351 int ret;
352
523{
524 int ret;
525
353 if (req->use_polling) {
354 ret = SCMI_POLL_MSG(sc->dev, req, sc->trs_desc.reply_timo_ms);
526 if (req->msg.polling) {
527 bool needs_drop;
528
529 ret = SCMI_POLL_MSG(sc->dev, &req->msg,
530 sc->trs_desc.reply_timo_ms);
531 /*
532 * Drop reference to successfully polled req unless it had
533 * already also been processed on the IRQ path.
534 * Addresses a possible race-condition between polling and
535 * interrupt reception paths.
536 */
537 mtx_lock_spin(&req->mtx);
538 needs_drop = (ret == 0) && !req->done;
539 mtx_unlock_spin(&req->mtx);
540 if (needs_drop)
541 scmi_req_drop_inflight(sc, req);
542 if (ret == 0 && req->msg.hdr != req->header) {
543 device_printf(sc->dev,
544 "Malformed reply with header |%08X|. Expected: |%08X|Drop.\n",
545 le32toh(req->msg.hdr), le32toh(req->header));
546 }
355 } else {
356 ret = tsleep(req, 0, "scmi_wait4",
357 (sc->trs_desc.reply_timo_ms * hz) / 1000);
358 /* Check for lost wakeups since there is no associated lock */
547 } else {
548 ret = tsleep(req, 0, "scmi_wait4",
549 (sc->trs_desc.reply_timo_ms * hz) / 1000);
550 /* Check for lost wakeups since there is no associated lock */
551 mtx_lock_spin(&req->mtx);
359 if (ret != 0 && req->done)
360 ret = 0;
552 if (ret != 0 && req->done)
553 ret = 0;
554 mtx_unlock_spin(&req->mtx);
361 }
362
555 }
556
363 if (ret == 0)
364 SCMI_COLLECT_REPLY(sc->dev, req);
365 else
557 if (ret == 0) {
558 SCMI_COLLECT_REPLY(sc->dev, &req->msg);
559 if (req->msg.payld[0] != 0)
560 ret = req->msg.payld[0];
561 *out = &req->msg.payld[SCMI_MSG_HDR_SIZE];
562 } else {
563 mtx_lock_spin(&req->mtx);
564 req->timed_out = true;
565 mtx_unlock_spin(&req->mtx);
366 device_printf(sc->dev,
367 "Request for token 0x%X timed-out.\n", req->token);
566 device_printf(sc->dev,
567 "Request for token 0x%X timed-out.\n", req->token);
568 }
368
369 SCMI_TX_COMPLETE(sc->dev, NULL);
370
371 return (ret);
372}
373
569
570 SCMI_TX_COMPLETE(sc->dev, NULL);
571
572 return (ret);
573}
574
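
In short, the reworked wait path can now resolve in three ways (a condensed reading of the code above, not part of the source):

/*
 *  1. Polled reply: SCMI_POLL_MSG() succeeds; the in-flight reference is
 *     dropped here unless the interrupt path already marked the request done
 *     (the needs_drop check), and the echoed header is sanity-checked.
 *  2. Sleeping reply: tsleep() is used without an interlock, so a non-zero
 *     (timed-out) return with req->done already set is treated as success;
 *     the wakeup was simply lost to the race the comment mentions.
 *  3. Timeout: req->timed_out is set under req->mtx so a late reply will not
 *     wakeup() a departed sleeper; scmi_process_response() logs the late
 *     reply and drops the in-flight reference instead.
 *
 * On success the reply is collected, a non-zero payld[0] is propagated as
 * the return value, and *out is pointed SCMI_MSG_HDR_SIZE past the start of
 * the payload.
 */
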
575void *
576scmi_buf_get(device_t dev, uint8_t protocol_id, uint8_t message_id,
577 int tx_payld_sz, int rx_payld_sz)
578{
579 struct scmi_softc *sc;
580 struct scmi_req *req;
581
582 sc = device_get_softc(dev);
583
584 if (tx_payld_sz > SCMI_MAX_MSG_PAYLD_SIZE ||
585 rx_payld_sz > SCMI_MAX_MSG_REPLY_SIZE) {
586 device_printf(dev, "Unsupported payload size. Drop.\n");
587 return (NULL);
588 }
589
590 /* Pick one from free list */
591 req = scmi_req_alloc(sc, SCMI_CHAN_A2P);
592 if (req == NULL)
593 return (NULL);
594
595 req->protocol_id = protocol_id & SCMI_HDR_PROTOCOL_ID_BF;
596 req->message_id = message_id & SCMI_HDR_MESSAGE_ID_BF;
597 req->msg.tx_len = sizeof(req->msg.hdr) + tx_payld_sz;
598 req->msg.rx_len = rx_payld_sz ?
599 rx_payld_sz + 2 * sizeof(uint32_t) : SCMI_MAX_MSG_SIZE;
600
601 return (&req->msg.payld[0]);
602}
603
604void
605scmi_buf_put(device_t dev, void *buf)
606{
607 struct scmi_softc *sc;
608 struct scmi_req *req;
609
610 sc = device_get_softc(dev);
611
612 req = buf_to_req(buf);
613 scmi_req_put(sc, req);
614}
615
374int
616int
375scmi_request(device_t dev, struct scmi_req *req)
617scmi_request(device_t dev, void *in, void **out)
376{
377 struct scmi_softc *sc;
618{
619 struct scmi_softc *sc;
620 struct scmi_req *req;
378 int error;
379
380 sc = device_get_softc(dev);
381
621 int error;
622
623 sc = device_get_softc(dev);
624
382 req->use_polling = cold || sc->trs_desc.no_completion_irq;
625 req = buf_to_req(in);
383
626
627 req->msg.polling =
628 (cold || sc->trs_desc.no_completion_irq || req->use_polling);
629
384 /* Set inflight and send using transport specific method - refc-2 */
385 error = scmi_req_track_inflight(sc, req);
386 if (error != 0)
387 return (error);
388
630 /* Set inflight and send using transport specific method - refc-2 */
631 error = scmi_req_track_inflight(sc, req);
632 if (error != 0)
633 return (error);
634
389 error = SCMI_XFER_MSG(sc->dev, req);
390 if (error == 0)
391 error = scmi_wait_for_response(sc, req);
635 error = SCMI_XFER_MSG(sc->dev, &req->msg);
636 if (error != 0) {
637 scmi_req_drop_inflight(sc, req);
638 return (error);
639 }
392
640
393 scmi_req_drop_inflight(sc, req);
394
395 return (error);
641 return (scmi_wait_for_response(sc, req, out));
396}
642}
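
The net effect of this change on SCMI protocol drivers is a buffer-centric calling convention: callers no longer build a struct scmi_req themselves, but borrow a pre-allocated message buffer, fill the transmit payload, and receive a pointer into the reply. A hedged sketch of such a caller; only scmi_buf_get(), scmi_request() and scmi_buf_put() are real, while every DUMMY_/dummy_ name and both payload structures are hypothetical, invented for illustration:

/* Hypothetical consumer of the new API (kernel context assumed). */
struct scmi_dummy_in  { uint32_t domain_id; };
struct scmi_dummy_out { uint32_t attributes; };

static int
dummy_get_attributes(device_t scmi_dev, uint32_t domain, uint32_t *attrs)
{
        struct scmi_dummy_out *out;
        struct scmi_dummy_in *in;
        int error;

        /* Borrow a request sized for our payloads from the A2P pool. */
        in = scmi_buf_get(scmi_dev, DUMMY_PROTOCOL_ID, DUMMY_MSG_ATTRIBUTES,
            sizeof(*in), sizeof(*out));
        if (in == NULL)
                return (ENXIO);

        in->domain_id = htole32(domain);

        /* Sends, waits (sleeping or polling) and points 'out' at the reply. */
        error = scmi_request(scmi_dev, in, (void **)&out);
        if (error == 0)
                *attrs = le32toh(out->attributes);

        /* Return the request to the pool; the reply data is gone after this. */
        scmi_buf_put(scmi_dev, in);

        return (error);
}

Note that the out pointer is only valid until scmi_buf_put(): once the last reference is dropped, the message is zeroed and the request goes back on the free list.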