/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2022 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2023 Arm Ltd
 *
 * This work was supported by Innovate UK project 105694, "Digital Security
 * by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/refcount.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <dev/clk/clk.h>
#include <dev/fdt/simplebus.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus_subr.h>

#include "scmi.h"
#include "scmi_protocols.h"

SDT_PROVIDER_DEFINE(scmi);
SDT_PROBE_DEFINE3(scmi, func, scmi_req_alloc, req_alloc,
    "int", "int", "int");
SDT_PROBE_DEFINE3(scmi, func, scmi_req_free_unlocked, req_alloc,
    "int", "int", "int");
SDT_PROBE_DEFINE3(scmi, func, scmi_req_get, req_alloc,
    "int", "int", "int");
SDT_PROBE_DEFINE3(scmi, func, scmi_req_put, req_alloc,
    "int", "int", "int");
SDT_PROBE_DEFINE5(scmi, func, scmi_request_tx, xfer_track,
    "int", "int", "int", "int", "int");
SDT_PROBE_DEFINE5(scmi, entry, scmi_wait_for_response, xfer_track,
    "int", "int", "int", "int", "int");
SDT_PROBE_DEFINE5(scmi, exit, scmi_wait_for_response, xfer_track,
    "int", "int", "int", "int", "int");
SDT_PROBE_DEFINE2(scmi, func, scmi_rx_irq_callback, hdr_dump,
    "int", "int");
SDT_PROBE_DEFINE5(scmi, func, scmi_process_response, xfer_track,
    "int", "int", "int", "int", "int");

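/*
 * SCMI message header layout (32 bits): message_id in bits [7:0],
 * message_type in bits [9:8], protocol_id in bits [17:10] and a
 * sequence-number token in bits [27:18]; the 10-bit token bounds the
 * number of distinct in-flight messages to SCMI_MAX_TOKEN (1024).
 */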
#define SCMI_MAX_TOKEN		1024

#define SCMI_HDR_TOKEN_S	18
#define SCMI_HDR_TOKEN_BF	(0x3ff)
#define SCMI_HDR_TOKEN_M	(SCMI_HDR_TOKEN_BF << SCMI_HDR_TOKEN_S)

#define SCMI_HDR_PROTOCOL_ID_S	10
#define SCMI_HDR_PROTOCOL_ID_BF	(0xff)
#define SCMI_HDR_PROTOCOL_ID_M	\
    (SCMI_HDR_PROTOCOL_ID_BF << SCMI_HDR_PROTOCOL_ID_S)

#define SCMI_HDR_MESSAGE_TYPE_S	8
#define SCMI_HDR_MESSAGE_TYPE_BF	(0x3)
#define SCMI_HDR_MESSAGE_TYPE_M	\
    (SCMI_HDR_MESSAGE_TYPE_BF << SCMI_HDR_MESSAGE_TYPE_S)

#define SCMI_HDR_MESSAGE_ID_S	0
#define SCMI_HDR_MESSAGE_ID_BF	(0xff)
#define SCMI_HDR_MESSAGE_ID_M	\
    (SCMI_HDR_MESSAGE_ID_BF << SCMI_HDR_MESSAGE_ID_S)

#define SCMI_MSG_TYPE_CMD	0
#define SCMI_MSG_TYPE_DRESP	2
#define SCMI_MSG_TYPE_NOTIF	3

#define SCMI_MSG_TYPE_CHECK(_h, _t)	\
    ((((_h) & SCMI_HDR_MESSAGE_TYPE_M) >> SCMI_HDR_MESSAGE_TYPE_S) == (_t))

#define SCMI_IS_MSG_TYPE_NOTIF(h)	\
    SCMI_MSG_TYPE_CHECK((h), SCMI_MSG_TYPE_NOTIF)
#define SCMI_IS_MSG_TYPE_DRESP(h)	\
    SCMI_MSG_TYPE_CHECK((h), SCMI_MSG_TYPE_DRESP)

#define SCMI_MSG_TOKEN(_hdr)	\
    (((_hdr) & SCMI_HDR_TOKEN_M) >> SCMI_HDR_TOKEN_S)
#define SCMI_MSG_PROTOCOL_ID(_hdr)	\
    (((_hdr) & SCMI_HDR_PROTOCOL_ID_M) >> SCMI_HDR_PROTOCOL_ID_S)
#define SCMI_MSG_MESSAGE_ID(_hdr)	\
    (((_hdr) & SCMI_HDR_MESSAGE_ID_M) >> SCMI_HDR_MESSAGE_ID_S)
#define SCMI_MSG_TYPE(_hdr)	\
    (((_hdr) & SCMI_HDR_MESSAGE_TYPE_M) >> SCMI_HDR_MESSAGE_TYPE_S)

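/*
 * A pool-allocated SCMI request. The transport payload buffer lives
 * directly after the embedded struct scmi_msg (requests are allocated
 * with sizeof(*req) + max_payld_sz). The cnt refcount is shared
 * between the submitting context and the response path; the last
 * reference returns the request to its channel free-list.
 */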
struct scmi_req {
	int		cnt;
	bool		timed_out;
	bool		use_polling;
	bool		done;
	bool		is_raw;
	device_t	dev;
	struct task	tsk;
	struct mtx	mtx;
	LIST_ENTRY(scmi_req)	next;
	int		protocol_id;
	int		message_id;
	int		token;
	uint32_t	header;
	struct scmi_msg	msg;
};

#define tsk_to_req(t)	__containerof((t), struct scmi_req, tsk)
#define buf_to_msg(b)	__containerof((b), struct scmi_msg, payld)
#define msg_to_req(m)	__containerof((m), struct scmi_req, msg)
#define buf_to_req(b)	msg_to_req(buf_to_msg(b))

LIST_HEAD(reqs_head, scmi_req);

struct scmi_reqs_pool {
	struct mtx		mtx;
	struct reqs_head	head;
};

BITSET_DEFINE(_scmi_tokens, SCMI_MAX_TOKEN);
LIST_HEAD(inflight_head, scmi_req);
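/* Map a message token to its bucket in the in-flight hashtable. */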
#define REQHASH(_sc, _tk)	\
    (&((_sc)->trs->inflight_ht[(_tk) & (_sc)->trs->inflight_mask]))

struct scmi_transport {
	unsigned long		next_id;
	struct _scmi_tokens	avail_tokens;
	struct inflight_head	*inflight_ht;
	unsigned long		inflight_mask;
	struct scmi_reqs_pool	*chans[SCMI_CHAN_MAX];
	struct mtx		mtx;
};

static void scmi_transport_configure(struct scmi_transport_desc *, phandle_t);
static int scmi_transport_init(struct scmi_softc *, phandle_t);
static void scmi_transport_cleanup(struct scmi_softc *);
static void scmi_req_async_waiter(void *, int);
static struct scmi_reqs_pool *scmi_reqs_pool_allocate(device_t, const int,
    const int);
static void scmi_reqs_pool_free(struct scmi_reqs_pool *);
static struct scmi_req *scmi_req_alloc(struct scmi_softc *, enum scmi_chan);
static struct scmi_req *scmi_req_initialized_alloc(device_t, int, int);
static void scmi_req_free_unlocked(struct scmi_softc *,
    enum scmi_chan, struct scmi_req *);
static void scmi_req_get(struct scmi_softc *, struct scmi_req *);
static void scmi_req_put(struct scmi_softc *, struct scmi_req *);
static int scmi_token_pick(struct scmi_softc *);
static int scmi_token_reserve(struct scmi_softc *, uint16_t);
static void scmi_token_release_unlocked(struct scmi_softc *, int);
static int scmi_req_track_inflight(struct scmi_softc *,
    struct scmi_req *);
static int scmi_req_drop_inflight(struct scmi_softc *,
    struct scmi_req *);
static struct scmi_req *scmi_req_lookup_inflight(struct scmi_softc *, uint32_t);

static int scmi_wait_for_response(struct scmi_softc *,
    struct scmi_req *, void **);
static void scmi_process_response(struct scmi_softc *, uint32_t,
    uint32_t);

int
scmi_attach(device_t dev)
{
	struct sysctl_oid *sysctl_trans;
	struct scmi_softc *sc;
	phandle_t node;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	node = ofw_bus_get_node(dev);
	if (node == -1)
		return (ENXIO);

	simplebus_init(dev, node);

	error = scmi_transport_init(sc, node);
	if (error != 0)
		return (error);

	device_printf(dev, "Transport - max_msg:%d max_payld_sz:%lu reply_timo_ms:%d\n",
	    SCMI_MAX_MSG(sc), SCMI_MAX_MSG_PAYLD_SIZE(sc), SCMI_MAX_MSG_TIMEOUT_MS(sc));

	sc->sysctl_root = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_hw),
	    OID_AUTO, "scmi", CTLFLAG_RD, 0, "SCMI root");
	sysctl_trans = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(sc->sysctl_root),
	    OID_AUTO, "transport", CTLFLAG_RD, 0, "SCMI Transport properties");
	SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(sysctl_trans), OID_AUTO, "max_msg",
	    CTLFLAG_RD, &sc->trs_desc.max_msg, 0, "SCMI Max number of inflight messages");
	SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(sysctl_trans), OID_AUTO, "max_msg_size",
	    CTLFLAG_RD, &sc->trs_desc.max_payld_sz, 0, "SCMI Max message payload size");
	SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(sysctl_trans), OID_AUTO, "max_rx_timeout_ms",
	    CTLFLAG_RD, &sc->trs_desc.reply_timo_ms, 0, "SCMI Max message RX timeout ms");

	/*
	 * Allow devices to identify.
	 */
	bus_identify_children(dev);

	/*
	 * Now walk the OFW tree and attach top-level devices.
	 */
	for (node = OF_child(node); node > 0; node = OF_peer(node))
		simplebus_add_device(dev, node, 0, NULL, -1, NULL);

	bus_attach_children(dev);

	return (0);
}

static int
scmi_detach(device_t dev)
{
	struct scmi_softc *sc;

	sc = device_get_softc(dev);
	scmi_transport_cleanup(sc);

	return (0);
}

static device_method_t scmi_methods[] = {
	DEVMETHOD(device_attach,	scmi_attach),
	DEVMETHOD(device_detach,	scmi_detach),

	DEVMETHOD_END
};

DEFINE_CLASS_1(scmi, scmi_driver, scmi_methods, sizeof(struct scmi_softc),
    simplebus_driver);

DRIVER_MODULE(scmi, simplebus, scmi_driver, 0, 0);
MODULE_VERSION(scmi, 1);

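/*
 * Pre-allocate a channel's pool of max_msg requests, each with room
 * for max_payld_sz payload bytes trailing the embedded message.
 */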
static struct scmi_reqs_pool *
scmi_reqs_pool_allocate(device_t dev, const int max_msg, const int max_payld_sz)
{
	struct scmi_reqs_pool *rp;
	struct scmi_req *req;

	rp = malloc(sizeof(*rp), M_DEVBUF, M_ZERO | M_WAITOK);

	LIST_INIT(&rp->head);
	for (int i = 0; i < max_msg; i++) {
		req = malloc(sizeof(*req) + max_payld_sz,
		    M_DEVBUF, M_ZERO | M_WAITOK);

		req->dev = dev;
		req->tsk.ta_context = &req->tsk;
		req->tsk.ta_func = scmi_req_async_waiter;

		mtx_init(&req->mtx, "req", "SCMI", MTX_SPIN);
		LIST_INSERT_HEAD(&rp->head, req, next);
	}

	mtx_init(&rp->mtx, "reqs_pool", "SCMI", MTX_SPIN);

	return (rp);
}

static void
scmi_reqs_pool_free(struct scmi_reqs_pool *rp)
{
	struct scmi_req *req, *tmp;

	/* The _SAFE variant is required: entries are freed while iterating. */
	LIST_FOREACH_SAFE(req, &rp->head, next, tmp) {
		mtx_destroy(&req->mtx);
		free(req, M_DEVBUF);
	}

	mtx_destroy(&rp->mtx);
	free(rp, M_DEVBUF);
}

static void
scmi_transport_configure(struct scmi_transport_desc *td, phandle_t node)
{
	if (OF_getencprop(node, "arm,max-msg", &td->max_msg,
	    sizeof(td->max_msg)) == -1)
		td->max_msg = SCMI_DEF_MAX_MSG;

	if (OF_getencprop(node, "arm,max-msg-size", &td->max_payld_sz,
	    sizeof(td->max_payld_sz)) == -1)
		td->max_payld_sz = SCMI_DEF_MAX_MSG_PAYLD_SIZE;
}

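/*
 * Set up the shared transport state: the token bitmap, the in-flight
 * request hashtable and one pre-allocated request pool per channel,
 * before handing over to the concrete transport driver's own init.
 */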
static int
scmi_transport_init(struct scmi_softc *sc, phandle_t node)
{
	struct scmi_transport_desc *td = &sc->trs_desc;
	struct scmi_transport *trs;
	int ret;

	trs = malloc(sizeof(*trs), M_DEVBUF, M_ZERO | M_WAITOK);

	scmi_transport_configure(td, node);

	BIT_FILL(SCMI_MAX_TOKEN, &trs->avail_tokens);
	mtx_init(&trs->mtx, "tokens", "SCMI", MTX_SPIN);

	trs->inflight_ht = hashinit(td->max_msg, M_DEVBUF, &trs->inflight_mask);

	trs->chans[SCMI_CHAN_A2P] =
	    scmi_reqs_pool_allocate(sc->dev, td->max_msg, td->max_payld_sz);
	if (trs->chans[SCMI_CHAN_A2P] == NULL) {
		free(trs, M_DEVBUF);
		return (ENOMEM);
	}

	trs->chans[SCMI_CHAN_P2A] =
	    scmi_reqs_pool_allocate(sc->dev, td->max_msg, td->max_payld_sz);
	if (trs->chans[SCMI_CHAN_P2A] == NULL) {
		scmi_reqs_pool_free(trs->chans[SCMI_CHAN_A2P]);
		free(trs, M_DEVBUF);
		return (ENOMEM);
	}

	sc->trs = trs;
	ret = SCMI_TRANSPORT_INIT(sc->dev);
	if (ret != 0) {
		scmi_reqs_pool_free(trs->chans[SCMI_CHAN_A2P]);
		scmi_reqs_pool_free(trs->chans[SCMI_CHAN_P2A]);
		free(trs, M_DEVBUF);
		return (ret);
	}

	/* Use default transport timeout if not overridden by OF */
	OF_getencprop(node, "arm,max-rx-timeout-ms", &td->reply_timo_ms,
	    sizeof(td->reply_timo_ms));

	return (0);
}

static void
scmi_transport_cleanup(struct scmi_softc *sc)
{

	SCMI_TRANSPORT_CLEANUP(sc->dev);
	mtx_destroy(&sc->trs->mtx);
	hashdestroy(sc->trs->inflight_ht, M_DEVBUF, sc->trs->inflight_mask);
	scmi_reqs_pool_free(sc->trs->chans[SCMI_CHAN_A2P]);
	scmi_reqs_pool_free(sc->trs->chans[SCMI_CHAN_P2A]);
	free(sc->trs, M_DEVBUF);
}

static struct scmi_req *
scmi_req_initialized_alloc(device_t dev, int tx_payld_sz, int rx_payld_sz)
{
	struct scmi_softc *sc;
	struct scmi_req *req;

	sc = device_get_softc(dev);

	if (tx_payld_sz > SCMI_MAX_MSG_PAYLD_SIZE(sc) ||
	    rx_payld_sz > SCMI_MAX_MSG_REPLY_SIZE(sc)) {
		device_printf(dev, "Unsupported payload size. Drop.\n");
		return (NULL);
	}

	/* Pick one from free list */
	req = scmi_req_alloc(sc, SCMI_CHAN_A2P);
	if (req == NULL)
		return (NULL);

	req->msg.tx_len = sizeof(req->msg.hdr) + tx_payld_sz;
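	/*
	 * A reply carries the echoed 32-bit header and a 32-bit status
	 * word ahead of the return payload; when no reply payload size
	 * was requested, fall back to the transport maximum.
	 */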
	req->msg.rx_len = rx_payld_sz ?
	    rx_payld_sz + 2 * sizeof(uint32_t) : SCMI_MAX_MSG_SIZE(sc);

	return (req);
}

static struct scmi_req *
scmi_req_alloc(struct scmi_softc *sc, enum scmi_chan ch_idx)
{
	struct scmi_reqs_pool *rp;
	struct scmi_req *req = NULL;

	rp = sc->trs->chans[ch_idx];
	mtx_lock_spin(&rp->mtx);
	if (!LIST_EMPTY(&rp->head)) {
		req = LIST_FIRST(&rp->head);
		LIST_REMOVE_HEAD(&rp->head, next);
	}
	mtx_unlock_spin(&rp->mtx);

	if (req != NULL) {
		refcount_init(&req->cnt, 1);
		SDT_PROBE3(scmi, func, scmi_req_alloc, req_alloc,
		    req, refcount_load(&req->cnt), -1);
	}

	return (req);
}

static void
scmi_req_free_unlocked(struct scmi_softc *sc, enum scmi_chan ch_idx,
    struct scmi_req *req)
{
	struct scmi_reqs_pool *rp;

	rp = sc->trs->chans[ch_idx];
	mtx_lock_spin(&rp->mtx);
	req->timed_out = false;
	req->done = false;
	req->is_raw = false;
	refcount_init(&req->cnt, 0);
	LIST_INSERT_HEAD(&rp->head, req, next);
	mtx_unlock_spin(&rp->mtx);

	SDT_PROBE3(scmi, func, scmi_req_free_unlocked, req_alloc,
	    req, refcount_load(&req->cnt), -1);
}

static void
scmi_req_get(struct scmi_softc *sc, struct scmi_req *req)
{
	bool ok;

	mtx_lock_spin(&req->mtx);
	ok = refcount_acquire_if_not_zero(&req->cnt);
	mtx_unlock_spin(&req->mtx);

	if (!ok)
		device_printf(sc->dev, "%s() -- BAD REFCOUNT\n", __func__);

	SDT_PROBE3(scmi, func, scmi_req_get, req_alloc,
	    req, refcount_load(&req->cnt), SCMI_MSG_TOKEN(req->msg.hdr));
}

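/*
 * Drop a reference; releasing the last one scrubs the request and
 * returns it to the A2P free-list via scmi_req_free_unlocked().
 */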
static void
scmi_req_put(struct scmi_softc *sc, struct scmi_req *req)
{
	mtx_lock_spin(&req->mtx);
	if (!refcount_release_if_not_last(&req->cnt)) {
		req->protocol_id = 0;
		req->message_id = 0;
		req->token = 0;
		req->header = 0;
		bzero(&req->msg, sizeof(req->msg) + SCMI_MAX_MSG_PAYLD_SIZE(sc));
		scmi_req_free_unlocked(sc, SCMI_CHAN_A2P, req);
	} else {
		SDT_PROBE3(scmi, func, scmi_req_put, req_alloc,
		    req, refcount_load(&req->cnt), SCMI_MSG_TOKEN(req->msg.hdr));
	}
	mtx_unlock_spin(&req->mtx);
}

static int
scmi_token_pick(struct scmi_softc *sc)
{
	unsigned long next_msg_id, token;

	mtx_lock_spin(&sc->trs->mtx);
	/*
	 * next_id is a monotonically increasing unsigned long that can be
	 * used for tracing purposes; next_msg_id is a 10-bit sequence
	 * number derived from it.
	 */
	next_msg_id = sc->trs->next_id++ & SCMI_HDR_TOKEN_BF;
	token = BIT_FFS_AT(SCMI_MAX_TOKEN, &sc->trs->avail_tokens, next_msg_id);
	if (token != 0)
		BIT_CLR(SCMI_MAX_TOKEN, token - 1, &sc->trs->avail_tokens);
	mtx_unlock_spin(&sc->trs->mtx);

	/*
	 * BIT_FFS_AT returns 1-indexed values, so 0 means failure to find a
	 * free slot: all possible SCMI messages are in-flight using all of
	 * the SCMI_MAX_TOKEN sequence numbers.
	 */
	if (!token)
		return (-EBUSY);

	return ((int)(token - 1));
}

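/*
 * Raw requests arrive with a caller-chosen token already in their
 * header: try to reserve that exact sequence number, pausing and
 * retrying a few times if it is currently in use.
 */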
static int
scmi_token_reserve(struct scmi_softc *sc, uint16_t candidate)
{
	int token = -EBUSY, retries = 3;

	do {
		mtx_lock_spin(&sc->trs->mtx);
		if (BIT_ISSET(SCMI_MAX_TOKEN, candidate, &sc->trs->avail_tokens)) {
			BIT_CLR(SCMI_MAX_TOKEN, candidate, &sc->trs->avail_tokens);
			token = candidate;
			sc->trs->next_id++;
		}
		mtx_unlock_spin(&sc->trs->mtx);
		if (token == candidate || retries-- == 0)
			break;

		pause("scmi_tk_reserve", hz);
	} while (1);

	return (token);
}

static void
scmi_token_release_unlocked(struct scmi_softc *sc, int token)
{

	BIT_SET(SCMI_MAX_TOKEN, token, &sc->trs->avail_tokens);
}

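/*
 * Pick (or, for raw messages, reserve) a token and compose the final
 * command header from message_id, protocol_id and token.
 */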
static int
scmi_finalize_req(struct scmi_softc *sc, struct scmi_req *req)
{
	if (!req->is_raw)
		req->token = scmi_token_pick(sc);
	else
		req->token = scmi_token_reserve(sc, SCMI_MSG_TOKEN(req->msg.hdr));

	if (req->token < 0)
		return (EBUSY);

	if (!req->is_raw) {
		req->msg.hdr = req->message_id;
		req->msg.hdr |= SCMI_MSG_TYPE_CMD << SCMI_HDR_MESSAGE_TYPE_S;
		req->msg.hdr |= req->protocol_id << SCMI_HDR_PROTOCOL_ID_S;
		req->msg.hdr |= req->token << SCMI_HDR_TOKEN_S;
	}

	/* Save requested header */
	req->header = req->msg.hdr;

	return (0);
}

static int
scmi_req_track_inflight(struct scmi_softc *sc, struct scmi_req *req)
{
	int error;

	/* Build the header and pick a token */
	error = scmi_finalize_req(sc, req);
	if (error != 0)
		return (error);

	/* Bump refcount to get hold of this in-flight transaction */
	scmi_req_get(sc, req);
	/* Register in the inflight hashtable */
	mtx_lock_spin(&sc->trs->mtx);
	LIST_INSERT_HEAD(REQHASH(sc, req->token), req, next);
	mtx_unlock_spin(&sc->trs->mtx);

	return (0);
}

static int
scmi_req_drop_inflight(struct scmi_softc *sc, struct scmi_req *req)
{

	/* Remove from the inflight hashtable first ... */
	mtx_lock_spin(&sc->trs->mtx);
	LIST_REMOVE(req, next);
	scmi_token_release_unlocked(sc, req->token);
	mtx_unlock_spin(&sc->trs->mtx);
	/* ... then drop the refcount, potentially releasing *req */
	scmi_req_put(sc, req);

	return (0);
}

static struct scmi_req *
scmi_req_lookup_inflight(struct scmi_softc *sc, uint32_t hdr)
{
	struct scmi_req *req = NULL;
	unsigned int token;

	token = SCMI_MSG_TOKEN(hdr);
	mtx_lock_spin(&sc->trs->mtx);
	LIST_FOREACH(req, REQHASH(sc, token), next) {
		if (req->token == token)
			break;
	}
	mtx_unlock_spin(&sc->trs->mtx);

	return (req);
}

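/*
 * Match an incoming reply against the in-flight table by token and
 * complete the request: wake up a sleeping waiter, or flag completion
 * to a poller, then drop the in-flight reference.
 */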
static void
scmi_process_response(struct scmi_softc *sc, uint32_t hdr, uint32_t rx_len)
{
	bool timed_out = false;
	struct scmi_req *req;

	req = scmi_req_lookup_inflight(sc, hdr);
	if (req == NULL) {
		device_printf(sc->dev,
		    "Unexpected reply with header |%X| - token: 0x%X. Drop.\n",
		    hdr, SCMI_MSG_TOKEN(hdr));
		return;
	}

	SDT_PROBE5(scmi, func, scmi_process_response, xfer_track, req,
	    SCMI_MSG_PROTOCOL_ID(req->msg.hdr), SCMI_MSG_MESSAGE_ID(req->msg.hdr),
	    SCMI_MSG_TOKEN(req->msg.hdr), req->timed_out);

	mtx_lock_spin(&req->mtx);
	req->done = true;
	req->msg.rx_len = rx_len;
	if (!req->timed_out) {
		/*
		 * Consider the case in which a polled message is picked
		 * by chance on the IRQ path on another CPU: setting poll_done
		 * will terminate the other poll loop.
		 */
		if (!req->msg.polling)
			wakeup(req);
		else
			atomic_store_rel_int(&req->msg.poll_done, 1);
	} else {
		timed_out = true;
	}
	mtx_unlock_spin(&req->mtx);

	if (timed_out)
		device_printf(sc->dev,
		    "Late reply for timed-out request - token: 0x%X. Ignore.\n",
		    req->token);

	/*
	 * In case of a late reply to a timed-out transaction this will
	 * finally free the pending scmi_req.
	 */
	scmi_req_drop_inflight(sc, req);
}

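/*
 * Transport RX entry point: notifications and delayed responses are
 * not supported yet, so anything other than a command reply is
 * dropped after clearing the channel.
 */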
void
scmi_rx_irq_callback(device_t dev, void *chan, uint32_t hdr, uint32_t rx_len)
{
	struct scmi_softc *sc;

	sc = device_get_softc(dev);

	SDT_PROBE2(scmi, func, scmi_rx_irq_callback, hdr_dump, hdr, rx_len);

	if (SCMI_IS_MSG_TYPE_NOTIF(hdr) || SCMI_IS_MSG_TYPE_DRESP(hdr)) {
		device_printf(dev, "DRESP/NOTIF unsupported. Drop.\n");
		SCMI_CLEAR_CHANNEL(dev, chan);
		return;
	}

	scmi_process_response(sc, hdr, rx_len);
}

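/*
 * Wait for the reply to a transmitted request, either by actively
 * polling the transport or by sleeping until scmi_process_response()
 * issues a wakeup; both paths guard against lost wakeups and against
 * late replies racing with a timeout.
 */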
static int
scmi_wait_for_response(struct scmi_softc *sc, struct scmi_req *req, void **out)
{
	unsigned int reply_timo_ms = SCMI_MAX_MSG_TIMEOUT_MS(sc);
	int ret;

	SDT_PROBE5(scmi, entry, scmi_wait_for_response, xfer_track, req,
	    SCMI_MSG_PROTOCOL_ID(req->msg.hdr), SCMI_MSG_MESSAGE_ID(req->msg.hdr),
	    SCMI_MSG_TOKEN(req->msg.hdr), reply_timo_ms);

	if (req->msg.polling) {
		bool needs_drop;

		ret = SCMI_POLL_MSG(sc->dev, &req->msg, reply_timo_ms);
		/*
		 * Drop reference to successfully polled req unless it had
		 * already also been processed on the IRQ path.
		 * Addresses a possible race-condition between polling and
		 * interrupt reception paths.
		 */
		mtx_lock_spin(&req->mtx);
		needs_drop = (ret == 0) && !req->done;
		req->timed_out = ret != 0;
		mtx_unlock_spin(&req->mtx);
		if (needs_drop)
			scmi_req_drop_inflight(sc, req);
		if (ret == 0 && req->msg.hdr != req->header) {
			device_printf(sc->dev,
706 "Malformed reply with header |%08X|. Expected: |%08X|Drop.\n",
			    le32toh(req->msg.hdr), le32toh(req->header));
		}
	} else {
		ret = tsleep(req, 0, "scmi_wait4", (reply_timo_ms * hz) / 1000);
		/* Check for lost wakeups since there is no associated lock */
		mtx_lock_spin(&req->mtx);
		if (ret != 0 && req->done)
			ret = 0;
		req->timed_out = ret != 0;
		mtx_unlock_spin(&req->mtx);
	}

	if (ret == 0) {
		SCMI_COLLECT_REPLY(sc->dev, &req->msg);
		if (req->msg.payld[0] != 0)
			ret = req->msg.payld[0];
		if (out != NULL)
			*out = &req->msg.payld[SCMI_MSG_HDR_SIZE];
	} else {
		device_printf(sc->dev,
		    "Request for token 0x%X timed out.\n", req->token);
	}

	SCMI_TX_COMPLETE(sc->dev, NULL);

	SDT_PROBE5(scmi, exit, scmi_wait_for_response, xfer_track, req,
	    SCMI_MSG_PROTOCOL_ID(req->msg.hdr), SCMI_MSG_MESSAGE_ID(req->msg.hdr),
	    SCMI_MSG_TOKEN(req->msg.hdr), req->timed_out);

	return (ret);
}

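/*
 * Public buffer-based API: hand out the payload area of a pre-built
 * A2P request. The caller fills in the TX payload, submits the buffer
 * with scmi_request()/scmi_request_tx() and releases it with
 * scmi_buf_put().
 */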
void *
scmi_buf_get(device_t dev, uint8_t protocol_id, uint8_t message_id,
    int tx_payld_sz, int rx_payld_sz)
{
	struct scmi_req *req;

	/* Pick a pre-built req */
	req = scmi_req_initialized_alloc(dev, tx_payld_sz, rx_payld_sz);
	if (req == NULL)
		return (NULL);

	req->protocol_id = protocol_id & SCMI_HDR_PROTOCOL_ID_BF;
	req->message_id = message_id & SCMI_HDR_MESSAGE_ID_BF;

	return (&req->msg.payld[0]);
}

void
scmi_buf_put(device_t dev, void *buf)
{
	struct scmi_softc *sc;
	struct scmi_req *req;

	sc = device_get_softc(dev);

	req = buf_to_req(buf);
	scmi_req_put(sc, req);
}

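/*
 * Raw-message variant of scmi_buf_get(): returns the whole message and
 * marks the request as raw, so the caller-built header is transmitted
 * verbatim (only its token gets reserved, not rewritten).
 */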
struct scmi_msg *
scmi_msg_get(device_t dev, int tx_payld_sz, int rx_payld_sz)
{
	struct scmi_req *req;

	/* Pick a pre-built req */
	req = scmi_req_initialized_alloc(dev, tx_payld_sz, rx_payld_sz);
	if (req == NULL)
		return (NULL);

	req->is_raw = true;

	return (&req->msg);
}

static void
scmi_req_async_waiter(void *context, int pending)
{
	struct task *ta = context;
	struct scmi_softc *sc;
	struct scmi_req *req;

	req = tsk_to_req(ta);
	sc = device_get_softc(req->dev);
	scmi_wait_for_response(sc, req, NULL);

	scmi_msg_put(req->dev, &req->msg);
}

void
scmi_msg_put(device_t dev, struct scmi_msg *msg)
{
	struct scmi_softc *sc;
	struct scmi_req *req;

	sc = device_get_softc(dev);

	req = msg_to_req(msg);

	scmi_req_put(sc, req);
}

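/*
 * Finalize and send a request: track it in-flight, which picks the
 * token and builds the header, then hand it to the transport layer.
 * Polled mode is forced while cold or when the transport lacks a
 * completion interrupt.
 */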
int
scmi_request_tx(device_t dev, void *in)
{
	struct scmi_softc *sc;
	struct scmi_req *req;
	int error;

	sc = device_get_softc(dev);

	req = buf_to_req(in);

	req->msg.polling =
	    (cold || sc->trs_desc.no_completion_irq || req->use_polling);

	/* Set inflight and send via the transport-specific method; refcount is now 2 */
	error = scmi_req_track_inflight(sc, req);
	if (error != 0) {
		device_printf(dev, "Failed to build req with HDR |%0X|\n",
		    req->msg.hdr);
		return (error);
	}

	error = SCMI_XFER_MSG(sc->dev, &req->msg);
	if (error != 0) {
		scmi_req_drop_inflight(sc, req);
		return (error);
	}

	SDT_PROBE5(scmi, func, scmi_request_tx, xfer_track, req,
	    SCMI_MSG_PROTOCOL_ID(req->msg.hdr), SCMI_MSG_MESSAGE_ID(req->msg.hdr),
	    SCMI_MSG_TOKEN(req->msg.hdr), req->msg.polling);

	return (0);
}

int
scmi_request(device_t dev, void *in, void **out)
{
	struct scmi_softc *sc;
	struct scmi_req *req;
	int error;

	error = scmi_request_tx(dev, in);
	if (error != 0)
		return (error);

	sc = device_get_softc(dev);
	req = buf_to_req(in);

	return (scmi_wait_for_response(sc, req, out));
}

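/*
 * Queue a raw message for asynchronous completion: a taskqueue thread
 * will run scmi_req_async_waiter() to collect the reply and drop the
 * reference.
 */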
int
scmi_msg_async_enqueue(struct scmi_msg *msg)
{
	struct scmi_req *req;

	req = msg_to_req(msg);

	return (taskqueue_enqueue_flags(taskqueue_thread, &req->tsk,
	    TASKQUEUE_FAIL_IF_PENDING | TASKQUEUE_FAIL_IF_CANCELING));
}