// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Mailbox Transport
 * driver.
 *
 * Copyright (C) 2019-2024 ARM Ltd.
 */

#include <linux/err.h>
#include <linux/device.h>
#include <linux/mailbox_client.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "../common.h"

/**
 * struct scmi_mailbox - Structure representing a SCMI mailbox transport
 *
 * @cl: Mailbox Client
 * @chan: Transmit/Receive mailbox uni/bi-directional channel
 * @chan_receiver: Optional Receiver mailbox unidirectional channel
 * @chan_platform_receiver: Optional Platform Receiver mailbox unidirectional channel
 * @cinfo: SCMI channel info
 * @shmem: Transmit/Receive shared memory area
 * @chan_lock: Lock that prevents multiple xfers from being queued
 * @io_ops: Transport specific I/O operations
 */
struct scmi_mailbox {
        struct mbox_client cl;
        struct mbox_chan *chan;
        struct mbox_chan *chan_receiver;
        struct mbox_chan *chan_platform_receiver;
        struct scmi_chan_info *cinfo;
        struct scmi_shared_mem __iomem *shmem;
        struct mutex chan_lock;
        struct scmi_shmem_io_ops *io_ops;
};

#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)

static struct scmi_transport_core_operations *core;

static void tx_prepare(struct mbox_client *cl, void *m)
{
        struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);

        core->shmem->tx_prepare(smbox->shmem, m, smbox->cinfo,
                                smbox->io_ops->toio);
}

static void rx_callback(struct mbox_client *cl, void *m)
{
        struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);

        /*
         * An A2P IRQ is NOT valid when received while the platform still has
         * the ownership of the channel, because the platform at first releases
         * the SMT channel and then sends the completion interrupt.
         *
         * This addresses a possible race condition in which a spurious IRQ
         * from a previous timed-out reply which arrived late could be wrongly
         * associated with the next pending transaction.
         */
        if (cl->knows_txdone &&
            !core->shmem->channel_free(smbox->shmem)) {
                dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n");
                core->bad_message_trace(smbox->cinfo,
                                        core->shmem->read_header(smbox->shmem),
                                        MSG_MBOX_SPURIOUS);
                return;
        }

        core->rx_callback(smbox->cinfo,
                          core->shmem->read_header(smbox->shmem), NULL);
}

static bool mailbox_chan_available(struct device_node *of_node, int idx)
{
        int num_mb;

        /*
         * Just check if bidirectional channels are involved, and adjust the
         * index accordingly; proper full validation will be made later
         * in mailbox_chan_setup().
         */
        num_mb = of_count_phandle_with_args(of_node, "mboxes", "#mbox-cells");
        if (num_mb == 3 && idx == 1)
                idx = 2;

        return !of_parse_phandle_with_args(of_node, "mboxes",
                                           "#mbox-cells", idx, NULL);
}
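/*
 * For reference, an illustrative sketch of the devicetree layouts accepted
 * by mailbox_chan_validate() below; phandle names such as &mhu and
 * &cpu_scp_shm_* are made up for this example and the arm,scmi bindings
 * remain the authoritative description:
 *
 *   1 mbox / 1 shmem: one bidirectional A2P channel
 *       mboxes = <&mhu 0>;
 *       shmem = <&cpu_scp_shm_tx>;
 *
 *   2 mbox / 1 shmem: A2P channel plus a dedicated A2P reply channel
 *       mboxes = <&mhu 0>, <&mhu 1>;
 *       shmem = <&cpu_scp_shm_tx>;
 *
 *   2 mbox / 2 shmem: A2P channel plus a P2A notification channel
 *       mboxes = <&mhu 0>, <&mhu 1>;
 *       shmem = <&cpu_scp_shm_tx>, <&cpu_scp_shm_rx>;
 *
 *   3 mbox / 2 shmem: as above, plus a dedicated A2P reply channel
 *   4 mbox / 2 shmem: as above, plus a P2A completion channel
 */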
/**
 * mailbox_chan_validate - Validate transport configuration and map channels
 *
 * @cdev: Reference to the underlying transport device carrying the
 *        of_node descriptor to analyze.
 * @a2p_rx_chan: A reference to an optional unidirectional channel to use
 *               for replies on the a2p channel. Set as zero if not present.
 * @p2a_chan: A reference to the optional p2a channel.
 *            Set as zero if not present.
 * @p2a_rx_chan: A reference to the optional p2a completion channel.
 *               Set as zero if not present.
 *
 * At first, validate the transport configuration as described in terms of
 * 'mboxes' and 'shmem', then determine which mailbox channel indexes are
 * appropriate to use in the current configuration.
 *
 * Return: 0 on success or a negative error code otherwise
 */
static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan,
                                 int *p2a_chan, int *p2a_rx_chan)
{
        int num_mb, num_sh, ret = 0;
        struct device_node *np = cdev->of_node;

        num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
        num_sh = of_count_phandle_with_args(np, "shmem", NULL);
        dev_dbg(cdev, "Found %d mboxes and %d shmems !\n", num_mb, num_sh);

        /* Bail out if mboxes and shmem descriptors are inconsistent */
        if (num_mb <= 0 || num_sh <= 0 || num_sh > 2 || num_mb > 4 ||
            (num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2) ||
            (num_mb == 4 && num_sh != 2)) {
                dev_warn(cdev,
                         "Invalid channel descriptor for '%pOF' - mbs:%d shm:%d\n",
                         np, num_mb, num_sh);
                return -EINVAL;
        }

        /* Bail out if provided shmem descriptors do not refer to distinct areas */
        if (num_sh > 1) {
                struct device_node *np_tx __free(device_node) =
                                        of_parse_phandle(np, "shmem", 0);
                struct device_node *np_rx __free(device_node) =
                                        of_parse_phandle(np, "shmem", 1);

                if (!np_tx || !np_rx || np_tx == np_rx) {
                        dev_warn(cdev, "Invalid shmem descriptor for '%pOF'\n",
                                 np);
                        ret = -EINVAL;
                }
        }

        /* Calculate channel IDs to use depending on mboxes/shmem layout */
        if (!ret) {
                switch (num_mb) {
                case 1:
                        *a2p_rx_chan = 0;
                        *p2a_chan = 0;
                        *p2a_rx_chan = 0;
                        break;
                case 2:
                        if (num_sh == 2) {
                                *a2p_rx_chan = 0;
                                *p2a_chan = 1;
                        } else {
                                *a2p_rx_chan = 1;
                                *p2a_chan = 0;
                        }
                        *p2a_rx_chan = 0;
                        break;
                case 3:
                        *a2p_rx_chan = 1;
                        *p2a_chan = 2;
                        *p2a_rx_chan = 0;
                        break;
                case 4:
                        *a2p_rx_chan = 1;
                        *p2a_chan = 2;
                        *p2a_rx_chan = 3;
                        break;
                }
        }

        return ret;
}
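/*
 * For quick reference, the index mapping produced by the switch in
 * mailbox_chan_validate() above:
 *
 *   mboxes  shmem  |  a2p_rx_chan  p2a_chan  p2a_rx_chan
 *   ---------------+------------------------------------
 *      1      1    |       0          0          0
 *      2      1    |       1          0          0
 *      2      2    |       0          1          0
 *      3      2    |       1          2          0
 *      4      2    |       1          2          3
 */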
static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
                              bool tx)
{
        const char *desc = tx ? "Tx" : "Rx";
        struct device *cdev = cinfo->dev;
        struct scmi_mailbox *smbox;
        int ret, a2p_rx_chan, p2a_chan, p2a_rx_chan;
        struct mbox_client *cl;

        ret = mailbox_chan_validate(cdev, &a2p_rx_chan, &p2a_chan, &p2a_rx_chan);
        if (ret)
                return ret;

        if (!tx && !p2a_chan)
                return -ENODEV;

        smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
        if (!smbox)
                return -ENOMEM;

        smbox->shmem = core->shmem->setup_iomap(cinfo, dev, tx, NULL,
                                                &smbox->io_ops);
        if (IS_ERR(smbox->shmem))
                return PTR_ERR(smbox->shmem);

        cl = &smbox->cl;
        cl->dev = cdev;
        cl->tx_prepare = tx ? tx_prepare : NULL;
        cl->rx_callback = rx_callback;
        cl->tx_block = false;
        cl->knows_txdone = tx;

        smbox->chan = mbox_request_channel(cl, tx ? 0 : p2a_chan);
        if (IS_ERR(smbox->chan)) {
                ret = PTR_ERR(smbox->chan);
                if (ret != -EPROBE_DEFER)
                        dev_err(cdev,
                                "failed to request SCMI %s mailbox\n", desc);
                return ret;
        }

        /* Additional unidirectional channel for TX if needed */
        if (tx && a2p_rx_chan) {
                smbox->chan_receiver = mbox_request_channel(cl, a2p_rx_chan);
                if (IS_ERR(smbox->chan_receiver)) {
                        ret = PTR_ERR(smbox->chan_receiver);
                        if (ret != -EPROBE_DEFER)
                                dev_err(cdev, "failed to request SCMI Tx Receiver mailbox\n");
                        return ret;
                }
        }

        if (!tx && p2a_rx_chan) {
                smbox->chan_platform_receiver = mbox_request_channel(cl, p2a_rx_chan);
                if (IS_ERR(smbox->chan_platform_receiver)) {
                        ret = PTR_ERR(smbox->chan_platform_receiver);
                        if (ret != -EPROBE_DEFER)
                                dev_err(cdev, "failed to request SCMI P2A Receiver mailbox\n");
                        return ret;
                }
        }

        cinfo->transport_info = smbox;
        smbox->cinfo = cinfo;
        mutex_init(&smbox->chan_lock);

        return 0;
}

static int mailbox_chan_free(int id, void *p, void *data)
{
        struct scmi_chan_info *cinfo = p;
        struct scmi_mailbox *smbox = cinfo->transport_info;

        if (smbox && !IS_ERR(smbox->chan)) {
                mbox_free_channel(smbox->chan);
                mbox_free_channel(smbox->chan_receiver);
                mbox_free_channel(smbox->chan_platform_receiver);
                cinfo->transport_info = NULL;
                smbox->chan = NULL;
                smbox->chan_receiver = NULL;
                smbox->chan_platform_receiver = NULL;
                smbox->cinfo = NULL;
        }

        return 0;
}

static int mailbox_send_message(struct scmi_chan_info *cinfo,
                                struct scmi_xfer *xfer)
{
        struct scmi_mailbox *smbox = cinfo->transport_info;
        int ret;

        /*
         * The mailbox layer has its own queue. However, the mailbox queue
         * confuses the per-message SCMI timeouts since the clock starts when
         * the message is submitted into the mailbox queue. So when multiple
         * messages are queued up the clock starts on all messages instead of
         * only the one inflight.
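         *
         * Hence, chan_lock is taken here and only released in
         * mailbox_mark_txdone(), so that at most one message sits in the
         * mailbox queue at any given time.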
         */
        mutex_lock(&smbox->chan_lock);

        ret = mbox_send_message(smbox->chan, xfer);
        /* mbox_send_message returns a non-negative value on success */
        if (ret < 0) {
                mutex_unlock(&smbox->chan_lock);
                return ret;
        }

        return 0;
}

static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
                                struct scmi_xfer *__unused)
{
        struct scmi_mailbox *smbox = cinfo->transport_info;

        mbox_client_txdone(smbox->chan, ret);

        /* Release channel */
        mutex_unlock(&smbox->chan_lock);
}

static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
                                   struct scmi_xfer *xfer)
{
        struct scmi_mailbox *smbox = cinfo->transport_info;

        core->shmem->fetch_response(smbox->shmem, xfer, smbox->io_ops->fromio);
}

static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
                                       size_t max_len, struct scmi_xfer *xfer)
{
        struct scmi_mailbox *smbox = cinfo->transport_info;

        core->shmem->fetch_notification(smbox->shmem, max_len, xfer,
                                        smbox->io_ops->fromio);
}

static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
{
        struct scmi_mailbox *smbox = cinfo->transport_info;
        struct mbox_chan *intr_chan;
        int ret;

        core->shmem->clear_channel(smbox->shmem);

        if (!core->shmem->channel_intr_enabled(smbox->shmem))
                return;

        if (smbox->chan_platform_receiver)
                intr_chan = smbox->chan_platform_receiver;
        else if (smbox->chan)
                intr_chan = smbox->chan;
        else
                return;

        ret = mbox_send_message(intr_chan, NULL);
        /* mbox_send_message returns a non-negative value on success, so reset */
        if (ret > 0)
                ret = 0;

        mbox_client_txdone(intr_chan, ret);
}

static bool
mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
        struct scmi_mailbox *smbox = cinfo->transport_info;

        return core->shmem->poll_done(smbox->shmem, xfer);
}

static const struct scmi_transport_ops scmi_mailbox_ops = {
        .chan_available = mailbox_chan_available,
        .chan_setup = mailbox_chan_setup,
        .chan_free = mailbox_chan_free,
        .send_message = mailbox_send_message,
        .mark_txdone = mailbox_mark_txdone,
        .fetch_response = mailbox_fetch_response,
        .fetch_notification = mailbox_fetch_notification,
        .clear_channel = mailbox_clear_channel,
        .poll_done = mailbox_poll_done,
};

static struct scmi_desc scmi_mailbox_desc = {
        .ops = &scmi_mailbox_ops,
        .max_rx_timeout_ms = 30, /* We may increase this if required */
        .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
        .max_msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE,
};

static const struct of_device_id scmi_of_match[] = {
        { .compatible = "arm,scmi" },
        { /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, scmi_of_match);

DEFINE_SCMI_TRANSPORT_DRIVER(scmi_mailbox, scmi_mailbox_driver,
                             scmi_mailbox_desc, scmi_of_match, core);
module_platform_driver(scmi_mailbox_driver);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("SCMI Mailbox Transport driver");
MODULE_LICENSE("GPL");