// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Mailbox Transport
 * driver.
 *
 * Copyright (C) 2019-2024 ARM Ltd.
 */

#include <linux/err.h>
#include <linux/device.h>
#include <linux/mailbox_client.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "../common.h"

/**
 * struct scmi_mailbox - Structure representing a SCMI mailbox transport
 *
 * @cl: Mailbox Client
 * @chan: Transmit/Receive mailbox uni/bi-directional channel
 * @chan_receiver: Optional Receiver mailbox unidirectional channel
 * @chan_platform_receiver: Optional Platform Receiver mailbox unidirectional channel
 * @cinfo: SCMI channel info
 * @shmem: Transmit/Receive shared memory area
 * @chan_lock: Lock that prevents multiple xfers from being queued
 * @io_ops: Transport specific I/O operations
 */
struct scmi_mailbox {
	struct mbox_client cl;
	struct mbox_chan *chan;
	struct mbox_chan *chan_receiver;
	struct mbox_chan *chan_platform_receiver;
	struct scmi_chan_info *cinfo;
	struct scmi_shared_mem __iomem *shmem;
	struct mutex chan_lock;
	struct scmi_shmem_io_ops *io_ops;
};

#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)

static struct scmi_transport_core_operations *core;

static void tx_prepare(struct mbox_client *cl, void *m)
{
	struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);

	core->shmem->tx_prepare(smbox->shmem, m, smbox->cinfo,
				smbox->io_ops->toio);
}

static void rx_callback(struct mbox_client *cl, void *m)
{
	struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);

	/*
	 * An A2P IRQ is NOT valid when received while the platform still has
	 * the ownership of the channel, because the platform at first releases
	 * the SMT channel and then sends the completion interrupt.
	 *
	 * This addresses a possible race condition in which a spurious IRQ
	 * from a previous timed-out reply which arrived late could be wrongly
	 * associated with the next pending transaction.
	 */
	if (cl->knows_txdone &&
	    !core->shmem->channel_free(smbox->shmem)) {
		dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n");
		core->bad_message_trace(smbox->cinfo,
					core->shmem->read_header(smbox->shmem),
					MSG_MBOX_SPURIOUS);
		return;
	}

	core->rx_callback(smbox->cinfo,
			  core->shmem->read_header(smbox->shmem), NULL);
}

static bool mailbox_chan_available(struct device_node *of_node, int idx)
{
	int num_mb;

	/*
	 * Just check if bidirectional channels are involved, and adjust the
	 * index accordingly; proper full validation will be made later
	 * in mailbox_chan_setup().
	 */
	num_mb = of_count_phandle_with_args(of_node, "mboxes", "#mbox-cells");
	if (num_mb == 3 && idx == 1)
		idx = 2;

	return !of_parse_phandle_with_args(of_node, "mboxes",
					   "#mbox-cells", idx, NULL);
}

/**
 * mailbox_chan_validate - Validate transport configuration and map channels
 *
 * @cdev: Reference to the underlying transport device carrying the
 *	  of_node descriptor to analyze.
 * @a2p_rx_chan: A reference to an optional unidirectional channel to use
 *		 for replies on the a2p channel. Set as zero if not present.
 * @p2a_chan: A reference to the optional p2a channel.
 *	      Set as zero if not present.
 * @p2a_rx_chan: A reference to the optional p2a completion channel.
 *		 Set as zero if not present.
 *
 * At first, validate the transport configuration as described in terms of
 * 'mboxes' and 'shmem', then determine which mailbox channel indexes are
 * appropriate to use in the current configuration.
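 *
 * As an illustrative sketch (all node and phandle names below are
 * hypothetical), a 3 mbox / 2 shmem layout, i.e. a split a2p channel
 * with a dedicated reply doorbell plus a separate p2a channel, could
 * be described in the device tree as:
 *
 *	mboxes = <&mhu_a2p_tx 0>, <&mhu_a2p_rx 0>, <&mhu_p2a 0>;
 *	shmem = <&scmi_tx_mem>, <&scmi_rx_mem>;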
 *
 * Return: 0 on success, or a negative error code otherwise
 */
static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan,
				 int *p2a_chan, int *p2a_rx_chan)
{
	int num_mb, num_sh, ret = 0;
	struct device_node *np = cdev->of_node;

	num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
	num_sh = of_count_phandle_with_args(np, "shmem", NULL);
	dev_dbg(cdev, "Found %d mboxes and %d shmems !\n", num_mb, num_sh);

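	/*
	 * Allowed mboxes/shmem combinations, as enforced by the checks and
	 * the channel index mapping below:
	 *
	 * 1 mbox / 1 shmem => a2p only, over 1 bidirectional channel
	 * 2 mbox / 1 shmem => a2p only, over 2 unidirectional channels
	 * 2 mbox / 2 shmem => a2p and p2a, each over 1 bidirectional channel
	 * 3 mbox / 2 shmem => a2p over 2 unidirectional channels, plus
	 *		       p2a over 1 bidirectional channel
	 * 4 mbox / 2 shmem => a2p and p2a, each over 2 unidirectional channels
	 */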
	/* Bail out if mboxes and shmem descriptors are inconsistent */
	if (num_mb <= 0 || num_sh <= 0 || num_sh > 2 || num_mb > 4 ||
	    (num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2) ||
	    (num_mb == 4 && num_sh != 2)) {
		dev_warn(cdev,
			 "Invalid channel descriptor for '%s' - mbs:%d shm:%d\n",
			 of_node_full_name(np), num_mb, num_sh);
		return -EINVAL;
	}

	/* Bail out if provided shmem descriptors do not refer to distinct areas */
	if (num_sh > 1) {
		struct device_node *np_tx __free(device_node) =
					of_parse_phandle(np, "shmem", 0);
		struct device_node *np_rx __free(device_node) =
					of_parse_phandle(np, "shmem", 1);

		if (!np_tx || !np_rx || np_tx == np_rx) {
			dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
				 of_node_full_name(np));
			ret = -EINVAL;
		}
	}

	/* Calculate channel IDs to use depending on mboxes/shmem layout */
	if (!ret) {
		switch (num_mb) {
		case 1:
			*a2p_rx_chan = 0;
			*p2a_chan = 0;
			*p2a_rx_chan = 0;
			break;
		case 2:
			if (num_sh == 2) {
				*a2p_rx_chan = 0;
				*p2a_chan = 1;
			} else {
				*a2p_rx_chan = 1;
				*p2a_chan = 0;
			}
			*p2a_rx_chan = 0;
			break;
		case 3:
			*a2p_rx_chan = 1;
			*p2a_chan = 2;
			*p2a_rx_chan = 0;
			break;
		case 4:
			*a2p_rx_chan = 1;
			*p2a_chan = 2;
			*p2a_rx_chan = 3;
			break;
		}
	}

	return ret;
}

static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
			      bool tx)
{
	const char *desc = tx ? "Tx" : "Rx";
	struct device *cdev = cinfo->dev;
	struct scmi_mailbox *smbox;
	int ret, a2p_rx_chan, p2a_chan, p2a_rx_chan;
	struct mbox_client *cl;

	ret = mailbox_chan_validate(cdev, &a2p_rx_chan, &p2a_chan, &p2a_rx_chan);
	if (ret)
		return ret;

	if (!tx && !p2a_chan)
		return -ENODEV;

	smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
	if (!smbox)
		return -ENOMEM;

	smbox->shmem = core->shmem->setup_iomap(cinfo, dev, tx, NULL,
						&smbox->io_ops);
	if (IS_ERR(smbox->shmem))
		return PTR_ERR(smbox->shmem);

	cl = &smbox->cl;
	cl->dev = cdev;
	cl->tx_prepare = tx ? tx_prepare : NULL;
	cl->rx_callback = rx_callback;
	cl->tx_block = false;
	cl->knows_txdone = tx;

	smbox->chan = mbox_request_channel(cl, tx ? 0 : p2a_chan);
	if (IS_ERR(smbox->chan)) {
		ret = PTR_ERR(smbox->chan);
		if (ret != -EPROBE_DEFER)
			dev_err(cdev,
				"failed to request SCMI %s mailbox\n", desc);
		return ret;
	}

	/* Additional unidirectional channel for TX if needed */
	if (tx && a2p_rx_chan) {
		smbox->chan_receiver = mbox_request_channel(cl, a2p_rx_chan);
		if (IS_ERR(smbox->chan_receiver)) {
			ret = PTR_ERR(smbox->chan_receiver);
			if (ret != -EPROBE_DEFER)
				dev_err(cdev, "failed to request SCMI Tx Receiver mailbox\n");
			return ret;
		}
	}

	if (!tx && p2a_rx_chan) {
		smbox->chan_platform_receiver = mbox_request_channel(cl, p2a_rx_chan);
		if (IS_ERR(smbox->chan_platform_receiver)) {
			ret = PTR_ERR(smbox->chan_platform_receiver);
			if (ret != -EPROBE_DEFER)
				dev_err(cdev, "failed to request SCMI P2A Receiver mailbox\n");
			return ret;
		}
	}

	cinfo->transport_info = smbox;
	smbox->cinfo = cinfo;
	mutex_init(&smbox->chan_lock);

	return 0;
}

static int mailbox_chan_free(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct scmi_mailbox *smbox = cinfo->transport_info;

	if (smbox && !IS_ERR(smbox->chan)) {
		mbox_free_channel(smbox->chan);
		mbox_free_channel(smbox->chan_receiver);
		mbox_free_channel(smbox->chan_platform_receiver);
		cinfo->transport_info = NULL;
		smbox->chan = NULL;
		smbox->chan_receiver = NULL;
		smbox->chan_platform_receiver = NULL;
		smbox->cinfo = NULL;
	}

	return 0;
}

static int mailbox_send_message(struct scmi_chan_info *cinfo,
				struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;
	int ret;

	/*
	 * The mailbox layer has its own queue. However, the mailbox queue
	 * confuses the per-message SCMI timeouts, since the clock starts when
	 * a message is submitted into the mailbox queue. So, when multiple
	 * messages are queued up, the clock starts on all of them instead of
	 * only the one inflight; serialize submissions with chan_lock instead.
	 */
	mutex_lock(&smbox->chan_lock);

	ret = mbox_send_message(smbox->chan, xfer);
	/* mbox_send_message returns non-negative value on success */
	if (ret < 0) {
		mutex_unlock(&smbox->chan_lock);
		return ret;
	}

	return 0;
}
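/*
 * Note that the chan_lock mutex taken in mailbox_send_message() is released
 * only here (or on a failed send): the SCMI core is expected to invoke
 * mark_txdone() once the current exchange has completed or timed out, so the
 * channel remains exclusively owned for the whole lifetime of one inflight
 * message.
 */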
static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
				struct scmi_xfer *__unused)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	mbox_client_txdone(smbox->chan, ret);

	/* Release channel */
	mutex_unlock(&smbox->chan_lock);
}

static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
				   struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	core->shmem->fetch_response(smbox->shmem, xfer, smbox->io_ops->fromio);
}

static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
				       size_t max_len, struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	core->shmem->fetch_notification(smbox->shmem, max_len, xfer,
					smbox->io_ops->fromio);
}

static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;
	struct mbox_chan *intr_chan;
	int ret;

	core->shmem->clear_channel(smbox->shmem);

	if (!core->shmem->channel_intr_enabled(smbox->shmem))
		return;

	if (smbox->chan_platform_receiver)
		intr_chan = smbox->chan_platform_receiver;
	else if (smbox->chan)
		intr_chan = smbox->chan;
	else
		return;

	ret = mbox_send_message(intr_chan, NULL);
	/* mbox_send_message returns non-negative value on success, so reset */
	if (ret > 0)
		ret = 0;

	mbox_client_txdone(intr_chan, ret);
}

static bool
mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	return core->shmem->poll_done(smbox->shmem, xfer);
}

static const struct scmi_transport_ops scmi_mailbox_ops = {
	.chan_available = mailbox_chan_available,
	.chan_setup = mailbox_chan_setup,
	.chan_free = mailbox_chan_free,
	.send_message = mailbox_send_message,
	.mark_txdone = mailbox_mark_txdone,
	.fetch_response = mailbox_fetch_response,
	.fetch_notification = mailbox_fetch_notification,
	.clear_channel = mailbox_clear_channel,
	.poll_done = mailbox_poll_done,
};

static struct scmi_desc scmi_mailbox_desc = {
	.ops = &scmi_mailbox_ops,
	.max_rx_timeout_ms = 30, /* We may increase this if required */
	.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
	.max_msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE,
};

static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi" },
	{ /* Sentinel */ },
};

DEFINE_SCMI_TRANSPORT_DRIVER(scmi_mailbox, scmi_mailbox_driver,
			     scmi_mailbox_desc, scmi_of_match, core);
module_platform_driver(scmi_mailbox_driver);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("SCMI Mailbox Transport driver");
MODULE_LICENSE("GPL");