// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Mailbox Transport
 * driver.
 *
 * Copyright (C) 2019-2024 ARM Ltd.
 */

#include <linux/err.h>
#include <linux/device.h>
#include <linux/mailbox_client.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "../common.h"

/**
 * struct scmi_mailbox - Structure representing a SCMI mailbox transport
 *
 * @cl: Mailbox Client
 * @chan: Transmit/Receive mailbox uni/bi-directional channel
 * @chan_receiver: Optional Receiver mailbox unidirectional channel
 * @chan_platform_receiver: Optional Platform Receiver mailbox unidirectional channel
 * @cinfo: SCMI channel info
 * @shmem: Transmit/Receive shared memory area
 * @chan_lock: Lock that prevents multiple xfers from being queued
 */
struct scmi_mailbox {
	struct mbox_client cl;
	struct mbox_chan *chan;
	struct mbox_chan *chan_receiver;
	struct mbox_chan *chan_platform_receiver;
	struct scmi_chan_info *cinfo;
	struct scmi_shared_mem __iomem *shmem;
	struct mutex chan_lock;
};

#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)

static struct scmi_transport_core_operations *core;

static void tx_prepare(struct mbox_client *cl, void *m)
{
	struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);

	core->shmem->tx_prepare(smbox->shmem, m, smbox->cinfo);
}

static void rx_callback(struct mbox_client *cl, void *m)
{
	struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);

	/*
	 * An A2P IRQ is NOT valid when received while the platform still has
	 * the ownership of the channel, because the platform at first releases
	 * the SMT channel and then sends the completion interrupt.
	 *
	 * This addresses a possible race condition in which a spurious IRQ from
	 * a previous timed-out reply which arrived late could be wrongly
	 * associated with the next pending transaction.
	 */
	if (cl->knows_txdone &&
	    !core->shmem->channel_free(smbox->shmem)) {
		dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ!\n");
		core->bad_message_trace(smbox->cinfo,
					core->shmem->read_header(smbox->shmem),
					MSG_MBOX_SPURIOUS);
		return;
	}

	core->rx_callback(smbox->cinfo,
			  core->shmem->read_header(smbox->shmem), NULL);
}
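/*
 * Illustrative timeline for the race guarded against above (a sketch only,
 * with made-up message names, not taken from the SCMI specification):
 *
 *	1. Agent sends CMD_A; CMD_A times out on the agent side.
 *	2. Platform completes CMD_A late: it releases the SMT channel first
 *	   and only then raises the completion IRQ.
 *	3. Before that IRQ is delivered, the agent queues CMD_B and
 *	   tx_prepare() marks the channel busy again.
 *	4. The late IRQ for CMD_A now finds the channel busy with CMD_B:
 *	   channel_free() reports false, so rx_callback() drops it as
 *	   spurious instead of wrongly completing CMD_B.
 */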
static bool mailbox_chan_available(struct device_node *of_node, int idx)
{
	int num_mb;

	/*
	 * Just check if bidirectional channels are involved, and adjust the
	 * index accordingly; proper full validation will be made later
	 * in mailbox_chan_setup().
	 */
	num_mb = of_count_phandle_with_args(of_node, "mboxes", "#mbox-cells");
	if (num_mb == 3 && idx == 1)
		idx = 2;

	return !of_parse_phandle_with_args(of_node, "mboxes",
					   "#mbox-cells", idx, NULL);
}

/**
 * mailbox_chan_validate - Validate transport configuration and map channels
 *
 * @cdev: Reference to the underlying transport device carrying the
 *	  of_node descriptor to analyze.
 * @a2p_rx_chan: A reference to an optional unidirectional channel to use
 *		 for replies on the a2p channel. Set to zero if not present.
 * @p2a_chan: A reference to the optional p2a channel.
 *	      Set to zero if not present.
 * @p2a_rx_chan: A reference to the optional p2a completion channel.
 *		 Set to zero if not present.
 *
 * First validate the transport configuration as described in terms of
 * 'mboxes' and 'shmem', then determine which mailbox channel indexes are
 * appropriate to use in the current configuration.
 *
 * Return: 0 on success, or a negative error code otherwise.
 */
static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan,
				 int *p2a_chan, int *p2a_rx_chan)
{
	int num_mb, num_sh, ret = 0;
	struct device_node *np = cdev->of_node;

	num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
	num_sh = of_count_phandle_with_args(np, "shmem", NULL);
	dev_dbg(cdev, "Found %d mboxes and %d shmems!\n", num_mb, num_sh);

	/* Bail out if mboxes and shmem descriptors are inconsistent */
	if (num_mb <= 0 || num_sh <= 0 || num_sh > 2 || num_mb > 4 ||
	    (num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2) ||
	    (num_mb == 4 && num_sh != 2)) {
		dev_warn(cdev,
			 "Invalid channel descriptor for '%s' - mbs:%d shm:%d\n",
			 of_node_full_name(np), num_mb, num_sh);
		return -EINVAL;
	}

	/* Bail out if provided shmem descriptors do not refer to distinct areas */
	if (num_sh > 1) {
		struct device_node *np_tx __free(device_node) =
					of_parse_phandle(np, "shmem", 0);
		struct device_node *np_rx __free(device_node) =
					of_parse_phandle(np, "shmem", 1);

		if (!np_tx || !np_rx || np_tx == np_rx) {
			dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
				 of_node_full_name(np));
			ret = -EINVAL;
		}
	}

	/* Calculate channel IDs to use depending on mboxes/shmem layout */
	if (!ret) {
		switch (num_mb) {
		case 1:
			*a2p_rx_chan = 0;
			*p2a_chan = 0;
			*p2a_rx_chan = 0;
			break;
		case 2:
			if (num_sh == 2) {
				*a2p_rx_chan = 0;
				*p2a_chan = 1;
			} else {
				*a2p_rx_chan = 1;
				*p2a_chan = 0;
			}
			*p2a_rx_chan = 0;
			break;
		case 3:
			*a2p_rx_chan = 1;
			*p2a_chan = 2;
			*p2a_rx_chan = 0;
			break;
		case 4:
			*a2p_rx_chan = 1;
			*p2a_chan = 2;
			*p2a_rx_chan = 3;
			break;
		}
	}

	return ret;
}
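/*
 * Accepted mboxes/shmem layouts, as encoded by the switch above:
 *
 *	1 mbox / 1 shmem => a2p TX/RX over one bidirectional channel
 *	2 mbox / 1 shmem => a2p TX + dedicated a2p completion (reply) channel
 *	2 mbox / 2 shmem => a2p TX + p2a RX channel
 *	3 mbox / 2 shmem => a2p TX + a2p completion + p2a RX channel
 *	4 mbox / 2 shmem => as above, plus a p2a completion channel
 *
 * A matching devicetree fragment for the 2 mbox / 2 shmem case could look
 * as follows (node and label names are illustrative only; the mbox
 * specifier format depends on the mailbox controller in use):
 *
 *	firmware {
 *		scmi {
 *			compatible = "arm,scmi";
 *			mboxes = <&mhu 0>, <&mhu 1>;
 *			shmem = <&cpu_scp_lpri>, <&cpu_scp_hpri>;
 *		};
 *	};
 */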
static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
			      bool tx)
{
	const char *desc = tx ? "Tx" : "Rx";
	struct device *cdev = cinfo->dev;
	struct scmi_mailbox *smbox;
	int ret, a2p_rx_chan, p2a_chan, p2a_rx_chan;
	struct mbox_client *cl;

	ret = mailbox_chan_validate(cdev, &a2p_rx_chan, &p2a_chan, &p2a_rx_chan);
	if (ret)
		return ret;

	if (!tx && !p2a_chan)
		return -ENODEV;

	smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
	if (!smbox)
		return -ENOMEM;

	smbox->shmem = core->shmem->setup_iomap(cinfo, dev, tx, NULL);
	if (IS_ERR(smbox->shmem))
		return PTR_ERR(smbox->shmem);

	cl = &smbox->cl;
	cl->dev = cdev;
	cl->tx_prepare = tx ? tx_prepare : NULL;
	cl->rx_callback = rx_callback;
	cl->tx_block = false;
	cl->knows_txdone = tx;

	smbox->chan = mbox_request_channel(cl, tx ? 0 : p2a_chan);
	if (IS_ERR(smbox->chan)) {
		ret = PTR_ERR(smbox->chan);
		if (ret != -EPROBE_DEFER)
			dev_err(cdev,
				"failed to request SCMI %s mailbox\n", desc);
		return ret;
	}

	/* Additional unidirectional channel for TX if needed */
	if (tx && a2p_rx_chan) {
		smbox->chan_receiver = mbox_request_channel(cl, a2p_rx_chan);
		if (IS_ERR(smbox->chan_receiver)) {
			ret = PTR_ERR(smbox->chan_receiver);
			if (ret != -EPROBE_DEFER)
				dev_err(cdev, "failed to request SCMI Tx Receiver mailbox\n");
			return ret;
		}
	}

	if (!tx && p2a_rx_chan) {
		smbox->chan_platform_receiver = mbox_request_channel(cl, p2a_rx_chan);
		if (IS_ERR(smbox->chan_platform_receiver)) {
			ret = PTR_ERR(smbox->chan_platform_receiver);
			if (ret != -EPROBE_DEFER)
				dev_err(cdev, "failed to request SCMI P2A Receiver mailbox\n");
			return ret;
		}
	}

	cinfo->transport_info = smbox;
	smbox->cinfo = cinfo;
	mutex_init(&smbox->chan_lock);

	return 0;
}

static int mailbox_chan_free(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct scmi_mailbox *smbox = cinfo->transport_info;

	if (smbox && !IS_ERR(smbox->chan)) {
		mbox_free_channel(smbox->chan);
		mbox_free_channel(smbox->chan_receiver);
		mbox_free_channel(smbox->chan_platform_receiver);
		cinfo->transport_info = NULL;
		smbox->chan = NULL;
		smbox->chan_receiver = NULL;
		smbox->chan_platform_receiver = NULL;
		smbox->cinfo = NULL;
	}

	return 0;
}

static int mailbox_send_message(struct scmi_chan_info *cinfo,
				struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;
	int ret;

	/*
	 * The mailbox layer has its own queue. However the mailbox queue
	 * confuses the per message SCMI timeouts since the clock starts when
	 * the message is submitted into the mailbox queue. So when multiple
	 * messages are queued up the clock starts on all messages instead of
	 * only the one inflight. Serialize on chan_lock so that at most one
	 * xfer is ever submitted to the mailbox layer at a time; the lock is
	 * released in mailbox_mark_txdone().
	 */
	mutex_lock(&smbox->chan_lock);

	ret = mbox_send_message(smbox->chan, xfer);
	/* mbox_send_message returns non-negative value on success */
	if (ret < 0) {
		mutex_unlock(&smbox->chan_lock);
		return ret;
	}

	return 0;
}
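/*
 * Resulting per-command flow as driven by the SCMI core - a rough sketch
 * assuming the usual completion-interrupt (non-polling) path:
 *
 *	mailbox_send_message()		takes chan_lock, queues the xfer
 *	  tx_prepare()			called back by the mailbox layer,
 *					fills the shared memory area
 *	rx_callback()			completion IRQ, reply header read
 *	mailbox_fetch_response()	reply payload fetched from shmem
 *	mailbox_mark_txdone()		flags txdone, releases chan_lock
 */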
static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
				struct scmi_xfer *__unused)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	mbox_client_txdone(smbox->chan, ret);

	/* Release channel */
	mutex_unlock(&smbox->chan_lock);
}

static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
				   struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	core->shmem->fetch_response(smbox->shmem, xfer);
}

static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
				       size_t max_len, struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	core->shmem->fetch_notification(smbox->shmem, max_len, xfer);
}

static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;
	struct mbox_chan *intr_chan;
	int ret;

	core->shmem->clear_channel(smbox->shmem);

	if (!core->shmem->channel_intr_enabled(smbox->shmem))
		return;

	if (smbox->chan_platform_receiver)
		intr_chan = smbox->chan_platform_receiver;
	else if (smbox->chan)
		intr_chan = smbox->chan;
	else
		return;

	ret = mbox_send_message(intr_chan, NULL);
	/* mbox_send_message returns non-negative value on success, so reset */
	if (ret > 0)
		ret = 0;

	mbox_client_txdone(intr_chan, ret);
}

static bool
mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	return core->shmem->poll_done(smbox->shmem, xfer);
}

static const struct scmi_transport_ops scmi_mailbox_ops = {
	.chan_available = mailbox_chan_available,
	.chan_setup = mailbox_chan_setup,
	.chan_free = mailbox_chan_free,
	.send_message = mailbox_send_message,
	.mark_txdone = mailbox_mark_txdone,
	.fetch_response = mailbox_fetch_response,
	.fetch_notification = mailbox_fetch_notification,
	.clear_channel = mailbox_clear_channel,
	.poll_done = mailbox_poll_done,
};

static struct scmi_desc scmi_mailbox_desc = {
	.ops = &scmi_mailbox_ops,
	.max_rx_timeout_ms = 30, /* We may increase this if required */
	.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
	.max_msg_size = 128,
};

static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi" },
	{ /* Sentinel */ },
};

DEFINE_SCMI_TRANSPORT_DRIVER(scmi_mailbox, scmi_mailbox_driver,
			     scmi_mailbox_desc, scmi_of_match, core);
module_platform_driver(scmi_mailbox_driver);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("SCMI Mailbox Transport driver");
MODULE_LICENSE("GPL");