// SPDX-License-Identifier: GPL-2.0-only
/*
 * MediaTek GPUEB mailbox driver for SoCs such as the MT8196
 *
 * Copyright (C) 2025, Collabora Ltd.
 *
 * Developers harmed in the making of this driver:
 *  - Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
 */

#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#define GPUEB_MBOX_CTL_TX_STS	0x00
#define GPUEB_MBOX_CTL_IRQ_SET	0x04
#define GPUEB_MBOX_CTL_IRQ_CLR	0x74
#define GPUEB_MBOX_CTL_RX_STS	0x78

#define GPUEB_MBOX_FULL		BIT(0) /* i.e. we've received data */
#define GPUEB_MBOX_BLOCKED	BIT(1) /* i.e. the channel is shut down */

#define GPUEB_MBOX_MAX_RX_SIZE	32 /* in bytes */

/**
 * struct mtk_gpueb_mbox - per-instance mailbox controller data
 * @dev: pointer to the mailbox controller device
 * @clk: the EB clock, enabled on channel startup and disabled on shutdown
 * @mbox_mmio: mapped mailbox data registers holding the TX/RX buffers
 * @mbox_ctl: mapped mailbox control registers
 * @mbox: the mailbox framework controller
 * @ch: array of per-channel runtime data, one entry per channel
 * @irq: interrupt line shared by all channels
 * @v: pointer to the constant &struct mtk_gpueb_mbox_variant SoC data
 */
struct mtk_gpueb_mbox {
	struct device *dev;
	struct clk *clk;
	void __iomem *mbox_mmio;
	void __iomem *mbox_ctl;
	struct mbox_controller mbox;
	struct mtk_gpueb_mbox_chan *ch;
	int irq;
	const struct mtk_gpueb_mbox_variant *v;
};

/**
 * struct mtk_gpueb_mbox_chan - per-channel runtime data
 * @ebm: pointer to the parent &struct mtk_gpueb_mbox mailbox
 * @full_name: descriptive name of channel for IRQ subsystem
 * @num: channel number, starting at 0
 * @rx_status: RX state of the channel, a mask of %GPUEB_MBOX_FULL and
 *             %GPUEB_MBOX_BLOCKED
 * @c: pointer to the constant &struct mtk_gpueb_mbox_chan_desc channel data
 */
struct mtk_gpueb_mbox_chan {
	struct mtk_gpueb_mbox *ebm;
	char *full_name;
	u8 num;
	atomic_t rx_status;
	const struct mtk_gpueb_mbox_chan_desc *c;
};

/**
 * struct mtk_gpueb_mbox_chan_desc - per-channel constant data
 * @name: name of this channel
 * @num: index of this channel, starting at 0
 * @tx_offset: byte offset measured from mmio base for outgoing data
 * @tx_len: size, in bytes, of the outgoing data on this channel
 * @rx_offset: byte offset measured from mmio base for incoming data
 * @rx_len: size, in bytes, of the incoming data on this channel
 */
struct mtk_gpueb_mbox_chan_desc {
	const char *name;
	const u8 num;
	const u16 tx_offset;
	const u8 tx_len;
	const u16 rx_offset;
	const u8 rx_len;
};

/**
 * struct mtk_gpueb_mbox_variant - SoC-specific constant mailbox data
 * @num_channels: number of channels provided by this variant
 * @channels: constant per-channel data, one entry per channel
 */
struct mtk_gpueb_mbox_variant {
	const u8 num_channels;
	const struct mtk_gpueb_mbox_chan_desc channels[] __counted_by(num_channels);
};

/**
 * mtk_gpueb_mbox_read_rx - read RX buffer from MMIO into channel's RX buffer
 * @buf: buffer to read into
 * @chan: pointer to the channel to read
 */
static void mtk_gpueb_mbox_read_rx(void *buf, struct mtk_gpueb_mbox_chan *chan)
{
	memcpy_fromio(buf, chan->ebm->mbox_mmio + chan->c->rx_offset, chan->c->rx_len);
}

static irqreturn_t mtk_gpueb_mbox_isr(int irq, void *data)
{
	struct mtk_gpueb_mbox_chan *ch = data;
	u32 rx_sts;

	rx_sts = readl(ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_RX_STS);

	if (rx_sts & BIT(ch->num)) {
		if (!atomic_cmpxchg(&ch->rx_status, 0, GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED))
			return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}

static irqreturn_t mtk_gpueb_mbox_thread(int irq, void *data)
{
	struct mtk_gpueb_mbox_chan *ch = data;
	int status;

	status = atomic_cmpxchg(&ch->rx_status, GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED,
				GPUEB_MBOX_FULL);
	if (status == (GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED)) {
		u8 buf[GPUEB_MBOX_MAX_RX_SIZE] = {};

		mtk_gpueb_mbox_read_rx(buf, ch);
		writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_CLR);
		mbox_chan_received_data(&ch->ebm->mbox.chans[ch->num], buf);
		atomic_set(&ch->rx_status, 0);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int mtk_gpueb_mbox_send_data(struct mbox_chan *chan, void *data)
{
	struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
	u32 *values = data;
	int i;

	if (atomic_read(&ch->rx_status))
		return -EBUSY;

	/*
	 * We don't want any fancy nonsense, just write the 32-bit values in
	 * order. memcpy_toio/__iowrite32_copy don't work here, as they may use
	 * writes of different sizes or memory ordering characteristics depending
	 * on the architecture, alignment and the current phase of the moon.
	 */
	for (i = 0; i < ch->c->tx_len; i += 4)
		writel(values[i / 4], ch->ebm->mbox_mmio + ch->c->tx_offset + i);

	writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_SET);

	return 0;
}

static int mtk_gpueb_mbox_startup(struct mbox_chan *chan)
{
	struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
	int ret;

	atomic_set(&ch->rx_status, 0);

	ret = clk_enable(ch->ebm->clk);
	if (ret) {
		dev_err(ch->ebm->dev, "Failed to enable EB clock: %pe\n",
			ERR_PTR(ret));
		goto err_block;
	}

	writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_CLR);

	ret = devm_request_threaded_irq(ch->ebm->dev, ch->ebm->irq, mtk_gpueb_mbox_isr,
					mtk_gpueb_mbox_thread, IRQF_SHARED | IRQF_ONESHOT,
					ch->full_name, ch);
	if (ret) {
		dev_err(ch->ebm->dev, "Failed to request IRQ: %pe\n",
			ERR_PTR(ret));
		goto err_unclk;
	}

	return 0;

err_unclk:
	clk_disable(ch->ebm->clk);
err_block:
	atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);

	return ret;
}

static void mtk_gpueb_mbox_shutdown(struct mbox_chan *chan)
{
	struct mtk_gpueb_mbox_chan *ch = chan->con_priv;

	atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);

	devm_free_irq(ch->ebm->dev, ch->ebm->irq, ch);

	clk_disable(ch->ebm->clk);
}

static bool mtk_gpueb_mbox_last_tx_done(struct mbox_chan *chan)
{
	struct mtk_gpueb_mbox_chan *ch = chan->con_priv;

	return !(readl(ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_TX_STS) & BIT(ch->num));
}

static const struct mbox_chan_ops mtk_gpueb_mbox_ops = {
	.send_data = mtk_gpueb_mbox_send_data,
	.startup = mtk_gpueb_mbox_startup,
	.shutdown = mtk_gpueb_mbox_shutdown,
	.last_tx_done = mtk_gpueb_mbox_last_tx_done,
};

static int mtk_gpueb_mbox_probe(struct platform_device *pdev)
{
	struct mtk_gpueb_mbox_chan *ch;
	struct mtk_gpueb_mbox *ebm;
	unsigned int i;

	ebm = devm_kzalloc(&pdev->dev, sizeof(*ebm), GFP_KERNEL);
	if (!ebm)
		return -ENOMEM;

	ebm->dev = &pdev->dev;
	ebm->v = of_device_get_match_data(ebm->dev);

	ebm->irq = platform_get_irq(pdev, 0);
	if (ebm->irq < 0)
		return ebm->irq;

	ebm->clk = devm_clk_get_prepared(ebm->dev, NULL);
	if (IS_ERR(ebm->clk))
		return dev_err_probe(ebm->dev, PTR_ERR(ebm->clk),
				     "Failed to get 'eb' clock\n");

	ebm->mbox_mmio = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ebm->mbox_mmio))
		return dev_err_probe(ebm->dev, PTR_ERR(ebm->mbox_mmio),
				     "Couldn't map mailbox data registers\n");

	ebm->mbox_ctl = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(ebm->mbox_ctl))
		return dev_err_probe(ebm->dev, PTR_ERR(ebm->mbox_ctl),
				     "Couldn't map mailbox control registers\n");

	ebm->ch = devm_kmalloc_array(ebm->dev, ebm->v->num_channels,
				     sizeof(*ebm->ch), GFP_KERNEL);
	if (!ebm->ch)
		return -ENOMEM;

	ebm->mbox.chans = devm_kcalloc(ebm->dev, ebm->v->num_channels,
				       sizeof(struct mbox_chan), GFP_KERNEL);
	if (!ebm->mbox.chans)
		return -ENOMEM;

	for (i = 0; i < ebm->v->num_channels; i++) {
		ch = &ebm->ch[i];
		ch->c = &ebm->v->channels[i];
		if (ch->c->rx_len > GPUEB_MBOX_MAX_RX_SIZE) {
			dev_err(ebm->dev, "Channel %s RX size (%d) too large\n",
				ch->c->name, ch->c->rx_len);
			return -EINVAL;
		}
		ch->full_name = devm_kasprintf(ebm->dev, GFP_KERNEL, "%s:%s",
					       dev_name(ebm->dev), ch->c->name);
		if (!ch->full_name)
			return -ENOMEM;

		ch->ebm = ebm;
		ch->num = i;
		spin_lock_init(&ebm->mbox.chans[i].lock);
		ebm->mbox.chans[i].con_priv = ch;
		atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);
	}

	ebm->mbox.dev = ebm->dev;
	ebm->mbox.num_chans = ebm->v->num_channels;
	ebm->mbox.txdone_poll = true;
	ebm->mbox.txpoll_period = 0; /* minimum hrtimer interval */
	ebm->mbox.ops = &mtk_gpueb_mbox_ops;

	dev_set_drvdata(ebm->dev, ebm);

	return devm_mbox_controller_register(ebm->dev, &ebm->mbox);
}

static const struct mtk_gpueb_mbox_variant mtk_gpueb_mbox_mt8196 = {
	.num_channels = 12,
	.channels = {
		{ "fast-dvfs-event", 0, 0x0000, 16, 0x00e0, 16 },
		{ "gpufreq", 1, 0x0010, 32, 0x00f0, 32 },
		{ "sleep", 2, 0x0030, 12, 0x0110, 4 },
		{ "timer", 3, 0x003c, 24, 0x0114, 4 },
		{ "fhctl", 4, 0x0054, 36, 0x0118, 4 },
		{ "ccf", 5, 0x0078, 16, 0x011c, 16 },
		{ "gpumpu", 6, 0x0088, 24, 0x012c, 4 },
		{ "fast-dvfs", 7, 0x00a0, 24, 0x0130, 24 },
		{ "ipir-c-met", 8, 0x00b8, 4, 0x0148, 16 },
		{ "ipis-c-met", 9, 0x00bc, 16, 0x0158, 4 },
		{ "brisket", 10, 0x00cc, 16, 0x015c, 16 },
		{ "ppb", 11, 0x00dc, 4, 0x016c, 4 },
	},
};

static const struct of_device_id mtk_gpueb_mbox_of_ids[] = {
	{ .compatible = "mediatek,mt8196-gpueb-mbox", .data = &mtk_gpueb_mbox_mt8196 },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_gpueb_mbox_of_ids);

static struct platform_driver mtk_gpueb_mbox_drv = {
	.probe = mtk_gpueb_mbox_probe,
	.driver = {
		.name = "mtk-gpueb-mbox",
		.of_match_table = mtk_gpueb_mbox_of_ids,
	}
};
module_platform_driver(mtk_gpueb_mbox_drv);

MODULE_AUTHOR("Nicolas Frattaroli <nicolas.frattaroli@collabora.com>");
MODULE_DESCRIPTION("MediaTek GPUEB mailbox driver");
MODULE_LICENSE("GPL");
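
/*
 * Usage sketch (illustrative only, not part of this driver): a consumer talks
 * to one of these channels through the generic mailbox client API. The
 * callback, channel index and message contents below are made-up placeholders,
 * and assume the consumer's device tree node references this controller in
 * its "mboxes" property.
 *
 *	#include <linux/mailbox_client.h>
 *
 *	static void example_rx(struct mbox_client *cl, void *msg)
 *	{
 *		// msg points at a temporary copy of up to
 *		// GPUEB_MBOX_MAX_RX_SIZE bytes of RX data, only valid
 *		// for the duration of this callback
 *	}
 *
 *	struct mbox_client cl = {
 *		.dev = dev,
 *		.rx_callback = example_rx,
 *	};
 *	struct mbox_chan *chan = mbox_request_channel(&cl, 1); // e.g. "gpufreq"
 *	u32 msg[8] = {};	// the gpufreq channel carries 32 bytes of TX data
 *
 *	if (!IS_ERR(chan)) {
 *		mbox_send_message(chan, msg);
 *		mbox_free_channel(chan);
 *	}
 */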