// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018, NVIDIA CORPORATION.
 */

#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>

#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include <soc/tegra/ivc.h>

#include "bpmp-private.h"

struct tegra186_bpmp {
	struct tegra_bpmp *parent;

	struct {
		struct gen_pool *pool;
		union {
			void __iomem *sram;
			void *dram;
		};
		dma_addr_t phys;
	} tx, rx;

	struct {
		struct mbox_client client;
		struct mbox_chan *channel;
	} mbox;
};

static inline struct tegra_bpmp *
mbox_client_to_bpmp(struct mbox_client *client)
{
	struct tegra186_bpmp *priv;

	priv = container_of(client, struct tegra186_bpmp, mbox.client);

	return priv->parent;
}

static bool tegra186_bpmp_is_message_ready(struct tegra_bpmp_channel *channel)
{
	int err;

	err = tegra_ivc_read_get_next_frame(channel->ivc, &channel->ib);
	if (err) {
		iosys_map_clear(&channel->ib);
		return false;
	}

	return true;
}

static bool tegra186_bpmp_is_channel_free(struct tegra_bpmp_channel *channel)
{
	int err;

	err = tegra_ivc_write_get_next_frame(channel->ivc, &channel->ob);
	if (err) {
		iosys_map_clear(&channel->ob);
		return false;
	}

	return true;
}

static int tegra186_bpmp_ack_message(struct tegra_bpmp_channel *channel)
{
	return tegra_ivc_read_advance(channel->ivc);
}

static int tegra186_bpmp_post_message(struct tegra_bpmp_channel *channel)
{
	return tegra_ivc_write_advance(channel->ivc);
}

static int tegra186_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
{
	struct tegra186_bpmp *priv = bpmp->priv;
	int err;

	err = mbox_send_message(priv->mbox.channel, NULL);
	if (err < 0)
		return err;

	mbox_client_txdone(priv->mbox.channel, 0);

	return 0;
}

static void tegra186_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
{
	struct tegra_bpmp *bpmp = data;
	struct tegra186_bpmp *priv = bpmp->priv;

	if (WARN_ON(priv->mbox.channel == NULL))
		return;

	tegra186_bpmp_ring_doorbell(bpmp);
}

static int tegra186_bpmp_channel_init(struct tegra_bpmp_channel *channel,
				      struct tegra_bpmp *bpmp,
				      unsigned int index)
{
	struct tegra186_bpmp *priv = bpmp->priv;
	size_t message_size, queue_size;
	struct iosys_map rx, tx;
	unsigned int offset;
	int err;

	channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
				    GFP_KERNEL);
	if (!channel->ivc)
		return -ENOMEM;

	message_size = tegra_ivc_align(MSG_MIN_SZ);
	queue_size = tegra_ivc_total_queue_size(message_size);
	offset = queue_size * index;

	if (priv->rx.pool) {
		iosys_map_set_vaddr_iomem(&rx, priv->rx.sram + offset);
		iosys_map_set_vaddr_iomem(&tx, priv->tx.sram + offset);
	} else {
		iosys_map_set_vaddr(&rx, priv->rx.dram + offset);
		iosys_map_set_vaddr(&tx, priv->tx.dram + offset);
	}

	err = tegra_ivc_init(channel->ivc, NULL, &rx, priv->rx.phys + offset, &tx,
			     priv->tx.phys + offset, 1, message_size, tegra186_bpmp_ivc_notify,
			     bpmp);
	if (err < 0) {
		dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
			index, err);
		return err;
	}

	init_completion(&channel->completion);
	channel->bpmp = bpmp;

	return 0;
}

static void tegra186_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
{
	/* reset the channel state */
	tegra_ivc_reset(channel->ivc);

	/* sync the channel state with BPMP */
	while (tegra_ivc_notified(channel->ivc))
		;
}

static void tegra186_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
{
	tegra_ivc_cleanup(channel->ivc);
}

static void mbox_handle_rx(struct mbox_client *client, void *data)
{
	struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);

	tegra_bpmp_handle_rx(bpmp);
}

static void tegra186_bpmp_teardown_channels(struct tegra_bpmp *bpmp)
{
	struct tegra186_bpmp *priv = bpmp->priv;
	unsigned int i;

	for (i = 0; i < bpmp->threaded.count; i++) {
		if (!bpmp->threaded_channels[i].bpmp)
			continue;

		tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
	}

	tegra186_bpmp_channel_cleanup(bpmp->rx_channel);
	tegra186_bpmp_channel_cleanup(bpmp->tx_channel);

	if (priv->tx.pool) {
		gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.sram, 4096);
		gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.sram, 4096);
	}
}

static int tegra186_bpmp_dram_init(struct tegra_bpmp *bpmp)
{
	struct tegra186_bpmp *priv = bpmp->priv;
	struct resource res;
	size_t size;
	int err;

	err = of_reserved_mem_region_to_resource(bpmp->dev->of_node, 0, &res);
	if (err < 0) {
		if (err != -ENODEV)
			dev_warn(bpmp->dev,
				 "failed to parse memory region: %d\n", err);

		return err;
	}

	size = resource_size(&res);

	if (size < SZ_8K) {
		dev_warn(bpmp->dev, "DRAM region must be larger than 8 KiB\n");
		return -EINVAL;
	}

	priv->tx.phys = res.start;
	priv->rx.phys = res.start + SZ_4K;

	priv->tx.dram = devm_memremap(bpmp->dev, priv->tx.phys, size,
				      MEMREMAP_WC);
	if (IS_ERR(priv->tx.dram)) {
		err = PTR_ERR(priv->tx.dram);
		dev_warn(bpmp->dev, "failed to map DRAM region: %d\n", err);
		return err;
	}

	priv->rx.dram = priv->tx.dram + SZ_4K;

	return 0;
}

static int tegra186_bpmp_sram_init(struct tegra_bpmp *bpmp)
{
	struct tegra186_bpmp *priv = bpmp->priv;
	int err;

	priv->tx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 0);
	if (!priv->tx.pool) {
		dev_err(bpmp->dev, "TX shmem pool not found\n");
		return -EPROBE_DEFER;
	}

	priv->tx.sram = (void __iomem *)gen_pool_dma_alloc(priv->tx.pool, 4096,
							   &priv->tx.phys);
	if (!priv->tx.sram) {
		dev_err(bpmp->dev, "failed to allocate from TX pool\n");
		return -ENOMEM;
	}

	priv->rx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 1);
	if (!priv->rx.pool) {
		dev_err(bpmp->dev, "RX shmem pool not found\n");
		err = -EPROBE_DEFER;
		goto free_tx;
	}

	priv->rx.sram = (void __iomem *)gen_pool_dma_alloc(priv->rx.pool, 4096,
							   &priv->rx.phys);
	if (!priv->rx.sram) {
		dev_err(bpmp->dev, "failed to allocate from RX pool\n");
		err = -ENOMEM;
		goto free_tx;
	}

	return 0;

free_tx:
	gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.sram, 4096);

	return err;
}

static int tegra186_bpmp_setup_channels(struct tegra_bpmp *bpmp)
{
	unsigned int i;
	int err;

	err = tegra186_bpmp_dram_init(bpmp);
	if (err == -ENODEV) {
		err = tegra186_bpmp_sram_init(bpmp);
		if (err < 0)
			return err;
	}

	err = tegra186_bpmp_channel_init(bpmp->tx_channel, bpmp,
					 bpmp->soc->channels.cpu_tx.offset);
	if (err < 0)
		return err;

	err = tegra186_bpmp_channel_init(bpmp->rx_channel, bpmp,
					 bpmp->soc->channels.cpu_rx.offset);
	if (err < 0) {
		tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
		return err;
	}

	for (i = 0; i < bpmp->threaded.count; i++) {
		unsigned int index = bpmp->soc->channels.thread.offset + i;

		err = tegra186_bpmp_channel_init(&bpmp->threaded_channels[i],
						 bpmp, index);
		if (err < 0)
			break;
	}

	if (err < 0)
		tegra186_bpmp_teardown_channels(bpmp);

	return err;
}

static void tegra186_bpmp_reset_channels(struct tegra_bpmp *bpmp)
{
	unsigned int i;

	/* reset message channels */
	tegra186_bpmp_channel_reset(bpmp->tx_channel);
	tegra186_bpmp_channel_reset(bpmp->rx_channel);

	for (i = 0; i < bpmp->threaded.count; i++)
		tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]);
}

static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
{
	struct tegra186_bpmp *priv;
	int err;

	priv = devm_kzalloc(bpmp->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->parent = bpmp;
	bpmp->priv = priv;

	err = tegra186_bpmp_setup_channels(bpmp);
	if (err < 0)
		return err;

	/* mbox registration */
	priv->mbox.client.dev = bpmp->dev;
	priv->mbox.client.rx_callback = mbox_handle_rx;
	priv->mbox.client.tx_block = false;
	priv->mbox.client.knows_txdone = false;

	priv->mbox.channel = mbox_request_channel(&priv->mbox.client, 0);
	if (IS_ERR(priv->mbox.channel)) {
		err = PTR_ERR(priv->mbox.channel);
		dev_err(bpmp->dev, "failed to get HSP mailbox: %d\n", err);
		tegra186_bpmp_teardown_channels(bpmp);
		return err;
	}

	tegra186_bpmp_reset_channels(bpmp);

	return 0;
}

static void tegra186_bpmp_deinit(struct tegra_bpmp *bpmp)
{
	struct tegra186_bpmp *priv = bpmp->priv;

	mbox_free_channel(priv->mbox.channel);

	tegra186_bpmp_teardown_channels(bpmp);
}

static int tegra186_bpmp_resume(struct tegra_bpmp *bpmp)
{
	tegra186_bpmp_reset_channels(bpmp);

	return 0;
}

const struct tegra_bpmp_ops tegra186_bpmp_ops = {
	.init = tegra186_bpmp_init,
	.deinit = tegra186_bpmp_deinit,
	.is_response_ready = tegra186_bpmp_is_message_ready,
	.is_request_ready = tegra186_bpmp_is_message_ready,
	.ack_response = tegra186_bpmp_ack_message,
	.ack_request = tegra186_bpmp_ack_message,
	.is_response_channel_free = tegra186_bpmp_is_channel_free,
	.is_request_channel_free = tegra186_bpmp_is_channel_free,
	.post_response = tegra186_bpmp_post_message,
	.post_request = tegra186_bpmp_post_message,
	.ring_doorbell = tegra186_bpmp_ring_doorbell,
	.resume = tegra186_bpmp_resume,
};