// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V SBI Message Proxy (MPXY) mailbox controller driver
 *
 * Copyright (C) 2025 Ventana Micro Systems Inc.
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/irqchip/riscv-imsic.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/riscv-rpmi-message.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/sbi.h>

/* ====== SBI MPXY extension data structures ====== */

/* SBI MPXY MSI related channel attributes */
struct sbi_mpxy_msi_info {
	/* Lower 32-bits of the MSI target address */
	u32 msi_addr_lo;
	/* Upper 32-bits of the MSI target address */
	u32 msi_addr_hi;
	/* MSI data value */
	u32 msi_data;
};

/*
 * SBI MPXY standard channel attributes.
 *
 * NOTE: The sequence of attribute fields is as per the
 * sequence defined in the attribute table of the spec (or
 * as per the enum sbi_mpxy_attribute_id).
 */
struct sbi_mpxy_channel_attrs {
	/* Message protocol ID */
	u32 msg_proto_id;
	/* Message protocol version */
	u32 msg_proto_version;
	/* Message protocol maximum message length */
	u32 msg_max_len;
	/* Message protocol message send timeout in microseconds */
	u32 msg_send_timeout;
	/* Message protocol message completion timeout in microseconds */
	u32 msg_completion_timeout;
	/* Bit array for channel capabilities */
	u32 capability;
	/* SSE event ID */
	u32 sse_event_id;
	/* MSI enable/disable control knob */
	u32 msi_control;
	/* Channel MSI info */
	struct sbi_mpxy_msi_info msi_info;
	/* Events state control */
	u32 events_state_ctrl;
};

/*
 * RPMI specific SBI MPXY channel attributes.
 *
 * NOTE: The sequence of attribute fields is as per the
 * sequence defined in the attribute table of the spec (or
 * as per the enum sbi_mpxy_rpmi_attribute_id).
 */
struct sbi_mpxy_rpmi_channel_attrs {
	/* RPMI service group ID */
	u32 servicegroup_id;
	/* RPMI service group version */
	u32 servicegroup_version;
	/* RPMI implementation ID */
	u32 impl_id;
	/* RPMI implementation version */
	u32 impl_version;
};

/* SBI MPXY channel IDs data in shared memory */
struct sbi_mpxy_channel_ids_data {
	/* Remaining number of channel ids */
	__le32 remaining;
	/* Returned channel ids in current function call */
	__le32 returned;
	/* Returned channel id array */
	__le32 channel_array[];
};

/* SBI MPXY notification data in shared memory */
struct sbi_mpxy_notification_data {
	/* Remaining number of notification events */
	__le32 remaining;
	/* Number of notification events returned */
	__le32 returned;
	/* Number of notification events lost */
	__le32 lost;
	/* Reserved for future use */
	__le32 reserved;
	/* Returned notification events data */
	u8 events_data[];
};

/* ====== MPXY data structures & helper routines ====== */

/* MPXY Per-CPU or local context */
struct mpxy_local {
	/* Shared memory base address */
	void *shmem;
	/* Shared memory physical address */
	phys_addr_t shmem_phys_addr;
	/* Flag representing whether shared memory is active or not */
	bool shmem_active;
};

static DEFINE_PER_CPU(struct mpxy_local, mpxy_local);
static unsigned long mpxy_shmem_size;
static bool mpxy_shmem_init_done;

static int mpxy_get_channel_count(u32 *channel_count)
{
	struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
	struct sbi_mpxy_channel_ids_data *sdata = mpxy->shmem;
	u32 remaining, returned;
	struct sbiret sret;

	if (!mpxy->shmem_active)
		return -ENODEV;
	if (!channel_count)
		return -EINVAL;

	get_cpu();

	/* Get the remaining and returned fields to calculate total */
	sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_CHANNEL_IDS,
			 0, 0, 0, 0, 0, 0);
	if (sret.error)
		goto err_put_cpu;

	remaining = le32_to_cpu(sdata->remaining);
	returned = le32_to_cpu(sdata->returned);
	*channel_count = remaining + returned;

err_put_cpu:
	put_cpu();
	return sbi_err_map_linux_errno(sret.error);
}

static int mpxy_get_channel_ids(u32 channel_count, u32 *channel_ids)
{
	struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
	struct sbi_mpxy_channel_ids_data *sdata = mpxy->shmem;
	u32 remaining, returned, count, start_index = 0;
	struct sbiret sret;

	if (!mpxy->shmem_active)
		return -ENODEV;
	if (!channel_count || !channel_ids)
		return -EINVAL;

	get_cpu();

	do {
		sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_CHANNEL_IDS,
				 start_index, 0, 0, 0, 0, 0);
		if (sret.error)
			goto err_put_cpu;

		remaining = le32_to_cpu(sdata->remaining);
		returned = le32_to_cpu(sdata->returned);

		count = returned < (channel_count - start_index) ?
			returned : (channel_count - start_index);
		memcpy_from_le32(&channel_ids[start_index], sdata->channel_array, count);
		start_index += count;
	} while (remaining && start_index < channel_count);

err_put_cpu:
	put_cpu();
	return sbi_err_map_linux_errno(sret.error);
}

static int mpxy_read_attrs(u32 channel_id, u32 base_attrid, u32 attr_count,
			   u32 *attrs_buf)
{
	struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
	struct sbiret sret;

	if (!mpxy->shmem_active)
		return -ENODEV;
	if (!attr_count || !attrs_buf)
		return -EINVAL;

	get_cpu();

	sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_READ_ATTRS,
			 channel_id, base_attrid, attr_count, 0, 0, 0);
	if (sret.error)
		goto err_put_cpu;

	memcpy_from_le32(attrs_buf, (__le32 *)mpxy->shmem, attr_count);

err_put_cpu:
	put_cpu();
	return sbi_err_map_linux_errno(sret.error);
}

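/* Write a contiguous range of channel attributes from attrs_buf via the per-CPU MPXY shared memory */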
static int mpxy_write_attrs(u32 channel_id, u32 base_attrid, u32 attr_count,
			    u32 *attrs_buf)
{
	struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
	struct sbiret sret;

	if (!mpxy->shmem_active)
		return -ENODEV;
	if (!attr_count || !attrs_buf)
		return -EINVAL;

	get_cpu();

	memcpy_to_le32((__le32 *)mpxy->shmem, attrs_buf, attr_count);
	sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_WRITE_ATTRS,
			 channel_id, base_attrid, attr_count, 0, 0, 0);

	put_cpu();
	return sbi_err_map_linux_errno(sret.error);
}

static int mpxy_send_message_with_resp(u32 channel_id, u32 msg_id,
				       void *tx, unsigned long tx_len,
				       void *rx, unsigned long max_rx_len,
				       unsigned long *rx_len)
{
	struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
	unsigned long rx_bytes;
	struct sbiret sret;

	if (!mpxy->shmem_active)
		return -ENODEV;
	if (!tx && tx_len)
		return -EINVAL;

	get_cpu();

	/* Message protocols are allowed to have no data in messages */
	if (tx_len)
		memcpy(mpxy->shmem, tx, tx_len);

	sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SEND_MSG_WITH_RESP,
			 channel_id, msg_id, tx_len, 0, 0, 0);
	if (rx && !sret.error) {
		rx_bytes = sret.value;
		if (rx_bytes > max_rx_len) {
			put_cpu();
			return -ENOSPC;
		}

		memcpy(rx, mpxy->shmem, rx_bytes);
		if (rx_len)
			*rx_len = rx_bytes;
	}

	put_cpu();
	return sbi_err_map_linux_errno(sret.error);
}

static int mpxy_send_message_without_resp(u32 channel_id, u32 msg_id,
					  void *tx, unsigned long tx_len)
{
	struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
	struct sbiret sret;

	if (!mpxy->shmem_active)
		return -ENODEV;
	if (!tx && tx_len)
		return -EINVAL;

	get_cpu();

	/* Message protocols are allowed to have no data in messages */
	if (tx_len)
		memcpy(mpxy->shmem, tx, tx_len);

	sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SEND_MSG_WITHOUT_RESP,
			 channel_id, msg_id, tx_len, 0, 0, 0);

	put_cpu();
	return sbi_err_map_linux_errno(sret.error);
}

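/*
 * Fetch pending notification events of a channel into notif_data
 * using the per-CPU MPXY shared memory. The length of the returned
 * events data is passed back via events_data_len.
 */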
static int mpxy_get_notifications(u32 channel_id,
				  struct sbi_mpxy_notification_data *notif_data,
				  unsigned long *events_data_len)
{
	struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
	struct sbiret sret;

	if (!mpxy->shmem_active)
		return -ENODEV;
	if (!notif_data || !events_data_len)
		return -EINVAL;

	get_cpu();

	sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_NOTIFICATION_EVENTS,
			 channel_id, 0, 0, 0, 0, 0);
	if (sret.error)
		goto err_put_cpu;

	/* Copy the notification header (4 x __le32 = 16 bytes) along with the events data */
	memcpy(notif_data, mpxy->shmem, sret.value + 16);
	*events_data_len = sret.value;

err_put_cpu:
	put_cpu();
	return sbi_err_map_linux_errno(sret.error);
}

static int mpxy_get_shmem_size(unsigned long *shmem_size)
{
	struct sbiret sret;

	sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_SHMEM_SIZE,
			 0, 0, 0, 0, 0, 0);
	if (sret.error)
		return sbi_err_map_linux_errno(sret.error);
	if (shmem_size)
		*shmem_size = sret.value;
	return 0;
}

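/*
 * Set up the per-CPU MPXY shared memory: allocate pages and register
 * them with the SBI implementation. Used as a CPU hotplug online callback.
 */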
static int mpxy_setup_shmem(unsigned int cpu)
{
	struct page *shmem_page;
	struct mpxy_local *mpxy;
	struct sbiret sret;

	mpxy = per_cpu_ptr(&mpxy_local, cpu);
	if (mpxy->shmem_active)
		return 0;

	shmem_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(mpxy_shmem_size));
	if (!shmem_page)
		return -ENOMEM;

	/*
	 * Linux setup of shmem is done in mpxy OVERWRITE mode.
	 * flags[1:0] = 00b
	 */
	sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SET_SHMEM,
			 page_to_phys(shmem_page), 0, 0, 0, 0, 0);
	if (sret.error) {
		free_pages((unsigned long)page_to_virt(shmem_page),
			   get_order(mpxy_shmem_size));
		return sbi_err_map_linux_errno(sret.error);
	}

	mpxy->shmem = page_to_virt(shmem_page);
	mpxy->shmem_phys_addr = page_to_phys(shmem_page);
	mpxy->shmem_active = true;

	return 0;
}

/* ====== MPXY mailbox data structures ====== */

/* MPXY mailbox channel */
struct mpxy_mbox_channel {
	struct mpxy_mbox *mbox;
	u32 channel_id;
	struct sbi_mpxy_channel_attrs attrs;
	struct sbi_mpxy_rpmi_channel_attrs rpmi_attrs;
	struct sbi_mpxy_notification_data *notif;
	u32 max_xfer_len;
	bool have_events_state;
	u32 msi_index;
	u32 msi_irq;
	bool started;
};

/* MPXY mailbox */
struct mpxy_mbox {
	struct device *dev;
	u32 channel_count;
	struct mpxy_mbox_channel *channels;
	u32 msi_count;
	struct mpxy_mbox_channel **msi_index_to_channel;
	struct mbox_controller controller;
};

/* ====== MPXY RPMI processing ====== */

static void mpxy_mbox_send_rpmi_data(struct mpxy_mbox_channel *mchan,
				     struct rpmi_mbox_message *msg)
{
	msg->error = 0;
	switch (msg->type) {
	case RPMI_MBOX_MSG_TYPE_GET_ATTRIBUTE:
		switch (msg->attr.id) {
		case RPMI_MBOX_ATTR_SPEC_VERSION:
			msg->attr.value = mchan->attrs.msg_proto_version;
			break;
		case RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE:
			msg->attr.value = mchan->max_xfer_len;
			break;
		case RPMI_MBOX_ATTR_SERVICEGROUP_ID:
			msg->attr.value = mchan->rpmi_attrs.servicegroup_id;
			break;
		case RPMI_MBOX_ATTR_SERVICEGROUP_VERSION:
			msg->attr.value = mchan->rpmi_attrs.servicegroup_version;
			break;
		case RPMI_MBOX_ATTR_IMPL_ID:
			msg->attr.value = mchan->rpmi_attrs.impl_id;
			break;
		case RPMI_MBOX_ATTR_IMPL_VERSION:
			msg->attr.value = mchan->rpmi_attrs.impl_version;
			break;
		default:
			msg->error = -EOPNOTSUPP;
			break;
		}
		break;
	case RPMI_MBOX_MSG_TYPE_SET_ATTRIBUTE:
		/* None of the RPMI Linux mailbox attributes are writeable */
		msg->error = -EOPNOTSUPP;
		break;
	case RPMI_MBOX_MSG_TYPE_SEND_WITH_RESPONSE:
		if ((!msg->data.request && msg->data.request_len) ||
		    (msg->data.request && msg->data.request_len > mchan->max_xfer_len) ||
		    (!msg->data.response && msg->data.max_response_len)) {
			msg->error = -EINVAL;
			break;
		}
		if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_SEND_WITH_RESP)) {
			msg->error = -EIO;
			break;
		}
		msg->error = mpxy_send_message_with_resp(mchan->channel_id,
							 msg->data.service_id,
							 msg->data.request,
							 msg->data.request_len,
							 msg->data.response,
							 msg->data.max_response_len,
							 &msg->data.out_response_len);
		break;
	case RPMI_MBOX_MSG_TYPE_SEND_WITHOUT_RESPONSE:
		if ((!msg->data.request && msg->data.request_len) ||
		    (msg->data.request && msg->data.request_len > mchan->max_xfer_len)) {
			msg->error = -EINVAL;
			break;
		}
		if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_SEND_WITHOUT_RESP)) {
			msg->error = -EIO;
			break;
		}
		msg->error = mpxy_send_message_without_resp(mchan->channel_id,
							    msg->data.service_id,
							    msg->data.request,
							    msg->data.request_len);
		break;
	default:
		msg->error = -EOPNOTSUPP;
		break;
	}
}

static void mpxy_mbox_peek_rpmi_data(struct mbox_chan *chan,
				     struct mpxy_mbox_channel *mchan,
				     struct sbi_mpxy_notification_data *notif,
				     unsigned long events_data_len)
{
	struct rpmi_notification_event *event;
	struct rpmi_mbox_message msg;
	unsigned long pos = 0;

	while (pos < events_data_len && (events_data_len - pos) >= sizeof(*event)) {
		event = (struct rpmi_notification_event *)(notif->events_data + pos);

		msg.type = RPMI_MBOX_MSG_TYPE_NOTIFICATION_EVENT;
		msg.notif.event_datalen = le16_to_cpu(event->event_datalen);
		msg.notif.event_id = event->event_id;
		msg.notif.event_data = event->event_data;
		msg.error = 0;

		mbox_chan_received_data(chan, &msg);
		pos += sizeof(*event) + msg.notif.event_datalen;
	}
}

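/*
 * RPMI specific channel attributes follow the standard attributes,
 * starting at SBI_MPXY_ATTR_MSGPROTO_ATTR_START.
 */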
static int mpxy_mbox_read_rpmi_attrs(struct mpxy_mbox_channel *mchan)
{
	return mpxy_read_attrs(mchan->channel_id,
			       SBI_MPXY_ATTR_MSGPROTO_ATTR_START,
			       sizeof(mchan->rpmi_attrs) / sizeof(u32),
			       (u32 *)&mchan->rpmi_attrs);
}

/* ====== MPXY mailbox callbacks ====== */

static int mpxy_mbox_send_data(struct mbox_chan *chan, void *data)
{
	struct mpxy_mbox_channel *mchan = chan->con_priv;

	if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID) {
		mpxy_mbox_send_rpmi_data(mchan, data);
		return 0;
	}

	return -EOPNOTSUPP;
}

static bool mpxy_mbox_peek_data(struct mbox_chan *chan)
{
	struct mpxy_mbox_channel *mchan = chan->con_priv;
	struct sbi_mpxy_notification_data *notif = mchan->notif;
	bool have_notifications = false;
	unsigned long data_len;
	int rc;

	if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS))
		return false;

	do {
		rc = mpxy_get_notifications(mchan->channel_id, notif, &data_len);
		if (rc || !data_len)
			break;

		if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID)
			mpxy_mbox_peek_rpmi_data(chan, mchan, notif, data_len);

		have_notifications = true;
	} while (1);

	return have_notifications;
}

static irqreturn_t mpxy_mbox_irq_thread(int irq, void *dev_id)
{
	mpxy_mbox_peek_data(dev_id);
	return IRQ_HANDLED;
}

static int mpxy_mbox_setup_msi(struct mbox_chan *chan,
			       struct mpxy_mbox_channel *mchan)
{
	struct device *dev = mchan->mbox->dev;
	int rc;

	/* Do nothing if MSI not supported */
	if (mchan->msi_irq == U32_MAX)
		return 0;

	/* Fail if MSI already enabled */
	if (mchan->attrs.msi_control)
		return -EALREADY;

	/* Request channel MSI handler */
	rc = request_threaded_irq(mchan->msi_irq, NULL, mpxy_mbox_irq_thread,
				  0, dev_name(dev), chan);
	if (rc) {
		dev_err(dev, "failed to request MPXY channel 0x%x IRQ\n",
			mchan->channel_id);
		return rc;
	}

	/* Enable channel MSI control */
	mchan->attrs.msi_control = 1;
	rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_CONTROL,
			      1, &mchan->attrs.msi_control);
	if (rc) {
		dev_err(dev, "enable MSI control failed for MPXY channel 0x%x\n",
			mchan->channel_id);
		mchan->attrs.msi_control = 0;
		free_irq(mchan->msi_irq, chan);
		return rc;
	}

	return 0;
}

static void mpxy_mbox_cleanup_msi(struct mbox_chan *chan,
				  struct mpxy_mbox_channel *mchan)
{
	struct device *dev = mchan->mbox->dev;
	int rc;

	/* Do nothing if MSI not supported */
	if (mchan->msi_irq == U32_MAX)
		return;

	/* Do nothing if MSI already disabled */
	if (!mchan->attrs.msi_control)
		return;

	/* Disable channel MSI control */
	mchan->attrs.msi_control = 0;
	rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_CONTROL,
			      1, &mchan->attrs.msi_control);
	if (rc) {
		dev_err(dev, "disable MSI control failed for MPXY channel 0x%x\n",
			mchan->channel_id);
	}

	/* Free channel MSI handler */
	free_irq(mchan->msi_irq, chan);
}

static int mpxy_mbox_setup_events(struct mpxy_mbox_channel *mchan)
{
	struct device *dev = mchan->mbox->dev;
	int rc;

	/* Do nothing if events state not supported */
	if (!mchan->have_events_state)
		return 0;

	/* Fail if events state already enabled */
	if (mchan->attrs.events_state_ctrl)
		return -EALREADY;

	/* Enable channel events state */
	mchan->attrs.events_state_ctrl = 1;
	rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_EVENTS_STATE_CONTROL,
			      1, &mchan->attrs.events_state_ctrl);
	if (rc) {
		dev_err(dev, "enable events state failed for MPXY channel 0x%x\n",
			mchan->channel_id);
		mchan->attrs.events_state_ctrl = 0;
		return rc;
	}

	return 0;
}

static void mpxy_mbox_cleanup_events(struct mpxy_mbox_channel *mchan)
{
	struct device *dev = mchan->mbox->dev;
	int rc;

	/* Do nothing if events state not supported */
	if (!mchan->have_events_state)
		return;

	/* Do nothing if events state already disabled */
	if (!mchan->attrs.events_state_ctrl)
		return;

	/* Disable channel events state */
	mchan->attrs.events_state_ctrl = 0;
	rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_EVENTS_STATE_CONTROL,
			      1, &mchan->attrs.events_state_ctrl);
	if (rc)
		dev_err(dev, "disable events state failed for MPXY channel 0x%x\n",
			mchan->channel_id);
}

static int mpxy_mbox_startup(struct mbox_chan *chan)
{
	struct mpxy_mbox_channel *mchan = chan->con_priv;
	int rc;

	if (mchan->started)
		return -EALREADY;

	/* Setup channel MSI */
	rc = mpxy_mbox_setup_msi(chan, mchan);
	if (rc)
		return rc;

	/* Setup channel notification events */
	rc = mpxy_mbox_setup_events(mchan);
	if (rc) {
		mpxy_mbox_cleanup_msi(chan, mchan);
		return rc;
	}

	/* Mark the channel as started */
	mchan->started = true;

	return 0;
}

static void mpxy_mbox_shutdown(struct mbox_chan *chan)
{
	struct mpxy_mbox_channel *mchan = chan->con_priv;

	if (!mchan->started)
		return;

	/* Mark the channel as stopped */
	mchan->started = false;

	/* Cleanup channel notification events */
	mpxy_mbox_cleanup_events(mchan);

	/* Cleanup channel MSI */
	mpxy_mbox_cleanup_msi(chan, mchan);
}

static const struct mbox_chan_ops mpxy_mbox_ops = {
	.send_data = mpxy_mbox_send_data,
	.peek_data = mpxy_mbox_peek_data,
	.startup = mpxy_mbox_startup,
	.shutdown = mpxy_mbox_shutdown,
};

/* ====== MPXY platform driver ===== */

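/*
 * Program the MSI target address and data of a channel into its MPXY
 * channel attributes; used as the MSI write callback for the platform
 * MSIs allocated in probe.
 */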
static void mpxy_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct mpxy_mbox *mbox = dev_get_drvdata(dev);
	struct mpxy_mbox_channel *mchan;
	struct sbi_mpxy_msi_info *minfo;
	int rc;

	mchan = mbox->msi_index_to_channel[desc->msi_index];
	if (!mchan) {
		dev_warn(dev, "MPXY channel not available for MSI index %d\n",
			 desc->msi_index);
		return;
	}

	minfo = &mchan->attrs.msi_info;
	minfo->msi_addr_lo = msg->address_lo;
	minfo->msi_addr_hi = msg->address_hi;
	minfo->msi_data = msg->data;

	rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_ADDR_LO,
			      sizeof(*minfo) / sizeof(u32), (u32 *)minfo);
	if (rc) {
		dev_warn(dev, "failed to write MSI info for MPXY channel 0x%x\n",
			 mchan->channel_id);
	}
}

static struct mbox_chan *mpxy_mbox_fw_xlate(struct mbox_controller *ctlr,
					    const struct fwnode_reference_args *pa)
{
	struct mpxy_mbox *mbox = container_of(ctlr, struct mpxy_mbox, controller);
	struct mpxy_mbox_channel *mchan;
	u32 i;

	if (pa->nargs != 2)
		return ERR_PTR(-EINVAL);

	for (i = 0; i < mbox->channel_count; i++) {
		mchan = &mbox->channels[i];
		if (mchan->channel_id == pa->args[0] &&
		    mchan->attrs.msg_proto_id == pa->args[1])
			return &mbox->controller.chans[i];
	}

	return ERR_PTR(-ENOENT);
}

static int mpxy_mbox_populate_channels(struct mpxy_mbox *mbox)
{
	u32 i, *channel_ids __free(kfree) = NULL;
	struct mpxy_mbox_channel *mchan;
	int rc;

	/* Find out the number of channels */
	rc = mpxy_get_channel_count(&mbox->channel_count);
	if (rc)
		return dev_err_probe(mbox->dev, rc, "failed to get number of MPXY channels\n");
	if (!mbox->channel_count)
		return dev_err_probe(mbox->dev, -ENODEV, "no MPXY channels available\n");

	/* Allocate and fetch all channel IDs */
	channel_ids = kcalloc(mbox->channel_count, sizeof(*channel_ids), GFP_KERNEL);
	if (!channel_ids)
		return -ENOMEM;
	rc = mpxy_get_channel_ids(mbox->channel_count, channel_ids);
	if (rc)
		return dev_err_probe(mbox->dev, rc, "failed to get MPXY channel IDs\n");

	/* Populate all channels */
	mbox->channels = devm_kcalloc(mbox->dev, mbox->channel_count,
				      sizeof(*mbox->channels), GFP_KERNEL);
	if (!mbox->channels)
		return -ENOMEM;
	for (i = 0; i < mbox->channel_count; i++) {
		mchan = &mbox->channels[i];
		mchan->mbox = mbox;
		mchan->channel_id = channel_ids[i];

		rc = mpxy_read_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSG_PROT_ID,
				     sizeof(mchan->attrs) / sizeof(u32),
				     (u32 *)&mchan->attrs);
		if (rc) {
			return dev_err_probe(mbox->dev, rc,
					     "MPXY channel 0x%x read attrs failed\n",
					     mchan->channel_id);
		}

		if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID) {
			rc = mpxy_mbox_read_rpmi_attrs(mchan);
			if (rc) {
				return dev_err_probe(mbox->dev, rc,
						     "MPXY channel 0x%x read RPMI attrs failed\n",
						     mchan->channel_id);
			}
		}

		mchan->notif = devm_kzalloc(mbox->dev, mpxy_shmem_size, GFP_KERNEL);
		if (!mchan->notif)
			return -ENOMEM;

		mchan->max_xfer_len = min(mpxy_shmem_size, mchan->attrs.msg_max_len);

		if ((mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS) &&
		    (mchan->attrs.capability & SBI_MPXY_CHAN_CAP_EVENTS_STATE))
			mchan->have_events_state = true;

		if ((mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS) &&
		    (mchan->attrs.capability & SBI_MPXY_CHAN_CAP_MSI))
			mchan->msi_index = mbox->msi_count++;
		else
			mchan->msi_index = U32_MAX;
		mchan->msi_irq = U32_MAX;
	}

	return 0;
}

static int mpxy_mbox_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mpxy_mbox_channel *mchan;
	struct mpxy_mbox *mbox;
	int msi_idx, rc;
	u32 i;

	/*
	 * Initialize MPXY shared memory only once. This also ensures
	 * that the SBI MPXY mailbox is probed only once.
	 */
	if (mpxy_shmem_init_done) {
		dev_err(dev, "SBI MPXY mailbox already initialized\n");
		return -EALREADY;
	}

	/* Probe for SBI MPXY extension */
	if (sbi_spec_version < sbi_mk_version(1, 0) ||
	    sbi_probe_extension(SBI_EXT_MPXY) <= 0) {
		dev_info(dev, "SBI MPXY extension not available\n");
		return -ENODEV;
	}

	/* Find out the shared memory size */
	rc = mpxy_get_shmem_size(&mpxy_shmem_size);
	if (rc)
		return dev_err_probe(dev, rc, "failed to get MPXY shared memory size\n");

	/*
	 * Setup MPXY shared memory on each CPU
	 *
	 * Note: Don't cleanup MPXY shared memory upon CPU power-down
	 * because the RPMI System MSI irqchip driver needs it to be
	 * available when migrating IRQs in the CPU power-down path.
	 */
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "riscv/sbi-mpxy-shmem",
			  mpxy_setup_shmem, NULL);

	/* Mark MPXY shared memory initialization as done */
	mpxy_shmem_init_done = true;

	/* Allocate mailbox instance */
	mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	mbox->dev = dev;
	platform_set_drvdata(pdev, mbox);

	/* Populate mailbox channels */
	rc = mpxy_mbox_populate_channels(mbox);
	if (rc)
		return rc;

	/* Initialize mailbox controller */
	mbox->controller.txdone_irq = false;
	mbox->controller.txdone_poll = false;
	mbox->controller.ops = &mpxy_mbox_ops;
	mbox->controller.dev = dev;
	mbox->controller.num_chans = mbox->channel_count;
	mbox->controller.fw_xlate = mpxy_mbox_fw_xlate;
	mbox->controller.chans = devm_kcalloc(dev, mbox->channel_count,
					      sizeof(*mbox->controller.chans),
					      GFP_KERNEL);
	if (!mbox->controller.chans)
		return -ENOMEM;
	for (i = 0; i < mbox->channel_count; i++)
		mbox->controller.chans[i].con_priv = &mbox->channels[i];

	/* Setup MSIs for mailbox (if required) */
	if (mbox->msi_count) {
		/*
		 * The device MSI domain for platform devices on the RISC-V
		 * architecture is only available after the MSI controller
		 * driver is probed, so configure it explicitly here.
		 */
		if (!dev_get_msi_domain(dev)) {
			struct fwnode_handle *fwnode = dev_fwnode(dev);

			/*
			 * The device MSI domain for OF devices is only set at
			 * the time of populating/creating the OF device. If the
			 * device MSI domain is discovered later, after the OF
			 * device has been created, then it must be set explicitly
			 * before using any platform MSI functions.
			 */
			if (is_of_node(fwnode)) {
				of_msi_configure(dev, dev_of_node(dev));
			} else if (is_acpi_device_node(fwnode)) {
				struct irq_domain *msi_domain;

				msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev),
								      DOMAIN_BUS_PLATFORM_MSI);
				dev_set_msi_domain(dev, msi_domain);
			}

			if (!dev_get_msi_domain(dev))
				return -EPROBE_DEFER;
		}

		mbox->msi_index_to_channel = devm_kcalloc(dev, mbox->msi_count,
							  sizeof(*mbox->msi_index_to_channel),
							  GFP_KERNEL);
		if (!mbox->msi_index_to_channel)
			return -ENOMEM;

		for (msi_idx = 0; msi_idx < mbox->msi_count; msi_idx++) {
			for (i = 0; i < mbox->channel_count; i++) {
				mchan = &mbox->channels[i];
				if (mchan->msi_index == msi_idx) {
					mbox->msi_index_to_channel[msi_idx] = mchan;
					break;
				}
			}
		}

		rc = platform_device_msi_init_and_alloc_irqs(dev, mbox->msi_count,
							     mpxy_mbox_msi_write);
		if (rc) {
			return dev_err_probe(dev, rc, "Failed to allocate %d MSIs\n",
					     mbox->msi_count);
		}

		for (i = 0; i < mbox->channel_count; i++) {
			mchan = &mbox->channels[i];
			if (mchan->msi_index == U32_MAX)
				continue;
			mchan->msi_irq = msi_get_virq(dev, mchan->msi_index);
		}
	}

	/* Register mailbox controller */
	rc = devm_mbox_controller_register(dev, &mbox->controller);
	if (rc) {
		dev_err_probe(dev, rc, "Registering SBI MPXY mailbox failed\n");
		if (mbox->msi_count)
			platform_device_msi_free_irqs_all(dev);
		return rc;
	}

#ifdef CONFIG_ACPI
	struct acpi_device *adev = ACPI_COMPANION(dev);

	if (adev)
		acpi_dev_clear_dependencies(adev);
#endif

	dev_info(dev, "mailbox registered with %d channels\n",
		 mbox->channel_count);
	return 0;
}

static void mpxy_mbox_remove(struct platform_device *pdev)
{
	struct mpxy_mbox *mbox = platform_get_drvdata(pdev);

	if (mbox->msi_count)
		platform_device_msi_free_irqs_all(mbox->dev);
}

static const struct of_device_id mpxy_mbox_of_match[] = {
	{ .compatible = "riscv,sbi-mpxy-mbox" },
	{}
};
MODULE_DEVICE_TABLE(of, mpxy_mbox_of_match);

static const struct acpi_device_id mpxy_mbox_acpi_match[] = {
	{ "RSCV0005" },
	{}
};
MODULE_DEVICE_TABLE(acpi, mpxy_mbox_acpi_match);

static struct platform_driver mpxy_mbox_driver = {
	.driver = {
		.name = "riscv-sbi-mpxy-mbox",
		.of_match_table = mpxy_mbox_of_match,
		.acpi_match_table = mpxy_mbox_acpi_match,
	},
	.probe = mpxy_mbox_probe,
	.remove = mpxy_mbox_remove,
};
module_platform_driver(mpxy_mbox_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anup Patel <apatel@ventanamicro.com>");
MODULE_DESCRIPTION("RISC-V SBI MPXY mailbox controller driver");