// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sbrmi-core.c - file defining SB-RMI protocols for compliant
 * AMD SoC devices.
 *
 * Copyright (C) 2025 Advanced Micro Devices, Inc.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/i2c.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/regmap.h>
#include "rmi-core.h"

/* Mask for Status Register bit[1] */
#define SW_ALERT_MASK	0x2
/* Mask to check H/W Alert status bit */
#define HW_ALERT_MASK	0x80

/* Software Interrupt for triggering */
#define START_CMD	0x80
#define TRIGGER_MAILBOX	0x01

/* Default message lengths as per APML command protocol */
/* CPUID */
#define CPUID_RD_DATA_LEN	0x8
#define CPUID_WR_DATA_LEN	0x8
#define CPUID_RD_REG_LEN	0xa
#define CPUID_WR_REG_LEN	0x9
/* MSR */
#define MSR_RD_REG_LEN		0xa
#define MSR_WR_REG_LEN		0x8
#define MSR_RD_DATA_LEN		0x8
#define MSR_WR_DATA_LEN		0x7

/* CPUID MSR Command Ids */
#define CPUID_MCA_CMD	0x73
#define RD_CPUID_CMD	0x91
#define RD_MCA_CMD	0x86

/* CPUID MCAMSR mask & index */
#define CPUID_MCA_THRD_INDEX	32
#define CPUID_MCA_FUNC_MASK	GENMASK(31, 0)
#define CPUID_EXT_FUNC_INDEX	56

/* Input for bulk write to CPUID protocol */
struct cpu_msr_indata {
	u8 wr_len;	/* const value */
	u8 rd_len;	/* const value */
	u8 proto_cmd;	/* const value */
	u8 thread;	/* thread number */
	union {
		u8 reg_offset[4];	/* input value */
		u32 value;
	} __packed;
	u8 ext;		/* extended function */
};

/* Output for bulk read from CPUID protocol */
struct cpu_msr_outdata {
	u8 num_bytes;	/* number of bytes returned */
	u8 status;	/* protocol status code */
	union {
		u64 value;
		u8 reg_data[8];
	} __packed;
};

static inline void prepare_cpuid_input_message(struct cpu_msr_indata *input,
					       u8 thread_id, u32 func,
					       u8 ext_func)
{
	input->rd_len = CPUID_RD_DATA_LEN;
	input->wr_len = CPUID_WR_DATA_LEN;
	input->proto_cmd = RD_CPUID_CMD;
	input->thread = thread_id << 1;
	input->value = func;
	input->ext = ext_func;
}

static inline void prepare_mca_msr_input_message(struct cpu_msr_indata *input,
						 u8 thread_id, u32 data_in)
{
	input->rd_len = MSR_RD_DATA_LEN;
	input->wr_len = MSR_WR_DATA_LEN;
	input->proto_cmd = RD_MCA_CMD;
	input->thread = thread_id << 1;
	input->value = data_in;
}

static int sbrmi_get_rev(struct sbrmi_data *data)
{
	unsigned int rev;
	u16 offset = SBRMI_REV;
	int ret;

	ret = regmap_read(data->regmap, offset, &rev);
	if (ret < 0)
		return ret;

	data->rev = rev;
	return 0;
}
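/*
 * Layout of the 64-bit cpu_in_out / mcamsr_in_out word used by the CPUID
 * and MCA MSR read protocols below (derived from the mask/index
 * definitions above):
 *
 *   bits [31:0]  - CPUID function or MCA MSR address (CPUID_MCA_FUNC_MASK)
 *   bits [47:32] - target thread number (shifted by CPUID_MCA_THRD_INDEX)
 *   bits [63:56] - CPUID extended function, i.e. the sub-leaf
 *                  (shifted by CPUID_EXT_FUNC_INDEX, CPUID protocol only)
 *
 * For example (hypothetical values), CPUID leaf 0x1, sub-leaf 0 on thread 5
 * would be encoded by the caller as (5ULL << 32) | 0x1.
 */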
/* Read CPUID function protocol */
static int rmi_cpuid_read(struct sbrmi_data *data,
			  struct apml_cpuid_msg *msg)
{
	struct cpu_msr_indata input = {0};
	struct cpu_msr_outdata output = {0};
	int val = 0;
	int ret, hw_status;
	u16 thread;

	mutex_lock(&data->lock);
	/* Cache the rev value to identify if the protocol is supported or not */
	if (!data->rev) {
		ret = sbrmi_get_rev(data);
		if (ret < 0)
			goto exit_unlock;
	}
	/* CPUID protocol is not supported for REV 0x10 */
	if (data->rev == 0x10) {
		ret = -EOPNOTSUPP;
		goto exit_unlock;
	}

	thread = msg->cpu_in_out >> CPUID_MCA_THRD_INDEX;

	/* For thread numbers > 127, the Thread128CS register bit must be set to 1 */
	if (thread > 127) {
		thread -= 128;
		val = 1;
	}
	ret = regmap_write(data->regmap, SBRMI_THREAD128CS, val);
	if (ret < 0)
		goto exit_unlock;

	prepare_cpuid_input_message(&input, thread,
				    msg->cpu_in_out & CPUID_MCA_FUNC_MASK,
				    msg->cpu_in_out >> CPUID_EXT_FUNC_INDEX);

	ret = regmap_bulk_write(data->regmap, CPUID_MCA_CMD,
				&input, CPUID_WR_REG_LEN);
	if (ret < 0)
		goto exit_unlock;

	/*
	 * For RMI Rev 0x20, a new h/w status bit is introduced, which is used
	 * by firmware to indicate completion of commands (0x71, 0x72, 0x73).
	 * Wait for the status bit to be set by the hardware before
	 * reading the data out.
	 */
	ret = regmap_read_poll_timeout(data->regmap, SBRMI_STATUS, hw_status,
				       hw_status & HW_ALERT_MASK, 500, 2000000);
	if (ret)
		goto exit_unlock;

	ret = regmap_bulk_read(data->regmap, CPUID_MCA_CMD,
			       &output, CPUID_RD_REG_LEN);
	if (ret < 0)
		goto exit_unlock;

	ret = regmap_write(data->regmap, SBRMI_STATUS,
			   HW_ALERT_MASK);
	if (ret < 0)
		goto exit_unlock;

	if (output.num_bytes != CPUID_RD_REG_LEN - 1) {
		ret = -EMSGSIZE;
		goto exit_unlock;
	}
	if (output.status) {
		ret = -EPROTOTYPE;
		msg->fw_ret_code = output.status;
		goto exit_unlock;
	}
	msg->cpu_in_out = output.value;
exit_unlock:
	if (ret < 0)
		msg->cpu_in_out = 0;
	mutex_unlock(&data->lock);
	return ret;
}
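/*
 * Example (hypothetical values): to read an MCA register such as 0xC0002001
 * on thread 2, user space encodes mcamsr_in_out as (2ULL << 32) | 0xC0002001
 * and issues SBRMI_IOCTL_MCAMSR_CMD.  On success the 64-bit register value
 * is returned in the same field; a firmware error is reported through
 * fw_ret_code together with -EPROTOTYPE.
 */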
/* MCA MSR protocol */
static int rmi_mca_msr_read(struct sbrmi_data *data,
			    struct apml_mcamsr_msg *msg)
{
	struct cpu_msr_outdata output = {0};
	struct cpu_msr_indata input = {0};
	int ret, val = 0;
	int hw_status;
	u16 thread;

	mutex_lock(&data->lock);
	/* Cache the rev value to identify if the protocol is supported or not */
	if (!data->rev) {
		ret = sbrmi_get_rev(data);
		if (ret < 0)
			goto exit_unlock;
	}
	/* MCA MSR protocol is not supported for REV 0x10 */
	if (data->rev == 0x10) {
		ret = -EOPNOTSUPP;
		goto exit_unlock;
	}

	thread = msg->mcamsr_in_out >> CPUID_MCA_THRD_INDEX;

	/* For thread numbers > 127, the Thread128CS register bit must be set to 1 */
	if (thread > 127) {
		thread -= 128;
		val = 1;
	}
	ret = regmap_write(data->regmap, SBRMI_THREAD128CS, val);
	if (ret < 0)
		goto exit_unlock;

	prepare_mca_msr_input_message(&input, thread,
				      msg->mcamsr_in_out & CPUID_MCA_FUNC_MASK);

	ret = regmap_bulk_write(data->regmap, CPUID_MCA_CMD,
				&input, MSR_WR_REG_LEN);
	if (ret < 0)
		goto exit_unlock;

	/*
	 * For RMI Rev 0x20, a new h/w status bit is introduced, which is used
	 * by firmware to indicate completion of commands (0x71, 0x72, 0x73).
	 * Wait for the status bit to be set by the hardware before
	 * reading the data out.
	 */
	ret = regmap_read_poll_timeout(data->regmap, SBRMI_STATUS, hw_status,
				       hw_status & HW_ALERT_MASK, 500, 2000000);
	if (ret)
		goto exit_unlock;

	ret = regmap_bulk_read(data->regmap, CPUID_MCA_CMD,
			       &output, MSR_RD_REG_LEN);
	if (ret < 0)
		goto exit_unlock;

	ret = regmap_write(data->regmap, SBRMI_STATUS,
			   HW_ALERT_MASK);
	if (ret < 0)
		goto exit_unlock;

	if (output.num_bytes != MSR_RD_REG_LEN - 1) {
		ret = -EMSGSIZE;
		goto exit_unlock;
	}
	if (output.status) {
		ret = -EPROTOTYPE;
		msg->fw_ret_code = output.status;
		goto exit_unlock;
	}
	msg->mcamsr_in_out = output.value;

exit_unlock:
	mutex_unlock(&data->lock);
	return ret;
}
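/*
 * Example mailbox transaction (hypothetical values): user space fills
 * apml_mbox_msg with the APML mailbox command id in ->cmd and the 32-bit
 * command input in ->mb_in_out, then issues SBRMI_IOCTL_MBOX_CMD.  On
 * completion the 32-bit firmware response is returned in ->mb_in_out and
 * any firmware error code in ->fw_ret_code (with -EPROTOTYPE returned to
 * the caller).
 */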
int rmi_mailbox_xfer(struct sbrmi_data *data,
		     struct apml_mbox_msg *msg)
{
	unsigned int bytes, ec = 0;
	int i, ret;
	int sw_status;
	u8 byte;

	mutex_lock(&data->lock);

	msg->fw_ret_code = 0;

	/* Indicate to firmware that a command is to be serviced */
	ret = regmap_write(data->regmap, SBRMI_INBNDMSG7, START_CMD);
	if (ret < 0)
		goto exit_unlock;

	/* Write the command to SBRMI::InBndMsg_inst0 */
	ret = regmap_write(data->regmap, SBRMI_INBNDMSG0, msg->cmd);
	if (ret < 0)
		goto exit_unlock;

	/*
	 * For both read and write the initiator (BMC) writes
	 * Command Data In[31:0] to SBRMI::InBndMsg_inst[4:1]
	 * SBRMI_x3C(MSB):SBRMI_x39(LSB)
	 */
	for (i = 0; i < AMD_SBI_MB_DATA_SIZE; i++) {
		byte = (msg->mb_in_out >> i * 8) & 0xff;
		ret = regmap_write(data->regmap, SBRMI_INBNDMSG1 + i, byte);
		if (ret < 0)
			goto exit_unlock;
	}

	/*
	 * Write 0x01 to SBRMI::SoftwareInterrupt to notify firmware to
	 * perform the requested read or write command
	 */
	ret = regmap_write(data->regmap, SBRMI_SW_INTERRUPT, TRIGGER_MAILBOX);
	if (ret < 0)
		goto exit_unlock;

	/*
	 * Firmware will write SBRMI::Status[SwAlertSts]=1 to generate
	 * an ALERT (if enabled) to the initiator (BMC) to indicate completion
	 * of the requested command
	 */
	ret = regmap_read_poll_timeout(data->regmap, SBRMI_STATUS, sw_status,
				       sw_status & SW_ALERT_MASK, 500, 2000000);
	if (ret)
		goto exit_unlock;

	ret = regmap_read(data->regmap, SBRMI_OUTBNDMSG7, &ec);
	if (ret || ec)
		goto exit_clear_alert;

	/* Clear the input value before updating the output data */
	msg->mb_in_out = 0;

	/*
	 * For a read operation, the initiator (BMC) reads the firmware
	 * response Command Data Out[31:0] from SBRMI::OutBndMsg_inst[4:1]
	 * {SBRMI_x34(MSB):SBRMI_x31(LSB)}.
	 */
	for (i = 0; i < AMD_SBI_MB_DATA_SIZE; i++) {
		ret = regmap_read(data->regmap,
				  SBRMI_OUTBNDMSG1 + i, &bytes);
		if (ret < 0)
			break;
		msg->mb_in_out |= bytes << i * 8;
	}

exit_clear_alert:
	/*
	 * BMC must write 1'b1 to SBRMI::Status[SwAlertSts] to clear the
	 * ALERT to the initiator
	 */
	ret = regmap_write(data->regmap, SBRMI_STATUS,
			   sw_status | SW_ALERT_MASK);
	if (ec) {
		ret = -EPROTOTYPE;
		msg->fw_ret_code = ec;
	}
exit_unlock:
	mutex_unlock(&data->lock);
	return ret;
}

static int apml_rmi_reg_xfer(struct sbrmi_data *data,
			     struct apml_reg_xfer_msg __user *arg)
{
	struct apml_reg_xfer_msg msg = { 0 };
	unsigned int data_read;
	int ret;

	/* Copy the structure from user */
	if (copy_from_user(&msg, arg, sizeof(struct apml_reg_xfer_msg)))
		return -EFAULT;

	mutex_lock(&data->lock);
	if (msg.rflag) {
		ret = regmap_read(data->regmap, msg.reg_addr, &data_read);
		if (!ret)
			msg.data_in_out = data_read;
	} else {
		ret = regmap_write(data->regmap, msg.reg_addr, msg.data_in_out);
	}

	mutex_unlock(&data->lock);

	if (msg.rflag && !ret)
		if (copy_to_user(arg, &msg, sizeof(struct apml_reg_xfer_msg)))
			return -EFAULT;
	return ret;
}

static int apml_mailbox_xfer(struct sbrmi_data *data, struct apml_mbox_msg __user *arg)
{
	struct apml_mbox_msg msg = { 0 };
	int ret;

	/* Copy the structure from user */
	if (copy_from_user(&msg, arg, sizeof(struct apml_mbox_msg)))
		return -EFAULT;

	/* Mailbox protocol */
	ret = rmi_mailbox_xfer(data, &msg);
	if (ret && ret != -EPROTOTYPE)
		return ret;

	if (copy_to_user(arg, &msg, sizeof(struct apml_mbox_msg)))
		return -EFAULT;
	return ret;
}

static int apml_cpuid_xfer(struct sbrmi_data *data, struct apml_cpuid_msg __user *arg)
{
	struct apml_cpuid_msg msg = { 0 };
	int ret;

	/* Copy the structure from user */
	if (copy_from_user(&msg, arg, sizeof(struct apml_cpuid_msg)))
		return -EFAULT;

	/* CPUID protocol */
	ret = rmi_cpuid_read(data, &msg);
	if (ret && ret != -EPROTOTYPE)
		return ret;

	if (copy_to_user(arg, &msg, sizeof(struct apml_cpuid_msg)))
		return -EFAULT;
	return ret;
}

static int apml_mcamsr_xfer(struct sbrmi_data *data, struct apml_mcamsr_msg __user *arg)
{
	struct apml_mcamsr_msg msg = { 0 };
	int ret;

	/* Copy the structure from user */
	if (copy_from_user(&msg, arg, sizeof(struct apml_mcamsr_msg)))
		return -EFAULT;

	/* MCAMSR protocol */
	ret = rmi_mca_msr_read(data, &msg);
	if (ret && ret != -EPROTOTYPE)
		return ret;

	if (copy_to_user(arg, &msg, sizeof(struct apml_mcamsr_msg)))
		return -EFAULT;
	return ret;
}
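/*
 * Note: for the protocol handlers above, -EPROTOTYPE indicates that the
 * transaction itself succeeded but firmware reported an error; the message
 * is still copied back to user space so the caller can inspect fw_ret_code.
 */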
static long sbrmi_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sbrmi_data *data;

	data = container_of(fp->private_data, struct sbrmi_data, sbrmi_misc_dev);
	switch (cmd) {
	case SBRMI_IOCTL_MBOX_CMD:
		return apml_mailbox_xfer(data, argp);
	case SBRMI_IOCTL_CPUID_CMD:
		return apml_cpuid_xfer(data, argp);
	case SBRMI_IOCTL_MCAMSR_CMD:
		return apml_mcamsr_xfer(data, argp);
	case SBRMI_IOCTL_REG_XFER_CMD:
		return apml_rmi_reg_xfer(data, argp);
	default:
		return -ENOTTY;
	}
}

static const struct file_operations sbrmi_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= sbrmi_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

int create_misc_rmi_device(struct sbrmi_data *data,
			   struct device *dev)
{
	data->sbrmi_misc_dev.name	= devm_kasprintf(dev,
							 GFP_KERNEL,
							 "sbrmi-%x",
							 data->dev_static_addr);
	data->sbrmi_misc_dev.minor	= MISC_DYNAMIC_MINOR;
	data->sbrmi_misc_dev.fops	= &sbrmi_fops;
	data->sbrmi_misc_dev.parent	= dev;
	data->sbrmi_misc_dev.nodename	= devm_kasprintf(dev,
							 GFP_KERNEL,
							 "sbrmi-%x",
							 data->dev_static_addr);
	data->sbrmi_misc_dev.mode	= 0600;

	return misc_register(&data->sbrmi_misc_dev);
}
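/*
 * Userspace usage sketch (illustrative only, not part of the driver): the
 * misc device registered above appears as /dev/sbrmi-<addr>, where <addr>
 * is the device's static address (e.g. sbrmi-3c for address 0x3c).  Assuming
 * the UAPI definitions of struct apml_mbox_msg and SBRMI_IOCTL_MBOX_CMD are
 * available from the driver's UAPI header, a mailbox command could be issued
 * roughly as follows:
 *
 *	struct apml_mbox_msg msg = { 0 };
 *	int fd = open("/dev/sbrmi-3c", O_RDWR);
 *
 *	msg.cmd = <APML mailbox command id>;
 *	msg.mb_in_out = <32-bit command input, if any>;
 *	if (ioctl(fd, SBRMI_IOCTL_MBOX_CMD, &msg) == 0)
 *		printf("response: 0x%x\n", msg.mb_in_out);
 *	else if (errno == EPROTOTYPE)
 *		printf("firmware error: 0x%x\n", msg.fw_ret_code);
 *	close(fd);
 */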