1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * AMD Alert Standard Format Platform Driver 4 * 5 * Copyright (c) 2024, Advanced Micro Devices, Inc. 6 * All Rights Reserved. 7 * 8 * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> 9 * Sanket Goswami <Sanket.Goswami@amd.com> 10 */ 11 12 #include <linux/bitops.h> 13 #include <linux/device.h> 14 #include <linux/devm-helpers.h> 15 #include <linux/errno.h> 16 #include <linux/gfp_types.h> 17 #include <linux/i2c.h> 18 #include <linux/io.h> 19 #include <linux/ioport.h> 20 #include <linux/module.h> 21 #include <linux/mod_devicetable.h> 22 #include <linux/platform_device.h> 23 #include <linux/sprintf.h> 24 25 #include "i2c-piix4.h" 26 27 /* ASF register bits */ 28 #define ASF_SLV_LISTN 0 29 #define ASF_SLV_INTR 1 30 #define ASF_SLV_RST 4 31 #define ASF_PEC_SP 5 32 #define ASF_DATA_EN 7 33 #define ASF_MSTR_EN 16 34 #define ASF_CLK_EN 17 35 36 /* ASF address offsets */ 37 #define ASFINDEX (0x07 + piix4_smba) 38 #define ASFLISADDR (0x09 + piix4_smba) 39 #define ASFSTA (0x0A + piix4_smba) 40 #define ASFSLVSTA (0x0D + piix4_smba) 41 #define ASFDATARWPTR (0x11 + piix4_smba) 42 #define ASFSETDATARDPTR (0x12 + piix4_smba) 43 #define ASFDATABNKSEL (0x13 + piix4_smba) 44 #define ASFSLVEN (0x15 + piix4_smba) 45 46 #define ASF_BLOCK_MAX_BYTES 72 47 #define ASF_ERROR_STATUS GENMASK(3, 1) 48 49 struct amd_asf_dev { 50 struct i2c_adapter adap; 51 void __iomem *eoi_base; 52 struct i2c_client *target; 53 struct delayed_work work_buf; 54 struct sb800_mmio_cfg mmio_cfg; 55 struct resource *port_addr; 56 }; 57 58 static void amd_asf_process_target(struct work_struct *work) 59 { 60 struct amd_asf_dev *dev = container_of(work, struct amd_asf_dev, work_buf.work); 61 unsigned short piix4_smba = dev->port_addr->start; 62 u8 data[ASF_BLOCK_MAX_BYTES]; 63 u8 bank, reg, cmd; 64 u8 len = 0, idx, val; 65 66 /* Read target status register */ 67 reg = inb_p(ASFSLVSTA); 68 69 /* Check if no error bits are set in target status register */ 70 if (reg & ASF_ERROR_STATUS) { 71 /* Set bank as full */ 72 cmd = 0; 73 reg |= GENMASK(3, 2); 74 outb_p(reg, ASFDATABNKSEL); 75 } else { 76 /* Read data bank */ 77 reg = inb_p(ASFDATABNKSEL); 78 bank = (reg & BIT(3)) ? 1 : 0; 79 80 /* Set read data bank */ 81 if (bank) { 82 reg |= BIT(4); 83 reg &= ~BIT(3); 84 } else { 85 reg &= ~BIT(4); 86 reg &= ~BIT(2); 87 } 88 89 /* Read command register */ 90 outb_p(reg, ASFDATABNKSEL); 91 cmd = inb_p(ASFINDEX); 92 len = inb_p(ASFDATARWPTR); 93 for (idx = 0; idx < len; idx++) 94 data[idx] = inb_p(ASFINDEX); 95 96 /* Clear data bank status */ 97 if (bank) { 98 reg |= BIT(3); 99 outb_p(reg, ASFDATABNKSEL); 100 } else { 101 reg |= BIT(2); 102 outb_p(reg, ASFDATABNKSEL); 103 } 104 } 105 106 outb_p(0, ASFSETDATARDPTR); 107 if (cmd & BIT(0)) 108 return; 109 110 /* 111 * Although i2c_slave_event() returns an appropriate error code, we 112 * don't check it here because we're operating in the workqueue context. 113 */ 114 i2c_slave_event(dev->target, I2C_SLAVE_WRITE_REQUESTED, &val); 115 for (idx = 0; idx < len; idx++) { 116 val = data[idx]; 117 i2c_slave_event(dev->target, I2C_SLAVE_WRITE_RECEIVED, &val); 118 } 119 i2c_slave_event(dev->target, I2C_SLAVE_STOP, &val); 120 } 121 122 static void amd_asf_update_ioport_target(unsigned short piix4_smba, u8 bit, 123 unsigned long offset, bool set) 124 { 125 unsigned long reg; 126 127 reg = inb_p(offset); 128 __assign_bit(bit, ®, set); 129 outb_p(reg, offset); 130 } 131 132 static void amd_asf_update_mmio_target(struct amd_asf_dev *dev, u8 bit, bool set) 133 { 134 unsigned long reg; 135 136 reg = ioread32(dev->mmio_cfg.addr); 137 __assign_bit(bit, ®, set); 138 iowrite32(reg, dev->mmio_cfg.addr); 139 } 140 141 static void amd_asf_setup_target(struct amd_asf_dev *dev) 142 { 143 unsigned short piix4_smba = dev->port_addr->start; 144 145 /* Reset both host and target before setting up */ 146 outb_p(0, SMBHSTSTS); 147 outb_p(0, ASFSLVSTA); 148 outb_p(0, ASFSTA); 149 150 /* Update target address */ 151 amd_asf_update_ioport_target(piix4_smba, ASF_SLV_LISTN, ASFLISADDR, true); 152 /* Enable target and set the clock */ 153 amd_asf_update_mmio_target(dev, ASF_MSTR_EN, false); 154 amd_asf_update_mmio_target(dev, ASF_CLK_EN, true); 155 /* Enable target interrupt */ 156 amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, ASFSLVEN, true); 157 amd_asf_update_ioport_target(piix4_smba, ASF_SLV_RST, ASFSLVEN, false); 158 /* Enable PEC and PEC append */ 159 amd_asf_update_ioport_target(piix4_smba, ASF_DATA_EN, SMBHSTCNT, true); 160 amd_asf_update_ioport_target(piix4_smba, ASF_PEC_SP, SMBHSTCNT, true); 161 } 162 163 static int amd_asf_access(struct i2c_adapter *adap, u16 addr, u8 command, u8 *data) 164 { 165 struct amd_asf_dev *dev = i2c_get_adapdata(adap); 166 unsigned short piix4_smba = dev->port_addr->start; 167 u8 i, len; 168 169 outb_p((addr << 1), SMBHSTADD); 170 outb_p(command, SMBHSTCMD); 171 len = data[0]; 172 if (len == 0 || len > ASF_BLOCK_MAX_BYTES) 173 return -EINVAL; 174 175 outb_p(len, SMBHSTDAT0); 176 /* Reset SMBBLKDAT */ 177 inb_p(SMBHSTCNT); 178 for (i = 1; i <= len; i++) 179 outb_p(data[i], SMBBLKDAT); 180 181 outb_p(PIIX4_BLOCK_DATA, SMBHSTCNT); 182 /* Enable PEC and PEC append */ 183 amd_asf_update_ioport_target(piix4_smba, ASF_DATA_EN, SMBHSTCNT, true); 184 amd_asf_update_ioport_target(piix4_smba, ASF_PEC_SP, SMBHSTCNT, true); 185 186 return piix4_transaction(adap, piix4_smba); 187 } 188 189 static int amd_asf_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) 190 { 191 struct amd_asf_dev *dev = i2c_get_adapdata(adap); 192 unsigned short piix4_smba = dev->port_addr->start; 193 u8 asf_data[ASF_BLOCK_MAX_BYTES]; 194 struct i2c_msg *dev_msgs = msgs; 195 u8 prev_port; 196 int ret; 197 198 if (msgs->flags & I2C_M_RD) { 199 dev_err(&adap->dev, "ASF: Read not supported\n"); 200 return -EOPNOTSUPP; 201 } 202 203 /* Exclude the receive header and PEC */ 204 if (msgs->len > ASF_BLOCK_MAX_BYTES - 3) { 205 dev_warn(&adap->dev, "ASF: max message length exceeded\n"); 206 return -EOPNOTSUPP; 207 } 208 209 asf_data[0] = dev_msgs->len; 210 memcpy(asf_data + 1, dev_msgs[0].buf, dev_msgs->len); 211 212 ret = piix4_sb800_region_request(&adap->dev, &dev->mmio_cfg); 213 if (ret) 214 return ret; 215 216 amd_asf_update_ioport_target(piix4_smba, ASF_SLV_RST, ASFSLVEN, true); 217 amd_asf_update_ioport_target(piix4_smba, ASF_SLV_LISTN, ASFLISADDR, false); 218 /* Clear ASF target status */ 219 outb_p(0, ASFSLVSTA); 220 221 /* Enable ASF SMBus controller function */ 222 amd_asf_update_mmio_target(dev, ASF_MSTR_EN, true); 223 prev_port = piix4_sb800_port_sel(0, &dev->mmio_cfg); 224 ret = amd_asf_access(adap, msgs->addr, msgs[0].buf[0], asf_data); 225 piix4_sb800_port_sel(prev_port, &dev->mmio_cfg); 226 amd_asf_setup_target(dev); 227 piix4_sb800_region_release(&adap->dev, &dev->mmio_cfg); 228 return ret; 229 } 230 231 static int amd_asf_reg_target(struct i2c_client *target) 232 { 233 struct amd_asf_dev *dev = i2c_get_adapdata(target->adapter); 234 unsigned short piix4_smba = dev->port_addr->start; 235 int ret; 236 u8 reg; 237 238 if (dev->target) 239 return -EBUSY; 240 241 ret = piix4_sb800_region_request(&target->dev, &dev->mmio_cfg); 242 if (ret) 243 return ret; 244 245 reg = (target->addr << 1) | I2C_M_RD; 246 outb_p(reg, ASFLISADDR); 247 248 amd_asf_setup_target(dev); 249 dev->target = target; 250 amd_asf_update_ioport_target(piix4_smba, ASF_DATA_EN, ASFDATABNKSEL, false); 251 piix4_sb800_region_release(&target->dev, &dev->mmio_cfg); 252 253 return 0; 254 } 255 256 static int amd_asf_unreg_target(struct i2c_client *target) 257 { 258 struct amd_asf_dev *dev = i2c_get_adapdata(target->adapter); 259 unsigned short piix4_smba = dev->port_addr->start; 260 261 amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, ASFSLVEN, false); 262 amd_asf_update_ioport_target(piix4_smba, ASF_SLV_RST, ASFSLVEN, true); 263 dev->target = NULL; 264 265 return 0; 266 } 267 268 static u32 amd_asf_func(struct i2c_adapter *adapter) 269 { 270 return I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | 271 I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_PEC | I2C_FUNC_SLAVE; 272 } 273 274 static const struct i2c_algorithm amd_asf_smbus_algorithm = { 275 .master_xfer = amd_asf_xfer, 276 .reg_slave = amd_asf_reg_target, 277 .unreg_slave = amd_asf_unreg_target, 278 .functionality = amd_asf_func, 279 }; 280 281 static irqreturn_t amd_asf_irq_handler(int irq, void *ptr) 282 { 283 struct amd_asf_dev *dev = ptr; 284 unsigned short piix4_smba = dev->port_addr->start; 285 u8 target_int = inb_p(ASFSTA); 286 287 if (target_int & BIT(6)) { 288 /* Target Interrupt */ 289 outb_p(target_int | BIT(6), ASFSTA); 290 schedule_delayed_work(&dev->work_buf, HZ); 291 } else { 292 /* Controller Interrupt */ 293 amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, SMBHSTSTS, true); 294 } 295 296 return IRQ_HANDLED; 297 } 298 299 static int amd_asf_probe(struct platform_device *pdev) 300 { 301 struct device *dev = &pdev->dev; 302 struct amd_asf_dev *asf_dev; 303 struct resource *eoi_addr; 304 int ret, irq; 305 306 asf_dev = devm_kzalloc(dev, sizeof(*asf_dev), GFP_KERNEL); 307 if (!asf_dev) 308 return dev_err_probe(dev, -ENOMEM, "Failed to allocate memory\n"); 309 310 asf_dev->mmio_cfg.use_mmio = true; 311 asf_dev->port_addr = platform_get_resource(pdev, IORESOURCE_IO, 0); 312 if (!asf_dev->port_addr) 313 return dev_err_probe(dev, -EINVAL, "missing IO resources\n"); 314 315 /* 316 * The resource obtained via ACPI might not belong to the ASF device address space. Instead, 317 * it could be within other IP blocks of the ASIC, which are crucial for generating 318 * subsequent interrupts. Therefore, we avoid using devm_platform_ioremap_resource() and 319 * use platform_get_resource() and devm_ioremap() separately to prevent any address space 320 * conflicts. 321 */ 322 eoi_addr = platform_get_resource(pdev, IORESOURCE_MEM, 0); 323 if (!eoi_addr) 324 return dev_err_probe(dev, -EINVAL, "missing MEM resources\n"); 325 326 asf_dev->eoi_base = devm_ioremap(dev, eoi_addr->start, resource_size(eoi_addr)); 327 if (!asf_dev->eoi_base) 328 return dev_err_probe(dev, -EBUSY, "failed mapping IO region\n"); 329 330 ret = devm_delayed_work_autocancel(dev, &asf_dev->work_buf, amd_asf_process_target); 331 if (ret) 332 return dev_err_probe(dev, ret, "failed to create work queue\n"); 333 334 irq = platform_get_irq(pdev, 0); 335 if (irq < 0) 336 return dev_err_probe(dev, irq, "missing IRQ resources\n"); 337 338 ret = devm_request_irq(dev, irq, amd_asf_irq_handler, IRQF_SHARED, "amd_asf", asf_dev); 339 if (ret) 340 return dev_err_probe(dev, ret, "Unable to request irq: %d for use\n", irq); 341 342 asf_dev->adap.owner = THIS_MODULE; 343 asf_dev->adap.algo = &amd_asf_smbus_algorithm; 344 asf_dev->adap.dev.parent = dev; 345 346 i2c_set_adapdata(&asf_dev->adap, asf_dev); 347 snprintf(asf_dev->adap.name, sizeof(asf_dev->adap.name), "AMD ASF adapter"); 348 349 return devm_i2c_add_adapter(dev, &asf_dev->adap); 350 } 351 352 static const struct acpi_device_id amd_asf_acpi_ids[] = { 353 { "AMDI001A" }, 354 { } 355 }; 356 MODULE_DEVICE_TABLE(acpi, amd_asf_acpi_ids); 357 358 static struct platform_driver amd_asf_driver = { 359 .driver = { 360 .name = "i2c-amd-asf", 361 .acpi_match_table = amd_asf_acpi_ids, 362 }, 363 .probe = amd_asf_probe, 364 }; 365 module_platform_driver(amd_asf_driver); 366 367 MODULE_IMPORT_NS("PIIX4_SMBUS"); 368 MODULE_LICENSE("GPL"); 369 MODULE_DESCRIPTION("AMD Alert Standard Format Driver"); 370