// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Granite Rapids-D vGPIO driver
 *
 * Copyright (c) 2024, Intel Corporation.
 *
 * Author: Aapo Vienamo <aapo.vienamo@linux.intel.com>
 */

#include <linux/array_size.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp_types.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/math.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <linux/gpio/driver.h>

#define GNR_NUM_PINS		128
#define GNR_PINS_PER_REG	32
#define GNR_NUM_REGS		DIV_ROUND_UP(GNR_NUM_PINS, GNR_PINS_PER_REG)

#define GNR_CFG_PADBAR		0x00
#define GNR_CFG_LOCK_OFFSET	0x04
#define GNR_GPI_STATUS_OFFSET	0x14
#define GNR_GPI_ENABLE_OFFSET	0x24

#define GNR_CFG_DW_HOSTSW_MODE	BIT(27)
#define GNR_CFG_DW_RX_MASK	GENMASK(23, 22)
#define GNR_CFG_DW_INTSEL_MASK	GENMASK(21, 14)
#define GNR_CFG_DW_RX_DISABLE	FIELD_PREP(GNR_CFG_DW_RX_MASK, 2)
#define GNR_CFG_DW_RX_EDGE	FIELD_PREP(GNR_CFG_DW_RX_MASK, 1)
#define GNR_CFG_DW_RX_LEVEL	FIELD_PREP(GNR_CFG_DW_RX_MASK, 0)
#define GNR_CFG_DW_RXDIS	BIT(4)
#define GNR_CFG_DW_TXDIS	BIT(3)
#define GNR_CFG_DW_RXSTATE	BIT(1)
#define GNR_CFG_DW_TXSTATE	BIT(0)

/**
 * struct gnr_gpio - Intel Granite Rapids-D vGPIO driver state
 * @gc: GPIO controller interface
 * @reg_base: base address of the GPIO registers
 * @pad_base: base address of the vGPIO pad configuration registers
 * @ro_bitmap: bitmap of read-only pins
 * @lock: guard the registers
 * @pad_backup: backup of the register state for suspend
 */
struct gnr_gpio {
	struct gpio_chip gc;
	void __iomem *reg_base;
	void __iomem *pad_base;
	DECLARE_BITMAP(ro_bitmap, GNR_NUM_PINS);
	raw_spinlock_t lock;
	u32 pad_backup[];
};

static void __iomem *gnr_gpio_get_padcfg_addr(const struct gnr_gpio *priv,
					      unsigned int gpio)
{
	return priv->pad_base + gpio * sizeof(u32);
}

static int gnr_gpio_configure_line(struct gpio_chip *gc, unsigned int gpio,
				   u32 clear_mask, u32 set_mask)
{
	struct gnr_gpio *priv = gpiochip_get_data(gc);
	void __iomem *addr = gnr_gpio_get_padcfg_addr(priv, gpio);
	u32 dw;

	if (test_bit(gpio, priv->ro_bitmap))
		return -EACCES;

	guard(raw_spinlock_irqsave)(&priv->lock);

	dw = readl(addr);
	dw &= ~clear_mask;
	dw |= set_mask;
	writel(dw, addr);

	return 0;
}

static int gnr_gpio_request(struct gpio_chip *gc, unsigned int gpio)
{
	struct gnr_gpio *priv = gpiochip_get_data(gc);
	u32 dw;

	dw = readl(gnr_gpio_get_padcfg_addr(priv, gpio));
	if (!(dw & GNR_CFG_DW_HOSTSW_MODE)) {
		dev_warn(gc->parent, "GPIO %u is not owned by host", gpio);
		return -EBUSY;
	}

	return 0;
}

static int gnr_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
	const struct gnr_gpio *priv = gpiochip_get_data(gc);
	u32 dw;

	dw = readl(gnr_gpio_get_padcfg_addr(priv, gpio));

	return !!(dw & GNR_CFG_DW_RXSTATE);
}

static void gnr_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
	u32 clear = 0;
	u32 set = 0;

	if (value)
		set = GNR_CFG_DW_TXSTATE;
	else
		clear = GNR_CFG_DW_TXSTATE;

	gnr_gpio_configure_line(gc, gpio, clear, set);
}

static int gnr_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
{
	struct gnr_gpio *priv = gpiochip_get_data(gc);
	u32 dw;

	dw = readl(gnr_gpio_get_padcfg_addr(priv, gpio));

	if (dw & GNR_CFG_DW_TXDIS)
		return GPIO_LINE_DIRECTION_IN;

	return GPIO_LINE_DIRECTION_OUT;
}

static int gnr_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio)
{
	return gnr_gpio_configure_line(gc, gpio, GNR_CFG_DW_RXDIS, 0);
}

static int gnr_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio, int value)
{
	u32 clear = GNR_CFG_DW_TXDIS;
	u32 set = value ? GNR_CFG_DW_TXSTATE : 0;

	return gnr_gpio_configure_line(gc, gpio, clear, set);
}

static const struct gpio_chip gnr_gpio_chip = {
	.owner		  = THIS_MODULE,
	.request	  = gnr_gpio_request,
	.get		  = gnr_gpio_get,
	.set		  = gnr_gpio_set,
	.get_direction	  = gnr_gpio_get_direction,
	.direction_input  = gnr_gpio_direction_input,
	.direction_output = gnr_gpio_direction_output,
};

static void __iomem *gnr_gpio_get_reg_addr(const struct gnr_gpio *priv,
					   unsigned int base,
					   unsigned int gpio)
{
	return priv->reg_base + base + gpio * sizeof(u32);
}

static void gnr_gpio_irq_ack(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct gnr_gpio *priv = gpiochip_get_data(gc);
	irq_hw_number_t gpio = irqd_to_hwirq(d);
	unsigned int reg_idx = gpio / GNR_PINS_PER_REG;
	unsigned int bit_idx = gpio % GNR_PINS_PER_REG;
	void __iomem *addr = gnr_gpio_get_reg_addr(priv, GNR_GPI_STATUS_OFFSET, reg_idx);
	u32 reg;

	guard(raw_spinlock_irqsave)(&priv->lock);

	reg = readl(addr);
	reg |= BIT(bit_idx);
	writel(reg, addr);
}

static void gnr_gpio_irq_mask_unmask(struct gpio_chip *gc, unsigned long gpio, bool mask)
{
	struct gnr_gpio *priv = gpiochip_get_data(gc);
	unsigned int reg_idx = gpio / GNR_PINS_PER_REG;
	unsigned int bit_idx = gpio % GNR_PINS_PER_REG;
	void __iomem *addr = gnr_gpio_get_reg_addr(priv, GNR_GPI_ENABLE_OFFSET, reg_idx);
	u32 reg;

	guard(raw_spinlock_irqsave)(&priv->lock);

	reg = readl(addr);
	if (mask)
		reg &= ~BIT(bit_idx);
	else
		reg |= BIT(bit_idx);
	writel(reg, addr);
}

static void gnr_gpio_irq_mask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	gnr_gpio_irq_mask_unmask(gc, hwirq, true);
	gpiochip_disable_irq(gc, hwirq);
}

static void gnr_gpio_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	gpiochip_enable_irq(gc, hwirq);
	gnr_gpio_irq_mask_unmask(gc, hwirq, false);
}

static int gnr_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct gnr_gpio *priv = gpiochip_get_data(gc);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	u32 reg;
	u32 set;

	/* Allow interrupts only if Interrupt Select field is non-zero */
	reg = readl(gnr_gpio_get_padcfg_addr(priv, hwirq));
	if (!(reg & GNR_CFG_DW_INTSEL_MASK)) {
		dev_dbg(gc->parent, "GPIO %lu cannot be used as IRQ", hwirq);
		return -EPERM;
	}

	/* Falling edge and level low triggers not supported by the GPIO controller */
	switch (type) {
	case IRQ_TYPE_NONE:
		set = GNR_CFG_DW_RX_DISABLE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		set = GNR_CFG_DW_RX_EDGE;
		irq_set_handler_locked(d, handle_edge_irq);
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		set = GNR_CFG_DW_RX_LEVEL;
		irq_set_handler_locked(d, handle_level_irq);
		break;
	default:
		return -EINVAL;
	}

	return gnr_gpio_configure_line(gc, hwirq, GNR_CFG_DW_RX_MASK, set);
}

static const struct irq_chip gnr_gpio_irq_chip = {
	.name		= "gpio-graniterapids",
	.irq_ack	= gnr_gpio_irq_ack,
	.irq_mask	= gnr_gpio_irq_mask,
	.irq_unmask	= gnr_gpio_irq_unmask,
	.irq_set_type	= gnr_gpio_irq_set_type,
	.flags		= IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

static void gnr_gpio_init_pin_ro_bits(struct device *dev,
				      const void __iomem *cfg_lock_base,
				      unsigned long *ro_bitmap)
{
	u32 tmp[GNR_NUM_REGS];

	memcpy_fromio(tmp, cfg_lock_base, sizeof(tmp));
	bitmap_from_arr32(ro_bitmap, tmp, GNR_NUM_PINS);
}

static irqreturn_t gnr_gpio_irq(int irq, void *data)
{
	struct gnr_gpio *priv = data;
	unsigned int handled = 0;

	for (unsigned int i = 0; i < GNR_NUM_REGS; i++) {
		const void __iomem *reg = priv->reg_base + i * sizeof(u32);
		unsigned long pending;
		unsigned long enabled;
		unsigned int bit_idx;

		scoped_guard(raw_spinlock, &priv->lock) {
			pending = readl(reg + GNR_GPI_STATUS_OFFSET);
			enabled = readl(reg + GNR_GPI_ENABLE_OFFSET);
		}

		/* Only enabled interrupts */
		pending &= enabled;

		for_each_set_bit(bit_idx, &pending, GNR_PINS_PER_REG) {
			unsigned int hwirq = i * GNR_PINS_PER_REG + bit_idx;

			generic_handle_domain_irq(priv->gc.irq.domain, hwirq);
		}

		handled += pending ? 1 : 0;

	}
	return IRQ_RETVAL(handled);
}

static int gnr_gpio_probe(struct platform_device *pdev)
{
	size_t num_backup_pins = IS_ENABLED(CONFIG_PM_SLEEP) ? GNR_NUM_PINS : 0;
	struct device *dev = &pdev->dev;
	struct gpio_irq_chip *girq;
	struct gnr_gpio *priv;
	void __iomem *regs;
	int irq, ret;
	u32 offset;

	priv = devm_kzalloc(dev, struct_size(priv, pad_backup, num_backup_pins), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	raw_spin_lock_init(&priv->lock);

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	priv->reg_base = regs;
	offset = readl(priv->reg_base + GNR_CFG_PADBAR);
	priv->pad_base = priv->reg_base + offset;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, gnr_gpio_irq, IRQF_SHARED | IRQF_NO_THREAD,
			       dev_name(dev), priv);
	if (ret)
		return dev_err_probe(dev, ret, "failed to request interrupt\n");

	gnr_gpio_init_pin_ro_bits(dev, priv->reg_base + GNR_CFG_LOCK_OFFSET,
				  priv->ro_bitmap);

	priv->gc = gnr_gpio_chip;
	priv->gc.label = dev_name(dev);
	priv->gc.parent = dev;
	priv->gc.ngpio = GNR_NUM_PINS;
	priv->gc.base = -1;

	girq = &priv->gc.irq;
	gpio_irq_chip_set_chip(girq, &gnr_gpio_irq_chip);
	girq->parent_handler = NULL;
	girq->num_parents = 0;
	girq->parents = NULL;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_bad_irq;

	platform_set_drvdata(pdev, priv);

	return devm_gpiochip_add_data(dev, &priv->gc, priv);
}

static int gnr_gpio_suspend(struct device *dev)
{
	struct gnr_gpio *priv = dev_get_drvdata(dev);
	unsigned int i;

	guard(raw_spinlock_irqsave)(&priv->lock);

	for_each_clear_bit(i, priv->ro_bitmap, priv->gc.ngpio)
		priv->pad_backup[i] = readl(gnr_gpio_get_padcfg_addr(priv, i));

	return 0;
}

static int gnr_gpio_resume(struct device *dev)
{
	struct gnr_gpio *priv = dev_get_drvdata(dev);
	unsigned int i;

	guard(raw_spinlock_irqsave)(&priv->lock);

	for_each_clear_bit(i, priv->ro_bitmap, priv->gc.ngpio)
		writel(priv->pad_backup[i], gnr_gpio_get_padcfg_addr(priv, i));

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(gnr_gpio_pm_ops, gnr_gpio_suspend, gnr_gpio_resume);

static const struct acpi_device_id gnr_gpio_acpi_match[] = {
	{ "INTC1109" },
	{}
};
MODULE_DEVICE_TABLE(acpi, gnr_gpio_acpi_match);

static struct platform_driver gnr_gpio_driver = {
	.driver = {
		.name		  = "gpio-graniterapids",
		.pm		  = pm_sleep_ptr(&gnr_gpio_pm_ops),
		.acpi_match_table = gnr_gpio_acpi_match,
	},
	.probe = gnr_gpio_probe,
};
module_platform_driver(gnr_gpio_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Aapo Vienamo <aapo.vienamo@linux.intel.com>");
MODULE_DESCRIPTION("Intel Granite Rapids-D vGPIO driver");