// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Granite Rapids-D vGPIO driver
 *
 * Copyright (c) 2024, Intel Corporation.
 *
 * Author: Aapo Vienamo <aapo.vienamo@linux.intel.com>
 */

#include <linux/array_size.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp_types.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/math.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <linux/gpio/driver.h>

#define GNR_NUM_PINS            128
#define GNR_PINS_PER_REG        32
#define GNR_NUM_REGS            DIV_ROUND_UP(GNR_NUM_PINS, GNR_PINS_PER_REG)

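/*
 * GNR_CFG_BAR is read from the start of the MMIO resource and holds the
 * offset of the per-pad configuration registers; the remaining offsets
 * below are applied relative to that pad configuration base.
 */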
#define GNR_CFG_BAR             0x00
#define GNR_CFG_LOCK_OFFSET     0x04
#define GNR_GPI_STATUS_OFFSET   0x20
#define GNR_GPI_ENABLE_OFFSET   0x24

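/*
 * Layout of a per-pad configuration DW as used by this driver: an RX trigger
 * mode field plus RX/TX buffer disable and RX/TX state bits.
 */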
#define GNR_CFG_DW_RX_MASK      GENMASK(25, 22)
#define GNR_CFG_DW_RX_DISABLE   FIELD_PREP(GNR_CFG_DW_RX_MASK, 2)
#define GNR_CFG_DW_RX_EDGE      FIELD_PREP(GNR_CFG_DW_RX_MASK, 1)
#define GNR_CFG_DW_RX_LEVEL     FIELD_PREP(GNR_CFG_DW_RX_MASK, 0)
#define GNR_CFG_DW_RXDIS        BIT(4)
#define GNR_CFG_DW_TXDIS        BIT(3)
#define GNR_CFG_DW_RXSTATE      BIT(1)
#define GNR_CFG_DW_TXSTATE      BIT(0)

/**
 * struct gnr_gpio - Intel Granite Rapids-D vGPIO driver state
 * @gc: GPIO controller interface
 * @reg_base: base address of the GPIO registers
 * @ro_bitmap: bitmap of read-only pins
 * @lock: guard the registers
 * @pad_backup: backup of the register state for suspend
 */
struct gnr_gpio {
        struct gpio_chip gc;
        void __iomem *reg_base;
        DECLARE_BITMAP(ro_bitmap, GNR_NUM_PINS);
        raw_spinlock_t lock;
        u32 pad_backup[];
};

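/* Each pad is controlled by a single 32-bit configuration DW register. */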
static void __iomem *gnr_gpio_get_padcfg_addr(const struct gnr_gpio *priv,
                                              unsigned int gpio)
{
        return priv->reg_base + gpio * sizeof(u32);
}

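/*
 * Read-modify-write a pad configuration DW under the register lock; pads
 * marked read-only in the lock registers are rejected with -EACCES.
 */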
static int gnr_gpio_configure_line(struct gpio_chip *gc, unsigned int gpio,
                                   u32 clear_mask, u32 set_mask)
{
        struct gnr_gpio *priv = gpiochip_get_data(gc);
        void __iomem *addr = gnr_gpio_get_padcfg_addr(priv, gpio);
        u32 dw;

        if (test_bit(gpio, priv->ro_bitmap))
                return -EACCES;

        guard(raw_spinlock_irqsave)(&priv->lock);

        dw = readl(addr);
        dw &= ~clear_mask;
        dw |= set_mask;
        writel(dw, addr);

        return 0;
}

static int gnr_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
        const struct gnr_gpio *priv = gpiochip_get_data(gc);
        u32 dw;

        dw = readl(gnr_gpio_get_padcfg_addr(priv, gpio));

        return !!(dw & GNR_CFG_DW_RXSTATE);
}

static void gnr_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
        u32 clear = 0;
        u32 set = 0;

        if (value)
                set = GNR_CFG_DW_TXSTATE;
        else
                clear = GNR_CFG_DW_TXSTATE;

        gnr_gpio_configure_line(gc, gpio, clear, set);
}

static int gnr_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
{
        struct gnr_gpio *priv = gpiochip_get_data(gc);
        u32 dw;

        dw = readl(gnr_gpio_get_padcfg_addr(priv, gpio));

        if (dw & GNR_CFG_DW_TXDIS)
                return GPIO_LINE_DIRECTION_IN;

        return GPIO_LINE_DIRECTION_OUT;
}

static int gnr_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio)
{
        return gnr_gpio_configure_line(gc, gpio, GNR_CFG_DW_RXDIS, 0);
}

static int gnr_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio, int value)
{
        u32 clear = GNR_CFG_DW_TXDIS;
        u32 set = value ? GNR_CFG_DW_TXSTATE : 0;

        return gnr_gpio_configure_line(gc, gpio, clear, set);
}

static const struct gpio_chip gnr_gpio_chip = {
        .owner = THIS_MODULE,
        .get = gnr_gpio_get,
        .set = gnr_gpio_set,
        .get_direction = gnr_gpio_get_direction,
        .direction_input = gnr_gpio_direction_input,
        .direction_output = gnr_gpio_direction_output,
};

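/*
 * The GPI status and enable registers are banked: one 32-bit register covers
 * 32 pins, so callers pass a register index rather than a pin number here.
 */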
static void __iomem *gnr_gpio_get_reg_addr(const struct gnr_gpio *priv,
                                           unsigned int base,
                                           unsigned int gpio)
{
        return priv->reg_base + base + gpio * sizeof(u32);
}

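/* Clear the pad's pending bit in the corresponding GPI status register. */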
static void gnr_gpio_irq_ack(struct irq_data *d)
{
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct gnr_gpio *priv = gpiochip_get_data(gc);
        irq_hw_number_t gpio = irqd_to_hwirq(d);
        unsigned int reg_idx = gpio / GNR_PINS_PER_REG;
        unsigned int bit_idx = gpio % GNR_PINS_PER_REG;
        void __iomem *addr = gnr_gpio_get_reg_addr(priv, GNR_GPI_STATUS_OFFSET, reg_idx);
        u32 reg;

        guard(raw_spinlock_irqsave)(&priv->lock);

        reg = readl(addr);
        reg &= ~BIT(bit_idx);
        writel(reg, addr);
}

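/*
 * Set or clear the pad's bit in the corresponding GPI interrupt enable
 * register.
 */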
static void gnr_gpio_irq_mask_unmask(struct gpio_chip *gc, unsigned long gpio, bool mask)
{
        struct gnr_gpio *priv = gpiochip_get_data(gc);
        unsigned int reg_idx = gpio / GNR_PINS_PER_REG;
        unsigned int bit_idx = gpio % GNR_PINS_PER_REG;
        void __iomem *addr = gnr_gpio_get_reg_addr(priv, GNR_GPI_ENABLE_OFFSET, reg_idx);
        u32 reg;

        guard(raw_spinlock_irqsave)(&priv->lock);

        reg = readl(addr);
        if (mask)
                reg &= ~BIT(bit_idx);
        else
                reg |= BIT(bit_idx);
        writel(reg, addr);
}

static void gnr_gpio_irq_mask(struct irq_data *d)
{
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        irq_hw_number_t hwirq = irqd_to_hwirq(d);

        gnr_gpio_irq_mask_unmask(gc, hwirq, true);
        gpiochip_disable_irq(gc, hwirq);
}

static void gnr_gpio_irq_unmask(struct irq_data *d)
{
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        irq_hw_number_t hwirq = irqd_to_hwirq(d);

        gpiochip_enable_irq(gc, hwirq);
        gnr_gpio_irq_mask_unmask(gc, hwirq, false);
}

static int gnr_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        irq_hw_number_t pin = irqd_to_hwirq(d);
        u32 mask = GNR_CFG_DW_RX_MASK;
        u32 set;

        /* Falling edge and level low triggers are not supported by the GPIO controller */
        switch (type) {
        case IRQ_TYPE_NONE:
                set = GNR_CFG_DW_RX_DISABLE;
                break;
        case IRQ_TYPE_EDGE_RISING:
                set = GNR_CFG_DW_RX_EDGE;
                irq_set_handler_locked(d, handle_edge_irq);
                break;
        case IRQ_TYPE_LEVEL_HIGH:
                set = GNR_CFG_DW_RX_LEVEL;
                irq_set_handler_locked(d, handle_level_irq);
                break;
        default:
                return -EINVAL;
        }

        return gnr_gpio_configure_line(gc, pin, mask, set);
}

static const struct irq_chip gnr_gpio_irq_chip = {
        /* The irq_chip is const, so its name cannot be set at runtime */
        .name = "gpio-graniterapids",
        .irq_ack = gnr_gpio_irq_ack,
        .irq_mask = gnr_gpio_irq_mask,
        .irq_unmask = gnr_gpio_irq_unmask,
        .irq_set_type = gnr_gpio_irq_set_type,
        .flags = IRQCHIP_IMMUTABLE,
        GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

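/*
 * Build the bitmap of read-only pins from the CFG lock registers; locked
 * pads are never written by this driver (see gnr_gpio_configure_line()).
 */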
static void gnr_gpio_init_pin_ro_bits(struct device *dev,
                                      const void __iomem *cfg_lock_base,
                                      unsigned long *ro_bitmap)
{
        u32 tmp[GNR_NUM_REGS];

        memcpy_fromio(tmp, cfg_lock_base, sizeof(tmp));
        bitmap_from_arr32(ro_bitmap, tmp, GNR_NUM_PINS);
}

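/*
 * Shared interrupt handler: scan each banked GPI status/enable register pair
 * and dispatch every enabled pending pin to the GPIO IRQ domain.
 */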
static irqreturn_t gnr_gpio_irq(int irq, void *data)
{
        struct gnr_gpio *priv = data;
        unsigned int handled = 0;

        for (unsigned int i = 0; i < GNR_NUM_REGS; i++) {
                const void __iomem *reg = priv->reg_base + i * sizeof(u32);
                unsigned long pending;
                unsigned long enabled;
                unsigned int bit_idx;

                scoped_guard(raw_spinlock, &priv->lock) {
                        pending = readl(reg + GNR_GPI_STATUS_OFFSET);
                        enabled = readl(reg + GNR_GPI_ENABLE_OFFSET);
                }

                /* Only enabled interrupts */
                pending &= enabled;

                for_each_set_bit(bit_idx, &pending, GNR_PINS_PER_REG) {
                        unsigned int hwirq = i * GNR_PINS_PER_REG + bit_idx;

                        generic_handle_domain_irq(priv->gc.irq.domain, hwirq);
                }

                handled += pending ? 1 : 0;
        }

        return IRQ_RETVAL(handled);
}

static int gnr_gpio_probe(struct platform_device *pdev)
{
        size_t num_backup_pins = IS_ENABLED(CONFIG_PM_SLEEP) ? GNR_NUM_PINS : 0;
        struct device *dev = &pdev->dev;
        struct gpio_irq_chip *girq;
        struct gnr_gpio *priv;
        void __iomem *regs;
        int irq, ret;

        priv = devm_kzalloc(dev, struct_size(priv, pad_backup, num_backup_pins), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        raw_spin_lock_init(&priv->lock);

        regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(regs))
                return PTR_ERR(regs);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        ret = devm_request_irq(dev, irq, gnr_gpio_irq, IRQF_SHARED | IRQF_NO_THREAD,
                               dev_name(dev), priv);
        if (ret)
                return dev_err_probe(dev, ret, "failed to request interrupt\n");

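        /*
         * The register at GNR_CFG_BAR holds the offset of the pad
         * configuration registers within the MMIO resource.
         */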
        priv->reg_base = regs + readl(regs + GNR_CFG_BAR);

        gnr_gpio_init_pin_ro_bits(dev, priv->reg_base + GNR_CFG_LOCK_OFFSET,
                                  priv->ro_bitmap);

        priv->gc = gnr_gpio_chip;
        priv->gc.label = dev_name(dev);
        priv->gc.parent = dev;
        priv->gc.ngpio = GNR_NUM_PINS;
        priv->gc.base = -1;

        girq = &priv->gc.irq;
        gpio_irq_chip_set_chip(girq, &gnr_gpio_irq_chip);
        girq->parent_handler = NULL;
        girq->num_parents = 0;
        girq->parents = NULL;
        girq->default_type = IRQ_TYPE_NONE;
        girq->handler = handle_bad_irq;

        platform_set_drvdata(pdev, priv);

        return devm_gpiochip_add_data(dev, &priv->gc, priv);
}

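/*
 * Save and restore the pad configuration DWs across suspend; pads marked
 * read-only in the lock registers are skipped.
 */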
static int gnr_gpio_suspend(struct device *dev)
{
        struct gnr_gpio *priv = dev_get_drvdata(dev);
        unsigned int i;

        guard(raw_spinlock_irqsave)(&priv->lock);

        for_each_clear_bit(i, priv->ro_bitmap, priv->gc.ngpio)
                priv->pad_backup[i] = readl(gnr_gpio_get_padcfg_addr(priv, i));

        return 0;
}

static int gnr_gpio_resume(struct device *dev)
{
        struct gnr_gpio *priv = dev_get_drvdata(dev);
        unsigned int i;

        guard(raw_spinlock_irqsave)(&priv->lock);

        for_each_clear_bit(i, priv->ro_bitmap, priv->gc.ngpio)
                writel(priv->pad_backup[i], gnr_gpio_get_padcfg_addr(priv, i));

        return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(gnr_gpio_pm_ops, gnr_gpio_suspend, gnr_gpio_resume);

static const struct acpi_device_id gnr_gpio_acpi_match[] = {
        { "INTC1109" },
        {}
};
MODULE_DEVICE_TABLE(acpi, gnr_gpio_acpi_match);

static struct platform_driver gnr_gpio_driver = {
        .driver = {
                .name = "gpio-graniterapids",
                .pm = pm_sleep_ptr(&gnr_gpio_pm_ops),
                .acpi_match_table = gnr_gpio_acpi_match,
        },
        .probe = gnr_gpio_probe,
};
module_platform_driver(gnr_gpio_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Aapo Vienamo <aapo.vienamo@linux.intel.com>");
MODULE_DESCRIPTION("Intel Granite Rapids-D vGPIO driver");