xref: /linux/drivers/gpio/gpio-aggregator.c (revision da32d155f4a8937952ca6fd55d3270fec1c3799f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 //
3 // GPIO Aggregator
4 //
5 // Copyright (C) 2019-2020 Glider bv
6 
7 #define DRV_NAME       "gpio-aggregator"
8 #define pr_fmt(fmt)	DRV_NAME ": " fmt
9 
10 #include <linux/bitmap.h>
11 #include <linux/bitops.h>
12 #include <linux/configfs.h>
13 #include <linux/ctype.h>
14 #include <linux/delay.h>
15 #include <linux/export.h>
16 #include <linux/idr.h>
17 #include <linux/kernel.h>
18 #include <linux/list.h>
19 #include <linux/lockdep.h>
20 #include <linux/mod_devicetable.h>
21 #include <linux/module.h>
22 #include <linux/mutex.h>
23 #include <linux/overflow.h>
24 #include <linux/platform_device.h>
25 #include <linux/property.h>
26 #include <linux/slab.h>
27 #include <linux/spinlock.h>
28 #include <linux/string.h>
29 
30 #include <linux/gpio/consumer.h>
31 #include <linux/gpio/driver.h>
32 #include <linux/gpio/forwarder.h>
33 #include <linux/gpio/machine.h>
34 
35 #include "dev-sync-probe.h"
36 
37 #define AGGREGATOR_MAX_GPIOS 512
38 #define AGGREGATOR_LEGACY_PREFIX "_sysfs"
39 
40 /*
41  * GPIO Aggregator sysfs interface
42  */
43 
44 struct gpio_aggregator {
45 	struct dev_sync_probe_data probe_data;
46 	struct config_group group;
47 	struct gpiod_lookup_table *lookups;
48 	struct mutex lock;
49 	int id;
50 
51 	/* List of gpio_aggregator_line. Always added in order */
52 	struct list_head list_head;
53 
54 	/* used by legacy sysfs interface only */
55 	bool init_via_sysfs;
56 	char args[];
57 };
58 
59 struct gpio_aggregator_line {
60 	struct config_group group;
61 	struct gpio_aggregator *parent;
62 	struct list_head entry;
63 
64 	/* Line index within the aggregator device */
65 	unsigned int idx;
66 
67 	/* Custom name for the virtual line */
68 	const char *name;
69 	/* GPIO chip label or line name */
70 	const char *key;
71 	/* Can be negative to indicate lookup by line name */
72 	int offset;
73 
74 	enum gpio_lookup_flags flags;
75 };
76 
77 struct gpio_aggregator_pdev_meta {
78 	bool init_via_sysfs;
79 };
80 
81 static DEFINE_MUTEX(gpio_aggregator_lock);	/* protects idr */
82 static DEFINE_IDR(gpio_aggregator_idr);
83 
84 static int gpio_aggregator_alloc(struct gpio_aggregator **aggr, size_t arg_size)
85 {
86 	int ret;
87 
88 	struct gpio_aggregator *new __free(kfree) = kzalloc(
89 					sizeof(*new) + arg_size, GFP_KERNEL);
90 	if (!new)
91 		return -ENOMEM;
92 
93 	scoped_guard(mutex, &gpio_aggregator_lock)
94 		ret = idr_alloc(&gpio_aggregator_idr, new, 0, 0, GFP_KERNEL);
95 
96 	if (ret < 0)
97 		return ret;
98 
99 	new->id = ret;
100 	INIT_LIST_HEAD(&new->list_head);
101 	mutex_init(&new->lock);
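	/* Transfer ownership to the caller and disarm the __free(kfree) cleanup. */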
102 	*aggr = no_free_ptr(new);
103 	return 0;
104 }
105 
106 static void gpio_aggregator_free(struct gpio_aggregator *aggr)
107 {
108 	scoped_guard(mutex, &gpio_aggregator_lock)
109 		idr_remove(&gpio_aggregator_idr, aggr->id);
110 
111 	mutex_destroy(&aggr->lock);
112 	kfree(aggr);
113 }
114 
115 static int gpio_aggregator_add_gpio(struct gpio_aggregator *aggr,
116 				    const char *key, int hwnum, unsigned int *n)
117 {
118 	struct gpiod_lookup_table *lookups;
119 
120 	lookups = krealloc(aggr->lookups, struct_size(lookups, table, *n + 2),
121 			   GFP_KERNEL);
122 	if (!lookups)
123 		return -ENOMEM;
124 
125 	lookups->table[*n] = GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);
126 
127 	(*n)++;
128 	memset(&lookups->table[*n], 0, sizeof(lookups->table[*n]));
129 
130 	aggr->lookups = lookups;
131 	return 0;
132 }
133 
134 static bool gpio_aggregator_is_active(struct gpio_aggregator *aggr)
135 {
136 	lockdep_assert_held(&aggr->lock);
137 
138 	return aggr->probe_data.pdev && platform_get_drvdata(aggr->probe_data.pdev);
139 }
140 
141 /* Only aggregators created via legacy sysfs can be "activating". */
142 static bool gpio_aggregator_is_activating(struct gpio_aggregator *aggr)
143 {
144 	lockdep_assert_held(&aggr->lock);
145 
146 	return aggr->probe_data.pdev && !platform_get_drvdata(aggr->probe_data.pdev);
147 }
148 
149 static size_t gpio_aggregator_count_lines(struct gpio_aggregator *aggr)
150 {
151 	lockdep_assert_held(&aggr->lock);
152 
153 	return list_count_nodes(&aggr->list_head);
154 }
155 
156 static struct gpio_aggregator_line *
157 gpio_aggregator_line_alloc(struct gpio_aggregator *parent, unsigned int idx,
158 			   char *key, int offset)
159 {
160 	struct gpio_aggregator_line *line;
161 
162 	line = kzalloc(sizeof(*line), GFP_KERNEL);
163 	if (!line)
164 		return ERR_PTR(-ENOMEM);
165 
166 	if (key) {
167 		line->key = kstrdup(key, GFP_KERNEL);
168 		if (!line->key) {
169 			kfree(line);
170 			return ERR_PTR(-ENOMEM);
171 		}
172 	}
173 
174 	line->flags = GPIO_LOOKUP_FLAGS_DEFAULT;
175 	line->parent = parent;
176 	line->idx = idx;
177 	line->offset = offset;
178 	INIT_LIST_HEAD(&line->entry);
179 
180 	return line;
181 }
182 
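/*
 * Keep aggr->list_head sorted by ascending line index: insert the new entry
 * right before the first existing entry with a larger idx.
 */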
183 static void gpio_aggregator_line_add(struct gpio_aggregator *aggr,
184 				     struct gpio_aggregator_line *line)
185 {
186 	struct gpio_aggregator_line *tmp;
187 
188 	lockdep_assert_held(&aggr->lock);
189 
190 	list_for_each_entry(tmp, &aggr->list_head, entry) {
191 		if (tmp->idx > line->idx) {
192 			list_add_tail(&line->entry, &tmp->entry);
193 			return;
194 		}
195 	}
196 	list_add_tail(&line->entry, &aggr->list_head);
197 }
198 
199 static void gpio_aggregator_line_del(struct gpio_aggregator *aggr,
200 				     struct gpio_aggregator_line *line)
201 {
202 	lockdep_assert_held(&aggr->lock);
203 
204 	list_del(&line->entry);
205 }
206 
207 static void gpio_aggregator_free_lines(struct gpio_aggregator *aggr)
208 {
209 	struct gpio_aggregator_line *line, *tmp;
210 
211 	list_for_each_entry_safe(line, tmp, &aggr->list_head, entry) {
212 		configfs_unregister_group(&line->group);
213 		/*
214 		 * Normally, we acquire aggr->lock within the configfs
215 		 * callback. However, in the legacy sysfs interface case,
216 		 * calling configfs_(un)register_group while holding
217 		 * aggr->lock could cause a deadlock. Fortunately, this is
218 		 * unnecessary because the new_device/delete_device path
219 		 * and the module unload path are mutually exclusive,
220 		 * thanks to an explicit try_module_get. That's why this
221 		 * minimal scoped_guard suffices.
222 		 */
223 		scoped_guard(mutex, &aggr->lock)
224 			gpio_aggregator_line_del(aggr, line);
225 		kfree(line->key);
226 		kfree(line->name);
227 		kfree(line);
228 	}
229 }
230 
231 
232 /*
233  *  GPIO Forwarder
234  */
235 
236 struct gpiochip_fwd_timing {
237 	u32 ramp_up_us;
238 	u32 ramp_down_us;
239 };
240 
241 struct gpiochip_fwd {
242 	struct gpio_chip chip;
243 	struct gpio_desc **descs;
244 	union {
245 		struct mutex mlock;	/* protects tmp[] if can_sleep */
246 		spinlock_t slock;	/* protects tmp[] if !can_sleep */
247 	};
248 	struct gpiochip_fwd_timing *delay_timings;
249 	void *data;
250 	unsigned long *valid_mask;
251 	unsigned long tmp[];		/* values and descs for multiple ops */
252 };
253 
254 #define fwd_tmp_values(fwd)	(&(fwd)->tmp[0])
255 #define fwd_tmp_descs(fwd)	((void *)&(fwd)->tmp[BITS_TO_LONGS((fwd)->chip.ngpio)])
256 
257 #define fwd_tmp_size(ngpios)	(BITS_TO_LONGS((ngpios)) + (ngpios))
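/*
 * Layout of the tmp[] scratch buffer accessed through the macros above:
 * BITS_TO_LONGS(ngpio) longs holding a bitmap of values, followed by ngpio
 * longs reused as a struct gpio_desc * array. Storing pointers in longs is
 * fine because sizeof(long) >= sizeof(void *) on all Linux architectures.
 */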
258 
259 static int gpio_fwd_request(struct gpio_chip *chip, unsigned int offset)
260 {
261 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
262 
263 	return test_bit(offset, fwd->valid_mask) ? 0 : -ENODEV;
264 }
265 
266 static int gpio_fwd_get_direction(struct gpio_chip *chip, unsigned int offset)
267 {
268 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
269 
270 	/*
271 	 * get_direction() is called during gpiochip registration; return
272 	 * -ENODEV if there is no GPIO desc for the line.
273 	 */
274 	if (!test_bit(offset, fwd->valid_mask))
275 		return -ENODEV;
276 
277 	return gpiod_get_direction(fwd->descs[offset]);
278 }
279 
280 static int gpio_fwd_direction_input(struct gpio_chip *chip, unsigned int offset)
281 {
282 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
283 
284 	return gpiod_direction_input(fwd->descs[offset]);
285 }
286 
287 static int gpio_fwd_direction_output(struct gpio_chip *chip,
288 				     unsigned int offset, int value)
289 {
290 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
291 
292 	return gpiod_direction_output(fwd->descs[offset], value);
293 }
294 
295 static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset)
296 {
297 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
298 
299 	return chip->can_sleep ? gpiod_get_value_cansleep(fwd->descs[offset])
300 			       : gpiod_get_value(fwd->descs[offset]);
301 }
302 
303 static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
304 				 unsigned long *bits)
305 {
306 	struct gpio_desc **descs = fwd_tmp_descs(fwd);
307 	unsigned long *values = fwd_tmp_values(fwd);
308 	unsigned int i, j = 0;
309 	int error;
310 
311 	bitmap_clear(values, 0, fwd->chip.ngpio);
312 	for_each_set_bit(i, mask, fwd->chip.ngpio)
313 		descs[j++] = fwd->descs[i];
314 
315 	if (fwd->chip.can_sleep)
316 		error = gpiod_get_array_value_cansleep(j, descs, NULL, values);
317 	else
318 		error = gpiod_get_array_value(j, descs, NULL, values);
319 	if (error)
320 		return error;
321 
322 	j = 0;
323 	for_each_set_bit(i, mask, fwd->chip.ngpio)
324 		__assign_bit(i, bits, test_bit(j++, values));
325 
326 	return 0;
327 }
328 
329 static int gpio_fwd_get_multiple_locked(struct gpio_chip *chip,
330 					unsigned long *mask, unsigned long *bits)
331 {
332 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
333 	unsigned long flags;
334 	int error;
335 
336 	if (chip->can_sleep) {
337 		mutex_lock(&fwd->mlock);
338 		error = gpio_fwd_get_multiple(fwd, mask, bits);
339 		mutex_unlock(&fwd->mlock);
340 	} else {
341 		spin_lock_irqsave(&fwd->slock, flags);
342 		error = gpio_fwd_get_multiple(fwd, mask, bits);
343 		spin_unlock_irqrestore(&fwd->slock, flags);
344 	}
345 
346 	return error;
347 }
348 
349 static void gpio_fwd_delay(struct gpio_chip *chip, unsigned int offset, int value)
350 {
351 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
352 	const struct gpiochip_fwd_timing *delay_timings;
353 	bool is_active_low = gpiod_is_active_low(fwd->descs[offset]);
354 	u32 delay_us;
355 
356 	delay_timings = &fwd->delay_timings[offset];
357 	if ((!is_active_low && value) || (is_active_low && !value))
358 		delay_us = delay_timings->ramp_up_us;
359 	else
360 		delay_us = delay_timings->ramp_down_us;
361 	if (!delay_us)
362 		return;
363 
364 	if (chip->can_sleep)
365 		fsleep(delay_us);
366 	else
367 		udelay(delay_us);
368 }
369 
370 static int gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
371 {
372 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
373 	int ret;
374 
375 	if (chip->can_sleep)
376 		ret = gpiod_set_value_cansleep(fwd->descs[offset], value);
377 	else
378 		ret = gpiod_set_value(fwd->descs[offset], value);
379 	if (ret)
380 		return ret;
381 
382 	if (fwd->delay_timings)
383 		gpio_fwd_delay(chip, offset, value);
384 
385 	return ret;
386 }
387 
388 static int gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
389 				 unsigned long *bits)
390 {
391 	struct gpio_desc **descs = fwd_tmp_descs(fwd);
392 	unsigned long *values = fwd_tmp_values(fwd);
393 	unsigned int i, j = 0, ret;
394 
395 	for_each_set_bit(i, mask, fwd->chip.ngpio) {
396 		__assign_bit(j, values, test_bit(i, bits));
397 		descs[j++] = fwd->descs[i];
398 	}
399 
400 	if (fwd->chip.can_sleep)
401 		ret = gpiod_set_array_value_cansleep(j, descs, NULL, values);
402 	else
403 		ret = gpiod_set_array_value(j, descs, NULL, values);
404 
405 	return ret;
406 }
407 
408 static int gpio_fwd_set_multiple_locked(struct gpio_chip *chip,
409 					unsigned long *mask, unsigned long *bits)
410 {
411 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
412 	unsigned long flags;
413 	int ret;
414 
415 	if (chip->can_sleep) {
416 		mutex_lock(&fwd->mlock);
417 		ret = gpio_fwd_set_multiple(fwd, mask, bits);
418 		mutex_unlock(&fwd->mlock);
419 	} else {
420 		spin_lock_irqsave(&fwd->slock, flags);
421 		ret = gpio_fwd_set_multiple(fwd, mask, bits);
422 		spin_unlock_irqrestore(&fwd->slock, flags);
423 	}
424 
425 	return ret;
426 }
427 
428 static int gpio_fwd_set_config(struct gpio_chip *chip, unsigned int offset,
429 			       unsigned long config)
430 {
431 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
432 
433 	return gpiod_set_config(fwd->descs[offset], config);
434 }
435 
436 static int gpio_fwd_to_irq(struct gpio_chip *chip, unsigned int offset)
437 {
438 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
439 
440 	return gpiod_to_irq(fwd->descs[offset]);
441 }
442 
443 /*
444  * The GPIO delay feature provides a way to configure platform-specific
445  * ramp-up and ramp-down delays for a GPIO line. This can serve the following
446  * purposes:
447  *   - Open-drain output using an RC filter
448  */
449 #define FWD_FEATURE_DELAY		BIT(0)
450 
451 #ifdef CONFIG_OF_GPIO
452 static int gpiochip_fwd_delay_of_xlate(struct gpio_chip *chip,
453 				       const struct of_phandle_args *gpiospec,
454 				       u32 *flags)
455 {
456 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
457 	struct gpiochip_fwd_timing *timings;
458 	u32 line;
459 
460 	if (gpiospec->args_count != chip->of_gpio_n_cells)
461 		return -EINVAL;
462 
463 	line = gpiospec->args[0];
464 	if (line >= chip->ngpio)
465 		return -EINVAL;
466 
467 	timings = &fwd->delay_timings[line];
468 	timings->ramp_up_us = gpiospec->args[1];
469 	timings->ramp_down_us = gpiospec->args[2];
470 
471 	return line;
472 }
473 
474 static int gpiochip_fwd_setup_delay_line(struct gpiochip_fwd *fwd)
475 {
476 	struct gpio_chip *chip = &fwd->chip;
477 
478 	fwd->delay_timings = devm_kcalloc(chip->parent, chip->ngpio,
479 					  sizeof(*fwd->delay_timings),
480 					  GFP_KERNEL);
481 	if (!fwd->delay_timings)
482 		return -ENOMEM;
483 
484 	chip->of_xlate = gpiochip_fwd_delay_of_xlate;
485 	chip->of_gpio_n_cells = 3;
486 
487 	return 0;
488 }
489 #else
490 static int gpiochip_fwd_setup_delay_line(struct gpiochip_fwd *fwd)
491 {
492 	return 0;
493 }
494 #endif	/* !CONFIG_OF_GPIO */
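/*
 * Hedged devicetree sketch for the delay feature (node names and values are
 * examples only, following the "gpio-delay" binding): the 3-cell of_xlate
 * above consumes <line ramp-up-us ramp-down-us>.
 *
 *	enable_delay: enable-delay {
 *		compatible = "gpio-delay";
 *		#gpio-cells = <3>;
 *		gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
 *	};
 *
 *	panel {
 *		enable-gpios = <&enable_delay 0 130000 30000>;
 *	};
 */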
495 
496 /**
497  * gpiochip_fwd_get_gpiochip - Get the GPIO chip for the GPIO forwarder
498  * @fwd: GPIO forwarder
499  *
500  * Returns: The GPIO chip for the GPIO forwarder
501  */
502 struct gpio_chip *gpiochip_fwd_get_gpiochip(struct gpiochip_fwd *fwd)
503 {
504 	return &fwd->chip;
505 }
506 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_get_gpiochip, "GPIO_FORWARDER");
507 
508 /**
509  * gpiochip_fwd_get_data - Get driver-private data for the GPIO forwarder
510  * @fwd: GPIO forwarder
511  *
512  * Returns: The driver-private data for the GPIO forwarder
513  */
514 void *gpiochip_fwd_get_data(struct gpiochip_fwd *fwd)
515 {
516 	return fwd->data;
517 }
518 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_get_data, "GPIO_FORWARDER");
519 
520 /**
521  * gpiochip_fwd_gpio_request - Request a line of the GPIO forwarder
522  * @fwd: GPIO forwarder
523  * @offset: the offset of the line to request
524  *
525  * Returns: 0 on success, or negative errno on failure.
526  */
527 int gpiochip_fwd_gpio_request(struct gpiochip_fwd *fwd, unsigned int offset)
528 {
529 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
530 
531 	return gpio_fwd_request(gc, offset);
532 }
533 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_request, "GPIO_FORWARDER");
534 
535 /**
536  * gpiochip_fwd_gpio_get_direction - Return the current direction of a GPIO forwarder line
537  * @fwd: GPIO forwarder
538  * @offset: the offset of the line
539  *
540  * Returns: 0 for output, 1 for input, or an error code in case of error.
541  */
542 int gpiochip_fwd_gpio_get_direction(struct gpiochip_fwd *fwd, unsigned int offset)
543 {
544 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
545 
546 	return gpio_fwd_get_direction(gc, offset);
547 }
548 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get_direction, "GPIO_FORWARDER");
549 
550 /**
551  * gpiochip_fwd_gpio_direction_output - Set a GPIO forwarder line direction to
552  * output
553  * @fwd: GPIO forwarder
554  * @offset: the offset of the line
555  * @value: value to set
556  *
557  * Returns: 0 on success, or negative errno on failure.
558  */
559 int gpiochip_fwd_gpio_direction_output(struct gpiochip_fwd *fwd, unsigned int offset,
560 				       int value)
561 {
562 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
563 
564 	return gpio_fwd_direction_output(gc, offset, value);
565 }
566 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_direction_output, "GPIO_FORWARDER");
567 
568 /**
569  * gpiochip_fwd_gpio_direction_input - Set a GPIO forwarder line direction to input
570  * @fwd: GPIO forwarder
571  * @offset: the offset of the line
572  *
573  * Returns: 0 on success, or negative errno on failure.
574  */
575 int gpiochip_fwd_gpio_direction_input(struct gpiochip_fwd *fwd, unsigned int offset)
576 {
577 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
578 
579 	return gpio_fwd_direction_input(gc, offset);
580 }
581 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_direction_input, "GPIO_FORWARDER");
582 
583 /**
584  * gpiochip_fwd_gpio_get - Return a GPIO forwarder line's value
585  * @fwd: GPIO forwarder
586  * @offset: the offset of the line
587  *
588  * Returns: The GPIO's logical value, i.e. taking the ACTIVE_LOW status into
589  * account, or negative errno on failure.
590  */
591 int gpiochip_fwd_gpio_get(struct gpiochip_fwd *fwd, unsigned int offset)
592 {
593 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
594 
595 	return gpio_fwd_get(gc, offset);
596 }
597 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get, "GPIO_FORWARDER");
598 
599 /**
600  * gpiochip_fwd_gpio_get_multiple - Get values for multiple GPIO forwarder lines
601  * @fwd: GPIO forwarder
602  * @mask: bit mask array; one bit per line; BITS_PER_LONG bits per word defines
603  *        which lines are to be read
604  * @bits: bit value array; one bit per line; BITS_PER_LONG bits per word will
605  *        contain the read values for the lines specified by mask
606  *
607  * Returns: 0 on success, or negative errno on failure.
608  */
609 int gpiochip_fwd_gpio_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
610 				   unsigned long *bits)
611 {
612 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
613 
614 	return gpio_fwd_get_multiple_locked(gc, mask, bits);
615 }
616 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get_multiple, "GPIO_FORWARDER");
617 
618 /**
619  * gpiochip_fwd_gpio_set - Assign value to a GPIO forwarder line.
620  * @fwd: GPIO forwarder
621  * @offset: the offset of the line
622  * @value: value to set
623  *
624  * Returns: 0 on success, or negative errno on failure.
625  */
626 int gpiochip_fwd_gpio_set(struct gpiochip_fwd *fwd, unsigned int offset, int value)
627 {
628 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
629 
630 	return gpio_fwd_set(gc, offset, value);
631 }
632 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set, "GPIO_FORWARDER");
633 
634 /**
635  * gpiochip_fwd_gpio_set_multiple - Assign values to multiple GPIO forwarder lines
636  * @fwd: GPIO forwarder
637  * @mask: bit mask array; one bit per output; BITS_PER_LONG bits per word
638  *        defines which outputs are to be changed
639  * @bits: bit value array; one bit per output; BITS_PER_LONG bits per word
640  *        defines the values the outputs specified by mask are to be set to
641  *
642  * Returns: 0 on success, or negative errno on failure.
643  */
644 int gpiochip_fwd_gpio_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
645 				   unsigned long *bits)
646 {
647 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
648 
649 	return gpio_fwd_set_multiple_locked(gc, mask, bits);
650 }
651 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set_multiple, "GPIO_FORWARDER");
652 
653 /**
654  * gpiochip_fwd_gpio_set_config - Set @config for a GPIO forwarder line
655  * @fwd: GPIO forwarder
656  * @offset: the offset of the line
657  * @config: Same packed config format as generic pinconf
658  *
659  * Returns: 0 on success, %-ENOTSUPP if the controller doesn't support setting
660  * the configuration.
661  */
662 int gpiochip_fwd_gpio_set_config(struct gpiochip_fwd *fwd, unsigned int offset,
663 				 unsigned long config)
664 {
665 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
666 
667 	return gpio_fwd_set_config(gc, offset, config);
668 }
669 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set_config, "GPIO_FORWARDER");
670 
671 /**
672  * gpiochip_fwd_gpio_to_irq - Return the IRQ corresponding to a GPIO forwarder line
673  * @fwd: GPIO forwarder
674  * @offset: the offset of the line
675  *
676  * Returns: The Linux IRQ corresponding to the passed line, or an error code in
677  * case of error.
678  */
679 int gpiochip_fwd_gpio_to_irq(struct gpiochip_fwd *fwd, unsigned int offset)
680 {
681 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
682 
683 	return gpio_fwd_to_irq(gc, offset);
684 }
685 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_to_irq, "GPIO_FORWARDER");
686 
687 /**
688  * devm_gpiochip_fwd_alloc - Allocate and initialize a new GPIO forwarder
689  * @dev: Parent device pointer
690  * @ngpios: Number of GPIOs in the forwarder
691  *
692  * Returns: An opaque object pointer, or an ERR_PTR()-encoded negative error
693  * code on failure.
694  */
695 struct gpiochip_fwd *devm_gpiochip_fwd_alloc(struct device *dev,
696 					     unsigned int ngpios)
697 {
698 	struct gpiochip_fwd *fwd;
699 	struct gpio_chip *chip;
700 
701 	fwd = devm_kzalloc(dev, struct_size(fwd, tmp, fwd_tmp_size(ngpios)), GFP_KERNEL);
702 	if (!fwd)
703 		return ERR_PTR(-ENOMEM);
704 
705 	fwd->descs = devm_kcalloc(dev, ngpios, sizeof(*fwd->descs), GFP_KERNEL);
706 	if (!fwd->descs)
707 		return ERR_PTR(-ENOMEM);
708 
709 	fwd->valid_mask = devm_bitmap_zalloc(dev, ngpios, GFP_KERNEL);
710 	if (!fwd->valid_mask)
711 		return ERR_PTR(-ENOMEM);
712 
713 	chip = &fwd->chip;
714 
715 	chip->label = dev_name(dev);
716 	chip->parent = dev;
717 	chip->owner = THIS_MODULE;
718 	chip->request = gpio_fwd_request;
719 	chip->get_direction = gpio_fwd_get_direction;
720 	chip->direction_input = gpio_fwd_direction_input;
721 	chip->direction_output = gpio_fwd_direction_output;
722 	chip->get = gpio_fwd_get;
723 	chip->get_multiple = gpio_fwd_get_multiple_locked;
724 	chip->set = gpio_fwd_set;
725 	chip->set_multiple = gpio_fwd_set_multiple_locked;
726 	chip->set_config = gpio_fwd_set_config;
727 	chip->to_irq = gpio_fwd_to_irq;
728 	chip->base = -1;
729 	chip->ngpio = ngpios;
730 
731 	return fwd;
732 }
733 EXPORT_SYMBOL_NS_GPL(devm_gpiochip_fwd_alloc, "GPIO_FORWARDER");
734 
735 /**
736  * gpiochip_fwd_desc_add - Add a GPIO desc in the forwarder
737  * @fwd: GPIO forwarder
738  * @desc: GPIO descriptor to register
739  * @offset: offset for the GPIO in the forwarder
740  *
741  * Returns: 0 on success, or negative errno on failure.
742  */
743 int gpiochip_fwd_desc_add(struct gpiochip_fwd *fwd, struct gpio_desc *desc,
744 			  unsigned int offset)
745 {
746 	struct gpio_chip *chip = &fwd->chip;
747 
748 	if (offset >= chip->ngpio)
749 		return -EINVAL;
750 
751 	if (test_and_set_bit(offset, fwd->valid_mask))
752 		return -EEXIST;
753 
754 	/*
755 	 * If any of the GPIO lines are sleeping, then the entire forwarder
756 	 * will be sleeping.
757 	 */
758 	if (gpiod_cansleep(desc))
759 		chip->can_sleep = true;
760 
761 	fwd->descs[offset] = desc;
762 
763 	dev_dbg(chip->parent, "%u => gpio %d irq %d\n", offset,
764 		desc_to_gpio(desc), gpiod_to_irq(desc));
765 
766 	return 0;
767 }
768 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_desc_add, "GPIO_FORWARDER");
769 
770 /**
771  * gpiochip_fwd_desc_free - Remove a GPIO desc from the forwarder
772  * @fwd: GPIO forwarder
773  * @offset: offset of GPIO desc to remove
774  */
775 void gpiochip_fwd_desc_free(struct gpiochip_fwd *fwd, unsigned int offset)
776 {
777 	if (test_and_clear_bit(offset, fwd->valid_mask))
778 		gpiod_put(fwd->descs[offset]);
779 }
780 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_desc_free, "GPIO_FORWARDER");
781 
782 /**
783  * gpiochip_fwd_register - Register a GPIO forwarder
784  * @fwd: GPIO forwarder
785  * @data: driver-private data associated with this forwarder
786  *
787  * Returns: 0 on success, or negative errno on failure.
788  */
789 int gpiochip_fwd_register(struct gpiochip_fwd *fwd, void *data)
790 {
791 	struct gpio_chip *chip = &fwd->chip;
792 
793 	/*
794 	 * Some GPIO descs were not registered. They will be registered at
795 	 * runtime, so we have to assume they can sleep.
796 	 */
797 	if (!bitmap_full(fwd->valid_mask, chip->ngpio))
798 		chip->can_sleep = true;
799 
800 	if (chip->can_sleep)
801 		mutex_init(&fwd->mlock);
802 	else
803 		spin_lock_init(&fwd->slock);
804 
805 	fwd->data = data;
806 
807 	return devm_gpiochip_add_data(chip->parent, chip, fwd);
808 }
809 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_register, "GPIO_FORWARDER");
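/*
 * Minimal usage sketch for the exported forwarder API above (not compiled
 * here; the function, device and descriptors are hypothetical). A driver
 * importing the GPIO_FORWARDER namespace would typically allocate, populate
 * and register a forwarder like this:
 *
 *	static int example_expose_two_lines(struct device *dev,
 *					    struct gpio_desc *d0,
 *					    struct gpio_desc *d1)
 *	{
 *		struct gpiochip_fwd *fwd;
 *		int ret;
 *
 *		fwd = devm_gpiochip_fwd_alloc(dev, 2);
 *		if (IS_ERR(fwd))
 *			return PTR_ERR(fwd);
 *
 *		ret = gpiochip_fwd_desc_add(fwd, d0, 0);
 *		if (ret)
 *			return ret;
 *		ret = gpiochip_fwd_desc_add(fwd, d1, 1);
 *		if (ret)
 *			return ret;
 *
 *		return gpiochip_fwd_register(fwd, NULL);
 *	}
 */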
810 
811 /**
812  * gpiochip_fwd_create() - Create a new GPIO forwarder
813  * @dev: Parent device pointer
814  * @ngpios: Number of GPIOs in the forwarder.
815  * @descs: Array containing the GPIO descriptors to forward to.
816  *         This array must contain @ngpios entries, and can be deallocated
817  *         as the forwarder has its own array.
818  * @features: Bitwise ORed features as defined with FWD_FEATURE_*.
819  *
820  * This function creates a new gpiochip, which forwards all GPIO operations to
821  * the passed GPIO descriptors.
822  *
823  * Return: An opaque object pointer, or an ERR_PTR()-encoded negative error
824  *         code on failure.
825  */
826 static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
827 						unsigned int ngpios,
828 						struct gpio_desc *descs[],
829 						unsigned long features)
830 {
831 	struct gpiochip_fwd *fwd;
832 	unsigned int i;
833 	int error;
834 
835 	fwd = devm_gpiochip_fwd_alloc(dev, ngpios);
836 	if (IS_ERR(fwd))
837 		return fwd;
838 
839 	for (i = 0; i < ngpios; i++) {
840 		error = gpiochip_fwd_desc_add(fwd, descs[i], i);
841 		if (error)
842 			return ERR_PTR(error);
843 	}
844 
845 	if (features & FWD_FEATURE_DELAY) {
846 		error = gpiochip_fwd_setup_delay_line(fwd);
847 		if (error)
848 			return ERR_PTR(error);
849 	}
850 
851 	error = gpiochip_fwd_register(fwd, NULL);
852 	if (error)
853 		return ERR_PTR(error);
854 
855 	return fwd;
856 }
857 
858 /*
859  * Configfs interface
860  */
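/*
 * Hedged shell sketch of this interface (the configfs mount point and the
 * aggregator name "my-agg" are assumptions): each "lineY" group takes a GPIO
 * chip label or line name in "key" plus an optional "offset" and "name", and
 * the aggregator device is instantiated by writing 1 to "live".
 *
 *	mkdir /sys/kernel/config/gpio-aggregator/my-agg
 *	mkdir /sys/kernel/config/gpio-aggregator/my-agg/line0
 *	echo gpiochip0 > /sys/kernel/config/gpio-aggregator/my-agg/line0/key
 *	echo 6 > /sys/kernel/config/gpio-aggregator/my-agg/line0/offset
 *	echo 1 > /sys/kernel/config/gpio-aggregator/my-agg/live
 */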
861 
862 static struct gpio_aggregator *
863 to_gpio_aggregator(struct config_item *item)
864 {
865 	struct config_group *group = to_config_group(item);
866 
867 	return container_of(group, struct gpio_aggregator, group);
868 }
869 
870 static struct gpio_aggregator_line *
871 to_gpio_aggregator_line(struct config_item *item)
872 {
873 	struct config_group *group = to_config_group(item);
874 
875 	return container_of(group, struct gpio_aggregator_line, group);
876 }
877 
878 static struct fwnode_handle *
879 gpio_aggregator_make_device_sw_node(struct gpio_aggregator *aggr)
880 {
881 	struct property_entry properties[2];
882 	struct gpio_aggregator_line *line;
883 	size_t num_lines;
884 	int n = 0;
885 
886 	memset(properties, 0, sizeof(properties));
887 
888 	num_lines = gpio_aggregator_count_lines(aggr);
889 	if (num_lines == 0)
890 		return NULL;
891 
892 	const char **line_names __free(kfree) = kcalloc(
893 				num_lines, sizeof(*line_names), GFP_KERNEL);
894 	if (!line_names)
895 		return ERR_PTR(-ENOMEM);
896 
897 	/* The list is always sorted as new elements are inserted in order. */
898 	list_for_each_entry(line, &aggr->list_head, entry)
899 		line_names[n++] = line->name ?: "";
900 
901 	properties[0] = PROPERTY_ENTRY_STRING_ARRAY_LEN(
902 					"gpio-line-names",
903 					line_names, num_lines);
904 
905 	return fwnode_create_software_node(properties, NULL);
906 }
907 
908 static int gpio_aggregator_activate(struct gpio_aggregator *aggr)
909 {
910 	struct platform_device_info pdevinfo;
911 	struct gpio_aggregator_line *line;
912 	struct fwnode_handle *swnode;
913 	unsigned int n = 0;
914 	int ret = 0;
915 
916 	if (gpio_aggregator_count_lines(aggr) == 0)
917 		return -EINVAL;
918 
919 	aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
920 				GFP_KERNEL);
921 	if (!aggr->lookups)
922 		return -ENOMEM;
923 
924 	swnode = gpio_aggregator_make_device_sw_node(aggr);
925 	if (IS_ERR(swnode)) {
926 		ret = PTR_ERR(swnode);
927 		goto err_remove_lookups;
928 	}
929 
930 	memset(&pdevinfo, 0, sizeof(pdevinfo));
931 	pdevinfo.name = DRV_NAME;
932 	pdevinfo.id = aggr->id;
933 	pdevinfo.fwnode = swnode;
934 
935 	/* The list is always sorted as new elements are inserted in order. */
936 	list_for_each_entry(line, &aggr->list_head, entry) {
937 		/*
938 		 * - Either GPIO chip label or line name must be configured
939 		 *   (i.e. line->key must be non-NULL)
940 		 * - Line directories must be named with sequential numeric
941 		 *   suffixes starting from 0. (i.e. ./line0, ./line1, ...)
942 		 */
943 		if (!line->key || line->idx != n) {
944 			ret = -EINVAL;
945 			goto err_remove_swnode;
946 		}
947 
948 		if (line->offset < 0)
949 			ret = gpio_aggregator_add_gpio(aggr, line->key,
950 						       U16_MAX, &n);
951 		else
952 			ret = gpio_aggregator_add_gpio(aggr, line->key,
953 						       line->offset, &n);
954 		if (ret)
955 			goto err_remove_swnode;
956 	}
957 
958 	aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, aggr->id);
959 	if (!aggr->lookups->dev_id) {
960 		ret = -ENOMEM;
961 		goto err_remove_swnode;
962 	}
963 
964 	gpiod_add_lookup_table(aggr->lookups);
965 
966 	ret = dev_sync_probe_register(&aggr->probe_data, &pdevinfo);
967 	if (ret)
968 		goto err_remove_lookup_table;
969 
970 	return 0;
971 
972 err_remove_lookup_table:
973 	kfree(aggr->lookups->dev_id);
974 	gpiod_remove_lookup_table(aggr->lookups);
975 err_remove_swnode:
976 	fwnode_remove_software_node(swnode);
977 err_remove_lookups:
978 	kfree(aggr->lookups);
979 
980 	return ret;
981 }
982 
983 static void gpio_aggregator_deactivate(struct gpio_aggregator *aggr)
984 {
985 	dev_sync_probe_unregister(&aggr->probe_data);
986 	gpiod_remove_lookup_table(aggr->lookups);
987 	kfree(aggr->lookups->dev_id);
988 	kfree(aggr->lookups);
989 }
990 
991 static void gpio_aggregator_lockup_configfs(struct gpio_aggregator *aggr,
992 					    bool lock)
993 {
994 	struct configfs_subsystem *subsys = aggr->group.cg_subsys;
995 	struct gpio_aggregator_line *line;
996 
997 	/*
998 	 * The device only needs to depend on leaf lines. This is
999 	 * sufficient to lock up all the configfs entries that the
1000 	 * instantiated, alive device depends on.
1001 	 */
1002 	list_for_each_entry(line, &aggr->list_head, entry) {
1003 		if (lock)
1004 			configfs_depend_item_unlocked(
1005 					subsys, &line->group.cg_item);
1006 		else
1007 			configfs_undepend_item_unlocked(
1008 					&line->group.cg_item);
1009 	}
1010 }
1011 
1012 static ssize_t
1013 gpio_aggregator_line_key_show(struct config_item *item, char *page)
1014 {
1015 	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
1016 	struct gpio_aggregator *aggr = line->parent;
1017 
1018 	guard(mutex)(&aggr->lock);
1019 
1020 	return sysfs_emit(page, "%s\n", line->key ?: "");
1021 }
1022 
1023 static ssize_t
1024 gpio_aggregator_line_key_store(struct config_item *item, const char *page,
1025 			       size_t count)
1026 {
1027 	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
1028 	struct gpio_aggregator *aggr = line->parent;
1029 
1030 	char *key __free(kfree) = kstrndup(skip_spaces(page), count,
1031 					   GFP_KERNEL);
1032 	if (!key)
1033 		return -ENOMEM;
1034 
1035 	strim(key);
1036 
1037 	guard(mutex)(&aggr->lock);
1038 
1039 	if (gpio_aggregator_is_activating(aggr) ||
1040 	    gpio_aggregator_is_active(aggr))
1041 		return -EBUSY;
1042 
1043 	kfree(line->key);
1044 	line->key = no_free_ptr(key);
1045 
1046 	return count;
1047 }
1048 CONFIGFS_ATTR(gpio_aggregator_line_, key);
1049 
1050 static ssize_t
1051 gpio_aggregator_line_name_show(struct config_item *item, char *page)
1052 {
1053 	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
1054 	struct gpio_aggregator *aggr = line->parent;
1055 
1056 	guard(mutex)(&aggr->lock);
1057 
1058 	return sysfs_emit(page, "%s\n", line->name ?: "");
1059 }
1060 
1061 static ssize_t
1062 gpio_aggregator_line_name_store(struct config_item *item, const char *page,
1063 				size_t count)
1064 {
1065 	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
1066 	struct gpio_aggregator *aggr = line->parent;
1067 
1068 	char *name __free(kfree) = kstrndup(skip_spaces(page), count,
1069 					    GFP_KERNEL);
1070 	if (!name)
1071 		return -ENOMEM;
1072 
1073 	strim(name);
1074 
1075 	guard(mutex)(&aggr->lock);
1076 
1077 	if (gpio_aggregator_is_activating(aggr) ||
1078 	    gpio_aggregator_is_active(aggr))
1079 		return -EBUSY;
1080 
1081 	kfree(line->name);
1082 	line->name = no_free_ptr(name);
1083 
1084 	return count;
1085 }
1086 CONFIGFS_ATTR(gpio_aggregator_line_, name);
1087 
1088 static ssize_t
1089 gpio_aggregator_line_offset_show(struct config_item *item, char *page)
1090 {
1091 	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
1092 	struct gpio_aggregator *aggr = line->parent;
1093 
1094 	guard(mutex)(&aggr->lock);
1095 
1096 	return sysfs_emit(page, "%d\n", line->offset);
1097 }
1098 
1099 static ssize_t
1100 gpio_aggregator_line_offset_store(struct config_item *item, const char *page,
1101 				  size_t count)
1102 {
1103 	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
1104 	struct gpio_aggregator *aggr = line->parent;
1105 	int offset, ret;
1106 
1107 	ret = kstrtoint(page, 0, &offset);
1108 	if (ret)
1109 		return ret;
1110 
1111 	/*
1112 	 * When offset == -1, 'key' represents a line name to lookup.
1113 	 * When 0 <= offset < 65535, 'key' represents the label of the chip with
1114 	 * the 'offset' value representing the line within that chip.
1115 	 *
1116 	 * GPIOLIB uses the U16_MAX value to indicate lookup by line name so
1117 	 * the greatest offset we can accept is (U16_MAX - 1).
1118 	 */
1119 	if (offset > (U16_MAX - 1) || offset < -1)
1120 		return -EINVAL;
1121 
1122 	guard(mutex)(&aggr->lock);
1123 
1124 	if (gpio_aggregator_is_activating(aggr) ||
1125 	    gpio_aggregator_is_active(aggr))
1126 		return -EBUSY;
1127 
1128 	line->offset = offset;
1129 
1130 	return count;
1131 }
1132 CONFIGFS_ATTR(gpio_aggregator_line_, offset);
1133 
1134 static struct configfs_attribute *gpio_aggregator_line_attrs[] = {
1135 	&gpio_aggregator_line_attr_key,
1136 	&gpio_aggregator_line_attr_name,
1137 	&gpio_aggregator_line_attr_offset,
1138 	NULL
1139 };
1140 
1141 static ssize_t
1142 gpio_aggregator_device_dev_name_show(struct config_item *item, char *page)
1143 {
1144 	struct gpio_aggregator *aggr = to_gpio_aggregator(item);
1145 	struct platform_device *pdev;
1146 
1147 	guard(mutex)(&aggr->lock);
1148 
1149 	pdev = aggr->probe_data.pdev;
1150 	if (pdev)
1151 		return sysfs_emit(page, "%s\n", dev_name(&pdev->dev));
1152 
1153 	return sysfs_emit(page, "%s.%d\n", DRV_NAME, aggr->id);
1154 }
1155 CONFIGFS_ATTR_RO(gpio_aggregator_device_, dev_name);
1156 
1157 static ssize_t
1158 gpio_aggregator_device_live_show(struct config_item *item, char *page)
1159 {
1160 	struct gpio_aggregator *aggr = to_gpio_aggregator(item);
1161 
1162 	guard(mutex)(&aggr->lock);
1163 
1164 	return sysfs_emit(page, "%c\n",
1165 			  gpio_aggregator_is_active(aggr) ? '1' : '0');
1166 }
1167 
1168 static ssize_t
1169 gpio_aggregator_device_live_store(struct config_item *item, const char *page,
1170 				  size_t count)
1171 {
1172 	struct gpio_aggregator *aggr = to_gpio_aggregator(item);
1173 	int ret = 0;
1174 	bool live;
1175 
1176 	ret = kstrtobool(page, &live);
1177 	if (ret)
1178 		return ret;
1179 
1180 	if (!try_module_get(THIS_MODULE))
1181 		return -ENOENT;
1182 
1183 	if (live && !aggr->init_via_sysfs)
1184 		gpio_aggregator_lockup_configfs(aggr, true);
1185 
1186 	scoped_guard(mutex, &aggr->lock) {
1187 		if (gpio_aggregator_is_activating(aggr) ||
1188 		    (live == gpio_aggregator_is_active(aggr)))
1189 			ret = -EPERM;
1190 		else if (live)
1191 			ret = gpio_aggregator_activate(aggr);
1192 		else
1193 			gpio_aggregator_deactivate(aggr);
1194 	}
1195 
1196 	/*
1197 	 * Undepend is required only if device disablement (live == 0)
1198 	 * succeeds or if device enablement (live == 1) fails.
1199 	 */
1200 	if (live == !!ret && !aggr->init_via_sysfs)
1201 		gpio_aggregator_lockup_configfs(aggr, false);
1202 
1203 	module_put(THIS_MODULE);
1204 
1205 	return ret ?: count;
1206 }
1207 CONFIGFS_ATTR(gpio_aggregator_device_, live);
1208 
1209 static struct configfs_attribute *gpio_aggregator_device_attrs[] = {
1210 	&gpio_aggregator_device_attr_dev_name,
1211 	&gpio_aggregator_device_attr_live,
1212 	NULL
1213 };
1214 
1215 static void
1216 gpio_aggregator_line_release(struct config_item *item)
1217 {
1218 	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
1219 	struct gpio_aggregator *aggr = line->parent;
1220 
1221 	guard(mutex)(&aggr->lock);
1222 
1223 	gpio_aggregator_line_del(aggr, line);
1224 	kfree(line->key);
1225 	kfree(line->name);
1226 	kfree(line);
1227 }
1228 
1229 static struct configfs_item_operations gpio_aggregator_line_item_ops = {
1230 	.release	= gpio_aggregator_line_release,
1231 };
1232 
1233 static const struct config_item_type gpio_aggregator_line_type = {
1234 	.ct_item_ops	= &gpio_aggregator_line_item_ops,
1235 	.ct_attrs	= gpio_aggregator_line_attrs,
1236 	.ct_owner	= THIS_MODULE,
1237 };
1238 
1239 static void gpio_aggregator_device_release(struct config_item *item)
1240 {
1241 	struct gpio_aggregator *aggr = to_gpio_aggregator(item);
1242 
1243 	/*
1244 	 * At this point, aggr is neither active nor activating,
1245 	 * so calling gpio_aggregator_deactivate() is always unnecessary.
1246 	 */
1247 	gpio_aggregator_free(aggr);
1248 }
1249 
1250 static struct configfs_item_operations gpio_aggregator_device_item_ops = {
1251 	.release	= gpio_aggregator_device_release,
1252 };
1253 
1254 static struct config_group *
1255 gpio_aggregator_device_make_group(struct config_group *group, const char *name)
1256 {
1257 	struct gpio_aggregator *aggr = to_gpio_aggregator(&group->cg_item);
1258 	struct gpio_aggregator_line *line;
1259 	unsigned int idx;
1260 	int ret, nchar;
1261 
1262 	ret = sscanf(name, "line%u%n", &idx, &nchar);
1263 	if (ret != 1 || nchar != strlen(name))
1264 		return ERR_PTR(-EINVAL);
1265 
1266 	if (aggr->init_via_sysfs)
1267 		/*
1268 		 * Aggregators created via legacy sysfs interface are exposed as
1269 		 * default groups, which means rmdir(2) is prohibited for them.
1270 		 * For simplicity, and to avoid confusion, we also prohibit
1271 		 * mkdir(2).
1272 		 */
1273 		return ERR_PTR(-EPERM);
1274 
1275 	guard(mutex)(&aggr->lock);
1276 
1277 	if (gpio_aggregator_is_active(aggr))
1278 		return ERR_PTR(-EBUSY);
1279 
1280 	list_for_each_entry(line, &aggr->list_head, entry)
1281 		if (line->idx == idx)
1282 			return ERR_PTR(-EINVAL);
1283 
1284 	line = gpio_aggregator_line_alloc(aggr, idx, NULL, -1);
1285 	if (IS_ERR(line))
1286 		return ERR_CAST(line);
1287 
1288 	config_group_init_type_name(&line->group, name, &gpio_aggregator_line_type);
1289 
1290 	gpio_aggregator_line_add(aggr, line);
1291 
1292 	return &line->group;
1293 }
1294 
1295 static struct configfs_group_operations gpio_aggregator_device_group_ops = {
1296 	.make_group	= gpio_aggregator_device_make_group,
1297 };
1298 
1299 static const struct config_item_type gpio_aggregator_device_type = {
1300 	.ct_group_ops	= &gpio_aggregator_device_group_ops,
1301 	.ct_item_ops	= &gpio_aggregator_device_item_ops,
1302 	.ct_attrs	= gpio_aggregator_device_attrs,
1303 	.ct_owner	= THIS_MODULE,
1304 };
1305 
1306 static struct config_group *
1307 gpio_aggregator_make_group(struct config_group *group, const char *name)
1308 {
1309 	struct gpio_aggregator *aggr;
1310 	int ret;
1311 
1312 	/*
1313 	 * The "_sysfs" prefix is reserved for auto-generated config groups
1314 	 * for devices created via the legacy sysfs interface.
1315 	 */
1316 	if (strncmp(name, AGGREGATOR_LEGACY_PREFIX,
1317 		    sizeof(AGGREGATOR_LEGACY_PREFIX) - 1) == 0)
1318 		return ERR_PTR(-EINVAL);
1319 
1320 	/* arg space is unneeded */
1321 	ret = gpio_aggregator_alloc(&aggr, 0);
1322 	if (ret)
1323 		return ERR_PTR(ret);
1324 
1325 	config_group_init_type_name(&aggr->group, name, &gpio_aggregator_device_type);
1326 	dev_sync_probe_init(&aggr->probe_data);
1327 
1328 	return &aggr->group;
1329 }
1330 
1331 static struct configfs_group_operations gpio_aggregator_group_ops = {
1332 	.make_group	= gpio_aggregator_make_group,
1333 };
1334 
1335 static const struct config_item_type gpio_aggregator_type = {
1336 	.ct_group_ops	= &gpio_aggregator_group_ops,
1337 	.ct_owner	= THIS_MODULE,
1338 };
1339 
1340 static struct configfs_subsystem gpio_aggregator_subsys = {
1341 	.su_group = {
1342 		.cg_item = {
1343 			.ci_namebuf	= DRV_NAME,
1344 			.ci_type	= &gpio_aggregator_type,
1345 		},
1346 	},
1347 };
1348 
1349 /*
1350  * Sysfs interface
1351  */
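/*
 * Hedged shell sketch of the legacy interface parsed below (chip labels and
 * offsets are examples only): new_device takes whitespace-separated
 * "<chip label> <offsets>" and/or "<line name>" tokens, and delete_device
 * takes the resulting device name.
 *
 *	echo 'e6052000.gpio 19 e6050000.gpio 20-21' \
 *		> /sys/bus/platform/drivers/gpio-aggregator/new_device
 *	echo gpio-aggregator.0 \
 *		> /sys/bus/platform/drivers/gpio-aggregator/delete_device
 */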
1352 static int gpio_aggregator_parse(struct gpio_aggregator *aggr)
1353 {
1354 	char *args = skip_spaces(aggr->args);
1355 	struct gpio_aggregator_line *line;
1356 	char name[CONFIGFS_ITEM_NAME_LEN];
1357 	char *key, *offsets, *p;
1358 	unsigned int i, n = 0;
1359 	int error = 0;
1360 
1361 	unsigned long *bitmap __free(bitmap) =
1362 			bitmap_alloc(AGGREGATOR_MAX_GPIOS, GFP_KERNEL);
1363 	if (!bitmap)
1364 		return -ENOMEM;
1365 
1366 	args = next_arg(args, &key, &p);
1367 	while (*args) {
1368 		args = next_arg(args, &offsets, &p);
1369 
1370 		p = get_options(offsets, 0, &error);
1371 		if (error == 0 || *p) {
1372 			/* Named GPIO line */
1373 			scnprintf(name, sizeof(name), "line%u", n);
1374 			line = gpio_aggregator_line_alloc(aggr, n, key, -1);
1375 			if (IS_ERR(line)) {
1376 				error = PTR_ERR(line);
1377 				goto err;
1378 			}
1379 			config_group_init_type_name(&line->group, name,
1380 						    &gpio_aggregator_line_type);
1381 			error = configfs_register_group(&aggr->group,
1382 							&line->group);
1383 			if (error)
1384 				goto err;
1385 			scoped_guard(mutex, &aggr->lock)
1386 				gpio_aggregator_line_add(aggr, line);
1387 
1388 			error = gpio_aggregator_add_gpio(aggr, key, U16_MAX, &n);
1389 			if (error)
1390 				goto err;
1391 
1392 			key = offsets;
1393 			continue;
1394 		}
1395 
1396 		/* GPIO chip + offset(s) */
1397 		error = bitmap_parselist(offsets, bitmap, AGGREGATOR_MAX_GPIOS);
1398 		if (error) {
1399 			pr_err("Cannot parse %s: %d\n", offsets, error);
1400 			goto err;
1401 		}
1402 
1403 		for_each_set_bit(i, bitmap, AGGREGATOR_MAX_GPIOS) {
1404 			scnprintf(name, sizeof(name), "line%u", n);
1405 			line = gpio_aggregator_line_alloc(aggr, n, key, i);
1406 			if (IS_ERR(line)) {
1407 				error = PTR_ERR(line);
1408 				goto err;
1409 			}
1410 			config_group_init_type_name(&line->group, name,
1411 						    &gpio_aggregator_line_type);
1412 			error = configfs_register_group(&aggr->group,
1413 							&line->group);
1414 			if (error)
1415 				goto err;
1416 			scoped_guard(mutex, &aggr->lock)
1417 				gpio_aggregator_line_add(aggr, line);
1418 
1419 			error = gpio_aggregator_add_gpio(aggr, key, i, &n);
1420 			if (error)
1421 				goto err;
1422 		}
1423 
1424 		args = next_arg(args, &key, &p);
1425 	}
1426 
1427 	if (!n) {
1428 		pr_err("No GPIOs specified\n");
1429 		error = -EINVAL;
1430 		goto err;
1431 	}
1432 
1433 	return 0;
1434 
1435 err:
1436 	gpio_aggregator_free_lines(aggr);
1437 	return error;
1438 }
1439 
1440 static ssize_t gpio_aggregator_new_device_store(struct device_driver *driver,
1441 						const char *buf, size_t count)
1442 {
1443 	struct gpio_aggregator_pdev_meta meta = { .init_via_sysfs = true };
1444 	char name[CONFIGFS_ITEM_NAME_LEN];
1445 	struct gpio_aggregator *aggr;
1446 	struct platform_device *pdev;
1447 	int res;
1448 
1449 	if (!try_module_get(THIS_MODULE))
1450 		return -ENOENT;
1451 
1452 	/* kernfs guarantees string termination, so count + 1 is safe */
1453 	res = gpio_aggregator_alloc(&aggr, count + 1);
1454 	if (res)
1455 		goto put_module;
1456 
1457 	memcpy(aggr->args, buf, count + 1);
1458 
1459 	aggr->init_via_sysfs = true;
1460 	aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
1461 				GFP_KERNEL);
1462 	if (!aggr->lookups) {
1463 		res = -ENOMEM;
1464 		goto free_ga;
1465 	}
1466 
1467 	aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, aggr->id);
1468 	if (!aggr->lookups->dev_id) {
1469 		res = -ENOMEM;
1470 		goto free_table;
1471 	}
1472 
1473 	scnprintf(name, sizeof(name), "%s.%d", AGGREGATOR_LEGACY_PREFIX, aggr->id);
1474 	config_group_init_type_name(&aggr->group, name, &gpio_aggregator_device_type);
1475 
1476 	/*
1477 	 * Since the device created by sysfs might be toggled via configfs
1478 	 * 'live' attribute later, this initialization is needed.
1479 	 */
1480 	dev_sync_probe_init(&aggr->probe_data);
1481 
1482 	/* Expose to configfs */
1483 	res = configfs_register_group(&gpio_aggregator_subsys.su_group,
1484 				      &aggr->group);
1485 	if (res)
1486 		goto free_dev_id;
1487 
1488 	res = gpio_aggregator_parse(aggr);
1489 	if (res)
1490 		goto unregister_group;
1491 
1492 	gpiod_add_lookup_table(aggr->lookups);
1493 
1494 	pdev = platform_device_register_data(NULL, DRV_NAME, aggr->id, &meta, sizeof(meta));
1495 	if (IS_ERR(pdev)) {
1496 		res = PTR_ERR(pdev);
1497 		goto remove_table;
1498 	}
1499 
1500 	aggr->probe_data.pdev = pdev;
1501 	module_put(THIS_MODULE);
1502 	return count;
1503 
1504 remove_table:
1505 	gpiod_remove_lookup_table(aggr->lookups);
1506 unregister_group:
1507 	configfs_unregister_group(&aggr->group);
1508 free_dev_id:
1509 	kfree(aggr->lookups->dev_id);
1510 free_table:
1511 	kfree(aggr->lookups);
1512 free_ga:
1513 	gpio_aggregator_free(aggr);
1514 put_module:
1515 	module_put(THIS_MODULE);
1516 	return res;
1517 }
1518 
1519 static struct driver_attribute driver_attr_gpio_aggregator_new_device =
1520 	__ATTR(new_device, 0200, NULL, gpio_aggregator_new_device_store);
1521 
1522 static void gpio_aggregator_destroy(struct gpio_aggregator *aggr)
1523 {
1524 	scoped_guard(mutex, &aggr->lock) {
1525 		if (gpio_aggregator_is_activating(aggr) ||
1526 		    gpio_aggregator_is_active(aggr))
1527 			gpio_aggregator_deactivate(aggr);
1528 	}
1529 	gpio_aggregator_free_lines(aggr);
1530 	configfs_unregister_group(&aggr->group);
1531 	kfree(aggr);
1532 }
1533 
1534 static ssize_t gpio_aggregator_delete_device_store(struct device_driver *driver,
1535 						   const char *buf, size_t count)
1536 {
1537 	struct gpio_aggregator *aggr;
1538 	unsigned int id;
1539 	int error;
1540 
1541 	if (!str_has_prefix(buf, DRV_NAME "."))
1542 		return -EINVAL;
1543 
1544 	error = kstrtouint(buf + strlen(DRV_NAME "."), 10, &id);
1545 	if (error)
1546 		return error;
1547 
1548 	if (!try_module_get(THIS_MODULE))
1549 		return -ENOENT;
1550 
1551 	mutex_lock(&gpio_aggregator_lock);
1552 	aggr = idr_find(&gpio_aggregator_idr, id);
1553 	/*
1554 	 * For simplicity, devices created via configfs cannot be deleted
1555 	 * via sysfs.
1556 	 */
1557 	if (aggr && aggr->init_via_sysfs)
1558 		idr_remove(&gpio_aggregator_idr, id);
1559 	else {
1560 		mutex_unlock(&gpio_aggregator_lock);
1561 		module_put(THIS_MODULE);
1562 		return -ENOENT;
1563 	}
1564 	mutex_unlock(&gpio_aggregator_lock);
1565 
1566 	gpio_aggregator_destroy(aggr);
1567 	module_put(THIS_MODULE);
1568 	return count;
1569 }
1570 
1571 static struct driver_attribute driver_attr_gpio_aggregator_delete_device =
1572 	__ATTR(delete_device, 0200, NULL, gpio_aggregator_delete_device_store);
1573 
1574 static struct attribute *gpio_aggregator_attrs[] = {
1575 	&driver_attr_gpio_aggregator_new_device.attr,
1576 	&driver_attr_gpio_aggregator_delete_device.attr,
1577 	NULL
1578 };
1579 ATTRIBUTE_GROUPS(gpio_aggregator);
1580 
1581 /*
1582  *  GPIO Aggregator platform device
1583  */
1584 
1585 static int gpio_aggregator_probe(struct platform_device *pdev)
1586 {
1587 	struct gpio_aggregator_pdev_meta *meta;
1588 	struct device *dev = &pdev->dev;
1589 	bool init_via_sysfs = false;
1590 	struct gpio_desc **descs;
1591 	struct gpiochip_fwd *fwd;
1592 	unsigned long features;
1593 	int i, n;
1594 
1595 	n = gpiod_count(dev, NULL);
1596 	if (n < 0)
1597 		return n;
1598 
1599 	descs = devm_kmalloc_array(dev, n, sizeof(*descs), GFP_KERNEL);
1600 	if (!descs)
1601 		return -ENOMEM;
1602 
1603 	meta = dev_get_platdata(&pdev->dev);
1604 	if (meta && meta->init_via_sysfs)
1605 		init_via_sysfs = true;
1606 
1607 	for (i = 0; i < n; i++) {
1608 		descs[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
1609 		if (IS_ERR(descs[i])) {
1610 			/*
1611 			 * Deferred probing is not suitable when the aggregator
1612 			 * is created via configfs; the user can simply retry
1613 			 * later. For device creation via sysfs, the error is
1614 			 * propagated unchanged for backward compatibility.
1615 			 * .prevent_deferred_probe is kept unset for other
1616 			 * cases.
1617 			 */
1618 			if (!init_via_sysfs && !dev_of_node(dev) &&
1619 			    descs[i] == ERR_PTR(-EPROBE_DEFER)) {
1620 				pr_warn("Deferred probe canceled for creation via configfs.\n");
1621 				return -ENODEV;
1622 			}
1623 			return PTR_ERR(descs[i]);
1624 		}
1625 	}
1626 
1627 	features = (uintptr_t)device_get_match_data(dev);
1628 	fwd = gpiochip_fwd_create(dev, n, descs, features);
1629 	if (IS_ERR(fwd))
1630 		return PTR_ERR(fwd);
1631 
1632 	platform_set_drvdata(pdev, fwd);
1633 	devm_kfree(dev, descs);
1634 	return 0;
1635 }
1636 
1637 static const struct of_device_id gpio_aggregator_dt_ids[] = {
1638 	{
1639 		.compatible = "gpio-delay",
1640 		.data = (void *)FWD_FEATURE_DELAY,
1641 	},
1642 	/*
1643 	 * Add GPIO-operated devices controlled from userspace below,
1644 	 * or use "driver_override" in sysfs.
1645 	 */
1646 	{}
1647 };
1648 MODULE_DEVICE_TABLE(of, gpio_aggregator_dt_ids);
1649 
1650 static struct platform_driver gpio_aggregator_driver = {
1651 	.probe = gpio_aggregator_probe,
1652 	.driver = {
1653 		.name = DRV_NAME,
1654 		.groups = gpio_aggregator_groups,
1655 		.of_match_table = gpio_aggregator_dt_ids,
1656 	},
1657 };
1658 
1659 static int __exit gpio_aggregator_idr_remove(int id, void *p, void *data)
1660 {
1661 	/*
1662 	 * There should be no aggregators created via configfs, as their
1663 	 * presence would prevent module unloading.
1664 	 */
1665 	gpio_aggregator_destroy(p);
1666 	return 0;
1667 }
1668 
1669 static void __exit gpio_aggregator_remove_all(void)
1670 {
1671 	/*
1672 	 * Configfs callbacks acquire gpio_aggregator_lock when accessing
1673 	 * gpio_aggregator_idr, so to prevent lock inversion deadlock, we
1674 	 * cannot protect idr_for_each invocation here with
1675 	 * gpio_aggregator_lock, as gpio_aggregator_idr_remove() accesses
1676 	 * configfs groups. Fortunately, the new_device/delete_device path
1677 	 * and the module unload path are mutually exclusive, thanks to an
1678 	 * explicit try_module_get inside of those driver attr handlers.
1679 	 * Also, when we reach here, no configfs entries are present or being
1680 	 * created. Therefore, no need to protect with gpio_aggregator_lock
1681 	 * below.
1682 	 */
1683 	idr_for_each(&gpio_aggregator_idr, gpio_aggregator_idr_remove, NULL);
1684 	idr_destroy(&gpio_aggregator_idr);
1685 }
1686 
1687 static int __init gpio_aggregator_init(void)
1688 {
1689 	int ret = 0;
1690 
1691 	config_group_init(&gpio_aggregator_subsys.su_group);
1692 	mutex_init(&gpio_aggregator_subsys.su_mutex);
1693 	ret = configfs_register_subsystem(&gpio_aggregator_subsys);
1694 	if (ret) {
1695 		pr_err("Failed to register the '%s' configfs subsystem: %d\n",
1696 		       gpio_aggregator_subsys.su_group.cg_item.ci_namebuf, ret);
1697 		mutex_destroy(&gpio_aggregator_subsys.su_mutex);
1698 		return ret;
1699 	}
1700 
1701 	/*
1702 	 * CAVEAT: This must occur after configfs registration. Otherwise,
1703 	 * a race condition could arise: driver attribute groups might be
1704 	 * exposed and accessed by users before configfs registration
1705 	 * completes. new_device_store() does not expect a partially
1706 	 * initialized configfs state.
1707 	 */
1708 	ret = platform_driver_register(&gpio_aggregator_driver);
1709 	if (ret) {
1710 		pr_err("Failed to register the platform driver: %d\n", ret);
1711 		mutex_destroy(&gpio_aggregator_subsys.su_mutex);
1712 		configfs_unregister_subsystem(&gpio_aggregator_subsys);
1713 	}
1714 
1715 	return ret;
1716 }
1717 module_init(gpio_aggregator_init);
1718 
1719 static void __exit gpio_aggregator_exit(void)
1720 {
1721 	gpio_aggregator_remove_all();
1722 	platform_driver_unregister(&gpio_aggregator_driver);
1723 	configfs_unregister_subsystem(&gpio_aggregator_subsys);
1724 }
1725 module_exit(gpio_aggregator_exit);
1726 
1727 MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
1728 MODULE_DESCRIPTION("GPIO Aggregator");
1729 MODULE_LICENSE("GPL v2");
1730