xref: /linux/drivers/gpio/gpio-aggregator.c (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
1 // SPDX-License-Identifier: GPL-2.0-only
2 //
3 // GPIO Aggregator
4 //
5 // Copyright (C) 2019-2020 Glider bv
6 
7 #define DRV_NAME       "gpio-aggregator"
8 #define pr_fmt(fmt)	DRV_NAME ": " fmt
9 
10 #include <linux/bitmap.h>
11 #include <linux/bitops.h>
12 #include <linux/configfs.h>
13 #include <linux/ctype.h>
14 #include <linux/delay.h>
15 #include <linux/export.h>
16 #include <linux/idr.h>
17 #include <linux/kernel.h>
18 #include <linux/list.h>
19 #include <linux/lockdep.h>
20 #include <linux/mod_devicetable.h>
21 #include <linux/module.h>
22 #include <linux/mutex.h>
23 #include <linux/overflow.h>
24 #include <linux/platform_device.h>
25 #include <linux/property.h>
26 #include <linux/slab.h>
27 #include <linux/spinlock.h>
28 #include <linux/string.h>
29 
30 #include <linux/gpio/consumer.h>
31 #include <linux/gpio/driver.h>
32 #include <linux/gpio/forwarder.h>
33 #include <linux/gpio/machine.h>
34 
35 #define AGGREGATOR_MAX_GPIOS 512
36 #define AGGREGATOR_LEGACY_PREFIX "_sysfs"
37 
38 /*
39  * GPIO Aggregator sysfs interface
40  */
41 
/*
 * struct gpio_aggregator - one aggregator instance (configfs or sysfs created)
 */
struct gpio_aggregator {
	struct platform_device *pdev;	/* NULL until activation */
	struct config_group group;	/* configfs directory of this aggregator */
	struct gpiod_lookup_table *lookups;	/* built at activation time */
	struct mutex lock;		/* protects all mutable fields below */
	int id;				/* IDR-allocated instance id */

	/* List of gpio_aggregator_line. Always added in order */
	struct list_head list_head;

	/* used by legacy sysfs interface only */
	bool init_via_sysfs;
	char args[];			/* sysfs new_device argument string */
};
56 
/*
 * struct gpio_aggregator_line - one virtual line of an aggregator
 */
struct gpio_aggregator_line {
	struct config_group group;	/* configfs "lineN" directory */
	struct gpio_aggregator *parent;	/* owning aggregator */
	struct list_head entry;		/* link in parent->list_head, sorted by idx */

	/* Line index within the aggregator device */
	unsigned int idx;

	/* Custom name for the virtual line */
	const char *name;
	/* GPIO chip label or line name */
	const char *key;
	/* Can be negative to indicate lookup by line name */
	int offset;

	enum gpio_lookup_flags flags;
};
74 
/* Platform-data passed to the probed aggregator device. */
struct gpio_aggregator_pdev_meta {
	bool init_via_sysfs;	/* true when created via legacy sysfs */
};
78 
/* Global registry of aggregator instances, keyed by aggr->id. */
static DEFINE_MUTEX(gpio_aggregator_lock);	/* protects idr */
static DEFINE_IDR(gpio_aggregator_idr);
81 
/*
 * Allocate an aggregator with room for @arg_size bytes of argument string
 * and register it in the IDR. On success, ownership of the allocation is
 * transferred to *@aggr. Returns 0 or a negative errno.
 */
static int gpio_aggregator_alloc(struct gpio_aggregator **aggr, size_t arg_size)
{
	int ret;

	/* __free(kfree) releases 'new' automatically on any early return. */
	struct gpio_aggregator *new __free(kfree) = kzalloc(
					sizeof(*new) + arg_size, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* The IDR slot doubles as the unique device instance id. */
	scoped_guard(mutex, &gpio_aggregator_lock)
		ret = idr_alloc(&gpio_aggregator_idr, new, 0, 0, GFP_KERNEL);

	if (ret < 0)
		return ret;

	new->id = ret;
	INIT_LIST_HEAD(&new->list_head);
	mutex_init(&new->lock);
	/* Hand the pointer to the caller and disarm the auto-free. */
	*aggr = no_free_ptr(new);
	return 0;
}
103 
/* Release the IDR slot and free the aggregator allocated above. */
static void gpio_aggregator_free(struct gpio_aggregator *aggr)
{
	scoped_guard(mutex, &gpio_aggregator_lock)
		idr_remove(&gpio_aggregator_idr, aggr->id);

	mutex_destroy(&aggr->lock);
	kfree(aggr);
}
112 
/*
 * Append one lookup entry (chip label @key, hardware line @hwnum) at
 * index *@n of aggr->lookups, growing the table as needed. Increments
 * *@n on success. Returns 0 or -ENOMEM (old table stays valid then).
 */
static int gpio_aggregator_add_gpio(struct gpio_aggregator *aggr,
				    const char *key, int hwnum, unsigned int *n)
{
	struct gpiod_lookup_table *lookups;

	/* +2: one slot for the new entry, one for the zeroed terminator. */
	lookups = krealloc(aggr->lookups, struct_size(lookups, table, *n + 2),
			   GFP_KERNEL);
	if (!lookups)
		return -ENOMEM;

	lookups->table[*n] = GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);

	(*n)++;
	/* Keep the table terminated by an all-zero sentinel entry. */
	memset(&lookups->table[*n], 0, sizeof(lookups->table[*n]));

	aggr->lookups = lookups;
	return 0;
}
131 
132 static bool gpio_aggregator_is_active(struct gpio_aggregator *aggr)
133 {
134 	lockdep_assert_held(&aggr->lock);
135 
136 	return aggr->pdev && platform_get_drvdata(aggr->pdev);
137 }
138 
139 /* Only aggregators created via legacy sysfs can be "activating". */
140 static bool gpio_aggregator_is_activating(struct gpio_aggregator *aggr)
141 {
142 	lockdep_assert_held(&aggr->lock);
143 
144 	return aggr->pdev && !platform_get_drvdata(aggr->pdev);
145 }
146 
/* Number of virtual lines currently configured on this aggregator. */
static size_t gpio_aggregator_count_lines(struct gpio_aggregator *aggr)
{
	lockdep_assert_held(&aggr->lock);

	return list_count_nodes(&aggr->list_head);
}
153 
/*
 * Allocate a line description for @parent at index @idx. @key (GPIO chip
 * label or line name, may be NULL) is duplicated. @offset may be negative
 * to request lookup by line name later. Returns the new line or an
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct gpio_aggregator_line *
gpio_aggregator_line_alloc(struct gpio_aggregator *parent, unsigned int idx,
			   char *key, int offset)
{
	struct gpio_aggregator_line *line;

	line = kzalloc_obj(*line);
	if (!line)
		return ERR_PTR(-ENOMEM);

	if (key) {
		line->key = kstrdup(key, GFP_KERNEL);
		if (!line->key) {
			kfree(line);
			return ERR_PTR(-ENOMEM);
		}
	}

	line->flags = GPIO_LOOKUP_FLAGS_DEFAULT;
	line->parent = parent;
	line->idx = idx;
	line->offset = offset;
	INIT_LIST_HEAD(&line->entry);

	return line;
}
180 
/* Insert @line into @aggr's list, keeping it sorted by ascending idx. */
static void gpio_aggregator_line_add(struct gpio_aggregator *aggr,
				     struct gpio_aggregator_line *line)
{
	struct gpio_aggregator_line *tmp;

	lockdep_assert_held(&aggr->lock);

	/* Insert before the first element with a larger index ... */
	list_for_each_entry(tmp, &aggr->list_head, entry) {
		if (tmp->idx > line->idx) {
			list_add_tail(&line->entry, &tmp->entry);
			return;
		}
	}
	/* ... or at the tail when no larger index exists. */
	list_add_tail(&line->entry, &aggr->list_head);
}
196 
/* Unlink @line from @aggr's list; the line itself is not freed here. */
static void gpio_aggregator_line_del(struct gpio_aggregator *aggr,
				     struct gpio_aggregator_line *line)
{
	lockdep_assert_held(&aggr->lock);

	list_del(&line->entry);
}
204 
/* Unregister and free every configured line of @aggr (teardown path). */
static void gpio_aggregator_free_lines(struct gpio_aggregator *aggr)
{
	struct gpio_aggregator_line *line, *tmp;

	list_for_each_entry_safe(line, tmp, &aggr->list_head, entry) {
		configfs_unregister_group(&line->group);
		/*
		 * Normally, we acquire aggr->lock within the configfs
		 * callback. However, in the legacy sysfs interface case,
		 * calling configfs_(un)register_group while holding
		 * aggr->lock could cause a deadlock. Fortunately, this is
		 * unnecessary because the new_device/delete_device path
		 * and the module unload path are mutually exclusive,
		 * thanks to an explicit try_module_get. That's why this
		 * minimal scoped_guard suffices.
		 */
		scoped_guard(mutex, &aggr->lock)
			gpio_aggregator_line_del(aggr, line);
		kfree(line->key);
		kfree(line->name);
		kfree(line);
	}
}
228 
229 
230 /*
231  *  GPIO Forwarder
232  */
233 
/* Optional per-line ramp delays, in microseconds. */
struct gpiochip_fwd_timing {
	u32 ramp_up_us;
	u32 ramp_down_us;
};

/*
 * struct gpiochip_fwd - GPIO chip forwarding all operations to a set of
 * existing GPIO descriptors
 */
struct gpiochip_fwd {
	struct gpio_chip chip;		/* the exposed gpiochip */
	struct gpio_desc **descs;	/* one descriptor per forwarded line */
	union {
		struct mutex mlock;	/* protects tmp[] if can_sleep */
		spinlock_t slock;	/* protects tmp[] if !can_sleep */
	};
	struct gpiochip_fwd_timing *delay_timings;	/* NULL unless delay feature enabled */
	void *data;			/* driver-private data */
	unsigned long *valid_mask;	/* bit set <=> descs[bit] is populated */
	unsigned long tmp[];		/* values and descs for multiple ops */
};
251 
/*
 * Layout of fwd->tmp[]: a values bitmap (BITS_TO_LONGS(ngpio) longs)
 * immediately followed by an array of ngpio gpio_desc pointers.
 */
#define fwd_tmp_values(fwd)	(&(fwd)->tmp[0])
#define fwd_tmp_descs(fwd)	((void *)&(fwd)->tmp[BITS_TO_LONGS((fwd)->chip.ngpio)])

/* Number of longs to allocate for tmp[] for an ngpios-wide forwarder. */
#define fwd_tmp_size(ngpios)	(BITS_TO_LONGS((ngpios)) + (ngpios))
256 
257 static int gpio_fwd_request(struct gpio_chip *chip, unsigned int offset)
258 {
259 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
260 
261 	return test_bit(offset, fwd->valid_mask) ? 0 : -ENODEV;
262 }
263 
264 static int gpio_fwd_get_direction(struct gpio_chip *chip, unsigned int offset)
265 {
266 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
267 
268 	/*
269 	 * get_direction() is called during gpiochip registration, return
270 	 * -ENODEV if there is no GPIO desc for the line.
271 	 */
272 	if (!test_bit(offset, fwd->valid_mask))
273 		return -ENODEV;
274 
275 	return gpiod_get_direction(fwd->descs[offset]);
276 }
277 
278 static int gpio_fwd_direction_input(struct gpio_chip *chip, unsigned int offset)
279 {
280 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
281 
282 	return gpiod_direction_input(fwd->descs[offset]);
283 }
284 
285 static int gpio_fwd_direction_output(struct gpio_chip *chip,
286 				     unsigned int offset, int value)
287 {
288 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
289 
290 	return gpiod_direction_output(fwd->descs[offset], value);
291 }
292 
293 static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset)
294 {
295 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
296 
297 	return chip->can_sleep ? gpiod_get_value_cansleep(fwd->descs[offset])
298 			       : gpiod_get_value(fwd->descs[offset]);
299 }
300 
/*
 * Read the lines selected by @mask into @bits with one array operation.
 * Uses the preallocated fwd->tmp scratch area; the caller must hold the
 * lock that protects tmp[] (see gpio_fwd_get_multiple_locked()).
 */
static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
				 unsigned long *bits)
{
	struct gpio_desc **descs = fwd_tmp_descs(fwd);
	unsigned long *values = fwd_tmp_values(fwd);
	unsigned int i, j = 0;
	int error;

	/* Compact the selected descriptors for the array read. */
	bitmap_clear(values, 0, fwd->chip.ngpio);
	for_each_set_bit(i, mask, fwd->chip.ngpio)
		descs[j++] = fwd->descs[i];

	if (fwd->chip.can_sleep)
		error = gpiod_get_array_value_cansleep(j, descs, NULL, values);
	else
		error = gpiod_get_array_value(j, descs, NULL, values);
	if (error)
		return error;

	/* Scatter the compacted results back to the caller's bit positions. */
	j = 0;
	for_each_set_bit(i, mask, fwd->chip.ngpio)
		__assign_bit(i, bits, test_bit(j++, values));

	return 0;
}
326 
/*
 * Locked wrapper around gpio_fwd_get_multiple(). The lock flavour (mutex
 * vs. spinlock, see the union in struct gpiochip_fwd) depends on whether
 * the underlying lines may sleep.
 */
static int gpio_fwd_get_multiple_locked(struct gpio_chip *chip,
					unsigned long *mask, unsigned long *bits)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
	unsigned long flags;
	int error;

	if (chip->can_sleep) {
		mutex_lock(&fwd->mlock);
		error = gpio_fwd_get_multiple(fwd, mask, bits);
		mutex_unlock(&fwd->mlock);
	} else {
		spin_lock_irqsave(&fwd->slock, flags);
		error = gpio_fwd_get_multiple(fwd, mask, bits);
		spin_unlock_irqrestore(&fwd->slock, flags);
	}

	return error;
}
346 
347 static void gpio_fwd_delay(struct gpio_chip *chip, unsigned int offset, int value)
348 {
349 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
350 	const struct gpiochip_fwd_timing *delay_timings;
351 	bool is_active_low = gpiod_is_active_low(fwd->descs[offset]);
352 	u32 delay_us;
353 
354 	delay_timings = &fwd->delay_timings[offset];
355 	if ((!is_active_low && value) || (is_active_low && !value))
356 		delay_us = delay_timings->ramp_up_us;
357 	else
358 		delay_us = delay_timings->ramp_down_us;
359 	if (!delay_us)
360 		return;
361 
362 	if (chip->can_sleep)
363 		fsleep(delay_us);
364 	else
365 		udelay(delay_us);
366 }
367 
368 static int gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
369 {
370 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
371 	int ret;
372 
373 	if (chip->can_sleep)
374 		ret = gpiod_set_value_cansleep(fwd->descs[offset], value);
375 	else
376 		ret = gpiod_set_value(fwd->descs[offset], value);
377 	if (ret)
378 		return ret;
379 
380 	if (fwd->delay_timings)
381 		gpio_fwd_delay(chip, offset, value);
382 
383 	return ret;
384 }
385 
386 static int gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
387 				 unsigned long *bits)
388 {
389 	struct gpio_desc **descs = fwd_tmp_descs(fwd);
390 	unsigned long *values = fwd_tmp_values(fwd);
391 	unsigned int i, j = 0, ret;
392 
393 	for_each_set_bit(i, mask, fwd->chip.ngpio) {
394 		__assign_bit(j, values, test_bit(i, bits));
395 		descs[j++] = fwd->descs[i];
396 	}
397 
398 	if (fwd->chip.can_sleep)
399 		ret = gpiod_set_array_value_cansleep(j, descs, NULL, values);
400 	else
401 		ret = gpiod_set_array_value(j, descs, NULL, values);
402 
403 	return ret;
404 }
405 
/*
 * Locked wrapper around gpio_fwd_set_multiple(). The lock flavour (mutex
 * vs. spinlock, see the union in struct gpiochip_fwd) depends on whether
 * the underlying lines may sleep.
 */
static int gpio_fwd_set_multiple_locked(struct gpio_chip *chip,
					unsigned long *mask, unsigned long *bits)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
	unsigned long flags;
	int ret;

	if (chip->can_sleep) {
		mutex_lock(&fwd->mlock);
		ret = gpio_fwd_set_multiple(fwd, mask, bits);
		mutex_unlock(&fwd->mlock);
	} else {
		spin_lock_irqsave(&fwd->slock, flags);
		ret = gpio_fwd_set_multiple(fwd, mask, bits);
		spin_unlock_irqrestore(&fwd->slock, flags);
	}

	return ret;
}
425 
426 static int gpio_fwd_set_config(struct gpio_chip *chip, unsigned int offset,
427 			       unsigned long config)
428 {
429 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
430 
431 	return gpiod_set_config(fwd->descs[offset], config);
432 }
433 
434 static int gpio_fwd_to_irq(struct gpio_chip *chip, unsigned int offset)
435 {
436 	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
437 
438 	return gpiod_to_irq(fwd->descs[offset]);
439 }
440 
441 /*
442  * The GPIO delay provides a way to configure platform specific delays
443  * for the GPIO ramp-up or ramp-down delays. This can serve the following
444  * purposes:
445  *   - Open-drain output using an RC filter
446  */
447 #define FWD_FEATURE_DELAY		BIT(0)
448 
449 #ifdef CONFIG_OF_GPIO
/*
 * OF xlate for 3-cell GPIO specifiers <line ramp-up-us ramp-down-us>:
 * records the delay timings for @line and returns the line number.
 */
static int gpiochip_fwd_delay_of_xlate(struct gpio_chip *chip,
				       const struct of_phandle_args *gpiospec,
				       u32 *flags)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
	struct gpiochip_fwd_timing *timings;
	u32 line;

	if (gpiospec->args_count != chip->of_gpio_n_cells)
		return -EINVAL;

	line = gpiospec->args[0];
	if (line >= chip->ngpio)
		return -EINVAL;

	/* Cells 1 and 2 carry the ramp-up/ramp-down delays in microseconds. */
	timings = &fwd->delay_timings[line];
	timings->ramp_up_us = gpiospec->args[1];
	timings->ramp_down_us = gpiospec->args[2];

	return line;
}
471 
/* Allocate per-line delay timings and install the 3-cell OF xlate. */
static int gpiochip_fwd_setup_delay_line(struct gpiochip_fwd *fwd)
{
	struct gpio_chip *chip = &fwd->chip;

	fwd->delay_timings = devm_kcalloc(chip->parent, chip->ngpio,
					  sizeof(*fwd->delay_timings),
					  GFP_KERNEL);
	if (!fwd->delay_timings)
		return -ENOMEM;

	chip->of_xlate = gpiochip_fwd_delay_of_xlate;
	chip->of_gpio_n_cells = 3;

	return 0;
}
487 #else
/* Without CONFIG_OF_GPIO there is no way to specify delays: no-op. */
static int gpiochip_fwd_setup_delay_line(struct gpiochip_fwd *fwd)
{
	return 0;
}
492 #endif	/* !CONFIG_OF_GPIO */
493 
/**
 * gpiochip_fwd_get_gpiochip - Get the GPIO chip for the GPIO forwarder
 * @fwd: GPIO forwarder
 *
 * Returns: The &struct gpio_chip embedded in @fwd. The pointer remains
 * owned by the forwarder; callers must not free it.
 */
struct gpio_chip *gpiochip_fwd_get_gpiochip(struct gpiochip_fwd *fwd)
{
	return &fwd->chip;
}
EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_get_gpiochip, "GPIO_FORWARDER");
505 
/**
 * gpiochip_fwd_get_data - Get driver-private data for the GPIO forwarder
 * @fwd: GPIO forwarder
 *
 * Returns: The driver-private data previously passed to
 * gpiochip_fwd_register(), or NULL if none was set.
 */
void *gpiochip_fwd_get_data(struct gpiochip_fwd *fwd)
{
	return fwd->data;
}
EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_get_data, "GPIO_FORWARDER");
517 
518 /**
519  * gpiochip_fwd_gpio_request - Request a line of the GPIO forwarder
520  * @fwd: GPIO forwarder
521  * @offset: the offset of the line to request
522  *
523  * Returns: 0 on success, or negative errno on failure.
524  */
525 int gpiochip_fwd_gpio_request(struct gpiochip_fwd *fwd, unsigned int offset)
526 {
527 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
528 
529 	return gpio_fwd_request(gc, offset);
530 }
531 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_request, "GPIO_FORWARDER");
532 
533 /**
534  * gpiochip_fwd_gpio_get_direction - Return the current direction of a GPIO forwarder line
535  * @fwd: GPIO forwarder
536  * @offset: the offset of the line
537  *
538  * Returns: 0 for output, 1 for input, or an error code in case of error.
539  */
540 int gpiochip_fwd_gpio_get_direction(struct gpiochip_fwd *fwd, unsigned int offset)
541 {
542 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
543 
544 	return gpio_fwd_get_direction(gc, offset);
545 }
546 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get_direction, "GPIO_FORWARDER");
547 
548 /**
549  * gpiochip_fwd_gpio_direction_output - Set a GPIO forwarder line direction to
550  * output
551  * @fwd: GPIO forwarder
552  * @offset: the offset of the line
553  * @value: value to set
554  *
555  * Returns: 0 on success, or negative errno on failure.
556  */
557 int gpiochip_fwd_gpio_direction_output(struct gpiochip_fwd *fwd, unsigned int offset,
558 				       int value)
559 {
560 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
561 
562 	return gpio_fwd_direction_output(gc, offset, value);
563 }
564 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_direction_output, "GPIO_FORWARDER");
565 
566 /**
567  * gpiochip_fwd_gpio_direction_input - Set a GPIO forwarder line direction to input
568  * @fwd: GPIO forwarder
569  * @offset: the offset of the line
570  *
571  * Returns: 0 on success, or negative errno on failure.
572  */
573 int gpiochip_fwd_gpio_direction_input(struct gpiochip_fwd *fwd, unsigned int offset)
574 {
575 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
576 
577 	return gpio_fwd_direction_input(gc, offset);
578 }
579 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_direction_input, "GPIO_FORWARDER");
580 
581 /**
582  * gpiochip_fwd_gpio_get - Return a GPIO forwarder line's value
583  * @fwd: GPIO forwarder
584  * @offset: the offset of the line
585  *
586  * Returns: The GPIO's logical value, i.e. taking the ACTIVE_LOW status into
587  * account, or negative errno on failure.
588  */
589 int gpiochip_fwd_gpio_get(struct gpiochip_fwd *fwd, unsigned int offset)
590 {
591 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
592 
593 	return gpio_fwd_get(gc, offset);
594 }
595 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get, "GPIO_FORWARDER");
596 
597 /**
598  * gpiochip_fwd_gpio_get_multiple - Get values for multiple GPIO forwarder lines
599  * @fwd: GPIO forwarder
600  * @mask: bit mask array; one bit per line; BITS_PER_LONG bits per word defines
601  *        which lines are to be read
602  * @bits: bit value array; one bit per line; BITS_PER_LONG bits per word will
603  *        contains the read values for the lines specified by mask
604  *
605  * Returns: 0 on success, or negative errno on failure.
606  */
607 int gpiochip_fwd_gpio_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
608 				   unsigned long *bits)
609 {
610 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
611 
612 	return gpio_fwd_get_multiple_locked(gc, mask, bits);
613 }
614 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get_multiple, "GPIO_FORWARDER");
615 
616 /**
617  * gpiochip_fwd_gpio_set - Assign value to a GPIO forwarder line.
618  * @fwd: GPIO forwarder
619  * @offset: the offset of the line
620  * @value: value to set
621  *
622  * Returns: 0 on success, or negative errno on failure.
623  */
624 int gpiochip_fwd_gpio_set(struct gpiochip_fwd *fwd, unsigned int offset, int value)
625 {
626 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
627 
628 	return gpio_fwd_set(gc, offset, value);
629 }
630 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set, "GPIO_FORWARDER");
631 
632 /**
633  * gpiochip_fwd_gpio_set_multiple - Assign values to multiple GPIO forwarder lines
634  * @fwd: GPIO forwarder
635  * @mask: bit mask array; one bit per output; BITS_PER_LONG bits per word
636  *        defines which outputs are to be changed
637  * @bits: bit value array; one bit per output; BITS_PER_LONG bits per word
638  *        defines the values the outputs specified by mask are to be set to
639  *
640  * Returns: 0 on success, or negative errno on failure.
641  */
642 int gpiochip_fwd_gpio_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
643 				   unsigned long *bits)
644 {
645 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
646 
647 	return gpio_fwd_set_multiple_locked(gc, mask, bits);
648 }
649 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set_multiple, "GPIO_FORWARDER");
650 
651 /**
652  * gpiochip_fwd_gpio_set_config - Set @config for a GPIO forwarder line
653  * @fwd: GPIO forwarder
654  * @offset: the offset of the line
655  * @config: Same packed config format as generic pinconf
656  *
657  * Returns: 0 on success, %-ENOTSUPP if the controller doesn't support setting
658  * the configuration.
659  */
660 int gpiochip_fwd_gpio_set_config(struct gpiochip_fwd *fwd, unsigned int offset,
661 				 unsigned long config)
662 {
663 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
664 
665 	return gpio_fwd_set_config(gc, offset, config);
666 }
667 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set_config, "GPIO_FORWARDER");
668 
669 /**
670  * gpiochip_fwd_gpio_to_irq - Return the IRQ corresponding to a GPIO forwarder line
671  * @fwd: GPIO forwarder
672  * @offset: the offset of the line
673  *
674  * Returns: The Linux IRQ corresponding to the passed line, or an error code in
675  * case of error.
676  */
677 int gpiochip_fwd_gpio_to_irq(struct gpiochip_fwd *fwd, unsigned int offset)
678 {
679 	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
680 
681 	return gpio_fwd_to_irq(gc, offset);
682 }
683 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_to_irq, "GPIO_FORWARDER");
684 
/**
 * devm_gpiochip_fwd_alloc - Allocate and initialize a new GPIO forwarder
 * @dev: Parent device pointer
 * @ngpios: Number of GPIOs in the forwarder
 *
 * All memory is devm-managed and tied to @dev's lifetime. The forwarder
 * still needs descriptors added (gpiochip_fwd_desc_add()) and then
 * gpiochip_fwd_register() before it is usable.
 *
 * Returns: An opaque object pointer, or an ERR_PTR()-encoded negative error
 * code on failure.
 */
struct gpiochip_fwd *devm_gpiochip_fwd_alloc(struct device *dev,
					     unsigned int ngpios)
{
	struct gpiochip_fwd *fwd;
	struct gpio_chip *chip;

	/* tmp[] is the flexible scratch area for the *_multiple() ops. */
	fwd = devm_kzalloc(dev, struct_size(fwd, tmp, fwd_tmp_size(ngpios)), GFP_KERNEL);
	if (!fwd)
		return ERR_PTR(-ENOMEM);

	fwd->descs = devm_kcalloc(dev, ngpios, sizeof(*fwd->descs), GFP_KERNEL);
	if (!fwd->descs)
		return ERR_PTR(-ENOMEM);

	fwd->valid_mask = devm_bitmap_zalloc(dev, ngpios, GFP_KERNEL);
	if (!fwd->valid_mask)
		return ERR_PTR(-ENOMEM);

	chip = &fwd->chip;

	/* Wire every gpio_chip operation to the forwarding callbacks. */
	chip->label = dev_name(dev);
	chip->parent = dev;
	chip->owner = THIS_MODULE;
	chip->request = gpio_fwd_request;
	chip->get_direction = gpio_fwd_get_direction;
	chip->direction_input = gpio_fwd_direction_input;
	chip->direction_output = gpio_fwd_direction_output;
	chip->get = gpio_fwd_get;
	chip->get_multiple = gpio_fwd_get_multiple_locked;
	chip->set = gpio_fwd_set;
	chip->set_multiple = gpio_fwd_set_multiple_locked;
	chip->set_config = gpio_fwd_set_config;
	chip->to_irq = gpio_fwd_to_irq;
	chip->base = -1;	/* let gpiolib assign a dynamic base */
	chip->ngpio = ngpios;

	return fwd;
}
EXPORT_SYMBOL_NS_GPL(devm_gpiochip_fwd_alloc, "GPIO_FORWARDER");
732 
/**
 * gpiochip_fwd_desc_add - Add a GPIO desc in the forwarder
 * @fwd: GPIO forwarder
 * @desc: GPIO descriptor to register
 * @offset: offset for the GPIO in the forwarder
 *
 * Returns: 0 on success, -EINVAL for an out-of-range @offset, or -EEXIST
 * if the slot is already populated.
 */
int gpiochip_fwd_desc_add(struct gpiochip_fwd *fwd, struct gpio_desc *desc,
			  unsigned int offset)
{
	struct gpio_chip *chip = &fwd->chip;

	if (offset >= chip->ngpio)
		return -EINVAL;

	/* test_and_set_bit() atomically claims the slot. */
	if (test_and_set_bit(offset, fwd->valid_mask))
		return -EEXIST;

	/*
	 * If any of the GPIO lines are sleeping, then the entire forwarder
	 * will be sleeping.
	 */
	if (gpiod_cansleep(desc))
		chip->can_sleep = true;

	fwd->descs[offset] = desc;

	dev_dbg(chip->parent, "%u => gpio %d irq %d\n", offset,
		desc_to_gpio(desc), gpiod_to_irq(desc));

	return 0;
}
EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_desc_add, "GPIO_FORWARDER");
767 
768 /**
769  * gpiochip_fwd_desc_free - Remove a GPIO desc from the forwarder
770  * @fwd: GPIO forwarder
771  * @offset: offset of GPIO desc to remove
772  */
773 void gpiochip_fwd_desc_free(struct gpiochip_fwd *fwd, unsigned int offset)
774 {
775 	if (test_and_clear_bit(offset, fwd->valid_mask))
776 		gpiod_put(fwd->descs[offset]);
777 }
778 EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_desc_free, "GPIO_FORWARDER");
779 
/**
 * gpiochip_fwd_register - Register a GPIO forwarder
 * @fwd: GPIO forwarder
 * @data: driver-private data associated with this forwarder
 *
 * Chooses the lock flavour for the tmp[] scratch area (mutex when the
 * chip may sleep, spinlock otherwise) and registers the gpiochip.
 *
 * Returns: 0 on success, or negative errno on failure.
 */
int gpiochip_fwd_register(struct gpiochip_fwd *fwd, void *data)
{
	struct gpio_chip *chip = &fwd->chip;

	/*
	 * Some gpio_desc were not registered. They will be registered at runtime
	 * but we have to suppose they can sleep.
	 */
	if (!bitmap_full(fwd->valid_mask, chip->ngpio))
		chip->can_sleep = true;

	/* mlock and slock share a union; initialize exactly one of them. */
	if (chip->can_sleep)
		mutex_init(&fwd->mlock);
	else
		spin_lock_init(&fwd->slock);

	fwd->data = data;

	return devm_gpiochip_add_data(chip->parent, chip, fwd);
}
EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_register, "GPIO_FORWARDER");
808 
/**
 * gpiochip_fwd_create() - Create a new GPIO forwarder
 * @dev: Parent device pointer
 * @ngpios: Number of GPIOs in the forwarder.
 * @descs: Array containing the GPIO descriptors to forward to.
 *         This array must contain @ngpios entries, and can be deallocated
 *         as the forwarder has its own array.
 * @features: Bitwise ORed features as defined with FWD_FEATURE_*.
 *
 * This function creates a new gpiochip, which forwards all GPIO operations to
 * the passed GPIO descriptors.
 *
 * Return: An opaque object pointer, or an ERR_PTR()-encoded negative error
 *         code on failure.
 */
static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
						unsigned int ngpios,
						struct gpio_desc *descs[],
						unsigned long features)
{
	struct gpiochip_fwd *fwd;
	unsigned int i;
	int error;

	fwd = devm_gpiochip_fwd_alloc(dev, ngpios);
	if (IS_ERR(fwd))
		return fwd;

	/* Populate every slot; all allocations are devm, so errors just bail. */
	for (i = 0; i < ngpios; i++) {
		error = gpiochip_fwd_desc_add(fwd, descs[i], i);
		if (error)
			return ERR_PTR(error);
	}

	if (features & FWD_FEATURE_DELAY) {
		error = gpiochip_fwd_setup_delay_line(fwd);
		if (error)
			return ERR_PTR(error);
	}

	error = gpiochip_fwd_register(fwd, NULL);
	if (error)
		return ERR_PTR(error);

	return fwd;
}
855 
856 /*
857  * Configfs interface
858  */
859 
860 static struct gpio_aggregator *
861 to_gpio_aggregator(struct config_item *item)
862 {
863 	struct config_group *group = to_config_group(item);
864 
865 	return container_of(group, struct gpio_aggregator, group);
866 }
867 
868 static struct gpio_aggregator_line *
869 to_gpio_aggregator_line(struct config_item *item)
870 {
871 	struct config_group *group = to_config_group(item);
872 
873 	return container_of(group, struct gpio_aggregator_line, group);
874 }
875 
/*
 * Build a software node carrying the "gpio-line-names" property from the
 * configured lines. Returns NULL when no lines are configured, an
 * ERR_PTR() on failure, or the new fwnode. The caller must hold
 * aggr->lock (via gpio_aggregator_count_lines()).
 */
static struct fwnode_handle *
gpio_aggregator_make_device_sw_node(struct gpio_aggregator *aggr)
{
	/* [0] = gpio-line-names, [1] stays zeroed as the terminator. */
	struct property_entry properties[2];
	struct gpio_aggregator_line *line;
	size_t num_lines;
	int n = 0;

	memset(properties, 0, sizeof(properties));

	num_lines = gpio_aggregator_count_lines(aggr);
	if (num_lines == 0)
		return NULL;

	const char **line_names __free(kfree) = kcalloc(
				num_lines, sizeof(*line_names), GFP_KERNEL);
	if (!line_names)
		return ERR_PTR(-ENOMEM);

	/* The list is always sorted as new elements are inserted in order. */
	list_for_each_entry(line, &aggr->list_head, entry)
		line_names[n++] = line->name ?: "";

	properties[0] = PROPERTY_ENTRY_STRING_ARRAY_LEN(
					"gpio-line-names",
					line_names, num_lines);

	return fwnode_create_software_node(properties, NULL);
}
905 
/*
 * Instantiate the aggregator: build the GPIO lookup table from the
 * configured lines, create the software node with line names, and
 * register the platform device that the gpio-aggregator driver binds to.
 * The caller must hold aggr->lock. Returns 0 or a negative errno, with
 * all intermediate resources unwound on failure.
 */
static int gpio_aggregator_activate(struct gpio_aggregator *aggr)
{
	struct platform_device_info pdevinfo;
	struct gpio_aggregator_line *line;
	struct platform_device *pdev;
	struct fwnode_handle *swnode;
	unsigned int n = 0;
	int ret = 0;

	if (gpio_aggregator_count_lines(aggr) == 0)
		return -EINVAL;

	/* Start with just the zeroed terminator entry; entries added below. */
	aggr->lookups = kzalloc_flex(*aggr->lookups, table, 1);
	if (!aggr->lookups)
		return -ENOMEM;

	swnode = gpio_aggregator_make_device_sw_node(aggr);
	if (IS_ERR(swnode)) {
		ret = PTR_ERR(swnode);
		goto err_remove_lookups;
	}

	memset(&pdevinfo, 0, sizeof(pdevinfo));
	pdevinfo.name = DRV_NAME;
	pdevinfo.id = aggr->id;
	pdevinfo.fwnode = swnode;

	/* The list is always sorted as new elements are inserted in order. */
	list_for_each_entry(line, &aggr->list_head, entry) {
		/*
		 * - Either GPIO chip label or line name must be configured
		 *   (i.e. line->key must be non-NULL)
		 * - Line directories must be named with sequential numeric
		 *   suffixes starting from 0. (i.e. ./line0, ./line1, ...)
		 */
		if (!line->key || line->idx != n) {
			ret = -EINVAL;
			goto err_remove_swnode;
		}

		/* Negative offset means "look up by line name" (U16_MAX hwnum). */
		if (line->offset < 0)
			ret = gpio_aggregator_add_gpio(aggr, line->key,
						       U16_MAX, &n);
		else
			ret = gpio_aggregator_add_gpio(aggr, line->key,
						       line->offset, &n);
		if (ret)
			goto err_remove_swnode;
	}

	/* dev_id must match the platform device name: "gpio-aggregator.<id>" */
	aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, aggr->id);
	if (!aggr->lookups->dev_id) {
		ret = -ENOMEM;
		goto err_remove_swnode;
	}

	gpiod_add_lookup_table(aggr->lookups);

	pdev = platform_device_register_full(&pdevinfo);
	if (IS_ERR(pdev)) {
		ret = PTR_ERR(pdev);
		goto err_remove_lookup_table;
	}

	/* Fail activation if the driver did not bind (e.g. bad lookups). */
	wait_for_device_probe();
	if (!device_is_bound(&pdev->dev)) {
		ret = -ENXIO;
		goto err_unregister_pdev;
	}

	aggr->pdev = pdev;
	return 0;

err_unregister_pdev:
	platform_device_unregister(pdev);
err_remove_lookup_table:
	kfree(aggr->lookups->dev_id);
	gpiod_remove_lookup_table(aggr->lookups);
err_remove_swnode:
	fwnode_remove_software_node(swnode);
err_remove_lookups:
	kfree(aggr->lookups);

	return ret;
}
991 
/*
 * Undo gpio_aggregator_activate(): remove the platform device first,
 * then the lookup table it was consuming, then free the table memory.
 */
static void gpio_aggregator_deactivate(struct gpio_aggregator *aggr)
{
	platform_device_unregister(aggr->pdev);
	aggr->pdev = NULL;
	gpiod_remove_lookup_table(aggr->lookups);
	kfree(aggr->lookups->dev_id);
	kfree(aggr->lookups);
}
1000 
/*
 * Pin (@lock == true) or unpin (@lock == false) the configfs items of
 * all lines so they cannot be removed while the device is instantiated.
 */
static void gpio_aggregator_lockup_configfs(struct gpio_aggregator *aggr,
					    bool lock)
{
	struct configfs_subsystem *subsys = aggr->group.cg_subsys;
	struct gpio_aggregator_line *line;

	/*
	 * The device only needs to depend on leaf lines. This is
	 * sufficient to lock up all the configfs entries that the
	 * instantiated, alive device depends on.
	 */
	list_for_each_entry(line, &aggr->list_head, entry) {
		if (lock)
			configfs_depend_item_unlocked(
					subsys, &line->group.cg_item);
		else
			configfs_undepend_item_unlocked(
					&line->group.cg_item);
	}
}
1021 
/* Show the line's lookup key (chip label or line name); empty if unset. */
static ssize_t
gpio_aggregator_line_key_show(struct config_item *item, char *page)
{
	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
	struct gpio_aggregator *aggr = line->parent;

	guard(mutex)(&aggr->lock);

	return sysfs_emit(page, "%s\n", line->key ?: "");
}

/*
 * Store a new lookup key. Surrounding whitespace is trimmed. Rejected
 * with -EBUSY while the aggregator device is (being) instantiated.
 */
static ssize_t
gpio_aggregator_line_key_store(struct config_item *item, const char *page,
			       size_t count)
{
	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
	struct gpio_aggregator *aggr = line->parent;

	char *key __free(kfree) = kstrndup(skip_spaces(page), count,
					   GFP_KERNEL);
	if (!key)
		return -ENOMEM;

	strim(key);

	guard(mutex)(&aggr->lock);

	if (gpio_aggregator_is_activating(aggr) ||
	    gpio_aggregator_is_active(aggr))
		return -EBUSY;

	/* Replace the old key; no_free_ptr() hands ownership to the line. */
	kfree(line->key);
	line->key = no_free_ptr(key);

	return count;
}
CONFIGFS_ATTR(gpio_aggregator_line_, key);
1059 
1060 static ssize_t
1061 gpio_aggregator_line_name_show(struct config_item *item, char *page)
1062 {
1063 	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
1064 	struct gpio_aggregator *aggr = line->parent;
1065 
1066 	guard(mutex)(&aggr->lock);
1067 
1068 	return sysfs_emit(page, "%s\n", line->name ?: "");
1069 }
1070 
1071 static ssize_t
1072 gpio_aggregator_line_name_store(struct config_item *item, const char *page,
1073 				size_t count)
1074 {
1075 	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
1076 	struct gpio_aggregator *aggr = line->parent;
1077 
1078 	char *name __free(kfree) = kstrndup(skip_spaces(page), count,
1079 					    GFP_KERNEL);
1080 	if (!name)
1081 		return -ENOMEM;
1082 
1083 	strim(name);
1084 
1085 	guard(mutex)(&aggr->lock);
1086 
1087 	if (gpio_aggregator_is_activating(aggr) ||
1088 	    gpio_aggregator_is_active(aggr))
1089 		return -EBUSY;
1090 
1091 	kfree(line->name);
1092 	line->name = no_free_ptr(name);
1093 
1094 	return count;
1095 }
1096 CONFIGFS_ATTR(gpio_aggregator_line_, name);
1097 
1098 static ssize_t
1099 gpio_aggregator_line_offset_show(struct config_item *item, char *page)
1100 {
1101 	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
1102 	struct gpio_aggregator *aggr = line->parent;
1103 
1104 	guard(mutex)(&aggr->lock);
1105 
1106 	return sysfs_emit(page, "%d\n", line->offset);
1107 }
1108 
1109 static ssize_t
1110 gpio_aggregator_line_offset_store(struct config_item *item, const char *page,
1111 				  size_t count)
1112 {
1113 	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
1114 	struct gpio_aggregator *aggr = line->parent;
1115 	int offset, ret;
1116 
1117 	ret = kstrtoint(page, 0, &offset);
1118 	if (ret)
1119 		return ret;
1120 
1121 	/*
1122 	 * When offset == -1, 'key' represents a line name to lookup.
1123 	 * When 0 <= offset < 65535, 'key' represents the label of the chip with
1124 	 * the 'offset' value representing the line within that chip.
1125 	 *
1126 	 * GPIOLIB uses the U16_MAX value to indicate lookup by line name so
1127 	 * the greatest offset we can accept is (U16_MAX - 1).
1128 	 */
1129 	if (offset > (U16_MAX - 1) || offset < -1)
1130 		return -EINVAL;
1131 
1132 	guard(mutex)(&aggr->lock);
1133 
1134 	if (gpio_aggregator_is_activating(aggr) ||
1135 	    gpio_aggregator_is_active(aggr))
1136 		return -EBUSY;
1137 
1138 	line->offset = offset;
1139 
1140 	return count;
1141 }
1142 CONFIGFS_ATTR(gpio_aggregator_line_, offset);
1143 
/* Per-line configfs attributes: key, name and offset. */
static struct configfs_attribute *gpio_aggregator_line_attrs[] = {
	&gpio_aggregator_line_attr_key,
	&gpio_aggregator_line_attr_name,
	&gpio_aggregator_line_attr_offset,
	NULL
};
1150 
1151 static ssize_t
1152 gpio_aggregator_device_dev_name_show(struct config_item *item, char *page)
1153 {
1154 	struct gpio_aggregator *aggr = to_gpio_aggregator(item);
1155 	struct platform_device *pdev;
1156 
1157 	guard(mutex)(&aggr->lock);
1158 
1159 	pdev = aggr->pdev;
1160 	if (pdev)
1161 		return sysfs_emit(page, "%s\n", dev_name(&pdev->dev));
1162 
1163 	return sysfs_emit(page, "%s.%d\n", DRV_NAME, aggr->id);
1164 }
1165 CONFIGFS_ATTR_RO(gpio_aggregator_device_, dev_name);
1166 
1167 static ssize_t
1168 gpio_aggregator_device_live_show(struct config_item *item, char *page)
1169 {
1170 	struct gpio_aggregator *aggr = to_gpio_aggregator(item);
1171 
1172 	guard(mutex)(&aggr->lock);
1173 
1174 	return sysfs_emit(page, "%c\n",
1175 			  gpio_aggregator_is_active(aggr) ? '1' : '0');
1176 }
1177 
/*
 * Writing "1" activates the aggregator (instantiates the platform device),
 * writing "0" deactivates it. Returns -EPERM when the requested state
 * matches the current one or an activation is already in flight.
 */
static ssize_t
gpio_aggregator_device_live_store(struct config_item *item, const char *page,
				  size_t count)
{
	struct gpio_aggregator *aggr = to_gpio_aggregator(item);
	int ret = 0;
	bool live;

	ret = kstrtobool(page, &live);
	if (ret)
		return ret;

	/* Pin the module so it cannot unload mid-(de)activation. */
	if (!try_module_get(THIS_MODULE))
		return -ENOENT;

	/*
	 * Before activating, make the device depend on its configfs line
	 * items so they cannot be removed while the device is live.
	 * Legacy-sysfs aggregators are excluded: their lines are default
	 * groups (see gpio_aggregator_device_make_group()).
	 */
	if (live && !aggr->init_via_sysfs)
		gpio_aggregator_lockup_configfs(aggr, true);

	scoped_guard(mutex, &aggr->lock) {
		/* Reject no-op transitions and races with activation. */
		if (gpio_aggregator_is_activating(aggr) ||
		    (live == gpio_aggregator_is_active(aggr)))
			ret = -EPERM;
		else if (live)
			ret = gpio_aggregator_activate(aggr);
		else
			gpio_aggregator_deactivate(aggr);
	}

	/*
	 * Undepend is required only if device disablement (live == 0)
	 * succeeds or if device enablement (live == 1) fails.
	 */
	if (live == !!ret && !aggr->init_via_sysfs)
		gpio_aggregator_lockup_configfs(aggr, false);

	module_put(THIS_MODULE);

	return ret ?: count;
}
CONFIGFS_ATTR(gpio_aggregator_device_, live);
1218 
/* Per-device configfs attributes: dev_name (read-only) and live. */
static struct configfs_attribute *gpio_aggregator_device_attrs[] = {
	&gpio_aggregator_device_attr_dev_name,
	&gpio_aggregator_device_attr_live,
	NULL
};
1224 
1225 static void
1226 gpio_aggregator_line_release(struct config_item *item)
1227 {
1228 	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
1229 	struct gpio_aggregator *aggr = line->parent;
1230 
1231 	guard(mutex)(&aggr->lock);
1232 
1233 	gpio_aggregator_line_del(aggr, line);
1234 	kfree(line->key);
1235 	kfree(line->name);
1236 	kfree(line);
1237 }
1238 
/* Line items only need a release hook; the attributes do the rest. */
static const struct configfs_item_operations gpio_aggregator_line_item_ops = {
	.release	= gpio_aggregator_line_release,
};

/* configfs type of a single "lineX" group under an aggregator device. */
static const struct config_item_type gpio_aggregator_line_type = {
	.ct_item_ops	= &gpio_aggregator_line_item_ops,
	.ct_attrs	= gpio_aggregator_line_attrs,
	.ct_owner	= THIS_MODULE,
};
1248 
static void gpio_aggregator_device_release(struct config_item *item)
{
	/*
	 * By the time configfs drops the last reference, the aggregator is
	 * neither active nor activating, so calling
	 * gpio_aggregator_deactivate() is always unnecessary; releasing
	 * the memory suffices.
	 */
	gpio_aggregator_free(to_gpio_aggregator(item));
}
1259 
/* Device items only need a release hook to free the aggregator. */
static const struct configfs_item_operations gpio_aggregator_device_item_ops = {
	.release	= gpio_aggregator_device_release,
};
1263 
1264 static struct config_group *
1265 gpio_aggregator_device_make_group(struct config_group *group, const char *name)
1266 {
1267 	struct gpio_aggregator *aggr = to_gpio_aggregator(&group->cg_item);
1268 	struct gpio_aggregator_line *line;
1269 	unsigned int idx;
1270 	int ret, nchar;
1271 
1272 	ret = sscanf(name, "line%u%n", &idx, &nchar);
1273 	if (ret != 1 || nchar != strlen(name))
1274 		return ERR_PTR(-EINVAL);
1275 
1276 	if (aggr->init_via_sysfs)
1277 		/*
1278 		 * Aggregators created via legacy sysfs interface are exposed as
1279 		 * default groups, which means rmdir(2) is prohibited for them.
1280 		 * For simplicity, and to avoid confusion, we also prohibit
1281 		 * mkdir(2).
1282 		 */
1283 		return ERR_PTR(-EPERM);
1284 
1285 	guard(mutex)(&aggr->lock);
1286 
1287 	if (gpio_aggregator_is_active(aggr))
1288 		return ERR_PTR(-EBUSY);
1289 
1290 	list_for_each_entry(line, &aggr->list_head, entry)
1291 		if (line->idx == idx)
1292 			return ERR_PTR(-EINVAL);
1293 
1294 	line = gpio_aggregator_line_alloc(aggr, idx, NULL, -1);
1295 	if (IS_ERR(line))
1296 		return ERR_CAST(line);
1297 
1298 	config_group_init_type_name(&line->group, name, &gpio_aggregator_line_type);
1299 
1300 	gpio_aggregator_line_add(aggr, line);
1301 
1302 	return &line->group;
1303 }
1304 
/* mkdir(2) under a device group creates a new "lineX" subgroup. */
static const struct configfs_group_operations gpio_aggregator_device_group_ops = {
	.make_group	= gpio_aggregator_device_make_group,
};

/* configfs type of one aggregator device group. */
static const struct config_item_type gpio_aggregator_device_type = {
	.ct_group_ops	= &gpio_aggregator_device_group_ops,
	.ct_item_ops	= &gpio_aggregator_device_item_ops,
	.ct_attrs	= gpio_aggregator_device_attrs,
	.ct_owner	= THIS_MODULE,
};
1315 
1316 static struct config_group *
1317 gpio_aggregator_make_group(struct config_group *group, const char *name)
1318 {
1319 	struct gpio_aggregator *aggr;
1320 	int ret;
1321 
1322 	/*
1323 	 * "_sysfs" prefix is reserved for auto-generated config group
1324 	 * for devices create via legacy sysfs interface.
1325 	 */
1326 	if (strncmp(name, AGGREGATOR_LEGACY_PREFIX,
1327 		    sizeof(AGGREGATOR_LEGACY_PREFIX) - 1) == 0)
1328 		return ERR_PTR(-EINVAL);
1329 
1330 	/* arg space is unneeded */
1331 	ret = gpio_aggregator_alloc(&aggr, 0);
1332 	if (ret)
1333 		return ERR_PTR(ret);
1334 
1335 	config_group_init_type_name(&aggr->group, name, &gpio_aggregator_device_type);
1336 
1337 	return &aggr->group;
1338 }
1339 
/* mkdir(2) at the subsystem root creates a new aggregator device group. */
static const struct configfs_group_operations gpio_aggregator_group_ops = {
	.make_group	= gpio_aggregator_make_group,
};

/* configfs type of the subsystem root group. */
static const struct config_item_type gpio_aggregator_type = {
	.ct_group_ops	= &gpio_aggregator_group_ops,
	.ct_owner	= THIS_MODULE,
};

/* The /sys/kernel/config/gpio-aggregator subsystem. */
static struct configfs_subsystem gpio_aggregator_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf	= DRV_NAME,
			.ci_type	= &gpio_aggregator_type,
		},
	},
};
1357 
1358 /*
1359  * Sysfs interface
1360  */
/*
 * Parse the argument string written to the legacy 'new_device' sysfs
 * attribute ("<key> [<offsets>] <key> [<offsets>] ..."), creating one
 * configfs line group and one lookup entry per aggregated GPIO.
 *
 * A <key> followed by a parsable offset list is a GPIO chip label plus
 * offsets within that chip; a <key> followed by another key (or nothing)
 * is a GPIO line name looked up by name.
 *
 * Returns 0 on success or a negative errno; on failure all lines created
 * so far are torn down again.
 */
static int gpio_aggregator_parse(struct gpio_aggregator *aggr)
{
	char *args = skip_spaces(aggr->args);
	struct gpio_aggregator_line *line;
	char name[CONFIGFS_ITEM_NAME_LEN];
	char *key, *offsets, *p;
	unsigned int i, n = 0;	/* n counts lines added so far */
	int error = 0;

	unsigned long *bitmap __free(bitmap) =
			bitmap_alloc(AGGREGATOR_MAX_GPIOS, GFP_KERNEL);
	if (!bitmap)
		return -ENOMEM;

	args = next_arg(args, &key, &p);
	while (*args) {
		args = next_arg(args, &offsets, &p);

		/* Probe whether the next token is an offset list. */
		p = get_options(offsets, 0, &error);
		if (error == 0 || *p) {
			/* Named GPIO line */
			scnprintf(name, sizeof(name), "line%u", n);
			line = gpio_aggregator_line_alloc(aggr, n, key, -1);
			if (IS_ERR(line)) {
				error = PTR_ERR(line);
				goto err;
			}
			config_group_init_type_name(&line->group, name,
						    &gpio_aggregator_line_type);
			error = configfs_register_group(&aggr->group,
							&line->group);
			if (error)
				goto err;
			scoped_guard(mutex, &aggr->lock)
				gpio_aggregator_line_add(aggr, line);

			/* U16_MAX offset marks lookup by line name. */
			error = gpio_aggregator_add_gpio(aggr, key, U16_MAX, &n);
			if (error)
				goto err;

			/* The would-be offset list was actually the next key. */
			key = offsets;
			continue;
		}

		/* GPIO chip + offset(s) */
		error = bitmap_parselist(offsets, bitmap, AGGREGATOR_MAX_GPIOS);
		if (error) {
			pr_err("Cannot parse %s: %d\n", offsets, error);
			goto err;
		}

		for_each_set_bit(i, bitmap, AGGREGATOR_MAX_GPIOS) {
			scnprintf(name, sizeof(name), "line%u", n);
			line = gpio_aggregator_line_alloc(aggr, n, key, i);
			if (IS_ERR(line)) {
				error = PTR_ERR(line);
				goto err;
			}
			config_group_init_type_name(&line->group, name,
						    &gpio_aggregator_line_type);
			error = configfs_register_group(&aggr->group,
							&line->group);
			if (error)
				goto err;
			scoped_guard(mutex, &aggr->lock)
				gpio_aggregator_line_add(aggr, line);

			error = gpio_aggregator_add_gpio(aggr, key, i, &n);
			if (error)
				goto err;
		}

		args = next_arg(args, &key, &p);
	}

	if (!n) {
		pr_err("No GPIOs specified\n");
		error = -EINVAL;
		goto err;
	}

	return 0;

err:
	gpio_aggregator_free_lines(aggr);
	return error;
}
1448 
/*
 * Handle writes to the legacy 'new_device' driver attribute: parse the
 * written string as aggregator arguments and, on success, create the
 * mirror configfs group (prefixed "_sysfs"), the GPIO lookup table and
 * the platform device for the new aggregator.
 */
static ssize_t gpio_aggregator_new_device_store(struct device_driver *driver,
						const char *buf, size_t count)
{
	struct gpio_aggregator_pdev_meta meta = { .init_via_sysfs = true };
	char name[CONFIGFS_ITEM_NAME_LEN];
	struct gpio_aggregator *aggr;
	struct platform_device *pdev;
	int res;

	/* Pin the module across device creation. */
	if (!try_module_get(THIS_MODULE))
		return -ENOENT;

	/* kernfs guarantees string termination, so count + 1 is safe */
	res = gpio_aggregator_alloc(&aggr, count + 1);
	if (res)
		goto put_module;

	memcpy(aggr->args, buf, count + 1);

	aggr->init_via_sysfs = true;
	aggr->lookups = kzalloc_flex(*aggr->lookups, table, 1);
	if (!aggr->lookups) {
		res = -ENOMEM;
		goto free_ga;
	}

	/* dev_id must match the platform device registered below. */
	aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, aggr->id);
	if (!aggr->lookups->dev_id) {
		res = -ENOMEM;
		goto free_table;
	}

	/* The reserved "_sysfs" prefix keeps these apart from configfs-made groups. */
	scnprintf(name, sizeof(name), "%s.%d", AGGREGATOR_LEGACY_PREFIX, aggr->id);
	config_group_init_type_name(&aggr->group, name, &gpio_aggregator_device_type);

	/* Expose to configfs */
	res = configfs_register_group(&gpio_aggregator_subsys.su_group,
				      &aggr->group);
	if (res)
		goto free_dev_id;

	res = gpio_aggregator_parse(aggr);
	if (res)
		goto unregister_group;

	gpiod_add_lookup_table(aggr->lookups);

	pdev = platform_device_register_data(NULL, DRV_NAME, aggr->id, &meta, sizeof(meta));
	if (IS_ERR(pdev)) {
		res = PTR_ERR(pdev);
		goto remove_table;
	}

	aggr->pdev = pdev;
	module_put(THIS_MODULE);
	return count;

	/* Unwind in strict reverse order of construction. */
remove_table:
	gpiod_remove_lookup_table(aggr->lookups);
unregister_group:
	configfs_unregister_group(&aggr->group);
free_dev_id:
	kfree(aggr->lookups->dev_id);
free_table:
	kfree(aggr->lookups);
free_ga:
	gpio_aggregator_free(aggr);
put_module:
	module_put(THIS_MODULE);
	return res;
}

static struct driver_attribute driver_attr_gpio_aggregator_new_device =
	__ATTR(new_device, 0200, NULL, gpio_aggregator_new_device_store);
1523 
1524 static void gpio_aggregator_destroy(struct gpio_aggregator *aggr)
1525 {
1526 	scoped_guard(mutex, &aggr->lock) {
1527 		if (gpio_aggregator_is_activating(aggr) ||
1528 		    gpio_aggregator_is_active(aggr))
1529 			gpio_aggregator_deactivate(aggr);
1530 	}
1531 	gpio_aggregator_free_lines(aggr);
1532 	configfs_unregister_group(&aggr->group);
1533 	kfree(aggr);
1534 }
1535 
1536 static ssize_t gpio_aggregator_delete_device_store(struct device_driver *driver,
1537 						   const char *buf, size_t count)
1538 {
1539 	struct gpio_aggregator *aggr;
1540 	unsigned int id;
1541 	int error;
1542 
1543 	if (!str_has_prefix(buf, DRV_NAME "."))
1544 		return -EINVAL;
1545 
1546 	error = kstrtouint(buf + strlen(DRV_NAME "."), 10, &id);
1547 	if (error)
1548 		return error;
1549 
1550 	if (!try_module_get(THIS_MODULE))
1551 		return -ENOENT;
1552 
1553 	mutex_lock(&gpio_aggregator_lock);
1554 	aggr = idr_find(&gpio_aggregator_idr, id);
1555 	/*
1556 	 * For simplicity, devices created via configfs cannot be deleted
1557 	 * via sysfs.
1558 	 */
1559 	if (aggr && aggr->init_via_sysfs)
1560 		idr_remove(&gpio_aggregator_idr, id);
1561 	else {
1562 		mutex_unlock(&gpio_aggregator_lock);
1563 		module_put(THIS_MODULE);
1564 		return -ENOENT;
1565 	}
1566 	mutex_unlock(&gpio_aggregator_lock);
1567 
1568 	gpio_aggregator_destroy(aggr);
1569 	module_put(THIS_MODULE);
1570 	return count;
1571 }
1572 
1573 static struct driver_attribute driver_attr_gpio_aggregator_delete_device =
1574 	__ATTR(delete_device, 0200, NULL, gpio_aggregator_delete_device_store);
1575 
/* Legacy sysfs driver attributes: new_device and delete_device. */
static struct attribute *gpio_aggregator_attrs[] = {
	&driver_attr_gpio_aggregator_new_device.attr,
	&driver_attr_gpio_aggregator_delete_device.attr,
	NULL
};
ATTRIBUTE_GROUPS(gpio_aggregator);
1582 
1583 /*
1584  *  GPIO Aggregator platform device
1585  */
1586 
/*
 * Bind an aggregator platform device: resolve every GPIO assigned to it
 * and hand the descriptors to a gpiochip forwarder, which re-exposes
 * them as a single new GPIO chip.
 */
static int gpio_aggregator_probe(struct platform_device *pdev)
{
	struct gpio_aggregator_pdev_meta *meta;
	struct device *dev = &pdev->dev;
	bool init_via_sysfs = false;
	struct gpio_desc **descs;
	struct gpiochip_fwd *fwd;
	unsigned long features;
	int i, n;

	n = gpiod_count(dev, NULL);
	if (n < 0)
		return n;

	descs = devm_kmalloc_array(dev, n, sizeof(*descs), GFP_KERNEL);
	if (!descs)
		return -ENOMEM;

	/* Platform data (if present) flags devices created via legacy sysfs. */
	meta = dev_get_platdata(&pdev->dev);
	if (meta && meta->init_via_sysfs)
		init_via_sysfs = true;

	for (i = 0; i < n; i++) {
		descs[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
		if (IS_ERR(descs[i])) {
			/*
			 * Deferred probing is not suitable when the aggregator
			 * is created via configfs. They should just retry later
			 * whenever they like. For device creation via sysfs,
			 * error is propagated without overriding for backward
			 * compatibility. .prevent_deferred_probe is kept unset
			 * for other cases.
			 */
			if (!init_via_sysfs && !dev_of_node(dev) &&
			    descs[i] == ERR_PTR(-EPROBE_DEFER)) {
				pr_warn("Deferred probe canceled for creation via configfs.\n");
				return -ENODEV;
			}
			return PTR_ERR(descs[i]);
		}
	}

	features = (uintptr_t)device_get_match_data(dev);
	fwd = gpiochip_fwd_create(dev, n, descs, features);
	if (IS_ERR(fwd))
		return PTR_ERR(fwd);

	platform_set_drvdata(pdev, fwd);
	/*
	 * The descs array was only needed to create the forwarder; free it
	 * early (gpiochip_fwd_create() is assumed to copy what it keeps).
	 */
	devm_kfree(dev, descs);
	return 0;
}
1638 
/* DT match table; match data selects forwarder features. */
static const struct of_device_id gpio_aggregator_dt_ids[] = {
	{
		/* Forwarder with the FWD_FEATURE_DELAY feature enabled. */
		.compatible = "gpio-delay",
		.data = (void *)FWD_FEATURE_DELAY,
	},
	/*
	 * Add GPIO-operated devices controlled from userspace below,
	 * or use "driver_override" in sysfs.
	 */
	{}
};
MODULE_DEVICE_TABLE(of, gpio_aggregator_dt_ids);
1651 
/* Platform driver serving configfs/sysfs-created and DT-declared devices. */
static struct platform_driver gpio_aggregator_driver = {
	.probe = gpio_aggregator_probe,
	.driver = {
		.name = DRV_NAME,
		.groups = gpio_aggregator_groups,
		.of_match_table = gpio_aggregator_dt_ids,
	},
};
1660 
/* idr_for_each() callback: tear down one remaining aggregator at unload. */
static int __exit gpio_aggregator_idr_remove(int id, void *p, void *data)
{
	/*
	 * There should be no aggregator created via configfs, as their
	 * presence would prevent module unloading.
	 */
	gpio_aggregator_destroy(p);
	return 0;
}
1670 
/* Destroy all remaining (legacy-sysfs) aggregators on module unload. */
static void __exit gpio_aggregator_remove_all(void)
{
	/*
	 * Configfs callbacks acquire gpio_aggregator_lock when accessing
	 * gpio_aggregator_idr, so to prevent lock inversion deadlock, we
	 * cannot protect idr_for_each invocation here with
	 * gpio_aggregator_lock, as gpio_aggregator_idr_remove() accesses
	 * configfs groups. Fortunately, the new_device/delete_device path
	 * and the module unload path are mutually exclusive, thanks to an
	 * explicit try_module_get inside of those driver attr handlers.
	 * Also, when we reach here, no configfs entries present or being
	 * created. Therefore, no need to protect with gpio_aggregator_lock
	 * below.
	 */
	idr_for_each(&gpio_aggregator_idr, gpio_aggregator_idr_remove, NULL);
	idr_destroy(&gpio_aggregator_idr);
}
1688 
1689 static int __init gpio_aggregator_init(void)
1690 {
1691 	int ret = 0;
1692 
1693 	config_group_init(&gpio_aggregator_subsys.su_group);
1694 	mutex_init(&gpio_aggregator_subsys.su_mutex);
1695 	ret = configfs_register_subsystem(&gpio_aggregator_subsys);
1696 	if (ret) {
1697 		pr_err("Failed to register the '%s' configfs subsystem: %d\n",
1698 		       gpio_aggregator_subsys.su_group.cg_item.ci_namebuf, ret);
1699 		mutex_destroy(&gpio_aggregator_subsys.su_mutex);
1700 		return ret;
1701 	}
1702 
1703 	/*
1704 	 * CAVEAT: This must occur after configfs registration. Otherwise,
1705 	 * a race condition could arise: driver attribute groups might be
1706 	 * exposed and accessed by users before configfs registration
1707 	 * completes. new_device_store() does not expect a partially
1708 	 * initialized configfs state.
1709 	 */
1710 	ret = platform_driver_register(&gpio_aggregator_driver);
1711 	if (ret) {
1712 		pr_err("Failed to register the platform driver: %d\n", ret);
1713 		mutex_destroy(&gpio_aggregator_subsys.su_mutex);
1714 		configfs_unregister_subsystem(&gpio_aggregator_subsys);
1715 	}
1716 
1717 	return ret;
1718 }
1719 module_init(gpio_aggregator_init);
1720 
static void __exit gpio_aggregator_exit(void)
{
	/* Tear down devices before their driver and configfs go away. */
	gpio_aggregator_remove_all();
	platform_driver_unregister(&gpio_aggregator_driver);
	configfs_unregister_subsystem(&gpio_aggregator_subsys);
}
module_exit(gpio_aggregator_exit);
1728 
/* Module metadata */
MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
MODULE_DESCRIPTION("GPIO Aggregator");
MODULE_LICENSE("GPL v2");
1732