xref: /linux/drivers/mtd/ubi/nvmem.c (revision 2697b79a469b68e3ad3640f55284359c1396278d)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2023 Daniel Golle <daniel@makrotopia.org>
 */

/* UBI NVMEM provider */
#include "ubi.h"
#include <linux/nvmem-provider.h>
#include <asm/div64.h>

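/*
 * This module listens for UBI volume notifications and registers a
 * read-only NVMEM device for every volume whose device tree node
 * carries an "nvmem-layout" child node.
 */
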
/* List of all NVMEM devices */
static LIST_HEAD(nvmem_devices);
static DEFINE_MUTEX(devices_mutex);

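/* Per-volume state for one registered NVMEM device */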
struct ubi_nvmem {
	struct nvmem_device *nvmem;
	int ubi_num;
	int vol_id;
	int usable_leb_size;
	struct list_head list;
};

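/*
 * NVMEM read callback: map the linear NVMEM offset to a (LEB number,
 * offset within the LEB) pair and copy the requested range out of the
 * volume one LEB at a time.
 */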
static int ubi_nvmem_reg_read(void *priv, unsigned int from,
			      void *val, size_t bytes)
{
	size_t to_read, bytes_left = bytes;
	struct ubi_nvmem *unv = priv;
	struct ubi_volume_desc *desc;
	uint32_t offs;
	uint64_t lnum = from;
	int err = 0;

	desc = ubi_open_volume(unv->ubi_num, unv->vol_id, UBI_READONLY);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

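	/*
	 * do_div() updates lnum in place to the starting LEB number and
	 * returns the byte offset within that LEB (safe on 32-bit too).
	 */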
	offs = do_div(lnum, unv->usable_leb_size);
	while (bytes_left) {
		to_read = unv->usable_leb_size - offs;

		if (to_read > bytes_left)
			to_read = bytes_left;

		err = ubi_read(desc, lnum, val, offs, to_read);
		if (err)
			break;

		lnum += 1;
		offs = 0;
		bytes_left -= to_read;
		val += to_read;
	}
	ubi_close_volume(desc);

	if (err)
		return err;

	return bytes_left == 0 ? 0 : -EIO;
}

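/*
 * Register an NVMEM device for @vi.  Only volumes that are described in
 * the device tree and carry an "nvmem-layout" child node are exposed.
 * A device tree fragment could look roughly like the sketch below; the
 * volume name and cells are made up, see the ubi-volume and nvmem
 * layout bindings for the authoritative schema:
 *
 *	ubi-volume-caldata {
 *		volname = "caldata";
 *
 *		nvmem-layout {
 *			compatible = "fixed-layout";
 *			#address-cells = <1>;
 *			#size-cells = <1>;
 *
 *			macaddr: mac-address@0 {
 *				reg = <0x0 0x6>;
 *			};
 *		};
 *	};
 */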
static int ubi_nvmem_add(struct ubi_volume_info *vi)
{
	struct device_node *np = dev_of_node(vi->dev);
	struct nvmem_config config = {};
	struct device_node *nvmem_layout;
	struct ubi_nvmem *unv;
	int ret;

	if (!np)
		return 0;

	nvmem_layout = of_get_child_by_name(np, "nvmem-layout");
	if (!nvmem_layout)
		return 0;
	of_node_put(nvmem_layout);

	if (WARN_ON_ONCE(vi->usable_leb_size <= 0) ||
	    WARN_ON_ONCE(vi->size <= 0))
		return -EINVAL;

	unv = kzalloc(sizeof(struct ubi_nvmem), GFP_KERNEL);
	if (!unv)
		return -ENOMEM;

	config.id = NVMEM_DEVID_NONE;
	config.dev = vi->dev;
	config.name = dev_name(vi->dev);
	config.owner = THIS_MODULE;
	config.priv = unv;
	config.reg_read = ubi_nvmem_reg_read;
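	/* Expose the volume's full capacity: reserved eraseblocks times usable LEB size */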
	config.size = vi->usable_leb_size * vi->size;
	config.word_size = 1;
	config.stride = 1;
	config.read_only = true;
	config.root_only = true;
	config.ignore_wp = true;
	config.of_node = np;

	unv->ubi_num = vi->ubi_num;
	unv->vol_id = vi->vol_id;
	unv->usable_leb_size = vi->usable_leb_size;
	unv->nvmem = nvmem_register(&config);
	if (IS_ERR(unv->nvmem)) {
		ret = dev_err_probe(vi->dev, PTR_ERR(unv->nvmem),
				    "Failed to register NVMEM device\n");
		kfree(unv);
		return ret;
	}

	mutex_lock(&devices_mutex);
	list_add_tail(&unv->list, &nvmem_devices);
	mutex_unlock(&devices_mutex);

	return 0;
}

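/*
 * Unregister and free the NVMEM device backing @vi, if one was
 * registered for this (ubi_num, vol_id) pair.
 */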
static void ubi_nvmem_remove(struct ubi_volume_info *vi)
{
	struct ubi_nvmem *unv_c, *unv = NULL;

	mutex_lock(&devices_mutex);
	list_for_each_entry(unv_c, &nvmem_devices, list)
		if (unv_c->ubi_num == vi->ubi_num && unv_c->vol_id == vi->vol_id) {
			unv = unv_c;
			break;
		}

	if (!unv) {
		mutex_unlock(&devices_mutex);
		return;
	}

	list_del(&unv->list);
	mutex_unlock(&devices_mutex);
	nvmem_unregister(unv->nvmem);
	kfree(unv);
}

/**
 * nvmem_notify - UBI notification handler.
 * @nb: registered notifier block
 * @l: notification type
 * @ns_ptr: pointer to the &struct ubi_notification object
 */
static int nvmem_notify(struct notifier_block *nb, unsigned long l,
			 void *ns_ptr)
{
	struct ubi_notification *nt = ns_ptr;

	switch (l) {
	case UBI_VOLUME_RESIZED:
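		/* Re-register so the NVMEM device size tracks the resized volume */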
		ubi_nvmem_remove(&nt->vi);
		fallthrough;
	case UBI_VOLUME_ADDED:
		ubi_nvmem_add(&nt->vi);
		break;
	case UBI_VOLUME_SHUTDOWN:
		ubi_nvmem_remove(&nt->vi);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block nvmem_notifier = {
	.notifier_call = nvmem_notify,
};

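/*
 * Passing 0 as the second argument asks UBI not to ignore existing
 * volumes, so an "added" notification is replayed for every volume
 * that is already present when the module loads.
 */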
static int __init ubi_nvmem_init(void)
{
	return ubi_register_volume_notifier(&nvmem_notifier, 0);
}

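/*
 * Tear down any NVMEM devices that are still registered, then stop
 * listening for volume notifications.
 */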
static void __exit ubi_nvmem_exit(void)
{
	struct ubi_nvmem *unv, *tmp;

	mutex_lock(&devices_mutex);
	list_for_each_entry_safe(unv, tmp, &nvmem_devices, list) {
		nvmem_unregister(unv->nvmem);
		list_del(&unv->list);
		kfree(unv);
	}
	mutex_unlock(&devices_mutex);

	ubi_unregister_volume_notifier(&nvmem_notifier);
}

module_init(ubi_nvmem_init);
module_exit(ubi_nvmem_exit);
MODULE_DESCRIPTION("NVMEM layer over UBI volumes");
MODULE_AUTHOR("Daniel Golle");
MODULE_LICENSE("GPL");