// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/qcom_scm.h>

#define QCOM_RMTFS_MEM_DEV_MAX	(MINORMASK + 1)

static dev_t qcom_rmtfs_mem_major;

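/*
 * struct qcom_rmtfs_mem - one rmtfs shared memory region
 * @dev:       child device exposing the region via sysfs and a char device
 * @cdev:      character device giving user space read/write/mmap access
 * @base:      kernel (write-combined) mapping of the region
 * @addr:      physical base address of the reserved-memory region
 * @size:      size of the region in bytes
 * @client_id: "qcom,client-id" DT value, also used as the minor number
 * @perms:     bitmap of VMs currently owning the region, as tracked by
 *             qcom_scm_assign_mem()
 */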
struct qcom_rmtfs_mem {
	struct device dev;
	struct cdev cdev;

	void *base;
	phys_addr_t addr;
	phys_addr_t size;

	unsigned int client_id;

	unsigned int perms;
};

static ssize_t qcom_rmtfs_mem_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf);

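/*
 * phys_addr, size and client_id are exposed as read-only sysfs attributes so
 * that user space (e.g. the rmtfs daemon) can identify the region behind each
 * character device.
 */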
static DEVICE_ATTR(phys_addr, 0444, qcom_rmtfs_mem_show, NULL);
static DEVICE_ATTR(size, 0444, qcom_rmtfs_mem_show, NULL);
static DEVICE_ATTR(client_id, 0444, qcom_rmtfs_mem_show, NULL);

static ssize_t qcom_rmtfs_mem_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev,
							struct qcom_rmtfs_mem,
							dev);

	if (attr == &dev_attr_phys_addr)
		return sprintf(buf, "%pa\n", &rmtfs_mem->addr);
	if (attr == &dev_attr_size)
		return sprintf(buf, "%pa\n", &rmtfs_mem->size);
	if (attr == &dev_attr_client_id)
		return sprintf(buf, "%d\n", rmtfs_mem->client_id);

	return -EINVAL;
}

static struct attribute *qcom_rmtfs_mem_attrs[] = {
	&dev_attr_phys_addr.attr,
	&dev_attr_size.attr,
	&dev_attr_client_id.attr,
	NULL
};
ATTRIBUTE_GROUPS(qcom_rmtfs_mem);

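/*
 * open() pins the embedded device so the region and its bookkeeping stay
 * alive for as long as the file descriptor is held; the reference is dropped
 * again in qcom_rmtfs_mem_release().
 */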
static int qcom_rmtfs_mem_open(struct inode *inode, struct file *filp)
{
	struct qcom_rmtfs_mem *rmtfs_mem = container_of(inode->i_cdev,
							struct qcom_rmtfs_mem,
							cdev);

	get_device(&rmtfs_mem->dev);
	filp->private_data = rmtfs_mem;

	return 0;
}
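
/*
 * read()/write() give user space byte-level access to the region through the
 * write-combined kernel mapping; counts are clamped to the bounds of the
 * region and accesses starting at or past the end return 0.
 */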
static ssize_t qcom_rmtfs_mem_read(struct file *filp,
			      char __user *buf, size_t count, loff_t *f_pos)
{
	struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;

	if (*f_pos >= rmtfs_mem->size)
		return 0;

	if (*f_pos + count >= rmtfs_mem->size)
		count = rmtfs_mem->size - *f_pos;

	if (copy_to_user(buf, rmtfs_mem->base + *f_pos, count))
		return -EFAULT;

	*f_pos += count;
	return count;
}

static ssize_t qcom_rmtfs_mem_write(struct file *filp,
			       const char __user *buf, size_t count,
			       loff_t *f_pos)
{
	struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;

	if (*f_pos >= rmtfs_mem->size)
		return 0;

	if (*f_pos + count >= rmtfs_mem->size)
		count = rmtfs_mem->size - *f_pos;

	if (copy_from_user(rmtfs_mem->base + *f_pos, buf, count))
		return -EFAULT;

	*f_pos += count;
	return count;
}

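/* Drop the device reference taken in qcom_rmtfs_mem_open(). */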
static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp)
{
	struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;

	put_device(&rmtfs_mem->dev);

	return 0;
}

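/*
 * All rmtfs devices are grouped under the "rmtfs" class; with devtmpfs/udev
 * the nodes typically show up as /dev/qcom_rmtfs_mem<client-id>.
 */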
static struct class rmtfs_class = {
	.owner          = THIS_MODULE,
	.name           = "rmtfs",
};

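/*
 * mmap() maps the reserved region write-combined into user space. The file
 * offset is ignored (the mapping always starts at the base of the region)
 * and the requested length must not exceed the region size. A minimal,
 * purely illustrative user space sketch (the device name depends on the
 * client-id):
 *
 *	int fd = open("/dev/qcom_rmtfs_mem1", O_RDWR);
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */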
static int qcom_rmtfs_mem_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct qcom_rmtfs_mem *rmtfs_mem = filep->private_data;

	if (vma->vm_end - vma->vm_start > rmtfs_mem->size) {
		dev_dbg(&rmtfs_mem->dev,
			"vm_end[%lu] - vm_start[%lu] [%lu] > mem->size[%pa]\n",
			vma->vm_end, vma->vm_start,
			(vma->vm_end - vma->vm_start), &rmtfs_mem->size);
		return -EINVAL;
	}

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return remap_pfn_range(vma,
			       vma->vm_start,
			       rmtfs_mem->addr >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static const struct file_operations qcom_rmtfs_mem_fops = {
	.owner = THIS_MODULE,
	.open = qcom_rmtfs_mem_open,
	.read = qcom_rmtfs_mem_read,
	.write = qcom_rmtfs_mem_write,
	.release = qcom_rmtfs_mem_release,
	.llseek = default_llseek,
	.mmap = qcom_rmtfs_mem_mmap,
};

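/*
 * Freeing happens here, in the struct device release callback, rather than
 * in remove(): an open file descriptor holds a reference, so the structure
 * stays valid until user space has let go of the device.
 */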
static void qcom_rmtfs_mem_release_device(struct device *dev)
{
	struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev,
							struct qcom_rmtfs_mem,
							dev);

	kfree(rmtfs_mem);
}

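/*
 * The driver binds against a reserved-memory node compatible with
 * "qcom,rmtfs-mem". An illustrative node (addresses, sizes and IDs are
 * board specific) looks roughly like:
 *
 *	rmtfs_mem: memory@86700000 {
 *		compatible = "qcom,rmtfs-mem";
 *		reg = <0x0 0x86700000 0x0 0x200000>;
 *		no-map;
 *
 *		qcom,client-id = <1>;
 *		qcom,vmid = <15>;
 *	};
 *
 * "qcom,vmid" is optional; when present the region is additionally assigned
 * to that VM via an SCM call in probe().
 */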
static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct qcom_scm_vmperm perms[2];
	struct reserved_mem *rmem;
	struct qcom_rmtfs_mem *rmtfs_mem;
	u32 client_id;
	u32 vmid;
	int ret;

	rmem = of_reserved_mem_lookup(node);
	if (!rmem) {
		dev_err(&pdev->dev, "failed to acquire memory region\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(node, "qcom,client-id", &client_id);
	if (ret) {
		dev_err(&pdev->dev, "failed to parse \"qcom,client-id\"\n");
		return ret;
	}

	rmtfs_mem = kzalloc(sizeof(*rmtfs_mem), GFP_KERNEL);
	if (!rmtfs_mem)
		return -ENOMEM;

	rmtfs_mem->addr = rmem->base;
	rmtfs_mem->client_id = client_id;
	rmtfs_mem->size = rmem->size;

	device_initialize(&rmtfs_mem->dev);
	rmtfs_mem->dev.parent = &pdev->dev;
	rmtfs_mem->dev.groups = qcom_rmtfs_mem_groups;
	rmtfs_mem->dev.release = qcom_rmtfs_mem_release_device;

	rmtfs_mem->base = devm_memremap(&rmtfs_mem->dev, rmtfs_mem->addr,
					rmtfs_mem->size, MEMREMAP_WC);
	if (IS_ERR(rmtfs_mem->base)) {
		dev_err(&pdev->dev, "failed to remap rmtfs_mem region\n");
		ret = PTR_ERR(rmtfs_mem->base);
		goto put_device;
	}

	cdev_init(&rmtfs_mem->cdev, &qcom_rmtfs_mem_fops);
	rmtfs_mem->cdev.owner = THIS_MODULE;

	dev_set_name(&rmtfs_mem->dev, "qcom_rmtfs_mem%d", client_id);
	rmtfs_mem->dev.id = client_id;
	rmtfs_mem->dev.class = &rmtfs_class;
	rmtfs_mem->dev.devt = MKDEV(MAJOR(qcom_rmtfs_mem_major), client_id);

	ret = cdev_device_add(&rmtfs_mem->cdev, &rmtfs_mem->dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to add cdev: %d\n", ret);
		goto put_device;
	}

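	/*
	 * "qcom,vmid" is optional: when present, share the region read/write
	 * between HLOS and the given remote VM with a TrustZone SCM call.
	 * rmtfs_mem->perms tracks the current owner bitmap so remove() can
	 * hand the region back to HLOS alone later.
	 */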
	ret = of_property_read_u32(node, "qcom,vmid", &vmid);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(&pdev->dev, "failed to parse qcom,vmid\n");
		goto remove_cdev;
	} else if (!ret) {
		if (!qcom_scm_is_available()) {
			ret = -EPROBE_DEFER;
			goto remove_cdev;
		}

		perms[0].vmid = QCOM_SCM_VMID_HLOS;
		perms[0].perm = QCOM_SCM_PERM_RW;
		perms[1].vmid = vmid;
		perms[1].perm = QCOM_SCM_PERM_RW;

		rmtfs_mem->perms = BIT(QCOM_SCM_VMID_HLOS);
		ret = qcom_scm_assign_mem(rmtfs_mem->addr, rmtfs_mem->size,
					  &rmtfs_mem->perms, perms, 2);
		if (ret < 0) {
			dev_err(&pdev->dev, "assign memory failed\n");
			goto remove_cdev;
		}
	}

	dev_set_drvdata(&pdev->dev, rmtfs_mem);

	return 0;

remove_cdev:
	cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev);
put_device:
	put_device(&rmtfs_mem->dev);

	return ret;
}

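/*
 * remove() hands the region back to HLOS-only ownership (if it was shared)
 * and removes the character device; the backing structure itself is freed by
 * the release callback once the last reference is gone.
 */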
static int qcom_rmtfs_mem_remove(struct platform_device *pdev)
{
	struct qcom_rmtfs_mem *rmtfs_mem = dev_get_drvdata(&pdev->dev);
	struct qcom_scm_vmperm perm;

	if (rmtfs_mem->perms) {
		perm.vmid = QCOM_SCM_VMID_HLOS;
		perm.perm = QCOM_SCM_PERM_RW;

		qcom_scm_assign_mem(rmtfs_mem->addr, rmtfs_mem->size,
				    &rmtfs_mem->perms, &perm, 1);
	}

	cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev);
	put_device(&rmtfs_mem->dev);

	return 0;
}

static const struct of_device_id qcom_rmtfs_mem_of_match[] = {
	{ .compatible = "qcom,rmtfs-mem" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_rmtfs_mem_of_match);

static struct platform_driver qcom_rmtfs_mem_driver = {
	.probe = qcom_rmtfs_mem_probe,
	.remove = qcom_rmtfs_mem_remove,
	.driver  = {
		.name  = "qcom_rmtfs_mem",
		.of_match_table = qcom_rmtfs_mem_of_match,
	},
};

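/*
 * module_platform_driver() alone would not do here: the module also owns the
 * "rmtfs" class and a char-dev major, so init/exit register and tear those
 * down around the platform driver registration.
 */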
static int __init qcom_rmtfs_mem_init(void)
{
	int ret;

	ret = class_register(&rmtfs_class);
	if (ret)
		return ret;

	ret = alloc_chrdev_region(&qcom_rmtfs_mem_major, 0,
				  QCOM_RMTFS_MEM_DEV_MAX, "qcom_rmtfs_mem");
	if (ret < 0) {
		pr_err("qcom_rmtfs_mem: failed to allocate char dev region\n");
		goto unregister_class;
	}

	ret = platform_driver_register(&qcom_rmtfs_mem_driver);
	if (ret < 0) {
		pr_err("qcom_rmtfs_mem: failed to register rmtfs_mem driver\n");
		goto unregister_chrdev;
	}

	return 0;

unregister_chrdev:
	unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
unregister_class:
	class_unregister(&rmtfs_class);
	return ret;
}
module_init(qcom_rmtfs_mem_init);

static void __exit qcom_rmtfs_mem_exit(void)
{
	platform_driver_unregister(&qcom_rmtfs_mem_driver);
	unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
	class_unregister(&rmtfs_class);
}
module_exit(qcom_rmtfs_mem_exit);

MODULE_AUTHOR("Linaro Ltd");
MODULE_DESCRIPTION("Qualcomm Remote Filesystem memory driver");
MODULE_LICENSE("GPL v2");