xref: /linux/drivers/nvmem/zynqmp_nvmem.c (revision 79790b6818e96c58fe2bffee1b418c16e64e7b80)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2019 Xilinx, Inc.
4  * Copyright (C) 2022 - 2023, Advanced Micro Devices, Inc.
5  */
6 
7 #include <linux/dma-mapping.h>
8 #include <linux/module.h>
9 #include <linux/nvmem-provider.h>
10 #include <linux/of.h>
11 #include <linux/platform_device.h>
12 #include <linux/firmware/xlnx-zynqmp.h>
13 
14 #define SILICON_REVISION_MASK 0xF
15 #define P_USER_0_64_UPPER_MASK	GENMASK(31, 16)
16 #define P_USER_127_LOWER_4_BIT_MASK GENMASK(3, 0)
17 #define WORD_INBYTES		4
18 #define SOC_VER_SIZE		0x4
19 #define EFUSE_MEMORY_SIZE	0x177
20 #define UNUSED_SPACE		0x8
21 #define ZYNQMP_NVMEM_SIZE	(SOC_VER_SIZE + UNUSED_SPACE + \
22 				 EFUSE_MEMORY_SIZE)
23 #define SOC_VERSION_OFFSET	0x0
24 #define EFUSE_START_OFFSET	0xC
25 #define EFUSE_END_OFFSET	0xFC
26 #define EFUSE_PUF_START_OFFSET	0x100
27 #define EFUSE_PUF_MID_OFFSET	0x140
28 #define EFUSE_PUF_END_OFFSET	0x17F
29 #define EFUSE_NOT_ENABLED	29
30 
/* Direction of an eFUSE operation as encoded in struct xilinx_efuse::flag. */
enum efuse_access {
	EFUSE_READ = 0,		/* copy fuse contents out to the caller */
	EFUSE_WRITE = 1,	/* program caller data into the fuse array */
};
38 
39 /**
40  * struct xilinx_efuse - the basic structure
41  * @src:	address of the buffer to store the data to be write/read
42  * @size:	read/write word count
43  * @offset:	read/write offset
44  * @flag:	0 - represents efuse read and 1- represents efuse write
45  * @pufuserfuse:0 - represents non-puf efuses, offset is used for read/write
46  *		1 - represents puf user fuse row number.
47  *
48  * this structure stores all the required details to
49  * read/write efuse memory.
50  */
51 struct xilinx_efuse {
52 	u64 src;
53 	u32 size;
54 	u32 offset;
55 	enum efuse_access flag;
56 	u32 pufuserfuse;
57 };
58 
/**
 * zynqmp_efuse_access - read or write eFUSE memory via the PMU firmware
 * @context:	nvmem callback context, the provider's struct device
 * @offset:	byte offset into the eFUSE map (PUF row number when @pufflag)
 * @val:	caller buffer to fill (read) or consume (write)
 * @bytes:	transfer length; must be a multiple of WORD_INBYTES
 * @flag:	EFUSE_READ or EFUSE_WRITE
 * @pufflag:	1 when @offset addresses the PUF user fuse region
 *
 * Marshals the request into a DMA-coherent struct xilinx_efuse plus a
 * DMA-coherent bounce buffer and passes both to the firmware.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int zynqmp_efuse_access(void *context, unsigned int offset,
			       void *val, size_t bytes, enum efuse_access flag,
			       unsigned int pufflag)
{
	struct device *dev = context;
	struct xilinx_efuse *efuse;
	dma_addr_t dma_addr;
	dma_addr_t dma_buf;
	size_t words = bytes / WORD_INBYTES;
	int ret;
	int value;
	char *data;

	if (bytes % WORD_INBYTES != 0) {
		dev_err(dev, "Bytes requested should be word aligned\n");
		return -EOPNOTSUPP;
	}

	if (pufflag == 0 && offset % WORD_INBYTES) {
		dev_err(dev, "Offset requested should be word aligned\n");
		return -EOPNOTSUPP;
	}

	if (pufflag == 1 && flag == EFUSE_WRITE) {
		/*
		 * Only the first word is range-checked below; copying the
		 * full 'bytes' here would overflow the 4-byte 'value'.
		 */
		memcpy(&value, val,
		       bytes < sizeof(value) ? bytes : sizeof(value));
		if ((offset == EFUSE_PUF_START_OFFSET ||
		     offset == EFUSE_PUF_MID_OFFSET) &&
		    value & P_USER_0_64_UPPER_MASK) {
			dev_err(dev, "Only lower 4 bytes are allowed to be programmed in P_USER_0 & P_USER_64\n");
			return -EOPNOTSUPP;
		}

		if (offset == EFUSE_PUF_END_OFFSET &&
		    (value & P_USER_127_LOWER_4_BIT_MASK)) {
			dev_err(dev, "Only MSB 28 bits are allowed to be programmed for P_USER_127\n");
			return -EOPNOTSUPP;
		}
	}

	efuse = dma_alloc_coherent(dev, sizeof(struct xilinx_efuse),
				   &dma_addr, GFP_KERNEL);
	if (!efuse)
		return -ENOMEM;

	/*
	 * The bounce buffer must hold the whole transfer; sizeof(bytes)
	 * would only allocate sizeof(size_t) bytes and overflow for any
	 * request larger than that.
	 */
	data = dma_alloc_coherent(dev, bytes, &dma_buf, GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto efuse_data_fail;
	}

	if (flag == EFUSE_WRITE) {
		memcpy(data, val, bytes);
		efuse->flag = EFUSE_WRITE;
	} else {
		efuse->flag = EFUSE_READ;
	}

	efuse->src = dma_buf;
	efuse->size = words;
	efuse->offset = offset;
	efuse->pufuserfuse = pufflag;

	/* Firmware reports its status through the second argument. */
	zynqmp_pm_efuse_access(dma_addr, (u32 *)&ret);
	if (ret != 0) {
		if (ret == EFUSE_NOT_ENABLED) {
			dev_err(dev, "efuse access is not enabled\n");
			ret = -EOPNOTSUPP;
		} else {
			dev_err(dev, "Error in efuse read %x\n", ret);
			ret = -EPERM;
		}
		goto efuse_access_err;
	}

	if (flag == EFUSE_READ)
		memcpy(val, data, bytes);
efuse_access_err:
	dma_free_coherent(dev, bytes, data, dma_buf);
efuse_data_fail:
	dma_free_coherent(dev, sizeof(struct xilinx_efuse),
			  efuse, dma_addr);

	return ret;
}
145 
/*
 * nvmem reg_read callback: offset 0 reports the silicon revision,
 * the eFUSE/PUF ranges go to the firmware, everything else is padding.
 */
static int zynqmp_nvmem_read(void *context, unsigned int offset, void *val, size_t bytes)
{
	struct device *dev = context;
	int idcode;
	int version;
	int ret;

	/* Silicon revision lives at offset 0 and is exactly one word. */
	if (offset == SOC_VERSION_OFFSET) {
		if (bytes != SOC_VER_SIZE)
			return -EOPNOTSUPP;

		ret = zynqmp_pm_get_chipid((u32 *)&idcode, (u32 *)&version);
		if (ret < 0)
			return ret;

		dev_dbg(dev, "Read chipid val %x %x\n", idcode, version);
		*(int *)val = version & SILICON_REVISION_MASK;
		return 0;
	}

	/* eFUSE proper starts at 0xc; PUF user fuses at 0x100. */
	if ((offset >= EFUSE_START_OFFSET && offset <= EFUSE_END_OFFSET) ||
	    (offset >= EFUSE_PUF_START_OFFSET &&
	     offset <= EFUSE_PUF_END_OFFSET)) {
		int pufflag = (offset >= EFUSE_PUF_START_OFFSET) ? 1 : 0;

		return zynqmp_efuse_access(context, offset, val,
					   bytes, EFUSE_READ, pufflag);
	}

	/* Unused/reserved space: hand back a recognizable filler word. */
	*(u32 *)val = 0xDEADBEEF;
	return 0;
}
184 
/* nvmem reg_write callback: only the eFUSE/PUF ranges are programmable. */
static int zynqmp_nvmem_write(void *context,
			      unsigned int offset, void *val, size_t bytes)
{
	int pufflag;

	if (offset < EFUSE_START_OFFSET || offset > EFUSE_PUF_END_OFFSET)
		return -EOPNOTSUPP;

	/* The guard above already caps offset at EFUSE_PUF_END_OFFSET. */
	pufflag = (offset >= EFUSE_PUF_START_OFFSET) ? 1 : 0;

	return zynqmp_efuse_access(context, offset,
				   val, bytes, EFUSE_WRITE, pufflag);
}
199 
200 static const struct of_device_id zynqmp_nvmem_match[] = {
201 	{ .compatible = "xlnx,zynqmp-nvmem-fw", },
202 	{ /* sentinel */ },
203 };
204 MODULE_DEVICE_TABLE(of, zynqmp_nvmem_match);
205 
zynqmp_nvmem_probe(struct platform_device * pdev)206 static int zynqmp_nvmem_probe(struct platform_device *pdev)
207 {
208 	struct device *dev = &pdev->dev;
209 	struct nvmem_config econfig = {};
210 
211 	econfig.name = "zynqmp-nvmem";
212 	econfig.owner = THIS_MODULE;
213 	econfig.word_size = 1;
214 	econfig.size = ZYNQMP_NVMEM_SIZE;
215 	econfig.dev = dev;
216 	econfig.add_legacy_fixed_of_cells = true;
217 	econfig.reg_read = zynqmp_nvmem_read;
218 	econfig.reg_write = zynqmp_nvmem_write;
219 
220 	return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &econfig));
221 }
222 
223 static struct platform_driver zynqmp_nvmem_driver = {
224 	.probe = zynqmp_nvmem_probe,
225 	.driver = {
226 		.name = "zynqmp-nvmem",
227 		.of_match_table = zynqmp_nvmem_match,
228 	},
229 };
230 
231 module_platform_driver(zynqmp_nvmem_driver);
232 
233 MODULE_AUTHOR("Michal Simek <michal.simek@amd.com>, Nava kishore Manne <nava.kishore.manne@amd.com>");
234 MODULE_DESCRIPTION("ZynqMP NVMEM driver");
235 MODULE_LICENSE("GPL");
236