// SPDX-License-Identifier: GPL-2.0
/*
 * MAX10 BMC Platform Management Component Interface (PMCI) based
 * interface.
 *
 * Copyright (C) 2020-2023 Intel Corporation.
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/dfl.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mfd/core.h>
#include <linux/mfd/intel-m10-bmc.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/regmap.h>

struct m10bmc_pmci_device {
	void __iomem *base;
	struct intel_m10bmc m10bmc;
	struct mutex flash_mutex;	/* protects flash_busy and serializes flash read/write */
	bool flash_busy;
};

/*
 * Intel FPGA indirect register access via hardware controller/bridge.
 */
#define INDIRECT_CMD_OFF	0
#define INDIRECT_CMD_CLR	0
#define INDIRECT_CMD_RD		BIT(0)
#define INDIRECT_CMD_WR		BIT(1)
#define INDIRECT_CMD_ACK	BIT(2)

#define INDIRECT_ADDR_OFF	0x4
#define INDIRECT_RD_OFF		0x8
#define INDIRECT_WR_OFF		0xc

#define INDIRECT_INT_US		1
#define INDIRECT_TIMEOUT_US	10000

struct indirect_ctx {
	void __iomem *base;
	struct device *dev;
};

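/*
 * Clear the indirect command register and wait for the controller to
 * report the cleared state, leaving the bridge ready for the next
 * transaction.
 */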
static int indirect_clear_cmd(struct indirect_ctx *ctx)
{
	unsigned int cmd;
	int ret;

	writel(INDIRECT_CMD_CLR, ctx->base + INDIRECT_CMD_OFF);

	ret = readl_poll_timeout(ctx->base + INDIRECT_CMD_OFF, cmd,
				 cmd == INDIRECT_CMD_CLR,
				 INDIRECT_INT_US, INDIRECT_TIMEOUT_US);
	if (ret)
		dev_err(ctx->dev, "timed out waiting clear cmd (residual cmd=0x%x)\n", cmd);

	return ret;
}

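/*
 * Indirect read: latch the target register address, issue
 * INDIRECT_CMD_RD, poll for INDIRECT_CMD_ACK, then fetch the data word.
 * The command register is cleared afterwards even on timeout so the
 * bridge is not left holding a stale command.
 */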
static int indirect_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	struct indirect_ctx *ctx = context;
	unsigned int cmd, ack, tmpval;
	int ret, ret2;

	cmd = readl(ctx->base + INDIRECT_CMD_OFF);
	if (cmd != INDIRECT_CMD_CLR)
		dev_warn(ctx->dev, "residual cmd 0x%x on read entry\n", cmd);

	writel(reg, ctx->base + INDIRECT_ADDR_OFF);
	writel(INDIRECT_CMD_RD, ctx->base + INDIRECT_CMD_OFF);

	ret = readl_poll_timeout(ctx->base + INDIRECT_CMD_OFF, ack,
				 (ack & INDIRECT_CMD_ACK) == INDIRECT_CMD_ACK,
				 INDIRECT_INT_US, INDIRECT_TIMEOUT_US);
	if (ret)
		dev_err(ctx->dev, "read timed out on reg 0x%x ack 0x%x\n", reg, ack);
	else
		tmpval = readl(ctx->base + INDIRECT_RD_OFF);

	ret2 = indirect_clear_cmd(ctx);

	if (ret)
		return ret;
	if (ret2)
		return ret2;

	*val = tmpval;
	return 0;
}

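/*
 * Indirect write: stage the data word, latch the target register
 * address, issue INDIRECT_CMD_WR and poll for INDIRECT_CMD_ACK,
 * clearing the command register afterwards as in the read path.
 */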
static int indirect_reg_write(void *context, unsigned int reg, unsigned int val)
{
	struct indirect_ctx *ctx = context;
	unsigned int cmd, ack;
	int ret, ret2;

	cmd = readl(ctx->base + INDIRECT_CMD_OFF);
	if (cmd != INDIRECT_CMD_CLR)
		dev_warn(ctx->dev, "residual cmd 0x%x on write entry\n", cmd);

	writel(val, ctx->base + INDIRECT_WR_OFF);
	writel(reg, ctx->base + INDIRECT_ADDR_OFF);
	writel(INDIRECT_CMD_WR, ctx->base + INDIRECT_CMD_OFF);

	ret = readl_poll_timeout(ctx->base + INDIRECT_CMD_OFF, ack,
				 (ack & INDIRECT_CMD_ACK) == INDIRECT_CMD_ACK,
				 INDIRECT_INT_US, INDIRECT_TIMEOUT_US);
	if (ret)
		dev_err(ctx->dev, "write timed out on reg 0x%x ack 0x%x\n", reg, ack);

	ret2 = indirect_clear_cmd(ctx);

	if (ret)
		return ret;
	return ret2;
}

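/* Move 32-bit words to/from the flash FIFO, one register access per word */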
static void pmci_write_fifo(void __iomem *base, const u32 *buf, size_t count)
{
	while (count--)
		writel(*buf++, base);
}

static void pmci_read_fifo(void __iomem *base, u32 *buf, size_t count)
{
	while (count--)
		*buf++ = readl(base);
}

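/*
 * Wait for the write FIFO to drain and return the usable space in
 * bytes; returns 0 if the FIFO does not empty within the timeout.
 */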
static u32 pmci_get_write_space(struct m10bmc_pmci_device *pmci)
{
	u32 val;
	int ret;

	ret = read_poll_timeout(readl, val,
				FIELD_GET(M10BMC_N6000_FLASH_FIFO_SPACE, val) ==
				M10BMC_N6000_FIFO_MAX_WORDS,
				M10BMC_FLASH_INT_US, M10BMC_FLASH_TIMEOUT_US,
				false, pmci->base + M10BMC_N6000_FLASH_CTRL);
	if (ret == -ETIMEDOUT)
		return 0;

	return FIELD_GET(M10BMC_N6000_FLASH_FIFO_SPACE, val) * M10BMC_N6000_FIFO_WORD_SIZE;
}

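/*
 * Stream data into the flash write FIFO in whole 32-bit words, bounded
 * by the free space the controller reports; a sub-word tail is
 * zero-padded into one final word.
 */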
static int pmci_flash_bulk_write(struct intel_m10bmc *m10bmc, const u8 *buf, u32 size)
{
	struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);
	u32 blk_size, offset = 0, write_count;

	while (size) {
		blk_size = min(pmci_get_write_space(pmci), size);
		if (blk_size == 0) {
			dev_err(m10bmc->dev, "get FIFO available size fail\n");
			return -EIO;
		}

		if (size < M10BMC_N6000_FIFO_WORD_SIZE)
			break;

		write_count = blk_size / M10BMC_N6000_FIFO_WORD_SIZE;
		pmci_write_fifo(pmci->base + M10BMC_N6000_FLASH_FIFO,
				(u32 *)(buf + offset), write_count);

		size -= blk_size;
		offset += blk_size;
	}

	/* Handle remainder (less than M10BMC_N6000_FIFO_WORD_SIZE bytes) */
	if (size) {
		u32 tmp = 0;

		memcpy(&tmp, buf + offset, size);
		pmci_write_fifo(pmci->base + M10BMC_N6000_FLASH_FIFO, &tmp, 1);
	}

	return 0;
}

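/*
 * Read flash in blocks of up to M10BMC_N6000_READ_BLOCK_SIZE bytes:
 * program the address and word count, poll until the controller clears
 * M10BMC_N6000_FLASH_BUSY, then drain the FIFO.  A trailing partial
 * word is read whole and truncated via memcpy().
 */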
static int pmci_flash_bulk_read(struct intel_m10bmc *m10bmc, u8 *buf, u32 addr, u32 size)
{
	struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);
	u32 blk_size, offset = 0, val, full_read_count, read_count;
	int ret;

	while (size) {
		blk_size = min_t(u32, size, M10BMC_N6000_READ_BLOCK_SIZE);
		full_read_count = blk_size / M10BMC_N6000_FIFO_WORD_SIZE;

		read_count = full_read_count;
		if (full_read_count * M10BMC_N6000_FIFO_WORD_SIZE < blk_size)
			read_count++;

		writel(addr + offset, pmci->base + M10BMC_N6000_FLASH_ADDR);
		writel(FIELD_PREP(M10BMC_N6000_FLASH_READ_COUNT, read_count) |
		       M10BMC_N6000_FLASH_RD_MODE,
		       pmci->base + M10BMC_N6000_FLASH_CTRL);

		ret = readl_poll_timeout((pmci->base + M10BMC_N6000_FLASH_CTRL), val,
					 !(val & M10BMC_N6000_FLASH_BUSY),
					 M10BMC_FLASH_INT_US, M10BMC_FLASH_TIMEOUT_US);
		if (ret) {
			dev_err(m10bmc->dev, "read timed out on reading flash 0x%x\n", val);
			return ret;
		}

		pmci_read_fifo(pmci->base + M10BMC_N6000_FLASH_FIFO,
			       (u32 *)(buf + offset), full_read_count);

		size -= blk_size;
		offset += blk_size;

		if (full_read_count < read_count)
			break;

		writel(0, pmci->base + M10BMC_N6000_FLASH_CTRL);
	}

	/* Handle remainder (less than M10BMC_N6000_FIFO_WORD_SIZE bytes) */
	if (size) {
		u32 tmp;

		pmci_read_fifo(pmci->base + M10BMC_N6000_FLASH_FIFO, &tmp, 1);
		memcpy(buf + offset, &tmp, size);

		writel(0, pmci->base + M10BMC_N6000_FLASH_CTRL);
	}

	return 0;
}

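/*
 * Request or release host ownership of the flash MUX, then poll the MUX
 * control register until the selection matches the request.
 */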
static int m10bmc_pmci_set_flash_host_mux(struct intel_m10bmc *m10bmc, bool request)
{
	u32 ctrl;
	int ret;

	ret = regmap_update_bits(m10bmc->regmap, M10BMC_N6000_FLASH_MUX_CTRL,
				 M10BMC_N6000_FLASH_HOST_REQUEST,
				 FIELD_PREP(M10BMC_N6000_FLASH_HOST_REQUEST, request));
	if (ret)
		return ret;

	return regmap_read_poll_timeout(m10bmc->regmap,
					M10BMC_N6000_FLASH_MUX_CTRL, ctrl,
					request ?
					(get_flash_mux(ctrl) == M10BMC_N6000_FLASH_MUX_HOST) :
					(get_flash_mux(ctrl) != M10BMC_N6000_FLASH_MUX_HOST),
					M10BMC_FLASH_INT_US, M10BMC_FLASH_TIMEOUT_US);
}

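/*
 * Bulk flash read: backs off with -EBUSY while a write session holds
 * the flash, otherwise takes the host MUX around the transfer and
 * always tries to hand it back, returning the first error encountered.
 */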
static int m10bmc_pmci_flash_read(struct intel_m10bmc *m10bmc, u8 *buf, u32 addr, u32 size)
{
	struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);
	int ret, ret2;

	mutex_lock(&pmci->flash_mutex);
	if (pmci->flash_busy) {
		ret = -EBUSY;
		goto unlock;
	}

	ret = m10bmc_pmci_set_flash_host_mux(m10bmc, true);
	if (ret)
		goto mux_fail;

	ret = pmci_flash_bulk_read(m10bmc, buf, addr, size);

mux_fail:
	ret2 = m10bmc_pmci_set_flash_host_mux(m10bmc, false);

unlock:
	mutex_unlock(&pmci->flash_mutex);
	if (ret)
		return ret;
	return ret2;
}

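/*
 * Bulk flash write: only valid inside a write session opened by
 * m10bmc_pmci_flash_lock(); the BMC firmware, not the host, manages the
 * flash MUX for writes.
 */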
static int m10bmc_pmci_flash_write(struct intel_m10bmc *m10bmc, const u8 *buf, u32 offset, u32 size)
{
	struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);
	int ret;

	mutex_lock(&pmci->flash_mutex);
	WARN_ON_ONCE(!pmci->flash_busy);
	/* On write, firmware manages flash MUX */
	ret = pmci_flash_bulk_write(m10bmc, buf + offset, size);
	mutex_unlock(&pmci->flash_mutex);

	return ret;
}

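/*
 * lock_write/unlock_write bracket a write session: flash_busy is set
 * and cleared under flash_mutex so concurrent reads and further lock
 * attempts fail with -EBUSY.
 */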
static int m10bmc_pmci_flash_lock(struct intel_m10bmc *m10bmc)
{
	struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);
	int ret = 0;

	mutex_lock(&pmci->flash_mutex);
	if (pmci->flash_busy) {
		ret = -EBUSY;
		goto unlock;
	}

	pmci->flash_busy = true;

unlock:
	mutex_unlock(&pmci->flash_mutex);
	return ret;
}

static void m10bmc_pmci_flash_unlock(struct intel_m10bmc *m10bmc)
{
	struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);

	mutex_lock(&pmci->flash_mutex);
	WARN_ON_ONCE(!pmci->flash_busy);
	pmci->flash_busy = false;
	mutex_unlock(&pmci->flash_mutex);
}

static const struct intel_m10bmc_flash_bulk_ops m10bmc_pmci_flash_bulk_ops = {
	.read = m10bmc_pmci_flash_read,
	.write = m10bmc_pmci_flash_write,
	.lock_write = m10bmc_pmci_flash_lock,
	.unlock_write = m10bmc_pmci_flash_unlock,
};

static const struct regmap_range m10bmc_pmci_regmap_range[] = {
	regmap_reg_range(M10BMC_N6000_SYS_BASE, M10BMC_N6000_SYS_END),
};

static const struct regmap_access_table m10bmc_pmci_access_table = {
	.yes_ranges	= m10bmc_pmci_regmap_range,
	.n_yes_ranges	= ARRAY_SIZE(m10bmc_pmci_regmap_range),
};

static const struct regmap_config m10bmc_pmci_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.wr_table = &m10bmc_pmci_access_table,
	.rd_table = &m10bmc_pmci_access_table,
	.reg_read = &indirect_reg_read,
	.reg_write = &indirect_reg_write,
	.max_register = M10BMC_N6000_SYS_END,
};

static struct mfd_cell m10bmc_pmci_n6000_bmc_subdevs[] = {
	{ .name = "n6000bmc-hwmon" },
	{ .name = "n6000bmc-sec-update" },
};

static const struct m10bmc_csr_map m10bmc_n6000_csr_map = {
	.base = M10BMC_N6000_SYS_BASE,
	.build_version = M10BMC_N6000_BUILD_VER,
	.fw_version = NIOS2_N6000_FW_VERSION,
	.mac_low = M10BMC_N6000_MAC_LOW,
	.mac_high = M10BMC_N6000_MAC_HIGH,
	.doorbell = M10BMC_N6000_DOORBELL,
	.auth_result = M10BMC_N6000_AUTH_RESULT,
	.bmc_prog_addr = M10BMC_N6000_BMC_PROG_ADDR,
	.bmc_reh_addr = M10BMC_N6000_BMC_REH_ADDR,
	.bmc_magic = M10BMC_N6000_BMC_PROG_MAGIC,
	.sr_prog_addr = M10BMC_N6000_SR_PROG_ADDR,
	.sr_reh_addr = M10BMC_N6000_SR_REH_ADDR,
	.sr_magic = M10BMC_N6000_SR_PROG_MAGIC,
	.pr_prog_addr = M10BMC_N6000_PR_PROG_ADDR,
	.pr_reh_addr = M10BMC_N6000_PR_REH_ADDR,
	.pr_magic = M10BMC_N6000_PR_PROG_MAGIC,
	.rsu_update_counter = M10BMC_N6000_STAGING_FLASH_COUNT,
	.staging_size = M10BMC_STAGING_SIZE,
};

static const struct intel_m10bmc_platform_info m10bmc_pmci_n6000 = {
	.cells = m10bmc_pmci_n6000_bmc_subdevs,
	.n_cells = ARRAY_SIZE(m10bmc_pmci_n6000_bmc_subdevs),
	.csr_map = &m10bmc_n6000_csr_map,
};

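/*
 * Probe: map the PMCI MMIO, clear any residual indirect command, set up
 * the indirect-access regmap, and register the core m10bmc device with
 * the N6000 configuration.
 */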
static int m10bmc_pmci_probe(struct dfl_device *ddev)
{
	struct device *dev = &ddev->dev;
	struct m10bmc_pmci_device *pmci;
	struct indirect_ctx *ctx;
	int ret;

	pmci = devm_kzalloc(dev, sizeof(*pmci), GFP_KERNEL);
	if (!pmci)
		return -ENOMEM;

	pmci->m10bmc.flash_bulk_ops = &m10bmc_pmci_flash_bulk_ops;
	pmci->m10bmc.dev = dev;

	pmci->base = devm_ioremap_resource(dev, &ddev->mmio_res);
	if (IS_ERR(pmci->base))
		return PTR_ERR(pmci->base);

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_init(&pmci->flash_mutex);

	ctx->base = pmci->base + M10BMC_N6000_INDIRECT_BASE;
	ctx->dev = dev;
	indirect_clear_cmd(ctx);
	pmci->m10bmc.regmap = devm_regmap_init(dev, NULL, ctx, &m10bmc_pmci_regmap_config);

	if (IS_ERR(pmci->m10bmc.regmap)) {
		ret = PTR_ERR(pmci->m10bmc.regmap);
		goto destroy_mutex;
	}

	ret = m10bmc_dev_init(&pmci->m10bmc, &m10bmc_pmci_n6000);
	if (ret)
		goto destroy_mutex;
	return 0;

destroy_mutex:
	mutex_destroy(&pmci->flash_mutex);
	return ret;
}

static void m10bmc_pmci_remove(struct dfl_device *ddev)
{
	struct intel_m10bmc *m10bmc = dev_get_drvdata(&ddev->dev);
	struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);

	mutex_destroy(&pmci->flash_mutex);
}

#define FME_FEATURE_ID_M10BMC_PMCI	0x12

static const struct dfl_device_id m10bmc_pmci_ids[] = {
	{ FME_ID, FME_FEATURE_ID_M10BMC_PMCI },
	{ }
};
MODULE_DEVICE_TABLE(dfl, m10bmc_pmci_ids);

static struct dfl_driver m10bmc_pmci_driver = {
	.drv	= {
		.name       = "intel-m10-bmc",
		.dev_groups = m10bmc_dev_groups,
	},
	.id_table = m10bmc_pmci_ids,
	.probe    = m10bmc_pmci_probe,
	.remove   = m10bmc_pmci_remove,
};

module_dfl_driver(m10bmc_pmci_driver);

MODULE_DESCRIPTION("MAX10 BMC PMCI-based interface");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(INTEL_M10_BMC_CORE);