// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NAND Controller Driver for Loongson family chips
 *
 * Copyright (C) 2015-2025 Keguang Zhang <keguang.zhang@gmail.com>
 * Copyright (C) 2025 Binbin Zhou <zhoubinbin@loongson.cn>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bitfield.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sizes.h>

/* Loongson NAND Controller Registers */
#define LOONGSON_NAND_CMD		0x0
#define LOONGSON_NAND_ADDR1		0x4
#define LOONGSON_NAND_ADDR2		0x8
#define LOONGSON_NAND_TIMING		0xc
#define LOONGSON_NAND_IDL		0x10
#define LOONGSON_NAND_IDH_STATUS	0x14
#define LOONGSON_NAND_PARAM		0x18
#define LOONGSON_NAND_OP_NUM		0x1c
#define LOONGSON_NAND_CS_RDY_MAP	0x20

/* Bitfields of nand command register */
#define LOONGSON_NAND_CMD_OP_DONE	BIT(10)
#define LOONGSON_NAND_CMD_OP_SPARE	BIT(9)
#define LOONGSON_NAND_CMD_OP_MAIN	BIT(8)
#define LOONGSON_NAND_CMD_STATUS	BIT(7)
#define LOONGSON_NAND_CMD_RESET		BIT(6)
#define LOONGSON_NAND_CMD_READID	BIT(5)
#define LOONGSON_NAND_CMD_BLOCKS_ERASE	BIT(4)
#define LOONGSON_NAND_CMD_ERASE		BIT(3)
#define LOONGSON_NAND_CMD_WRITE		BIT(2)
#define LOONGSON_NAND_CMD_READ		BIT(1)
#define LOONGSON_NAND_CMD_VALID		BIT(0)

/* Bitfields of nand cs/rdy map register */
#define LOONGSON_NAND_MAP_CS1_SEL	GENMASK(11, 8)
#define LOONGSON_NAND_MAP_RDY1_SEL	GENMASK(15, 12)
#define LOONGSON_NAND_MAP_CS2_SEL	GENMASK(19, 16)
#define LOONGSON_NAND_MAP_RDY2_SEL	GENMASK(23, 20)
#define LOONGSON_NAND_MAP_CS3_SEL	GENMASK(27, 24)
#define LOONGSON_NAND_MAP_RDY3_SEL	GENMASK(31, 28)

#define LOONGSON_NAND_CS_SEL0		BIT(0)
#define LOONGSON_NAND_CS_SEL1		BIT(1)
#define LOONGSON_NAND_CS_SEL2		BIT(2)
#define LOONGSON_NAND_CS_SEL3		BIT(3)
#define LOONGSON_NAND_CS_RDY0		BIT(0)
#define LOONGSON_NAND_CS_RDY1		BIT(1)
#define LOONGSON_NAND_CS_RDY2		BIT(2)
#define LOONGSON_NAND_CS_RDY3		BIT(3)

/* Bitfields of nand timing register */
#define LOONGSON_NAND_WAIT_CYCLE_MASK	GENMASK(7, 0)
#define LOONGSON_NAND_HOLD_CYCLE_MASK	GENMASK(15, 8)

/* Bitfields of nand parameter register */
#define LOONGSON_NAND_CELL_SIZE_MASK	GENMASK(11, 8)

#define LOONGSON_NAND_COL_ADDR_CYC	2U
#define LOONGSON_NAND_MAX_ADDR_CYC	5U

#define LOONGSON_NAND_READ_ID_SLEEP_US		1000
#define LOONGSON_NAND_READ_ID_TIMEOUT_US	5000

#define BITS_PER_WORD			(4 * BITS_PER_BYTE)

/* Loongson-2K1000 NAND DMA routing register */
#define LS2K1000_NAND_DMA_MASK         GENMASK(2, 0)
#define LS2K1000_DMA0_CONF             0x0
#define LS2K1000_DMA1_CONF             0x1
#define LS2K1000_DMA2_CONF             0x2
#define LS2K1000_DMA3_CONF             0x3
#define LS2K1000_DMA4_CONF             0x4

struct loongson_nand_host;

struct loongson_nand_op {
	char addrs[LOONGSON_NAND_MAX_ADDR_CYC];
	unsigned int naddrs;
	unsigned int addrs_offset;
	unsigned int aligned_offset;
	unsigned int cmd_reg;
	unsigned int row_start;
	unsigned int rdy_timeout_ms;
	unsigned int orig_len;
	bool is_readid;
	bool is_erase;
	bool is_write;
	bool is_read;
	bool is_change_column;
	size_t len;
	char *buf;
};

struct loongson_nand_data {
	unsigned int max_id_cycle;
	unsigned int id_cycle_field;
	unsigned int status_field;
	unsigned int op_scope_field;
	unsigned int hold_cycle;
	unsigned int wait_cycle;
	unsigned int nand_cs;
	unsigned int dma_bits;
	int (*dma_config)(struct device *dev);
	void (*set_addr)(struct loongson_nand_host *host, struct loongson_nand_op *op);
};

struct loongson_nand_host {
	struct device *dev;
	struct nand_chip chip;
	struct nand_controller controller;
	const struct loongson_nand_data *data;
	unsigned int addr_cs_field;
	void __iomem *reg_base;
	struct regmap *regmap;
	/* DMA Engine stuff */
	dma_addr_t dma_base;
	struct dma_chan *dma_chan;
	dma_cookie_t dma_cookie;
	struct completion dma_complete;
};

static const struct regmap_config loongson_nand_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

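/*
 * Translate a raw NAND opcode into the matching bit of the controller
 * command register, and record which multi-step operation (erase, program,
 * read, change-column) is in progress so the second opcode of the sequence
 * can be validated.
 */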
static int loongson_nand_op_cmd_mapping(struct nand_chip *chip, struct loongson_nand_op *op,
					u8 opcode)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);

	op->row_start = chip->page_shift + 1;

	/* The controller abstracts the following NAND operations. */
	switch (opcode) {
	case NAND_CMD_STATUS:
		op->cmd_reg = LOONGSON_NAND_CMD_STATUS;
		break;
	case NAND_CMD_RESET:
		op->cmd_reg = LOONGSON_NAND_CMD_RESET;
		break;
	case NAND_CMD_READID:
		op->is_readid = true;
		op->cmd_reg = LOONGSON_NAND_CMD_READID;
		break;
	case NAND_CMD_ERASE1:
		op->is_erase = true;
		op->addrs_offset = LOONGSON_NAND_COL_ADDR_CYC;
		break;
	case NAND_CMD_ERASE2:
		if (!op->is_erase)
			return -EOPNOTSUPP;
		/* During erasing, row_start differs from the default value. */
		op->row_start = chip->page_shift;
		op->cmd_reg = LOONGSON_NAND_CMD_ERASE;
		break;
	case NAND_CMD_SEQIN:
		op->is_write = true;
		break;
	case NAND_CMD_PAGEPROG:
		if (!op->is_write)
			return -EOPNOTSUPP;
		op->cmd_reg = LOONGSON_NAND_CMD_WRITE;
		break;
	case NAND_CMD_READ0:
		op->is_read = true;
		break;
	case NAND_CMD_READSTART:
		if (!op->is_read)
			return -EOPNOTSUPP;
		op->cmd_reg = LOONGSON_NAND_CMD_READ;
		break;
	case NAND_CMD_RNDOUT:
		op->is_change_column = true;
		break;
	case NAND_CMD_RNDOUTSTART:
		if (!op->is_change_column)
			return -EOPNOTSUPP;
		op->cmd_reg = LOONGSON_NAND_CMD_READ;
		break;
	default:
		dev_dbg(host->dev, "unsupported opcode: %u\n", opcode);
		return -EOPNOTSUPP;
	}

	return 0;
}

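/*
 * Walk the sub-operation and collect the command, address cycles, data
 * buffer and ready/busy timeout into a single loongson_nand_op descriptor.
 */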
static int loongson_nand_parse_instructions(struct nand_chip *chip, const struct nand_subop *subop,
					    struct loongson_nand_op *op)
{
	unsigned int op_id;
	int ret;

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		const struct nand_op_instr *instr = &subop->instrs[op_id];
		unsigned int offset, naddrs;
		const u8 *addrs;

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			ret = loongson_nand_op_cmd_mapping(chip, op, instr->ctx.cmd.opcode);
			if (ret < 0)
				return ret;

			break;
		case NAND_OP_ADDR_INSTR:
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			if (naddrs > LOONGSON_NAND_MAX_ADDR_CYC)
				return -EOPNOTSUPP;
			op->naddrs = naddrs;
			offset = nand_subop_get_addr_start_off(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];
			memcpy(op->addrs + op->addrs_offset, addrs, naddrs);
			break;
		case NAND_OP_DATA_IN_INSTR:
		case NAND_OP_DATA_OUT_INSTR:
			offset = nand_subop_get_data_start_off(subop, op_id);
			op->orig_len = nand_subop_get_data_len(subop, op_id);
			if (instr->type == NAND_OP_DATA_IN_INSTR)
				op->buf = instr->ctx.data.buf.in + offset;
			else if (instr->type == NAND_OP_DATA_OUT_INSTR)
				op->buf = (void *)instr->ctx.data.buf.out + offset;

			break;
		case NAND_OP_WAITRDY_INSTR:
			op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
			break;
		default:
			break;
		}
	}

	return 0;
}

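/*
 * Program the chip-select bits that live in the high part of the row
 * address (ADDR2). addr_cs_field is derived from the chip capacity in
 * loongson_nand_get_chip_capacity().
 */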
static void loongson_nand_set_addr_cs(struct loongson_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (!host->data->nand_cs)
		return;

	/*
	 * The Manufacturer/Chip ID read operation precedes attach_chip, at which point
	 * information such as NAND chip selection and capacity is unknown. As a
	 * workaround, we use 128MB cellsize (2KB pagesize) as a fallback.
	 */
	if (!mtd->writesize)
		host->addr_cs_field = GENMASK(17, 16);

	regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, host->addr_cs_field,
			   host->data->nand_cs << __ffs(host->addr_cs_field));
}

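/*
 * LS1B address layout: the column cycles and row cycles share ADDR1, with
 * the row bits starting at op->row_start; the part of a fifth address
 * cycle that does not fit in the 32-bit ADDR1 register spills into ADDR2.
 */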
static void ls1b_nand_set_addr(struct loongson_nand_host *host, struct loongson_nand_op *op)
{
	struct nand_chip *chip = &host->chip;
	int i;

	for (i = 0; i < LOONGSON_NAND_MAX_ADDR_CYC; i++) {
		int shift, mask, val;

		if (i < LOONGSON_NAND_COL_ADDR_CYC) {
			shift = i * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			mask &= GENMASK(chip->page_shift, 0);
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);
		} else if (!op->is_change_column) {
			shift = op->row_start + (i - LOONGSON_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);

			if (i == 4) {
				mask = (u32)0xff >> (BITS_PER_WORD - shift);
				val = (u32)op->addrs[i] >> (BITS_PER_WORD - shift);
				regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, mask, val);
			}
		}
	}
}

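/*
 * LS1C/LS2K address layout: the two column cycles go to ADDR1 and the row
 * cycles to ADDR2, so no bit packing against row_start is needed.
 */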
static void ls1c_nand_set_addr(struct loongson_nand_host *host, struct loongson_nand_op *op)
{
	int i;

	for (i = 0; i < LOONGSON_NAND_MAX_ADDR_CYC; i++) {
		int shift, mask, val;

		if (i < LOONGSON_NAND_COL_ADDR_CYC) {
			shift = i * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);
		} else if (!op->is_change_column) {
			shift = (i - LOONGSON_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, mask, val);
		}
	}

	loongson_nand_set_addr_cs(host);
}

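/*
 * Program address, length, operation scope and command, then set the VALID
 * bit to kick off the operation. Unaligned column addresses are aligned
 * down here; the extra leading bytes are skipped later when the DMA bounce
 * buffer is copied out.
 */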
static void loongson_nand_trigger_op(struct loongson_nand_host *host, struct loongson_nand_op *op)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int col0 = op->addrs[0];
	short col;

	if (!IS_ALIGNED(col0, chip->buf_align)) {
		col0 = ALIGN_DOWN(op->addrs[0], chip->buf_align);
		op->aligned_offset = op->addrs[0] - col0;
		op->addrs[0] = col0;
	}

	if (host->data->set_addr)
		host->data->set_addr(host, op);

	/* set operation length */
	if (op->is_write || op->is_read || op->is_change_column)
		op->len = ALIGN(op->orig_len + op->aligned_offset, chip->buf_align);
	else if (op->is_erase)
		op->len = 1;
	else
		op->len = op->orig_len;

	writel(op->len, host->reg_base + LOONGSON_NAND_OP_NUM);

	/* set operation area and scope */
	col = op->addrs[1] << BITS_PER_BYTE | op->addrs[0];
	if (op->orig_len && !op->is_readid) {
		unsigned int op_scope = 0;

		if (col < mtd->writesize) {
			op->cmd_reg |= LOONGSON_NAND_CMD_OP_MAIN;
			op_scope = mtd->writesize;
		}

		op->cmd_reg |= LOONGSON_NAND_CMD_OP_SPARE;
		op_scope += mtd->oobsize;

		op_scope <<= __ffs(host->data->op_scope_field);
		regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM,
				   host->data->op_scope_field, op_scope);
	}

	/* set command */
	writel(op->cmd_reg, host->reg_base + LOONGSON_NAND_CMD);

	/* trigger operation */
	regmap_write_bits(host->regmap, LOONGSON_NAND_CMD, LOONGSON_NAND_CMD_VALID,
			  LOONGSON_NAND_CMD_VALID);
}

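/* Poll the command register until the controller reports OP_DONE. */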
static int loongson_nand_wait_for_op_done(struct loongson_nand_host *host,
					  struct loongson_nand_op *op)
{
	unsigned int val;
	int ret = 0;

	if (op->rdy_timeout_ms) {
		ret = regmap_read_poll_timeout(host->regmap, LOONGSON_NAND_CMD,
					       val, val & LOONGSON_NAND_CMD_OP_DONE,
					       0, op->rdy_timeout_ms * USEC_PER_MSEC);
		if (ret)
			dev_err(host->dev, "operation failed\n");
	}

	return ret;
}

static void loongson_nand_dma_callback(void *data)
{
	struct loongson_nand_host *host = (struct loongson_nand_host *)data;
	struct dma_chan *chan = host->dma_chan;
	struct device *dev = chan->device->dev;
	enum dma_status status;

	status = dmaengine_tx_status(chan, host->dma_cookie, NULL);
	if (likely(status == DMA_COMPLETE)) {
		dev_dbg(dev, "DMA complete with cookie=%d\n", host->dma_cookie);
		complete(&host->dma_complete);
	} else {
		dev_err(dev, "DMA error with cookie=%d\n", host->dma_cookie);
	}
}

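/*
 * Move data between memory and the controller through the DMA engine.
 * Buffers that satisfy the controller alignment are mapped directly;
 * unaligned reads go through a coherent bounce buffer, and unaligned
 * writes are rejected since subpage writes are not supported.
 */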
static int loongson_nand_dma_transfer(struct loongson_nand_host *host, struct loongson_nand_op *op)
{
	struct nand_chip *chip = &host->chip;
	struct dma_chan *chan = host->dma_chan;
	struct device *dev = chan->device->dev;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction data_dir = op->is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	enum dma_transfer_direction xfer_dir = op->is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	void *buf = op->buf;
	char *dma_buf = NULL;
	dma_addr_t dma_addr;
	int ret;

	if (IS_ALIGNED((uintptr_t)buf, chip->buf_align) &&
	    IS_ALIGNED(op->orig_len, chip->buf_align)) {
		dma_addr = dma_map_single(dev, buf, op->orig_len, data_dir);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "failed to map DMA buffer\n");
			return -ENXIO;
		}
	} else if (!op->is_write) {
		dma_buf = dma_alloc_coherent(dev, op->len, &dma_addr, GFP_KERNEL);
		if (!dma_buf)
			return -ENOMEM;
	} else {
		dev_err(dev, "subpage writing not supported\n");
		return -EOPNOTSUPP;
	}

	desc = dmaengine_prep_slave_single(chan, dma_addr, op->len, xfer_dir, DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "failed to prepare DMA descriptor\n");
		ret = -ENOMEM;
		goto err;
	}
	desc->callback = loongson_nand_dma_callback;
	desc->callback_param = host;

	host->dma_cookie = dmaengine_submit(desc);
	ret = dma_submit_error(host->dma_cookie);
	if (ret) {
		dev_err(dev, "failed to submit DMA descriptor\n");
		goto err;
	}

	dev_dbg(dev, "issue DMA with cookie=%d\n", host->dma_cookie);
	dma_async_issue_pending(chan);

	if (!wait_for_completion_timeout(&host->dma_complete, msecs_to_jiffies(1000))) {
		dmaengine_terminate_sync(chan);
		reinit_completion(&host->dma_complete);
		ret = -ETIMEDOUT;
		goto err;
	}

	if (dma_buf)
		memcpy(buf, dma_buf + op->aligned_offset, op->orig_len);
err:
	if (dma_buf)
		dma_free_coherent(dev, op->len, dma_buf, dma_addr);
	else
		dma_unmap_single(dev, dma_addr, op->orig_len, data_dir);

	return ret;
}

static int loongson_nand_data_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	struct loongson_nand_op op = {};
	int ret;

	ret = loongson_nand_parse_instructions(chip, subop, &op);
	if (ret)
		return ret;

	loongson_nand_trigger_op(host, &op);

	ret = loongson_nand_dma_transfer(host, &op);
	if (ret)
		return ret;

	return loongson_nand_wait_for_op_done(host, &op);
}

static int loongson_nand_misc_type_exec(struct nand_chip *chip, const struct nand_subop *subop,
					struct loongson_nand_op *op)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	int ret;

	ret = loongson_nand_parse_instructions(chip, subop, op);
	if (ret)
		return ret;

	loongson_nand_trigger_op(host, op);

	return loongson_nand_wait_for_op_done(host, op);
}

static int loongson_nand_zerolen_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
	struct loongson_nand_op op = {};

	return loongson_nand_misc_type_exec(chip, subop, &op);
}

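/*
 * The controller latches the ID bytes across the IDL/IDH registers with
 * the first ID byte in the most significant position, so the union below
 * (little-endian layout) is copied out in reverse order.
 */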
static int loongson_nand_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	struct loongson_nand_op op = {};
	int i, ret;
	union {
		char ids[6];
		struct {
			int idl;
			u16 idh;
		};
	} nand_id;

	ret = loongson_nand_misc_type_exec(chip, subop, &op);
	if (ret)
		return ret;

	ret = regmap_read_poll_timeout(host->regmap, LOONGSON_NAND_IDL, nand_id.idl, nand_id.idl,
				       LOONGSON_NAND_READ_ID_SLEEP_US,
				       LOONGSON_NAND_READ_ID_TIMEOUT_US);
	if (ret)
		return ret;

	nand_id.idh = readw(host->reg_base + LOONGSON_NAND_IDH_STATUS);

	for (i = 0; i < min(host->data->max_id_cycle, op.orig_len); i++)
		op.buf[i] = nand_id.ids[host->data->max_id_cycle - 1 - i];

	return ret;
}

static int loongson_nand_read_status_type_exec(struct nand_chip *chip,
					       const struct nand_subop *subop)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	struct loongson_nand_op op = {};
	int val, ret;

	ret = loongson_nand_misc_type_exec(chip, subop, &op);
	if (ret)
		return ret;

	/* Extract the chip status bits from the shared ID/status register. */
	val = readl(host->reg_base + LOONGSON_NAND_IDH_STATUS);
	val &= host->data->status_field;
	op.buf[0] = val >> __ffs(host->data->status_field);

	return ret;
}

static const struct nand_op_parser loongson_nand_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(
		loongson_nand_read_id_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_read_status_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_zerolen_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_zerolen_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_data_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_data_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	);

static int loongson_nand_is_valid_cmd(u8 opcode)
{
	if (opcode == NAND_CMD_STATUS || opcode == NAND_CMD_RESET || opcode == NAND_CMD_READID)
		return 0;

	return -EOPNOTSUPP;
}

static int loongson_nand_is_valid_cmd_seq(u8 opcode1, u8 opcode2)
{
	if (opcode1 == NAND_CMD_RNDOUT && opcode2 == NAND_CMD_RNDOUTSTART)
		return 0;

	if (opcode1 == NAND_CMD_READ0 && opcode2 == NAND_CMD_READSTART)
		return 0;

	if (opcode1 == NAND_CMD_ERASE1 && opcode2 == NAND_CMD_ERASE2)
		return 0;

	if (opcode1 == NAND_CMD_SEQIN && opcode2 == NAND_CMD_PAGEPROG)
		return 0;

	return -EOPNOTSUPP;
}

static int loongson_nand_check_op(struct nand_chip *chip, const struct nand_operation *op)
{
	const struct nand_op_instr *instr1 = NULL, *instr2 = NULL;
	int op_id;

	for (op_id = 0; op_id < op->ninstrs; op_id++) {
		const struct nand_op_instr *instr = &op->instrs[op_id];

		if (instr->type == NAND_OP_CMD_INSTR) {
			if (!instr1)
				instr1 = instr;
			else if (!instr2)
				instr2 = instr;
			else
				break;
		}
	}

	if (!instr1)
		return -EOPNOTSUPP;

	if (!instr2)
		return loongson_nand_is_valid_cmd(instr1->ctx.cmd.opcode);

	return loongson_nand_is_valid_cmd_seq(instr1->ctx.cmd.opcode, instr2->ctx.cmd.opcode);
}

static int loongson_nand_exec_op(struct nand_chip *chip, const struct nand_operation *op,
				 bool check_only)
{
	if (check_only)
		return loongson_nand_check_op(chip, op);

	return nand_op_parser_exec_op(chip, &loongson_nand_op_parser, op, check_only);
}

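/*
 * Return the cell-size encoding for the PARAM register and record which
 * ADDR2 bits carry the chip-select for this page size/capacity pair.
 */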
static int loongson_nand_get_chip_capacity(struct nand_chip *chip)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	u64 chipsize = nanddev_target_size(&chip->base);
	struct mtd_info *mtd = nand_to_mtd(chip);

	switch (mtd->writesize) {
	case SZ_512:
		switch (chipsize) {
		case SZ_8M:
			host->addr_cs_field = GENMASK(15, 14);
			return 0x9;
		case SZ_16M:
			host->addr_cs_field = GENMASK(16, 15);
			return 0xa;
		case SZ_32M:
			host->addr_cs_field = GENMASK(17, 16);
			return 0xb;
		case SZ_64M:
			host->addr_cs_field = GENMASK(18, 17);
			return 0xc;
		case SZ_128M:
			host->addr_cs_field = GENMASK(19, 18);
			return 0xd;
		}
		break;
	case SZ_2K:
		switch (chipsize) {
		case SZ_128M:
			host->addr_cs_field = GENMASK(17, 16);
			return 0x0;
		case SZ_256M:
			host->addr_cs_field = GENMASK(18, 17);
			return 0x1;
		case SZ_512M:
			host->addr_cs_field = GENMASK(19, 18);
			return 0x2;
		case SZ_1G:
			host->addr_cs_field = GENMASK(20, 19);
			return 0x3;
		}
		break;
	case SZ_4K:
		if (chipsize == SZ_2G) {
			host->addr_cs_field = GENMASK(20, 19);
			return 0x4;
		}
		break;
	case SZ_8K:
		switch (chipsize) {
		case SZ_4G:
			host->addr_cs_field = GENMASK(20, 19);
			return 0x5;
		case SZ_8G:
			host->addr_cs_field = GENMASK(21, 20);
			return 0x6;
		case SZ_16G:
			host->addr_cs_field = GENMASK(22, 21);
			return 0x7;
		}
		break;
	}

	dev_err(host->dev, "Unsupported chip size: %llu MiB with page size %u B\n",
		chipsize >> 20, mtd->writesize);
	return -EINVAL;
}

static int loongson_nand_attach_chip(struct nand_chip *chip)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	int cell_size = loongson_nand_get_chip_capacity(chip);

	if (cell_size < 0)
		return cell_size;

	switch (chip->ecc.engine_type) {
	case NAND_ECC_ENGINE_TYPE_NONE:
	case NAND_ECC_ENGINE_TYPE_SOFT:
		break;
	default:
		return -EINVAL;
	}

	/* set cell size */
	regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM, LOONGSON_NAND_CELL_SIZE_MASK,
			   FIELD_PREP(LOONGSON_NAND_CELL_SIZE_MASK, cell_size));

	regmap_update_bits(host->regmap, LOONGSON_NAND_TIMING, LOONGSON_NAND_HOLD_CYCLE_MASK,
			   FIELD_PREP(LOONGSON_NAND_HOLD_CYCLE_MASK, host->data->hold_cycle));

	regmap_update_bits(host->regmap, LOONGSON_NAND_TIMING, LOONGSON_NAND_WAIT_CYCLE_MASK,
			   FIELD_PREP(LOONGSON_NAND_WAIT_CYCLE_MASK, host->data->wait_cycle));

	chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
	chip->ecc.write_page_raw = nand_monolithic_write_page_raw;

	return 0;
}

static const struct nand_controller_ops loongson_nand_controller_ops = {
	.exec_op = loongson_nand_exec_op,
	.attach_chip = loongson_nand_attach_chip,
};

static void loongson_nand_controller_cleanup(struct loongson_nand_host *host)
{
	if (host->dma_chan)
		dma_release_channel(host->dma_chan);
}

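/*
 * Select APB DMA controller 0 as the engine serving the NAND controller
 * through the "dma-config" routing register.
 */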
static int ls2k1000_nand_apbdma_config(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	void __iomem *regs;
	int val;

	regs = devm_platform_ioremap_resource_byname(pdev, "dma-config");
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	val = readl(regs);
	val &= ~LS2K1000_NAND_DMA_MASK;
	val |= FIELD_PREP(LS2K1000_NAND_DMA_MASK, LS2K1000_DMA0_CONF);
	writel(val, regs);

	return 0;
}

static int loongson_nand_controller_init(struct loongson_nand_host *host)
{
	struct device *dev = host->dev;
	struct dma_chan *chan;
	struct dma_slave_config cfg = {};
	int ret, val;

	host->regmap = devm_regmap_init_mmio(dev, host->reg_base, &loongson_nand_regmap_config);
	if (IS_ERR(host->regmap))
		return dev_err_probe(dev, PTR_ERR(host->regmap), "failed to init regmap\n");

	if (host->data->id_cycle_field)
		regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM, host->data->id_cycle_field,
				   host->data->max_id_cycle << __ffs(host->data->id_cycle_field));

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(host->data->dma_bits));
	if (ret)
		return dev_err_probe(dev, ret, "failed to set DMA mask\n");

	val = FIELD_PREP(LOONGSON_NAND_MAP_CS1_SEL, LOONGSON_NAND_CS_SEL1) |
	      FIELD_PREP(LOONGSON_NAND_MAP_RDY1_SEL, LOONGSON_NAND_CS_RDY1) |
	      FIELD_PREP(LOONGSON_NAND_MAP_CS2_SEL, LOONGSON_NAND_CS_SEL2) |
	      FIELD_PREP(LOONGSON_NAND_MAP_RDY2_SEL, LOONGSON_NAND_CS_RDY2) |
	      FIELD_PREP(LOONGSON_NAND_MAP_CS3_SEL, LOONGSON_NAND_CS_SEL3) |
	      FIELD_PREP(LOONGSON_NAND_MAP_RDY3_SEL, LOONGSON_NAND_CS_RDY3);

	regmap_write(host->regmap, LOONGSON_NAND_CS_RDY_MAP, val);

	if (host->data->dma_config) {
		ret = host->data->dma_config(dev);
		if (ret)
			return dev_err_probe(dev, ret, "failed to config DMA routing\n");
	}

	chan = dma_request_chan(dev, "rxtx");
	if (IS_ERR(chan))
		return dev_err_probe(dev, PTR_ERR(chan), "failed to request DMA channel\n");
	host->dma_chan = chan;

	cfg.src_addr = host->dma_base;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr = host->dma_base;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	ret = dmaengine_slave_config(host->dma_chan, &cfg);
	if (ret)
		return dev_err_probe(dev, ret, "failed to config DMA channel\n");

	init_completion(&host->dma_complete);

	return 0;
}

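/*
 * Instantiate the single NAND chip described in the device tree child
 * node and register the resulting MTD device.
 */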
static int loongson_nand_chip_init(struct loongson_nand_host *host)
{
	struct device *dev = host->dev;
	int nchips = of_get_child_count(dev->of_node);
	struct device_node *chip_np;
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	if (nchips != 1)
		return dev_err_probe(dev, -EINVAL, "Currently only one NAND chip is supported\n");

	chip_np = of_get_next_child(dev->of_node, NULL);
	if (!chip_np)
		return dev_err_probe(dev, -ENODEV, "failed to get child node for NAND chip\n");

	nand_set_flash_node(chip, chip_np);
	of_node_put(chip_np);
	if (!mtd->name)
		return dev_err_probe(dev, -EINVAL, "Missing MTD label\n");

	nand_set_controller_data(chip, host);
	chip->controller = &host->controller;
	chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_BROKEN_XD;
	chip->buf_align = 16;
	mtd->dev.parent = dev;
	mtd->owner = THIS_MODULE;

	ret = nand_scan(chip, 1);
	if (ret)
		return dev_err_probe(dev, ret, "failed to scan NAND chip\n");

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		nand_cleanup(chip);
		return dev_err_probe(dev, ret, "failed to register MTD device\n");
	}

	return 0;
}

static int loongson_nand_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct loongson_nand_data *data;
	struct loongson_nand_host *host;
	struct resource *res;
	int ret;

	data = of_device_get_match_data(dev);
	if (!data)
		return -ENODEV;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(host->reg_base))
		return PTR_ERR(host->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-dma");
	if (!res)
		return dev_err_probe(dev, -EINVAL, "Missing 'nand-dma' in reg-names property\n");

	host->dma_base = dma_map_resource(dev, res->start, resource_size(res),
					  DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, host->dma_base))
		return -ENXIO;

	host->dev = dev;
	host->data = data;
	host->controller.ops = &loongson_nand_controller_ops;

	nand_controller_init(&host->controller);

	ret = loongson_nand_controller_init(host);
	if (ret)
		goto err;

	ret = loongson_nand_chip_init(host);
	if (ret)
		goto err;

	platform_set_drvdata(pdev, host);

	return 0;
err:
	loongson_nand_controller_cleanup(host);

	return ret;
}

static void loongson_nand_remove(struct platform_device *pdev)
{
	struct loongson_nand_host *host = platform_get_drvdata(pdev);
	struct nand_chip *chip = &host->chip;
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);
	loongson_nand_controller_cleanup(host);
}

static const struct loongson_nand_data ls1b_nand_data = {
	.max_id_cycle = 5,
	.status_field = GENMASK(15, 8),
	.hold_cycle = 0x2,
	.wait_cycle = 0xc,
	.dma_bits = 32,
	.set_addr = ls1b_nand_set_addr,
};

static const struct loongson_nand_data ls1c_nand_data = {
	.max_id_cycle = 6,
	.id_cycle_field = GENMASK(14, 12),
	.status_field = GENMASK(23, 16),
	.op_scope_field = GENMASK(29, 16),
	.hold_cycle = 0x2,
	.wait_cycle = 0xc,
	.dma_bits = 32,
	.set_addr = ls1c_nand_set_addr,
};

static const struct loongson_nand_data ls2k0500_nand_data = {
	.max_id_cycle = 6,
	.id_cycle_field = GENMASK(14, 12),
	.status_field = GENMASK(23, 16),
	.op_scope_field = GENMASK(29, 16),
	.hold_cycle = 0x4,
	.wait_cycle = 0x12,
	.dma_bits = 64,
	.set_addr = ls1c_nand_set_addr,
};

static const struct loongson_nand_data ls2k1000_nand_data = {
	.max_id_cycle = 6,
	.id_cycle_field = GENMASK(14, 12),
	.status_field = GENMASK(23, 16),
	.op_scope_field = GENMASK(29, 16),
	.hold_cycle = 0x4,
	.wait_cycle = 0x12,
	.nand_cs = 0x2,
	.dma_bits = 64,
	.dma_config = ls2k1000_nand_apbdma_config,
	.set_addr = ls1c_nand_set_addr,
};

static const struct of_device_id loongson_nand_match[] = {
	{
		.compatible = "loongson,ls1b-nand-controller",
		.data = &ls1b_nand_data,
	},
	{
		.compatible = "loongson,ls1c-nand-controller",
		.data = &ls1c_nand_data,
	},
	{
		.compatible = "loongson,ls2k0500-nand-controller",
		.data = &ls2k0500_nand_data,
	},
	{
		.compatible = "loongson,ls2k1000-nand-controller",
		.data = &ls2k1000_nand_data,
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, loongson_nand_match);

static struct platform_driver loongson_nand_driver = {
	.probe = loongson_nand_probe,
	.remove = loongson_nand_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = loongson_nand_match,
	},
};

module_platform_driver(loongson_nand_driver);

MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
MODULE_AUTHOR("Binbin Zhou <zhoubinbin@loongson.cn>");
MODULE_DESCRIPTION("Loongson NAND Controller Driver");
MODULE_LICENSE("GPL");