xref: /linux/drivers/mtd/nand/raw/loongson-nand-controller.c (revision e55bbdd4a4b654dfe8ee8649b3be1db82319d6c0)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * NAND Controller Driver for Loongson family chips
4  *
5  * Copyright (C) 2015-2025 Keguang Zhang <keguang.zhang@gmail.com>
6  * Copyright (C) 2025 Binbin Zhou <zhoubinbin@loongson.cn>
7  */
8 
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
#include <linux/time64.h>
20 
21 /* Loongson NAND Controller Registers */
22 #define LOONGSON_NAND_CMD		0x0
23 #define LOONGSON_NAND_ADDR1		0x4
24 #define LOONGSON_NAND_ADDR2		0x8
25 #define LOONGSON_NAND_TIMING		0xc
26 #define LOONGSON_NAND_IDL		0x10
27 #define LOONGSON_NAND_IDH_STATUS	0x14
28 #define LOONGSON_NAND_PARAM		0x18
29 #define LOONGSON_NAND_OP_NUM		0x1c
30 #define LOONGSON_NAND_CS_RDY_MAP	0x20
31 
32 /* Bitfields of nand command register */
33 #define LOONGSON_NAND_CMD_OP_DONE	BIT(10)
34 #define LOONGSON_NAND_CMD_OP_SPARE	BIT(9)
35 #define LOONGSON_NAND_CMD_OP_MAIN	BIT(8)
36 #define LOONGSON_NAND_CMD_STATUS	BIT(7)
37 #define LOONGSON_NAND_CMD_RESET		BIT(6)
38 #define LOONGSON_NAND_CMD_READID	BIT(5)
39 #define LOONGSON_NAND_CMD_BLOCKS_ERASE	BIT(4)
40 #define LOONGSON_NAND_CMD_ERASE		BIT(3)
41 #define LOONGSON_NAND_CMD_WRITE		BIT(2)
42 #define LOONGSON_NAND_CMD_READ		BIT(1)
43 #define LOONGSON_NAND_CMD_VALID		BIT(0)
44 
45 /* Bitfields of nand cs/rdy map register */
46 #define LOONGSON_NAND_MAP_CS1_SEL	GENMASK(11, 8)
47 #define LOONGSON_NAND_MAP_RDY1_SEL	GENMASK(15, 12)
48 #define LOONGSON_NAND_MAP_CS2_SEL	GENMASK(19, 16)
49 #define LOONGSON_NAND_MAP_RDY2_SEL	GENMASK(23, 20)
50 #define LOONGSON_NAND_MAP_CS3_SEL	GENMASK(27, 24)
51 #define LOONGSON_NAND_MAP_RDY3_SEL	GENMASK(31, 28)
52 
53 #define LOONGSON_NAND_CS_SEL0		BIT(0)
54 #define LOONGSON_NAND_CS_SEL1		BIT(1)
55 #define LOONGSON_NAND_CS_SEL2		BIT(2)
56 #define LOONGSON_NAND_CS_SEL3		BIT(3)
57 #define LOONGSON_NAND_CS_RDY0		BIT(0)
58 #define LOONGSON_NAND_CS_RDY1		BIT(1)
59 #define LOONGSON_NAND_CS_RDY2		BIT(2)
60 #define LOONGSON_NAND_CS_RDY3		BIT(3)
61 
62 /* Bitfields of nand timing register */
63 #define LOONGSON_NAND_WAIT_CYCLE_MASK	GENMASK(7, 0)
64 #define LOONGSON_NAND_HOLD_CYCLE_MASK	GENMASK(15, 8)
65 
66 /* Bitfields of nand parameter register */
67 #define LOONGSON_NAND_CELL_SIZE_MASK	GENMASK(11, 8)
68 
69 #define LOONGSON_NAND_COL_ADDR_CYC	2U
70 #define LOONGSON_NAND_MAX_ADDR_CYC	5U
71 
72 #define LOONGSON_NAND_READ_ID_SLEEP_US		1000
73 #define LOONGSON_NAND_READ_ID_TIMEOUT_US	5000
74 
75 #define BITS_PER_WORD			(4 * BITS_PER_BYTE)
76 
77 struct loongson_nand_host;
78 
struct loongson_nand_op {
	/* Address cycles collected from the parsed instruction stream */
	char addrs[LOONGSON_NAND_MAX_ADDR_CYC];
	unsigned int naddrs;
	/* Index in addrs[] where parsed address bytes start (2 for erase) */
	unsigned int addrs_offset;
	/* Bytes to skip at the front of the bounce buffer for unaligned columns */
	unsigned int aligned_offset;
	/* Value to program into the controller command register */
	unsigned int cmd_reg;
	/* Bit position in ADDR1 where the row address begins */
	unsigned int row_start;
	/* Ready timeout from the WAITRDY instruction, in milliseconds */
	unsigned int rdy_timeout_ms;
	/* Requested transfer length before alignment */
	unsigned int orig_len;
	bool is_readid;
	bool is_erase;
	bool is_write;
	bool is_read;
	bool is_change_column;
	/* Aligned length actually written to the OP_NUM register */
	size_t len;
	char *buf;
};
96 
/* Per-SoC controller parameters and address-programming callback. */
struct loongson_nand_data {
	/* Number of ID bytes the controller latches on READID */
	unsigned int max_id_cycle;
	/* PARAM field holding the ID cycle count (0 when the SoC has none) */
	unsigned int id_cycle_field;
	/* IDH_STATUS field carrying the NAND status byte */
	unsigned int status_field;
	/* PARAM field for the operation scope (0 when absent, e.g. LS1B) */
	unsigned int op_scope_field;
	unsigned int hold_cycle;
	unsigned int wait_cycle;
	unsigned int nand_cs;
	/* DMA addressing capability of the SoC, in bits */
	unsigned int dma_bits;
	/* SoC-specific programming of the ADDR1/ADDR2 registers */
	void (*set_addr)(struct loongson_nand_host *host, struct loongson_nand_op *op);
};
108 
struct loongson_nand_host {
	struct device *dev;
	struct nand_chip chip;
	struct nand_controller controller;
	/* SoC-specific parameters and callbacks */
	const struct loongson_nand_data *data;
	/* ADDR2 field holding the chip-select bits; set in attach_chip */
	unsigned int addr_cs_field;
	void __iomem *reg_base;
	struct regmap *regmap;
	/* DMA Engine stuff */
	dma_addr_t dma_base;
	struct dma_chan *dma_chan;
	dma_cookie_t dma_cookie;
	struct completion dma_complete;
};
123 
/* All controller registers are 32-bit MMIO words at a 4-byte stride. */
static const struct regmap_config loongson_nand_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};
129 
130 static int loongson_nand_op_cmd_mapping(struct nand_chip *chip, struct loongson_nand_op *op,
131 					u8 opcode)
132 {
133 	struct loongson_nand_host *host = nand_get_controller_data(chip);
134 
135 	op->row_start = chip->page_shift + 1;
136 
137 	/* The controller abstracts the following NAND operations. */
138 	switch (opcode) {
139 	case NAND_CMD_STATUS:
140 		op->cmd_reg = LOONGSON_NAND_CMD_STATUS;
141 		break;
142 	case NAND_CMD_RESET:
143 		op->cmd_reg = LOONGSON_NAND_CMD_RESET;
144 		break;
145 	case NAND_CMD_READID:
146 		op->is_readid = true;
147 		op->cmd_reg = LOONGSON_NAND_CMD_READID;
148 		break;
149 	case NAND_CMD_ERASE1:
150 		op->is_erase = true;
151 		op->addrs_offset = LOONGSON_NAND_COL_ADDR_CYC;
152 		break;
153 	case NAND_CMD_ERASE2:
154 		if (!op->is_erase)
155 			return -EOPNOTSUPP;
156 		/* During erasing, row_start differs from the default value. */
157 		op->row_start = chip->page_shift;
158 		op->cmd_reg = LOONGSON_NAND_CMD_ERASE;
159 		break;
160 	case NAND_CMD_SEQIN:
161 		op->is_write = true;
162 		break;
163 	case NAND_CMD_PAGEPROG:
164 		if (!op->is_write)
165 			return -EOPNOTSUPP;
166 		op->cmd_reg = LOONGSON_NAND_CMD_WRITE;
167 		break;
168 	case NAND_CMD_READ0:
169 		op->is_read = true;
170 		break;
171 	case NAND_CMD_READSTART:
172 		if (!op->is_read)
173 			return -EOPNOTSUPP;
174 		op->cmd_reg = LOONGSON_NAND_CMD_READ;
175 		break;
176 	case NAND_CMD_RNDOUT:
177 		op->is_change_column = true;
178 		break;
179 	case NAND_CMD_RNDOUTSTART:
180 		if (!op->is_change_column)
181 			return -EOPNOTSUPP;
182 		op->cmd_reg = LOONGSON_NAND_CMD_READ;
183 		break;
184 	default:
185 		dev_dbg(host->dev, "unsupported opcode: %u\n", opcode);
186 		return -EOPNOTSUPP;
187 	}
188 
189 	return 0;
190 }
191 
/*
 * Walk the parsed subop instruction list and fill @op with everything the
 * controller needs: the abstracted command, address bytes, data buffer
 * pointer/length and the ready timeout.
 *
 * Returns 0 on success or -EOPNOTSUPP for unsupported opcodes/sequences
 * or too many address cycles.
 */
static int loongson_nand_parse_instructions(struct nand_chip *chip, const struct nand_subop *subop,
					    struct loongson_nand_op *op)
{
	unsigned int op_id;
	int ret;

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		const struct nand_op_instr *instr = &subop->instrs[op_id];
		unsigned int offset, naddrs;
		const u8 *addrs;

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			ret = loongson_nand_op_cmd_mapping(chip, op, instr->ctx.cmd.opcode);
			if (ret < 0)
				return ret;

			break;
		case NAND_OP_ADDR_INSTR:
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			if (naddrs > LOONGSON_NAND_MAX_ADDR_CYC)
				return -EOPNOTSUPP;
			op->naddrs = naddrs;
			offset = nand_subop_get_addr_start_off(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];
			/* addrs_offset is non-zero for erase (row-only addressing) */
			memcpy(op->addrs + op->addrs_offset, addrs, naddrs);
			break;
		case NAND_OP_DATA_IN_INSTR:
		case NAND_OP_DATA_OUT_INSTR:
			offset = nand_subop_get_data_start_off(subop, op_id);
			op->orig_len = nand_subop_get_data_len(subop, op_id);
			if (instr->type == NAND_OP_DATA_IN_INSTR)
				op->buf = instr->ctx.data.buf.in + offset;
			else if (instr->type == NAND_OP_DATA_OUT_INSTR)
				op->buf = (void *)instr->ctx.data.buf.out + offset;

			break;
		case NAND_OP_WAITRDY_INSTR:
			op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
			break;
		default:
			break;
		}
	}

	return 0;
}
239 
/*
 * Program the chip-select bits into the ADDR2 register for SoCs that
 * encode the CS in the address (nand_cs != 0). The bit position of the
 * CS field depends on the chip capacity and is set by attach_chip.
 */
static void loongson_nand_set_addr_cs(struct loongson_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (!host->data->nand_cs)
		return;

	/*
	 * The Manufacturer/Chip ID read operation precedes attach_chip, at which point
	 * information such as NAND chip selection and capacity is unknown. As a
	 * workaround, we use 128MB cellsize (2KB pagesize) as a fallback.
	 */
	if (!mtd->writesize)
		host->addr_cs_field = GENMASK(17, 16);

	regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, host->addr_cs_field,
			   host->data->nand_cs << __ffs(host->addr_cs_field));
}
259 
/*
 * Program the address cycles into ADDR1/ADDR2 for LS1B.
 *
 * Column bytes occupy the low bits of ADDR1 (masked to the page size);
 * row bytes start at op->row_start. The 5th cycle's high bits spill into
 * ADDR2 when the row field crosses the 32-bit ADDR1 boundary.
 * Column-change (RNDOUT) operations only reprogram the column bytes.
 */
static void ls1b_nand_set_addr(struct loongson_nand_host *host, struct loongson_nand_op *op)
{
	struct nand_chip *chip = &host->chip;
	int i;

	for (i = 0; i < LOONGSON_NAND_MAX_ADDR_CYC; i++) {
		int shift, mask, val;

		if (i < LOONGSON_NAND_COL_ADDR_CYC) {
			shift = i * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			/* Clamp the column to the valid page-offset bits. */
			mask &= GENMASK(chip->page_shift, 0);
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);
		} else if (!op->is_change_column) {
			shift = op->row_start + (i - LOONGSON_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);

			/* Last cycle: bits above bit 31 of ADDR1 go into ADDR2. */
			if (i == 4) {
				mask = (u32)0xff >> (BITS_PER_WORD - shift);
				val = (u32)op->addrs[i] >> (BITS_PER_WORD - shift);
				regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, mask, val);
			}
		}
	}
}
288 
/*
 * Program the address cycles for LS1C/LS2K0500: column bytes go into
 * ADDR1, row bytes into ADDR2 (starting at bit 0), and the chip-select
 * bits are merged into ADDR2 afterwards.
 */
static void ls1c_nand_set_addr(struct loongson_nand_host *host, struct loongson_nand_op *op)
{
	int i;

	for (i = 0; i < LOONGSON_NAND_MAX_ADDR_CYC; i++) {
		int shift, mask, val;

		if (i < LOONGSON_NAND_COL_ADDR_CYC) {
			shift = i * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);
		} else if (!op->is_change_column) {
			shift = (i - LOONGSON_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, mask, val);
		}
	}

	loongson_nand_set_addr_cs(host);
}
311 
312 static void loongson_nand_trigger_op(struct loongson_nand_host *host, struct loongson_nand_op *op)
313 {
314 	struct nand_chip *chip = &host->chip;
315 	struct mtd_info *mtd = nand_to_mtd(chip);
316 	int col0 = op->addrs[0];
317 	short col;
318 
319 	if (!IS_ALIGNED(col0, chip->buf_align)) {
320 		col0 = ALIGN_DOWN(op->addrs[0], chip->buf_align);
321 		op->aligned_offset = op->addrs[0] - col0;
322 		op->addrs[0] = col0;
323 	}
324 
325 	if (host->data->set_addr)
326 		host->data->set_addr(host, op);
327 
328 	/* set operation length */
329 	if (op->is_write || op->is_read || op->is_change_column)
330 		op->len = ALIGN(op->orig_len + op->aligned_offset, chip->buf_align);
331 	else if (op->is_erase)
332 		op->len = 1;
333 	else
334 		op->len = op->orig_len;
335 
336 	writel(op->len, host->reg_base + LOONGSON_NAND_OP_NUM);
337 
338 	/* set operation area and scope */
339 	col = op->addrs[1] << BITS_PER_BYTE | op->addrs[0];
340 	if (op->orig_len && !op->is_readid) {
341 		unsigned int op_scope = 0;
342 
343 		if (col < mtd->writesize) {
344 			op->cmd_reg |= LOONGSON_NAND_CMD_OP_MAIN;
345 			op_scope = mtd->writesize;
346 		}
347 
348 		op->cmd_reg |= LOONGSON_NAND_CMD_OP_SPARE;
349 		op_scope += mtd->oobsize;
350 
351 		op_scope <<= __ffs(host->data->op_scope_field);
352 		regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM,
353 				   host->data->op_scope_field, op_scope);
354 	}
355 
356 	/* set command */
357 	writel(op->cmd_reg, host->reg_base + LOONGSON_NAND_CMD);
358 
359 	/* trigger operation */
360 	regmap_write_bits(host->regmap, LOONGSON_NAND_CMD, LOONGSON_NAND_CMD_VALID,
361 			  LOONGSON_NAND_CMD_VALID);
362 }
363 
364 static int loongson_nand_wait_for_op_done(struct loongson_nand_host *host,
365 					  struct loongson_nand_op *op)
366 {
367 	unsigned int val;
368 	int ret = 0;
369 
370 	if (op->rdy_timeout_ms) {
371 		ret = regmap_read_poll_timeout(host->regmap, LOONGSON_NAND_CMD,
372 					       val, val & LOONGSON_NAND_CMD_OP_DONE,
373 					       0, op->rdy_timeout_ms * MSEC_PER_SEC);
374 		if (ret)
375 			dev_err(host->dev, "operation failed\n");
376 	}
377 
378 	return ret;
379 }
380 
381 static void loongson_nand_dma_callback(void *data)
382 {
383 	struct loongson_nand_host *host = (struct loongson_nand_host *)data;
384 	struct dma_chan *chan = host->dma_chan;
385 	struct device *dev = chan->device->dev;
386 	enum dma_status status;
387 
388 	status = dmaengine_tx_status(chan, host->dma_cookie, NULL);
389 	if (likely(status == DMA_COMPLETE)) {
390 		dev_dbg(dev, "DMA complete with cookie=%d\n", host->dma_cookie);
391 		complete(&host->dma_complete);
392 	} else {
393 		dev_err(dev, "DMA error with cookie=%d\n", host->dma_cookie);
394 	}
395 }
396 
/*
 * Run the data phase of @op through the DMA Engine.
 *
 * Aligned buffers are mapped directly; unaligned reads go through a
 * coherent bounce buffer and are copied back afterwards (skipping the
 * column-alignment offset). Unaligned writes are rejected since the
 * controller cannot do subpage writes.
 *
 * Returns 0 on success, -ENXIO/-ENOMEM/-EOPNOTSUPP/-ETIMEDOUT on failure.
 */
static int loongson_nand_dma_transfer(struct loongson_nand_host *host, struct loongson_nand_op *op)
{
	struct nand_chip *chip = &host->chip;
	struct dma_chan *chan = host->dma_chan;
	struct device *dev = chan->device->dev;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction data_dir = op->is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	enum dma_transfer_direction xfer_dir = op->is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	void *buf = op->buf;
	char *dma_buf = NULL;
	dma_addr_t dma_addr;
	int ret;

	if (IS_ALIGNED((uintptr_t)buf, chip->buf_align) &&
	    IS_ALIGNED(op->orig_len, chip->buf_align)) {
		/* Fast path: DMA straight into/out of the caller's buffer. */
		dma_addr = dma_map_single(dev, buf, op->orig_len, data_dir);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "failed to map DMA buffer\n");
			return -ENXIO;
		}
	} else if (!op->is_write) {
		/* Unaligned read: bounce through a coherent buffer of op->len. */
		dma_buf = dma_alloc_coherent(dev, op->len, &dma_addr, GFP_KERNEL);
		if (!dma_buf)
			return -ENOMEM;
	} else {
		dev_err(dev, "subpage writing not supported\n");
		return -EOPNOTSUPP;
	}

	desc = dmaengine_prep_slave_single(chan, dma_addr, op->len, xfer_dir, DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "failed to prepare DMA descriptor\n");
		ret = -ENOMEM;
		goto err;
	}
	desc->callback = loongson_nand_dma_callback;
	desc->callback_param = host;

	host->dma_cookie = dmaengine_submit(desc);
	ret = dma_submit_error(host->dma_cookie);
	if (ret) {
		dev_err(dev, "failed to submit DMA descriptor\n");
		goto err;
	}

	dev_dbg(dev, "issue DMA with cookie=%d\n", host->dma_cookie);
	dma_async_issue_pending(chan);

	if (!wait_for_completion_timeout(&host->dma_complete, msecs_to_jiffies(1000))) {
		dmaengine_terminate_sync(chan);
		reinit_completion(&host->dma_complete);
		ret = -ETIMEDOUT;
		goto err;
	}

	/* Bounce-buffer read: copy back, skipping the alignment offset. */
	if (dma_buf)
		memcpy(buf, dma_buf + op->aligned_offset, op->orig_len);
err:
	/* Release whichever mapping was taken above. */
	if (dma_buf)
		dma_free_coherent(dev, op->len, dma_buf, dma_addr);
	else
		dma_unmap_single(dev, dma_addr, op->orig_len, data_dir);

	return ret;
}
462 
463 static int loongson_nand_data_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
464 {
465 	struct loongson_nand_host *host = nand_get_controller_data(chip);
466 	struct loongson_nand_op op = {};
467 	int ret;
468 
469 	ret = loongson_nand_parse_instructions(chip, subop, &op);
470 	if (ret)
471 		return ret;
472 
473 	loongson_nand_trigger_op(host, &op);
474 
475 	ret = loongson_nand_dma_transfer(host, &op);
476 	if (ret)
477 		return ret;
478 
479 	return loongson_nand_wait_for_op_done(host, &op);
480 }
481 
/*
 * Execute a subop with no data phase: parse, trigger, then wait for the
 * controller to report completion.
 */
static int loongson_nand_misc_type_exec(struct nand_chip *chip, const struct nand_subop *subop,
					struct loongson_nand_op *op)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	int ret;

	ret = loongson_nand_parse_instructions(chip, subop, op);
	if (!ret) {
		loongson_nand_trigger_op(host, op);
		ret = loongson_nand_wait_for_op_done(host, op);
	}

	return ret;
}
496 
/* Execute a zero-length subop (e.g. RESET, or erase sequences). */
static int loongson_nand_zerolen_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
	struct loongson_nand_op op = {};

	return loongson_nand_misc_type_exec(chip, subop, &op);
}
503 
/*
 * Execute a READID sequence: trigger the operation, wait for the ID
 * registers to hold a value, then copy the requested ID bytes into the
 * caller's buffer.
 */
static int loongson_nand_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	struct loongson_nand_op op = {};
	int i, ret;
	/*
	 * Byte overlay of the IDL (4 bytes) and IDH (2 bytes) registers.
	 * NOTE(review): this overlay assumes a little-endian CPU — confirm
	 * if this driver is ever built for a big-endian configuration.
	 */
	union {
		char ids[6];
		struct {
			int idl;
			u16 idh;
		};
	} nand_id;

	ret = loongson_nand_misc_type_exec(chip, subop, &op);
	if (ret)
		return ret;

	/* Poll until the controller has latched a non-zero ID low word. */
	ret = regmap_read_poll_timeout(host->regmap, LOONGSON_NAND_IDL, nand_id.idl, nand_id.idl,
				       LOONGSON_NAND_READ_ID_SLEEP_US,
				       LOONGSON_NAND_READ_ID_TIMEOUT_US);
	if (ret)
		return ret;

	nand_id.idh = readw(host->reg_base + LOONGSON_NAND_IDH_STATUS);

	/* The controller stores the ID bytes in reverse order. */
	for (i = 0; i < min(host->data->max_id_cycle, op.orig_len); i++)
		op.buf[i] = nand_id.ids[host->data->max_id_cycle - 1 - i];

	return ret;
}
534 
535 static int loongson_nand_read_status_type_exec(struct nand_chip *chip,
536 					       const struct nand_subop *subop)
537 {
538 	struct loongson_nand_host *host = nand_get_controller_data(chip);
539 	struct loongson_nand_op op = {};
540 	int val, ret;
541 
542 	ret = loongson_nand_misc_type_exec(chip, subop, &op);
543 	if (ret)
544 		return ret;
545 
546 	val = readl(host->reg_base + LOONGSON_NAND_IDH_STATUS);
547 	val &= ~host->data->status_field;
548 	op.buf[0] = val << ffs(host->data->status_field);
549 
550 	return ret;
551 }
552 
/*
 * Operation parser patterns, matched in order:
 * READID, STATUS, single-command + wait (RESET), two-command + wait
 * (erase), read (data in after the wait) and write (data out before the
 * second command).
 */
static const struct nand_op_parser loongson_nand_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(
		loongson_nand_read_id_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_read_status_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_zerolen_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_zerolen_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_data_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_data_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	);
588 
589 static int loongson_nand_is_valid_cmd(u8 opcode)
590 {
591 	if (opcode == NAND_CMD_STATUS || opcode == NAND_CMD_RESET || opcode == NAND_CMD_READID)
592 		return 0;
593 
594 	return -EOPNOTSUPP;
595 }
596 
597 static int loongson_nand_is_valid_cmd_seq(u8 opcode1, u8 opcode2)
598 {
599 	if (opcode1 == NAND_CMD_RNDOUT && opcode2 == NAND_CMD_RNDOUTSTART)
600 		return 0;
601 
602 	if (opcode1 == NAND_CMD_READ0 && opcode2 == NAND_CMD_READSTART)
603 		return 0;
604 
605 	if (opcode1 == NAND_CMD_ERASE1 && opcode2 == NAND_CMD_ERASE2)
606 		return 0;
607 
608 	if (opcode1 == NAND_CMD_SEQIN && opcode2 == NAND_CMD_PAGEPROG)
609 		return 0;
610 
611 	return -EOPNOTSUPP;
612 }
613 
614 static int loongson_nand_check_op(struct nand_chip *chip, const struct nand_operation *op)
615 {
616 	const struct nand_op_instr *instr1 = NULL, *instr2 = NULL;
617 	int op_id;
618 
619 	for (op_id = 0; op_id < op->ninstrs; op_id++) {
620 		const struct nand_op_instr *instr = &op->instrs[op_id];
621 
622 		if (instr->type == NAND_OP_CMD_INSTR) {
623 			if (!instr1)
624 				instr1 = instr;
625 			else if (!instr2)
626 				instr2 = instr;
627 			else
628 				break;
629 		}
630 	}
631 
632 	if (!instr1)
633 		return -EOPNOTSUPP;
634 
635 	if (!instr2)
636 		return loongson_nand_is_valid_cmd(instr1->ctx.cmd.opcode);
637 
638 	return loongson_nand_is_valid_cmd_seq(instr1->ctx.cmd.opcode, instr2->ctx.cmd.opcode);
639 }
640 
641 static int loongson_nand_exec_op(struct nand_chip *chip, const struct nand_operation *op,
642 				 bool check_only)
643 {
644 	if (check_only)
645 		return loongson_nand_check_op(chip, op);
646 
647 	return nand_op_parser_exec_op(chip, &loongson_nand_op_parser, op, check_only);
648 }
649 
650 static int loongson_nand_get_chip_capacity(struct nand_chip *chip)
651 {
652 	struct loongson_nand_host *host = nand_get_controller_data(chip);
653 	u64 chipsize = nanddev_target_size(&chip->base);
654 	struct mtd_info *mtd = nand_to_mtd(chip);
655 
656 	switch (mtd->writesize) {
657 	case SZ_512:
658 		switch (chipsize) {
659 		case SZ_8M:
660 			host->addr_cs_field = GENMASK(15, 14);
661 			return 0x9;
662 		case SZ_16M:
663 			host->addr_cs_field = GENMASK(16, 15);
664 			return 0xa;
665 		case SZ_32M:
666 			host->addr_cs_field = GENMASK(17, 16);
667 			return 0xb;
668 		case SZ_64M:
669 			host->addr_cs_field = GENMASK(18, 17);
670 			return 0xc;
671 		case SZ_128M:
672 			host->addr_cs_field = GENMASK(19, 18);
673 			return 0xd;
674 		}
675 		break;
676 	case SZ_2K:
677 		switch (chipsize) {
678 		case SZ_128M:
679 			host->addr_cs_field = GENMASK(17, 16);
680 			return 0x0;
681 		case SZ_256M:
682 			host->addr_cs_field = GENMASK(18, 17);
683 			return 0x1;
684 		case SZ_512M:
685 			host->addr_cs_field = GENMASK(19, 18);
686 			return 0x2;
687 		case SZ_1G:
688 			host->addr_cs_field = GENMASK(20, 19);
689 			return 0x3;
690 		}
691 		break;
692 	case SZ_4K:
693 		if (chipsize == SZ_2G) {
694 			host->addr_cs_field = GENMASK(20, 19);
695 			return 0x4;
696 		}
697 		break;
698 	case SZ_8K:
699 		switch (chipsize) {
700 		case SZ_4G:
701 			host->addr_cs_field = GENMASK(20, 19);
702 			return 0x5;
703 		case SZ_8G:
704 			host->addr_cs_field = GENMASK(21, 20);
705 			return 0x6;
706 		case SZ_16G:
707 			host->addr_cs_field = GENMASK(22, 21);
708 			return 0x7;
709 		}
710 		break;
711 	}
712 
713 	dev_err(host->dev, "Unsupported chip size: %llu MB with page size %u B\n",
714 		chipsize, mtd->writesize);
715 	return -EINVAL;
716 }
717 
718 static int loongson_nand_attach_chip(struct nand_chip *chip)
719 {
720 	struct loongson_nand_host *host = nand_get_controller_data(chip);
721 	int cell_size = loongson_nand_get_chip_capacity(chip);
722 
723 	if (cell_size < 0)
724 		return cell_size;
725 
726 	switch (chip->ecc.engine_type) {
727 	case NAND_ECC_ENGINE_TYPE_NONE:
728 		break;
729 	case NAND_ECC_ENGINE_TYPE_SOFT:
730 		break;
731 	default:
732 		return -EINVAL;
733 	}
734 
735 	/* set cell size */
736 	regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM, LOONGSON_NAND_CELL_SIZE_MASK,
737 			   FIELD_PREP(LOONGSON_NAND_CELL_SIZE_MASK, cell_size));
738 
739 	regmap_update_bits(host->regmap, LOONGSON_NAND_TIMING, LOONGSON_NAND_HOLD_CYCLE_MASK,
740 			   FIELD_PREP(LOONGSON_NAND_HOLD_CYCLE_MASK, host->data->hold_cycle));
741 
742 	regmap_update_bits(host->regmap, LOONGSON_NAND_TIMING, LOONGSON_NAND_WAIT_CYCLE_MASK,
743 			   FIELD_PREP(LOONGSON_NAND_WAIT_CYCLE_MASK, host->data->wait_cycle));
744 
745 	chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
746 	chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
747 
748 	return 0;
749 }
750 
/* Controller hooks registered with the raw NAND core. */
static const struct nand_controller_ops loongson_nand_controller_ops = {
	.exec_op = loongson_nand_exec_op,
	.attach_chip = loongson_nand_attach_chip,
};
755 
/* Release the DMA channel if controller init got far enough to take it. */
static void loongson_nand_controller_cleanup(struct loongson_nand_host *host)
{
	if (host->dma_chan)
		dma_release_channel(host->dma_chan);
}
761 
/*
 * One-time controller setup: regmap over the MMIO registers, ID cycle
 * count, DMA mask, CS/RDY pin mapping, and the "rxtx" DMA channel
 * configured for 32-bit accesses to the controller's DMA port.
 *
 * On failure after the DMA channel was taken, the caller is expected to
 * run loongson_nand_controller_cleanup() (probe does so via its err path).
 */
static int loongson_nand_controller_init(struct loongson_nand_host *host)
{
	struct device *dev = host->dev;
	struct dma_chan *chan;
	struct dma_slave_config cfg = {};
	int ret, val;

	host->regmap = devm_regmap_init_mmio(dev, host->reg_base, &loongson_nand_regmap_config);
	if (IS_ERR(host->regmap))
		return dev_err_probe(dev, PTR_ERR(host->regmap), "failed to init regmap\n");

	/* Program the ID cycle count on variants that expose the field. */
	if (host->data->id_cycle_field)
		regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM, host->data->id_cycle_field,
				   host->data->max_id_cycle << __ffs(host->data->id_cycle_field));

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(host->data->dma_bits));
	if (ret)
		return dev_err_probe(dev, ret, "failed to set DMA mask\n");

	/* Identity-map chip selects 1..3 to their ready lines. */
	val = FIELD_PREP(LOONGSON_NAND_MAP_CS1_SEL, LOONGSON_NAND_CS_SEL1) |
	      FIELD_PREP(LOONGSON_NAND_MAP_RDY1_SEL, LOONGSON_NAND_CS_RDY1) |
	      FIELD_PREP(LOONGSON_NAND_MAP_CS2_SEL, LOONGSON_NAND_CS_SEL2) |
	      FIELD_PREP(LOONGSON_NAND_MAP_RDY2_SEL, LOONGSON_NAND_CS_RDY2) |
	      FIELD_PREP(LOONGSON_NAND_MAP_CS3_SEL, LOONGSON_NAND_CS_SEL3) |
	      FIELD_PREP(LOONGSON_NAND_MAP_RDY3_SEL, LOONGSON_NAND_CS_RDY3);

	regmap_write(host->regmap, LOONGSON_NAND_CS_RDY_MAP, val);

	chan = dma_request_chan(dev, "rxtx");
	if (IS_ERR(chan))
		return dev_err_probe(dev, PTR_ERR(chan), "failed to request DMA channel\n");
	host->dma_chan = chan;

	/* Both directions target the controller's single DMA data port. */
	cfg.src_addr = host->dma_base;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr = host->dma_base;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	ret = dmaengine_slave_config(host->dma_chan, &cfg);
	if (ret)
		return dev_err_probe(dev, ret, "failed to config DMA channel\n");

	init_completion(&host->dma_complete);

	return 0;
}
807 
/*
 * Bind the single supported NAND chip described in the device tree,
 * scan it and register the resulting MTD device.
 */
static int loongson_nand_chip_init(struct loongson_nand_host *host)
{
	struct device *dev = host->dev;
	int nchips = of_get_child_count(dev->of_node);
	struct device_node *chip_np;
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* The driver currently handles exactly one chip per controller. */
	if (nchips != 1)
		return dev_err_probe(dev, -EINVAL, "Currently one NAND chip supported\n");

	chip_np = of_get_next_child(dev->of_node, NULL);
	if (!chip_np)
		return dev_err_probe(dev, -ENODEV, "failed to get child node for NAND chip\n");

	nand_set_flash_node(chip, chip_np);
	of_node_put(chip_np);
	if (!mtd->name)
		return dev_err_probe(dev, -EINVAL, "Missing MTD label\n");

	nand_set_controller_data(chip, host);
	chip->controller = &host->controller;
	chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_BROKEN_XD;
	/* DMA transfers require 16-byte-aligned buffers and lengths. */
	chip->buf_align = 16;
	mtd->dev.parent = dev;
	mtd->owner = THIS_MODULE;

	ret = nand_scan(chip, 1);
	if (ret)
		return dev_err_probe(dev, ret, "failed to scan NAND chip\n");

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		nand_cleanup(chip);
		return dev_err_probe(dev, ret, "failed to register MTD device\n");
	}

	return 0;
}
848 
/*
 * Probe: map the register and DMA resources, then initialize the
 * controller and the attached NAND chip.
 *
 * NOTE(review): host->dma_base from dma_map_resource() is never passed
 * to dma_unmap_resource() — neither on the error paths here nor in
 * remove(). Looks like a small mapping leak; confirm and fix separately.
 */
static int loongson_nand_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct loongson_nand_data *data;
	struct loongson_nand_host *host;
	struct resource *res;
	int ret;

	data = of_device_get_match_data(dev);
	if (!data)
		return -ENODEV;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(host->reg_base))
		return PTR_ERR(host->reg_base);

	/* Second MMIO region: the controller's DMA data port. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-dma");
	if (!res)
		return dev_err_probe(dev, -EINVAL, "Missing 'nand-dma' in reg-names property\n");

	host->dma_base = dma_map_resource(dev, res->start, resource_size(res),
					  DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, host->dma_base))
		return -ENXIO;

	host->dev = dev;
	host->data = data;
	host->controller.ops = &loongson_nand_controller_ops;

	nand_controller_init(&host->controller);

	ret = loongson_nand_controller_init(host);
	if (ret)
		goto err;

	ret = loongson_nand_chip_init(host);
	if (ret)
		goto err;

	platform_set_drvdata(pdev, host);

	return 0;
err:
	/* Releases the DMA channel if it was acquired. */
	loongson_nand_controller_cleanup(host);

	return ret;
}
900 
/* Teardown: unregister the MTD device, clean up the chip and release DMA. */
static void loongson_nand_remove(struct platform_device *pdev)
{
	struct loongson_nand_host *host = platform_get_drvdata(pdev);
	struct nand_chip *chip = &host->chip;
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);
	loongson_nand_controller_cleanup(host);
}
912 
/* LS1B: 5 ID bytes, no ID-cycle or op-scope fields, status in bits 15:8. */
static const struct loongson_nand_data ls1b_nand_data = {
	.max_id_cycle = 5,
	.status_field = GENMASK(15, 8),
	.hold_cycle = 0x2,
	.wait_cycle = 0xc,
	.dma_bits = 32,
	.set_addr = ls1b_nand_set_addr,
};
921 
/* LS1C: 6 ID bytes, programmable ID cycles and op scope, status in bits 23:16. */
static const struct loongson_nand_data ls1c_nand_data = {
	.max_id_cycle = 6,
	.id_cycle_field = GENMASK(14, 12),
	.status_field = GENMASK(23, 16),
	.op_scope_field = GENMASK(29, 16),
	.hold_cycle = 0x2,
	.wait_cycle = 0xc,
	.dma_bits = 32,
	.set_addr = ls1c_nand_set_addr,
};
932 
/* LS2K0500: like LS1C but 64-bit DMA and different bus timing. */
static const struct loongson_nand_data ls2k0500_nand_data = {
	.max_id_cycle = 6,
	.id_cycle_field = GENMASK(14, 12),
	.status_field = GENMASK(23, 16),
	.op_scope_field = GENMASK(29, 16),
	.hold_cycle = 0x4,
	.wait_cycle = 0x12,
	.dma_bits = 64,
	.set_addr = ls1c_nand_set_addr,
};
943 
/* Device-tree match table mapping compatibles to SoC data. */
static const struct of_device_id loongson_nand_match[] = {
	{
		.compatible = "loongson,ls1b-nand-controller",
		.data = &ls1b_nand_data,
	},
	{
		.compatible = "loongson,ls1c-nand-controller",
		.data = &ls1c_nand_data,
	},
	{
		.compatible = "loongson,ls2k0500-nand-controller",
		.data = &ls2k0500_nand_data,
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, loongson_nand_match);
960 
/* Platform driver glue. */
static struct platform_driver loongson_nand_driver = {
	.probe = loongson_nand_probe,
	.remove = loongson_nand_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = loongson_nand_match,
	},
};

module_platform_driver(loongson_nand_driver);
971 
972 MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
973 MODULE_AUTHOR("Binbin Zhou <zhoubinbin@loongson.cn>");
974 MODULE_DESCRIPTION("Loongson NAND Controller Driver");
975 MODULE_LICENSE("GPL");
976