// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NAND Controller Driver for Loongson family chips
 *
 * Copyright (C) 2015-2025 Keguang Zhang <keguang.zhang@gmail.com>
 */

#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sizes.h>

/* Loongson NAND Controller Registers */
#define LOONGSON_NAND_CMD		0x0
#define LOONGSON_NAND_ADDR1		0x4
#define LOONGSON_NAND_ADDR2		0x8
#define LOONGSON_NAND_TIMING		0xc
#define LOONGSON_NAND_IDL		0x10
#define LOONGSON_NAND_IDH_STATUS	0x14
#define LOONGSON_NAND_PARAM		0x18
#define LOONGSON_NAND_OP_NUM		0x1c

/* Bitfields of nand command register */
#define LOONGSON_NAND_CMD_OP_DONE	BIT(10)
#define LOONGSON_NAND_CMD_OP_SPARE	BIT(9)
#define LOONGSON_NAND_CMD_OP_MAIN	BIT(8)
#define LOONGSON_NAND_CMD_STATUS	BIT(7)
#define LOONGSON_NAND_CMD_RESET		BIT(6)
#define LOONGSON_NAND_CMD_READID	BIT(5)
#define LOONGSON_NAND_CMD_BLOCKS_ERASE	BIT(4)
#define LOONGSON_NAND_CMD_ERASE		BIT(3)
#define LOONGSON_NAND_CMD_WRITE		BIT(2)
#define LOONGSON_NAND_CMD_READ		BIT(1)
#define LOONGSON_NAND_CMD_VALID		BIT(0)
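
/*
 * A whole-page read, for example, is issued by composing
 * LOONGSON_NAND_CMD_READ | LOONGSON_NAND_CMD_OP_MAIN |
 * LOONGSON_NAND_CMD_OP_SPARE in the command register and then setting
 * LOONGSON_NAND_CMD_VALID to kick off the operation; see
 * loongson_nand_trigger_op() below.
 */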

/* Bitfields of nand timing register */
#define LOONGSON_NAND_WAIT_CYCLE_MASK	GENMASK(7, 0)
#define LOONGSON_NAND_HOLD_CYCLE_MASK	GENMASK(15, 8)

/* Bitfields of nand parameter register */
#define LOONGSON_NAND_CELL_SIZE_MASK	GENMASK(11, 8)

#define LOONGSON_NAND_COL_ADDR_CYC	2U
#define LOONGSON_NAND_MAX_ADDR_CYC	5U

#define BITS_PER_WORD			(4 * BITS_PER_BYTE)

struct loongson_nand_host;

struct loongson_nand_op {
	char addrs[LOONGSON_NAND_MAX_ADDR_CYC];
	unsigned int naddrs;
	unsigned int addrs_offset;
	unsigned int aligned_offset;
	unsigned int cmd_reg;
	unsigned int row_start;
	unsigned int rdy_timeout_ms;
	unsigned int orig_len;
	bool is_readid;
	bool is_erase;
	bool is_write;
	bool is_read;
	bool is_change_column;
	size_t len;
	char *buf;
};

struct loongson_nand_data {
	unsigned int status_field;
	unsigned int op_scope_field;
	unsigned int hold_cycle;
	unsigned int wait_cycle;
	void (*set_addr)(struct loongson_nand_host *host, struct loongson_nand_op *op);
};

struct loongson_nand_host {
	struct device *dev;
	struct nand_chip chip;
	struct nand_controller controller;
	const struct loongson_nand_data *data;
	void __iomem *reg_base;
	struct regmap *regmap;
	/* DMA Engine stuff */
	dma_addr_t dma_base;
	struct dma_chan *dma_chan;
	dma_cookie_t dma_cookie;
	struct completion dma_complete;
};

static const struct regmap_config loongson_nand_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};
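
/*
 * Note that the driver accesses the controller both through this regmap
 * (for read-modify-write and polling) and through host->reg_base directly
 * (plain readl()/writel() of whole registers); both cover the same MMIO
 * window.
 */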

static int loongson_nand_op_cmd_mapping(struct nand_chip *chip, struct loongson_nand_op *op,
					u8 opcode)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);

	op->row_start = chip->page_shift + 1;

	/* The controller abstracts the following NAND operations. */
	switch (opcode) {
	case NAND_CMD_STATUS:
		op->cmd_reg = LOONGSON_NAND_CMD_STATUS;
		break;
	case NAND_CMD_RESET:
		op->cmd_reg = LOONGSON_NAND_CMD_RESET;
		break;
	case NAND_CMD_READID:
		op->is_readid = true;
		op->cmd_reg = LOONGSON_NAND_CMD_READID;
		break;
	case NAND_CMD_ERASE1:
		op->is_erase = true;
		op->addrs_offset = LOONGSON_NAND_COL_ADDR_CYC;
		break;
	case NAND_CMD_ERASE2:
		if (!op->is_erase)
			return -EOPNOTSUPP;
		/* During erasing, row_start differs from the default value. */
		op->row_start = chip->page_shift;
		op->cmd_reg = LOONGSON_NAND_CMD_ERASE;
		break;
	case NAND_CMD_SEQIN:
		op->is_write = true;
		break;
	case NAND_CMD_PAGEPROG:
		if (!op->is_write)
			return -EOPNOTSUPP;
		op->cmd_reg = LOONGSON_NAND_CMD_WRITE;
		break;
	case NAND_CMD_READ0:
		op->is_read = true;
		break;
	case NAND_CMD_READSTART:
		if (!op->is_read)
			return -EOPNOTSUPP;
		op->cmd_reg = LOONGSON_NAND_CMD_READ;
		break;
	case NAND_CMD_RNDOUT:
		op->is_change_column = true;
		break;
	case NAND_CMD_RNDOUTSTART:
		if (!op->is_change_column)
			return -EOPNOTSUPP;
		op->cmd_reg = LOONGSON_NAND_CMD_READ;
		break;
	default:
		dev_dbg(host->dev, "unsupported opcode: %u\n", opcode);
		return -EOPNOTSUPP;
	}

	return 0;
}

static int loongson_nand_parse_instructions(struct nand_chip *chip, const struct nand_subop *subop,
					    struct loongson_nand_op *op)
{
	unsigned int op_id;
	int ret;

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		const struct nand_op_instr *instr = &subop->instrs[op_id];
		unsigned int offset, naddrs;
		const u8 *addrs;

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			ret = loongson_nand_op_cmd_mapping(chip, op, instr->ctx.cmd.opcode);
			if (ret < 0)
				return ret;

			break;
		case NAND_OP_ADDR_INSTR:
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			if (naddrs > LOONGSON_NAND_MAX_ADDR_CYC)
				return -EOPNOTSUPP;
			op->naddrs = naddrs;
			offset = nand_subop_get_addr_start_off(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];
			memcpy(op->addrs + op->addrs_offset, addrs, naddrs);
			break;
		case NAND_OP_DATA_IN_INSTR:
		case NAND_OP_DATA_OUT_INSTR:
			offset = nand_subop_get_data_start_off(subop, op_id);
			op->orig_len = nand_subop_get_data_len(subop, op_id);
			if (instr->type == NAND_OP_DATA_IN_INSTR)
				op->buf = instr->ctx.data.buf.in + offset;
			else if (instr->type == NAND_OP_DATA_OUT_INSTR)
				op->buf = (void *)instr->ctx.data.buf.out + offset;

			break;
		case NAND_OP_WAITRDY_INSTR:
			op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
			break;
		default:
			break;
		}
	}

	return 0;
}

static void ls1b_nand_set_addr(struct loongson_nand_host *host, struct loongson_nand_op *op)
{
	struct nand_chip *chip = &host->chip;
	int i;

	for (i = 0; i < LOONGSON_NAND_MAX_ADDR_CYC; i++) {
		int shift, mask, val;

		if (i < LOONGSON_NAND_COL_ADDR_CYC) {
			shift = i * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			mask &= GENMASK(chip->page_shift, 0);
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);
		} else if (!op->is_change_column) {
			shift = op->row_start + (i - LOONGSON_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);

			if (i == 4) {
				mask = (u32)0xff >> (BITS_PER_WORD - shift);
				val = (u32)op->addrs[i] >> (BITS_PER_WORD - shift);
				regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, mask, val);
			}
		}
	}
}
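
/*
 * Worked example for ls1b_nand_set_addr(), assuming a 2 KiB page device
 * (page_shift = 11, so row_start = 12 for read/write): the two column
 * cycles land in ADDR1 bits [page_shift:0], row byte 0 in ADDR1[19:12],
 * row byte 1 in ADDR1[27:20], and the fifth cycle straddles the register
 * boundary: its low four bits go to ADDR1[31:28] and its remaining high
 * bits to ADDR2[3:0].
 */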

static void ls1c_nand_set_addr(struct loongson_nand_host *host, struct loongson_nand_op *op)
{
	int i;

	for (i = 0; i < LOONGSON_NAND_MAX_ADDR_CYC; i++) {
		int shift, mask, val;

		if (i < LOONGSON_NAND_COL_ADDR_CYC) {
			shift = i * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);
		} else if (!op->is_change_column) {
			shift = (i - LOONGSON_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, mask, val);
		}
	}
}
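
/*
 * Unlike the LS1B, the LS1C keeps the column cycles entirely in ADDR1 and
 * the row cycles entirely in ADDR2, so no address cycle ever straddles the
 * two address registers.
 */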

static void loongson_nand_trigger_op(struct loongson_nand_host *host, struct loongson_nand_op *op)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int col0 = op->addrs[0];
	short col;

	if (!IS_ALIGNED(col0, chip->buf_align)) {
		col0 = ALIGN_DOWN(op->addrs[0], chip->buf_align);
		op->aligned_offset = op->addrs[0] - col0;
		op->addrs[0] = col0;
	}

	if (host->data->set_addr)
		host->data->set_addr(host, op);

	/* set operation length */
	if (op->is_write || op->is_read || op->is_change_column)
		op->len = ALIGN(op->orig_len + op->aligned_offset, chip->buf_align);
	else if (op->is_erase)
		op->len = 1;
	else
		op->len = op->orig_len;

	writel(op->len, host->reg_base + LOONGSON_NAND_OP_NUM);

	/* set operation area and scope */
	col = op->addrs[1] << BITS_PER_BYTE | op->addrs[0];
	if (op->orig_len && !op->is_readid) {
		unsigned int op_scope = 0;

		if (col < mtd->writesize) {
			op->cmd_reg |= LOONGSON_NAND_CMD_OP_MAIN;
			op_scope = mtd->writesize;
		}

		op->cmd_reg |= LOONGSON_NAND_CMD_OP_SPARE;
		op_scope += mtd->oobsize;

		/* skip chips without this field; __ffs(0) is undefined */
		if (host->data->op_scope_field) {
			op_scope <<= __ffs(host->data->op_scope_field);
			regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM,
					   host->data->op_scope_field, op_scope);
		}
	}

	/* set command */
	writel(op->cmd_reg, host->reg_base + LOONGSON_NAND_CMD);

	/* trigger operation */
	regmap_write_bits(host->regmap, LOONGSON_NAND_CMD, LOONGSON_NAND_CMD_VALID,
			  LOONGSON_NAND_CMD_VALID);
}

static int loongson_nand_wait_for_op_done(struct loongson_nand_host *host,
					  struct loongson_nand_op *op)
{
	unsigned int val;
	int ret = 0;

	if (op->rdy_timeout_ms) {
		ret = regmap_read_poll_timeout(host->regmap, LOONGSON_NAND_CMD,
					       val, val & LOONGSON_NAND_CMD_OP_DONE,
					       0, op->rdy_timeout_ms * USEC_PER_MSEC);
		if (ret)
			dev_err(host->dev, "operation failed\n");
	}

	return ret;
}

static void loongson_nand_dma_callback(void *data)
{
	struct loongson_nand_host *host = (struct loongson_nand_host *)data;
	struct dma_chan *chan = host->dma_chan;
	struct device *dev = chan->device->dev;
	enum dma_status status;

	status = dmaengine_tx_status(chan, host->dma_cookie, NULL);
	if (likely(status == DMA_COMPLETE)) {
		dev_dbg(dev, "DMA complete with cookie=%d\n", host->dma_cookie);
		complete(&host->dma_complete);
	} else {
		dev_err(dev, "DMA error with cookie=%d\n", host->dma_cookie);
	}
}

static int loongson_nand_dma_transfer(struct loongson_nand_host *host, struct loongson_nand_op *op)
{
	struct nand_chip *chip = &host->chip;
	struct dma_chan *chan = host->dma_chan;
	struct device *dev = chan->device->dev;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction data_dir = op->is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	enum dma_transfer_direction xfer_dir = op->is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	void *buf = op->buf;
	char *dma_buf = NULL;
	dma_addr_t dma_addr;
	int ret;

	if (IS_ALIGNED((uintptr_t)buf, chip->buf_align) &&
	    IS_ALIGNED(op->orig_len, chip->buf_align)) {
		dma_addr = dma_map_single(dev, buf, op->orig_len, data_dir);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "failed to map DMA buffer\n");
			return -ENXIO;
		}
	} else if (!op->is_write) {
		dma_buf = dma_alloc_coherent(dev, op->len, &dma_addr, GFP_KERNEL);
		if (!dma_buf)
			return -ENOMEM;
	} else {
		dev_err(dev, "subpage writing not supported\n");
		return -EOPNOTSUPP;
	}

	desc = dmaengine_prep_slave_single(chan, dma_addr, op->len, xfer_dir, DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "failed to prepare DMA descriptor\n");
		ret = -ENOMEM;
		goto err;
	}
	desc->callback = loongson_nand_dma_callback;
	desc->callback_param = host;

	host->dma_cookie = dmaengine_submit(desc);
	ret = dma_submit_error(host->dma_cookie);
	if (ret) {
		dev_err(dev, "failed to submit DMA descriptor\n");
		goto err;
	}

	dev_dbg(dev, "issue DMA with cookie=%d\n", host->dma_cookie);
	dma_async_issue_pending(chan);

	if (!wait_for_completion_timeout(&host->dma_complete, msecs_to_jiffies(1000))) {
		dmaengine_terminate_sync(chan);
		reinit_completion(&host->dma_complete);
		ret = -ETIMEDOUT;
		goto err;
	}

	if (dma_buf)
		memcpy(buf, dma_buf + op->aligned_offset, op->orig_len);
err:
	if (dma_buf)
		dma_free_coherent(dev, op->len, dma_buf, dma_addr);
	else
		dma_unmap_single(dev, dma_addr, op->orig_len, data_dir);

	return ret;
}
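
/*
 * Buffer handling above, in short: buffers meeting the controller's DMA
 * alignment (chip->buf_align, 16 bytes here) are streaming-mapped in
 * place; an unaligned or odd-length read is bounced through a coherent
 * buffer and copied out at op->aligned_offset afterwards; unaligned
 * writes are rejected, consistent with NAND_NO_SUBPAGE_WRITE below.
 */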

static int loongson_nand_data_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	struct loongson_nand_op op = {};
	int ret;

	ret = loongson_nand_parse_instructions(chip, subop, &op);
	if (ret)
		return ret;

	loongson_nand_trigger_op(host, &op);

	ret = loongson_nand_dma_transfer(host, &op);
	if (ret)
		return ret;

	return loongson_nand_wait_for_op_done(host, &op);
}

static int loongson_nand_misc_type_exec(struct nand_chip *chip, const struct nand_subop *subop,
					struct loongson_nand_op *op)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	int ret;

	ret = loongson_nand_parse_instructions(chip, subop, op);
	if (ret)
		return ret;

	loongson_nand_trigger_op(host, op);

	return loongson_nand_wait_for_op_done(host, op);
}

static int loongson_nand_zerolen_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
	struct loongson_nand_op op = {};

	return loongson_nand_misc_type_exec(chip, subop, &op);
}

static int loongson_nand_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	struct loongson_nand_op op = {};
	int i, ret;
	union {
		char ids[5];
		struct {
			int idl;
			char idh;
		};
	} nand_id;

	ret = loongson_nand_misc_type_exec(chip, subop, &op);
	if (ret)
		return ret;

	nand_id.idl = readl(host->reg_base + LOONGSON_NAND_IDL);
	nand_id.idh = readb(host->reg_base + LOONGSON_NAND_IDH_STATUS);

	for (i = 0; i < min(sizeof(nand_id.ids), op.orig_len); i++)
		op.buf[i] = nand_id.ids[sizeof(nand_id.ids) - 1 - i];

	return ret;
}

static int loongson_nand_read_status_type_exec(struct nand_chip *chip,
					       const struct nand_subop *subop)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	struct loongson_nand_op op = {};
	int val, ret;

	ret = loongson_nand_misc_type_exec(chip, subop, &op);
	if (ret)
		return ret;

	/* extract the chip status bits from the ID/status register */
	val = readl(host->reg_base + LOONGSON_NAND_IDH_STATUS);
	val &= host->data->status_field;
	op.buf[0] = val >> __ffs(host->data->status_field);

	return ret;
}

static const struct nand_op_parser loongson_nand_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(
		loongson_nand_read_id_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_read_status_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_zerolen_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_zerolen_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_data_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_data_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	);
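
/*
 * The patterns above map the generic NAND operations onto the controller:
 * READID (cmd + addr + data-in), STATUS (cmd + data-in), RESET (cmd +
 * waitrdy), ERASE (cmd + addr + cmd + waitrdy), page read (cmd + addr +
 * cmd + optional waitrdy + data-in) and page write (cmd + addr + data-out
 * + cmd + waitrdy).
 */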

static int loongson_nand_is_valid_cmd(u8 opcode)
{
	if (opcode == NAND_CMD_STATUS || opcode == NAND_CMD_RESET || opcode == NAND_CMD_READID)
		return 0;

	return -EOPNOTSUPP;
}

static int loongson_nand_is_valid_cmd_seq(u8 opcode1, u8 opcode2)
{
	if (opcode1 == NAND_CMD_RNDOUT && opcode2 == NAND_CMD_RNDOUTSTART)
		return 0;

	if (opcode1 == NAND_CMD_READ0 && opcode2 == NAND_CMD_READSTART)
		return 0;

	if (opcode1 == NAND_CMD_ERASE1 && opcode2 == NAND_CMD_ERASE2)
		return 0;

	if (opcode1 == NAND_CMD_SEQIN && opcode2 == NAND_CMD_PAGEPROG)
		return 0;

	return -EOPNOTSUPP;
}

static int loongson_nand_check_op(struct nand_chip *chip, const struct nand_operation *op)
{
	const struct nand_op_instr *instr1 = NULL, *instr2 = NULL;
	int op_id;

	for (op_id = 0; op_id < op->ninstrs; op_id++) {
		const struct nand_op_instr *instr = &op->instrs[op_id];

		if (instr->type == NAND_OP_CMD_INSTR) {
			if (!instr1)
				instr1 = instr;
			else if (!instr2)
				instr2 = instr;
			else
				break;
		}
	}

	if (!instr1)
		return -EOPNOTSUPP;

	if (!instr2)
		return loongson_nand_is_valid_cmd(instr1->ctx.cmd.opcode);

	return loongson_nand_is_valid_cmd_seq(instr1->ctx.cmd.opcode, instr2->ctx.cmd.opcode);
}

static int loongson_nand_exec_op(struct nand_chip *chip, const struct nand_operation *op,
				 bool check_only)
{
	if (check_only)
		return loongson_nand_check_op(chip, op);

	return nand_op_parser_exec_op(chip, &loongson_nand_op_parser, op, check_only);
}

static int loongson_nand_attach_chip(struct nand_chip *chip)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	u64 chipsize = nanddev_target_size(&chip->base);
	int cell_size = 0;

	switch (chipsize) {
	case SZ_128M:
		cell_size = 0x0;
		break;
	case SZ_256M:
		cell_size = 0x1;
		break;
	case SZ_512M:
		cell_size = 0x2;
		break;
	case SZ_1G:
		cell_size = 0x3;
		break;
	case SZ_2G:
		cell_size = 0x4;
		break;
	case SZ_4G:
		cell_size = 0x5;
		break;
	case SZ_8G:
		cell_size = 0x6;
		break;
	case SZ_16G:
		cell_size = 0x7;
		break;
	default:
		dev_err(host->dev, "unsupported chip size: %llu MiB\n", chipsize >> 20);
		return -EINVAL;
	}

	switch (chip->ecc.engine_type) {
	case NAND_ECC_ENGINE_TYPE_NONE:
	case NAND_ECC_ENGINE_TYPE_SOFT:
		break;
	default:
		return -EINVAL;
	}

	/* set cell size */
	regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM, LOONGSON_NAND_CELL_SIZE_MASK,
			   FIELD_PREP(LOONGSON_NAND_CELL_SIZE_MASK, cell_size));
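	/*
	 * Worked example for the cell size just programmed: a 1 GiB target
	 * makes nanddev_target_size() return SZ_1G, so cell_size is 0x3 and
	 * PARAM[11:8] is set to 3.
	 */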

	regmap_update_bits(host->regmap, LOONGSON_NAND_TIMING, LOONGSON_NAND_HOLD_CYCLE_MASK,
			   FIELD_PREP(LOONGSON_NAND_HOLD_CYCLE_MASK, host->data->hold_cycle));

	regmap_update_bits(host->regmap, LOONGSON_NAND_TIMING, LOONGSON_NAND_WAIT_CYCLE_MASK,
			   FIELD_PREP(LOONGSON_NAND_WAIT_CYCLE_MASK, host->data->wait_cycle));

	chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
	chip->ecc.write_page_raw = nand_monolithic_write_page_raw;

	return 0;
}

static const struct nand_controller_ops loongson_nand_controller_ops = {
	.exec_op = loongson_nand_exec_op,
	.attach_chip = loongson_nand_attach_chip,
};

static void loongson_nand_controller_cleanup(struct loongson_nand_host *host)
{
	if (host->dma_chan)
		dma_release_channel(host->dma_chan);
}

static int loongson_nand_controller_init(struct loongson_nand_host *host)
{
	struct device *dev = host->dev;
	struct dma_chan *chan;
	struct dma_slave_config cfg = {};
	int ret;

	host->regmap = devm_regmap_init_mmio(dev, host->reg_base, &loongson_nand_regmap_config);
	if (IS_ERR(host->regmap))
		return dev_err_probe(dev, PTR_ERR(host->regmap), "failed to init regmap\n");

	chan = dma_request_chan(dev, "rxtx");
	if (IS_ERR(chan))
		return dev_err_probe(dev, PTR_ERR(chan), "failed to request DMA channel\n");
	host->dma_chan = chan;

	cfg.src_addr = host->dma_base;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr = host->dma_base;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	ret = dmaengine_slave_config(host->dma_chan, &cfg);
	if (ret)
		return dev_err_probe(dev, ret, "failed to config DMA channel\n");

	init_completion(&host->dma_complete);

	return 0;
}
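
/*
 * Both src_addr and dst_addr point at the same "nand-dma" window:
 * presumably a single data port that is read for DEV_TO_MEM transfers and
 * written for MEM_TO_DEV transfers.
 */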

static int loongson_nand_chip_init(struct loongson_nand_host *host)
{
	struct device *dev = host->dev;
	int nchips = of_get_child_count(dev->of_node);
	struct device_node *chip_np;
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	if (nchips != 1)
		return dev_err_probe(dev, -EINVAL, "only one NAND chip is currently supported\n");

	chip_np = of_get_next_child(dev->of_node, NULL);
	if (!chip_np)
		return dev_err_probe(dev, -ENODEV, "failed to get child node for NAND chip\n");

	nand_set_flash_node(chip, chip_np);
	of_node_put(chip_np);
	if (!mtd->name)
		return dev_err_probe(dev, -EINVAL, "Missing MTD label\n");

	nand_set_controller_data(chip, host);
	chip->controller = &host->controller;
	chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_BROKEN_XD;
	chip->buf_align = 16;
	mtd->dev.parent = dev;
	mtd->owner = THIS_MODULE;

	ret = nand_scan(chip, 1);
	if (ret)
		return dev_err_probe(dev, ret, "failed to scan NAND chip\n");

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		nand_cleanup(chip);
		return dev_err_probe(dev, ret, "failed to register MTD device\n");
	}

	return 0;
}

static int loongson_nand_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct loongson_nand_data *data;
	struct loongson_nand_host *host;
	struct resource *res;
	int ret;

	data = of_device_get_match_data(dev);
	if (!data)
		return -ENODEV;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(host->reg_base))
		return PTR_ERR(host->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-dma");
	if (!res)
		return dev_err_probe(dev, -EINVAL, "Missing 'nand-dma' in reg-names property\n");

	host->dma_base = dma_map_resource(dev, res->start, resource_size(res),
					  DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, host->dma_base))
		return -ENXIO;

	host->dev = dev;
	host->data = data;
	host->controller.ops = &loongson_nand_controller_ops;

	nand_controller_init(&host->controller);

	ret = loongson_nand_controller_init(host);
	if (ret)
		goto err;

	ret = loongson_nand_chip_init(host);
	if (ret)
		goto err;

	platform_set_drvdata(pdev, host);

	return 0;
err:
	loongson_nand_controller_cleanup(host);

	return ret;
}

static void loongson_nand_remove(struct platform_device *pdev)
{
	struct loongson_nand_host *host = platform_get_drvdata(pdev);
	struct nand_chip *chip = &host->chip;
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);
	loongson_nand_controller_cleanup(host);
}

static const struct loongson_nand_data ls1b_nand_data = {
	.status_field = GENMASK(15, 8),
	.hold_cycle = 0x2,
	.wait_cycle = 0xc,
	.set_addr = ls1b_nand_set_addr,
};

static const struct loongson_nand_data ls1c_nand_data = {
	.status_field = GENMASK(23, 16),
	.op_scope_field = GENMASK(29, 16),
	.hold_cycle = 0x2,
	.wait_cycle = 0xc,
	.set_addr = ls1c_nand_set_addr,
};

static const struct of_device_id loongson_nand_match[] = {
	{
		.compatible = "loongson,ls1b-nand-controller",
		.data = &ls1b_nand_data,
	},
	{
		.compatible = "loongson,ls1c-nand-controller",
		.data = &ls1c_nand_data,
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, loongson_nand_match);
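
/*
 * A minimal devicetree sketch for reference; the authoritative property
 * set lives in the dt-bindings document, the addresses are illustrative,
 * and the first reg-names entry ("nand") and the DMA specifier are
 * assumptions here. Only "nand-dma" and the "rxtx" channel name are
 * required by this driver, and the child's "label" supplies mtd->name:
 *
 *	nand-controller@1fe78000 {
 *		compatible = "loongson,ls1c-nand-controller";
 *		reg = <0x1fe78000 0x20>, <0x1fe78040 0x4>;
 *		reg-names = "nand", "nand-dma";
 *		dmas = <&dma0 0>;
 *		dma-names = "rxtx";
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *
 *		nand@0 {
 *			reg = <0>;
 *			label = "ls1x-nand";
 *			nand-use-soft-ecc-engine;
 *			nand-ecc-algo = "hamming";
 *		};
 *	};
 */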

static struct platform_driver loongson_nand_driver = {
	.probe = loongson_nand_probe,
	.remove = loongson_nand_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = loongson_nand_match,
	},
};

module_platform_driver(loongson_nand_driver);

MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
MODULE_DESCRIPTION("Loongson NAND Controller Driver");
MODULE_LICENSE("GPL");