xref: /linux/drivers/mtd/nand/raw/loongson-nand-controller.c (revision 7ad5bdf88d7295c295a363a5daf481b283acedc2)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * NAND Controller Driver for Loongson family chips
4  *
5  * Copyright (C) 2015-2025 Keguang Zhang <keguang.zhang@gmail.com>
6  */
7 
#include <linux/bitfield.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
19 
/* Loongson NAND Controller Registers */
#define LOONGSON_NAND_CMD		0x0
#define LOONGSON_NAND_ADDR1		0x4
#define LOONGSON_NAND_ADDR2		0x8
#define LOONGSON_NAND_TIMING		0xc
#define LOONGSON_NAND_IDL		0x10
/* ID high bytes and the chip status byte share this register. */
#define LOONGSON_NAND_IDH_STATUS	0x14
#define LOONGSON_NAND_PARAM		0x18
#define LOONGSON_NAND_OP_NUM		0x1c

/* Bitfields of nand command register */
#define LOONGSON_NAND_CMD_OP_DONE	BIT(10)
#define LOONGSON_NAND_CMD_OP_SPARE	BIT(9)
#define LOONGSON_NAND_CMD_OP_MAIN	BIT(8)
#define LOONGSON_NAND_CMD_STATUS	BIT(7)
#define LOONGSON_NAND_CMD_RESET		BIT(6)
#define LOONGSON_NAND_CMD_READID	BIT(5)
#define LOONGSON_NAND_CMD_BLOCKS_ERASE	BIT(4)
#define LOONGSON_NAND_CMD_ERASE		BIT(3)
#define LOONGSON_NAND_CMD_WRITE		BIT(2)
#define LOONGSON_NAND_CMD_READ		BIT(1)
/* Writing 1 here triggers the operation programmed in the other fields. */
#define LOONGSON_NAND_CMD_VALID		BIT(0)

/* Bitfields of nand timing register */
#define LOONGSON_NAND_WAIT_CYCLE_MASK	GENMASK(7, 0)
#define LOONGSON_NAND_HOLD_CYCLE_MASK	GENMASK(15, 8)

/* Bitfields of nand parameter register */
#define LOONGSON_NAND_CELL_SIZE_MASK	GENMASK(11, 8)

/* Two column address cycles, up to five address cycles total. */
#define LOONGSON_NAND_COL_ADDR_CYC	2U
#define LOONGSON_NAND_MAX_ADDR_CYC	5U

#define LOONGSON_NAND_READ_ID_SLEEP_US		1000
#define LOONGSON_NAND_READ_ID_TIMEOUT_US	5000

/* Width of the 32-bit address registers, in bits. */
#define BITS_PER_WORD			(4 * BITS_PER_BYTE)

struct loongson_nand_host;
59 
60 struct loongson_nand_op {
61 	char addrs[LOONGSON_NAND_MAX_ADDR_CYC];
62 	unsigned int naddrs;
63 	unsigned int addrs_offset;
64 	unsigned int aligned_offset;
65 	unsigned int cmd_reg;
66 	unsigned int row_start;
67 	unsigned int rdy_timeout_ms;
68 	unsigned int orig_len;
69 	bool is_readid;
70 	bool is_erase;
71 	bool is_write;
72 	bool is_read;
73 	bool is_change_column;
74 	size_t len;
75 	char *buf;
76 };
77 
/*
 * struct loongson_nand_data - SoC-specific controller description
 * @max_id_cycle: number of ID bytes the controller latches on READID
 * @id_cycle_field: PARAM field holding the ID cycle count (0 if absent)
 * @status_field: location of the status byte in the IDH_STATUS register
 * @op_scope_field: PARAM field holding the operation scope (0 if absent)
 * @hold_cycle: bus hold timing value
 * @wait_cycle: bus wait timing value
 * @nand_cs: chip select to program into ADDR2 (0 if not applicable)
 * @set_addr: SoC-specific hook to program the address registers
 */
struct loongson_nand_data {
	unsigned int max_id_cycle;
	unsigned int id_cycle_field;
	unsigned int status_field;
	unsigned int op_scope_field;
	unsigned int hold_cycle;
	unsigned int wait_cycle;
	unsigned int nand_cs;
	void (*set_addr)(struct loongson_nand_host *host, struct loongson_nand_op *op);
};
88 
/* Driver state for one controller instance (one attached NAND chip). */
struct loongson_nand_host {
	struct device *dev;
	struct nand_chip chip;
	struct nand_controller controller;
	const struct loongson_nand_data *data;
	/* Location of the CS bits in ADDR2; depends on detected chip capacity. */
	unsigned int addr_cs_field;
	void __iomem *reg_base;
	struct regmap *regmap;
	/* DMA Engine stuff */
	dma_addr_t dma_base;
	struct dma_chan *dma_chan;
	dma_cookie_t dma_cookie;
	struct completion dma_complete;
};
103 
/* MMIO regmap over the 32-bit controller register file. */
static const struct regmap_config loongson_nand_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};
109 
/*
 * Map a raw NAND opcode onto the controller's abstracted command register
 * value. Two-stage sequences (erase, program, read, change-column) are
 * tracked via flags in @op so that a second-stage opcode arriving without
 * its first stage is rejected.
 *
 * Returns 0 on success or -EOPNOTSUPP for opcodes the controller cannot
 * express.
 */
static int loongson_nand_op_cmd_mapping(struct nand_chip *chip, struct loongson_nand_op *op,
					u8 opcode)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);

	/* Default bit position in ADDR1 where the row address begins. */
	op->row_start = chip->page_shift + 1;

	/* The controller abstracts the following NAND operations. */
	switch (opcode) {
	case NAND_CMD_STATUS:
		op->cmd_reg = LOONGSON_NAND_CMD_STATUS;
		break;
	case NAND_CMD_RESET:
		op->cmd_reg = LOONGSON_NAND_CMD_RESET;
		break;
	case NAND_CMD_READID:
		op->is_readid = true;
		op->cmd_reg = LOONGSON_NAND_CMD_READID;
		break;
	case NAND_CMD_ERASE1:
		op->is_erase = true;
		/* Erase carries no column cycles; park the rows after them. */
		op->addrs_offset = LOONGSON_NAND_COL_ADDR_CYC;
		break;
	case NAND_CMD_ERASE2:
		if (!op->is_erase)
			return -EOPNOTSUPP;
		/* During erasing, row_start differs from the default value. */
		op->row_start = chip->page_shift;
		op->cmd_reg = LOONGSON_NAND_CMD_ERASE;
		break;
	case NAND_CMD_SEQIN:
		op->is_write = true;
		break;
	case NAND_CMD_PAGEPROG:
		if (!op->is_write)
			return -EOPNOTSUPP;
		op->cmd_reg = LOONGSON_NAND_CMD_WRITE;
		break;
	case NAND_CMD_READ0:
		op->is_read = true;
		break;
	case NAND_CMD_READSTART:
		if (!op->is_read)
			return -EOPNOTSUPP;
		op->cmd_reg = LOONGSON_NAND_CMD_READ;
		break;
	case NAND_CMD_RNDOUT:
		op->is_change_column = true;
		break;
	case NAND_CMD_RNDOUTSTART:
		if (!op->is_change_column)
			return -EOPNOTSUPP;
		op->cmd_reg = LOONGSON_NAND_CMD_READ;
		break;
	default:
		dev_dbg(host->dev, "unsupported opcode: %u\n", opcode);
		return -EOPNOTSUPP;
	}

	return 0;
}
171 
/*
 * Walk the instructions of @subop and fill in @op: the command register
 * value, the address cycles, the data buffer/length, and the ready-wait
 * timeout. Returns 0 on success or a negative error code.
 */
static int loongson_nand_parse_instructions(struct nand_chip *chip, const struct nand_subop *subop,
					    struct loongson_nand_op *op)
{
	unsigned int op_id;
	int ret;

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		const struct nand_op_instr *instr = &subop->instrs[op_id];
		unsigned int offset, naddrs;
		const u8 *addrs;

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			ret = loongson_nand_op_cmd_mapping(chip, op, instr->ctx.cmd.opcode);
			if (ret < 0)
				return ret;

			break;
		case NAND_OP_ADDR_INSTR:
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			if (naddrs > LOONGSON_NAND_MAX_ADDR_CYC)
				return -EOPNOTSUPP;
			op->naddrs = naddrs;
			offset = nand_subop_get_addr_start_off(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];
			/* addrs_offset skips the column cycles for erase sequences. */
			memcpy(op->addrs + op->addrs_offset, addrs, naddrs);
			break;
		case NAND_OP_DATA_IN_INSTR:
		case NAND_OP_DATA_OUT_INSTR:
			offset = nand_subop_get_data_start_off(subop, op_id);
			op->orig_len = nand_subop_get_data_len(subop, op_id);
			if (instr->type == NAND_OP_DATA_IN_INSTR)
				op->buf = instr->ctx.data.buf.in + offset;
			else if (instr->type == NAND_OP_DATA_OUT_INSTR)
				op->buf = (void *)instr->ctx.data.buf.out + offset;

			break;
		case NAND_OP_WAITRDY_INSTR:
			op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
			break;
		default:
			break;
		}
	}

	return 0;
}
219 
/*
 * Program the chip-select bits into ADDR2. The field position depends on
 * the detected chip capacity (set by loongson_nand_get_chip_capacity()).
 * A no-op for SoCs whose data has nand_cs == 0.
 */
static void loongson_nand_set_addr_cs(struct loongson_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (!host->data->nand_cs)
		return;

	/*
	 * The Manufacturer/Chip ID read operation precedes attach_chip, at which point
	 * information such as NAND chip selection and capacity is unknown. As a
	 * workaround, we use 128MB cellsize (2KB pagesize) as a fallback.
	 */
	if (!mtd->writesize)
		host->addr_cs_field = GENMASK(17, 16);

	regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, host->addr_cs_field,
			   host->data->nand_cs << __ffs(host->addr_cs_field));
}
239 
/*
 * LS1B address programming: the two column cycles occupy the low bits of
 * ADDR1 (limited to the page size), the row cycles start at op->row_start
 * within ADDR1, and whatever part of the fifth cycle does not fit in the
 * 32-bit ADDR1 spills over into ADDR2.
 */
static void ls1b_nand_set_addr(struct loongson_nand_host *host, struct loongson_nand_op *op)
{
	struct nand_chip *chip = &host->chip;
	int i;

	for (i = 0; i < LOONGSON_NAND_MAX_ADDR_CYC; i++) {
		int shift, mask, val;

		if (i < LOONGSON_NAND_COL_ADDR_CYC) {
			shift = i * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			/* Column bits above the page size are not wired up. */
			mask &= GENMASK(chip->page_shift, 0);
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);
		} else if (!op->is_change_column) {
			/* Row cycles; change-column keeps the previous row. */
			shift = op->row_start + (i - LOONGSON_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);

			/* High bits of the last cycle that overflow ADDR1 land in ADDR2. */
			if (i == 4) {
				mask = (u32)0xff >> (BITS_PER_WORD - shift);
				val = (u32)op->addrs[i] >> (BITS_PER_WORD - shift);
				regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, mask, val);
			}
		}
	}
}
268 
/*
 * LS1C address programming: column cycles go into ADDR1, row cycles into
 * ADDR2 starting at bit 0, then the chip-select bits are merged into ADDR2.
 */
static void ls1c_nand_set_addr(struct loongson_nand_host *host, struct loongson_nand_op *op)
{
	int i;

	for (i = 0; i < LOONGSON_NAND_MAX_ADDR_CYC; i++) {
		int shift, mask, val;

		if (i < LOONGSON_NAND_COL_ADDR_CYC) {
			shift = i * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);
		} else if (!op->is_change_column) {
			/* Row cycles; change-column keeps the previous row. */
			shift = (i - LOONGSON_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, mask, val);
		}
	}

	loongson_nand_set_addr_cs(host);
}
291 
/*
 * Program the address, length, scope and command registers for @op and
 * kick off the operation by setting the VALID bit.
 */
static void loongson_nand_trigger_op(struct loongson_nand_host *host, struct loongson_nand_op *op)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int col0 = op->addrs[0];
	short col;

	/*
	 * Align the column down to the controller's buffer alignment; the
	 * dropped offset is remembered and compensated for after the DMA
	 * transfer.
	 */
	if (!IS_ALIGNED(col0, chip->buf_align)) {
		col0 = ALIGN_DOWN(op->addrs[0], chip->buf_align);
		op->aligned_offset = op->addrs[0] - col0;
		op->addrs[0] = col0;
	}

	if (host->data->set_addr)
		host->data->set_addr(host, op);

	/* set operation length */
	if (op->is_write || op->is_read || op->is_change_column)
		op->len = ALIGN(op->orig_len + op->aligned_offset, chip->buf_align);
	else if (op->is_erase)
		op->len = 1;
	else
		op->len = op->orig_len;

	writel(op->len, host->reg_base + LOONGSON_NAND_OP_NUM);

	/* set operation area and scope */
	/*
	 * NOTE(review): col is a signed short rebuilt from two address
	 * bytes; confirm column values never set the top bit, otherwise the
	 * writesize comparison below can go wrong through sign extension.
	 */
	col = op->addrs[1] << BITS_PER_BYTE | op->addrs[0];
	if (op->orig_len && !op->is_readid) {
		unsigned int op_scope = 0;

		/* Columns inside the page body touch the main area... */
		if (col < mtd->writesize) {
			op->cmd_reg |= LOONGSON_NAND_CMD_OP_MAIN;
			op_scope = mtd->writesize;
		}

		/* ...and the spare (OOB) area is always within scope. */
		op->cmd_reg |= LOONGSON_NAND_CMD_OP_SPARE;
		op_scope += mtd->oobsize;

		op_scope <<= __ffs(host->data->op_scope_field);
		regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM,
				   host->data->op_scope_field, op_scope);
	}

	/* set command */
	writel(op->cmd_reg, host->reg_base + LOONGSON_NAND_CMD);

	/* trigger operation */
	regmap_write_bits(host->regmap, LOONGSON_NAND_CMD, LOONGSON_NAND_CMD_VALID,
			  LOONGSON_NAND_CMD_VALID);
}
343 
344 static int loongson_nand_wait_for_op_done(struct loongson_nand_host *host,
345 					  struct loongson_nand_op *op)
346 {
347 	unsigned int val;
348 	int ret = 0;
349 
350 	if (op->rdy_timeout_ms) {
351 		ret = regmap_read_poll_timeout(host->regmap, LOONGSON_NAND_CMD,
352 					       val, val & LOONGSON_NAND_CMD_OP_DONE,
353 					       0, op->rdy_timeout_ms * MSEC_PER_SEC);
354 		if (ret)
355 			dev_err(host->dev, "operation failed\n");
356 	}
357 
358 	return ret;
359 }
360 
361 static void loongson_nand_dma_callback(void *data)
362 {
363 	struct loongson_nand_host *host = (struct loongson_nand_host *)data;
364 	struct dma_chan *chan = host->dma_chan;
365 	struct device *dev = chan->device->dev;
366 	enum dma_status status;
367 
368 	status = dmaengine_tx_status(chan, host->dma_cookie, NULL);
369 	if (likely(status == DMA_COMPLETE)) {
370 		dev_dbg(dev, "DMA complete with cookie=%d\n", host->dma_cookie);
371 		complete(&host->dma_complete);
372 	} else {
373 		dev_err(dev, "DMA error with cookie=%d\n", host->dma_cookie);
374 	}
375 }
376 
/*
 * Move op->orig_len bytes between op->buf and the controller's DMA port.
 *
 * Fast path: when the caller's buffer and length satisfy the controller's
 * alignment, stream directly with dma_map_single(). Otherwise reads are
 * bounced through a coherent buffer of the aligned length op->len and
 * copied back (minus the alignment offset); unaligned writes cannot be
 * bounced safely and are rejected.
 *
 * Returns 0 on success or a negative error code.
 */
static int loongson_nand_dma_transfer(struct loongson_nand_host *host, struct loongson_nand_op *op)
{
	struct nand_chip *chip = &host->chip;
	struct dma_chan *chan = host->dma_chan;
	struct device *dev = chan->device->dev;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction data_dir = op->is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	enum dma_transfer_direction xfer_dir = op->is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	void *buf = op->buf;
	char *dma_buf = NULL;
	dma_addr_t dma_addr;
	int ret;

	if (IS_ALIGNED((uintptr_t)buf, chip->buf_align) &&
	    IS_ALIGNED(op->orig_len, chip->buf_align)) {
		/*
		 * NOTE(review): the mapping covers orig_len but the slave
		 * transfer below uses op->len — confirm the two are always
		 * equal on this path (i.e. aligned_offset is 0 whenever the
		 * buffer and length are aligned).
		 */
		dma_addr = dma_map_single(dev, buf, op->orig_len, data_dir);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "failed to map DMA buffer\n");
			return -ENXIO;
		}
	} else if (!op->is_write) {
		/* Unaligned read: bounce through a coherent buffer. */
		dma_buf = dma_alloc_coherent(dev, op->len, &dma_addr, GFP_KERNEL);
		if (!dma_buf)
			return -ENOMEM;
	} else {
		dev_err(dev, "subpage writing not supported\n");
		return -EOPNOTSUPP;
	}

	desc = dmaengine_prep_slave_single(chan, dma_addr, op->len, xfer_dir, DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "failed to prepare DMA descriptor\n");
		ret = -ENOMEM;
		goto err;
	}
	desc->callback = loongson_nand_dma_callback;
	desc->callback_param = host;

	host->dma_cookie = dmaengine_submit(desc);
	ret = dma_submit_error(host->dma_cookie);
	if (ret) {
		dev_err(dev, "failed to submit DMA descriptor\n");
		goto err;
	}

	dev_dbg(dev, "issue DMA with cookie=%d\n", host->dma_cookie);
	dma_async_issue_pending(chan);

	if (!wait_for_completion_timeout(&host->dma_complete, msecs_to_jiffies(1000))) {
		dmaengine_terminate_sync(chan);
		reinit_completion(&host->dma_complete);
		ret = -ETIMEDOUT;
		goto err;
	}

	/* Bounced read: copy back, skipping the alignment padding. */
	if (dma_buf)
		memcpy(buf, dma_buf + op->aligned_offset, op->orig_len);
err:
	if (dma_buf)
		dma_free_coherent(dev, op->len, dma_buf, dma_addr);
	else
		dma_unmap_single(dev, dma_addr, op->orig_len, data_dir);

	return ret;
}
442 
443 static int loongson_nand_data_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
444 {
445 	struct loongson_nand_host *host = nand_get_controller_data(chip);
446 	struct loongson_nand_op op = {};
447 	int ret;
448 
449 	ret = loongson_nand_parse_instructions(chip, subop, &op);
450 	if (ret)
451 		return ret;
452 
453 	loongson_nand_trigger_op(host, &op);
454 
455 	ret = loongson_nand_dma_transfer(host, &op);
456 	if (ret)
457 		return ret;
458 
459 	return loongson_nand_wait_for_op_done(host, &op);
460 }
461 
/*
 * Execute a subop without a DMA data phase: parse it, trigger the
 * controller, then wait for completion.
 */
static int loongson_nand_misc_type_exec(struct nand_chip *chip, const struct nand_subop *subop,
					struct loongson_nand_op *op)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	int err = loongson_nand_parse_instructions(chip, subop, op);

	if (err)
		return err;

	loongson_nand_trigger_op(host, op);

	return loongson_nand_wait_for_op_done(host, op);
}
476 
/* Execute a data-less subop (e.g. RESET) through the generic path. */
static int loongson_nand_zerolen_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
	struct loongson_nand_op op = {};

	return loongson_nand_misc_type_exec(chip, subop, &op);
}
483 
/*
 * Execute a READID subop and copy the latched ID bytes into the caller's
 * buffer.
 */
static int loongson_nand_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	struct loongson_nand_op op = {};
	int i, ret;
	/*
	 * Overlay the 32-bit IDL and 16-bit IDH register values onto a byte
	 * array so individual ID bytes can be picked out.
	 * NOTE(review): this layout assumes ids[0] aliases the low byte of
	 * idl (little-endian) — verify if this driver is ever built for a
	 * big-endian configuration.
	 */
	union {
		char ids[6];
		struct {
			int idl;
			u16 idh;
		};
	} nand_id;

	ret = loongson_nand_misc_type_exec(chip, subop, &op);
	if (ret)
		return ret;

	/* Wait until the controller latches a non-zero ID low word. */
	ret = regmap_read_poll_timeout(host->regmap, LOONGSON_NAND_IDL, nand_id.idl, nand_id.idl,
				       LOONGSON_NAND_READ_ID_SLEEP_US,
				       LOONGSON_NAND_READ_ID_TIMEOUT_US);
	if (ret)
		return ret;

	nand_id.idh = readw(host->reg_base + LOONGSON_NAND_IDH_STATUS);

	/* The controller stores the ID bytes in reverse order. */
	for (i = 0; i < min(host->data->max_id_cycle, op.orig_len); i++)
		op.buf[i] = nand_id.ids[host->data->max_id_cycle - 1 - i];

	return ret;
}
514 
515 static int loongson_nand_read_status_type_exec(struct nand_chip *chip,
516 					       const struct nand_subop *subop)
517 {
518 	struct loongson_nand_host *host = nand_get_controller_data(chip);
519 	struct loongson_nand_op op = {};
520 	int val, ret;
521 
522 	ret = loongson_nand_misc_type_exec(chip, subop, &op);
523 	if (ret)
524 		return ret;
525 
526 	val = readl(host->reg_base + LOONGSON_NAND_IDH_STATUS);
527 	val &= ~host->data->status_field;
528 	op.buf[0] = val << ffs(host->data->status_field);
529 
530 	return ret;
531 }
532 
/*
 * Supported instruction patterns, tried in order:
 *  - READID:      CMD + ADDR + DATA_IN (up to 8 ID bytes)
 *  - STATUS:      CMD + DATA_IN (1 byte)
 *  - RESET-like:  CMD + WAITRDY
 *  - ERASE:       CMD + ADDR + CMD + WAITRDY
 *  - page read:   CMD + ADDR + CMD + optional WAITRDY + DATA_IN
 *  - page write:  CMD + ADDR + DATA_OUT + CMD + optional WAITRDY
 */
static const struct nand_op_parser loongson_nand_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(
		loongson_nand_read_id_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_read_status_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_zerolen_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_zerolen_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_data_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
	NAND_OP_PARSER_PATTERN(
		loongson_nand_data_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	);
568 
569 static int loongson_nand_is_valid_cmd(u8 opcode)
570 {
571 	if (opcode == NAND_CMD_STATUS || opcode == NAND_CMD_RESET || opcode == NAND_CMD_READID)
572 		return 0;
573 
574 	return -EOPNOTSUPP;
575 }
576 
577 static int loongson_nand_is_valid_cmd_seq(u8 opcode1, u8 opcode2)
578 {
579 	if (opcode1 == NAND_CMD_RNDOUT && opcode2 == NAND_CMD_RNDOUTSTART)
580 		return 0;
581 
582 	if (opcode1 == NAND_CMD_READ0 && opcode2 == NAND_CMD_READSTART)
583 		return 0;
584 
585 	if (opcode1 == NAND_CMD_ERASE1 && opcode2 == NAND_CMD_ERASE2)
586 		return 0;
587 
588 	if (opcode1 == NAND_CMD_SEQIN && opcode2 == NAND_CMD_PAGEPROG)
589 		return 0;
590 
591 	return -EOPNOTSUPP;
592 }
593 
594 static int loongson_nand_check_op(struct nand_chip *chip, const struct nand_operation *op)
595 {
596 	const struct nand_op_instr *instr1 = NULL, *instr2 = NULL;
597 	int op_id;
598 
599 	for (op_id = 0; op_id < op->ninstrs; op_id++) {
600 		const struct nand_op_instr *instr = &op->instrs[op_id];
601 
602 		if (instr->type == NAND_OP_CMD_INSTR) {
603 			if (!instr1)
604 				instr1 = instr;
605 			else if (!instr2)
606 				instr2 = instr;
607 			else
608 				break;
609 		}
610 	}
611 
612 	if (!instr1)
613 		return -EOPNOTSUPP;
614 
615 	if (!instr2)
616 		return loongson_nand_is_valid_cmd(instr1->ctx.cmd.opcode);
617 
618 	return loongson_nand_is_valid_cmd_seq(instr1->ctx.cmd.opcode, instr2->ctx.cmd.opcode);
619 }
620 
621 static int loongson_nand_exec_op(struct nand_chip *chip, const struct nand_operation *op,
622 				 bool check_only)
623 {
624 	if (check_only)
625 		return loongson_nand_check_op(chip, op);
626 
627 	return nand_op_parser_exec_op(chip, &loongson_nand_op_parser, op, check_only);
628 }
629 
630 static int loongson_nand_get_chip_capacity(struct nand_chip *chip)
631 {
632 	struct loongson_nand_host *host = nand_get_controller_data(chip);
633 	u64 chipsize = nanddev_target_size(&chip->base);
634 	struct mtd_info *mtd = nand_to_mtd(chip);
635 
636 	switch (mtd->writesize) {
637 	case SZ_512:
638 		switch (chipsize) {
639 		case SZ_8M:
640 			host->addr_cs_field = GENMASK(15, 14);
641 			return 0x9;
642 		case SZ_16M:
643 			host->addr_cs_field = GENMASK(16, 15);
644 			return 0xa;
645 		case SZ_32M:
646 			host->addr_cs_field = GENMASK(17, 16);
647 			return 0xb;
648 		case SZ_64M:
649 			host->addr_cs_field = GENMASK(18, 17);
650 			return 0xc;
651 		case SZ_128M:
652 			host->addr_cs_field = GENMASK(19, 18);
653 			return 0xd;
654 		}
655 		break;
656 	case SZ_2K:
657 		switch (chipsize) {
658 		case SZ_128M:
659 			host->addr_cs_field = GENMASK(17, 16);
660 			return 0x0;
661 		case SZ_256M:
662 			host->addr_cs_field = GENMASK(18, 17);
663 			return 0x1;
664 		case SZ_512M:
665 			host->addr_cs_field = GENMASK(19, 18);
666 			return 0x2;
667 		case SZ_1G:
668 			host->addr_cs_field = GENMASK(20, 19);
669 			return 0x3;
670 		}
671 		break;
672 	case SZ_4K:
673 		if (chipsize == SZ_2G) {
674 			host->addr_cs_field = GENMASK(20, 19);
675 			return 0x4;
676 		}
677 		break;
678 	case SZ_8K:
679 		switch (chipsize) {
680 		case SZ_4G:
681 			host->addr_cs_field = GENMASK(20, 19);
682 			return 0x5;
683 		case SZ_8G:
684 			host->addr_cs_field = GENMASK(21, 20);
685 			return 0x6;
686 		case SZ_16G:
687 			host->addr_cs_field = GENMASK(22, 21);
688 			return 0x7;
689 		}
690 		break;
691 	}
692 
693 	dev_err(host->dev, "Unsupported chip size: %llu MB with page size %u B\n",
694 		chipsize, mtd->writesize);
695 	return -EINVAL;
696 }
697 
/*
 * attach_chip hook: validate the detected geometry and ECC mode, then
 * program the cell size and bus timings into the controller.
 */
static int loongson_nand_attach_chip(struct nand_chip *chip)
{
	struct loongson_nand_host *host = nand_get_controller_data(chip);
	int cell_size = loongson_nand_get_chip_capacity(chip);

	if (cell_size < 0)
		return cell_size;

	/* The controller has no ECC engine: only none or software ECC. */
	switch (chip->ecc.engine_type) {
	case NAND_ECC_ENGINE_TYPE_NONE:
		break;
	case NAND_ECC_ENGINE_TYPE_SOFT:
		break;
	default:
		return -EINVAL;
	}

	/* set cell size */
	regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM, LOONGSON_NAND_CELL_SIZE_MASK,
			   FIELD_PREP(LOONGSON_NAND_CELL_SIZE_MASK, cell_size));

	regmap_update_bits(host->regmap, LOONGSON_NAND_TIMING, LOONGSON_NAND_HOLD_CYCLE_MASK,
			   FIELD_PREP(LOONGSON_NAND_HOLD_CYCLE_MASK, host->data->hold_cycle));

	regmap_update_bits(host->regmap, LOONGSON_NAND_TIMING, LOONGSON_NAND_WAIT_CYCLE_MASK,
			   FIELD_PREP(LOONGSON_NAND_WAIT_CYCLE_MASK, host->data->wait_cycle));

	/* Whole-page transfers only: use the monolithic raw accessors. */
	chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
	chip->ecc.write_page_raw = nand_monolithic_write_page_raw;

	return 0;
}
730 
/* Controller hooks exposed to the raw NAND core. */
static const struct nand_controller_ops loongson_nand_controller_ops = {
	.exec_op = loongson_nand_exec_op,
	.attach_chip = loongson_nand_attach_chip,
};
735 
736 static void loongson_nand_controller_cleanup(struct loongson_nand_host *host)
737 {
738 	if (host->dma_chan)
739 		dma_release_channel(host->dma_chan);
740 }
741 
/*
 * Set up the regmap, the ID cycle count (where the SoC exposes it) and the
 * "rxtx" DMA channel. On failure after the channel was acquired, the
 * caller is expected to run loongson_nand_controller_cleanup().
 */
static int loongson_nand_controller_init(struct loongson_nand_host *host)
{
	struct device *dev = host->dev;
	struct dma_chan *chan;
	struct dma_slave_config cfg = {};
	int ret;

	host->regmap = devm_regmap_init_mmio(dev, host->reg_base, &loongson_nand_regmap_config);
	if (IS_ERR(host->regmap))
		return dev_err_probe(dev, PTR_ERR(host->regmap), "failed to init regmap\n");

	if (host->data->id_cycle_field)
		regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM, host->data->id_cycle_field,
				   host->data->max_id_cycle << __ffs(host->data->id_cycle_field));

	chan = dma_request_chan(dev, "rxtx");
	if (IS_ERR(chan))
		return dev_err_probe(dev, PTR_ERR(chan), "failed to request DMA channel\n");
	host->dma_chan = chan;

	/* Both directions target the controller's single DMA data port. */
	cfg.src_addr = host->dma_base;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr = host->dma_base;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	ret = dmaengine_slave_config(host->dma_chan, &cfg);
	if (ret)
		return dev_err_probe(dev, ret, "failed to config DMA channel\n");

	init_completion(&host->dma_complete);

	return 0;
}
774 
/*
 * Parse the single supported NAND chip child node, scan the chip and
 * register the resulting MTD device.
 */
static int loongson_nand_chip_init(struct loongson_nand_host *host)
{
	struct device *dev = host->dev;
	int nchips = of_get_child_count(dev->of_node);
	struct device_node *chip_np;
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	if (nchips != 1)
		return dev_err_probe(dev, -EINVAL, "Currently one NAND chip supported\n");

	chip_np = of_get_next_child(dev->of_node, NULL);
	if (!chip_np)
		return dev_err_probe(dev, -ENODEV, "failed to get child node for NAND chip\n");

	nand_set_flash_node(chip, chip_np);
	/*
	 * NOTE(review): the node reference is dropped here while the chip
	 * keeps the pointer — confirm the node cannot go away before the
	 * chip is torn down.
	 */
	of_node_put(chip_np);
	if (!mtd->name)
		return dev_err_probe(dev, -EINVAL, "Missing MTD label\n");

	nand_set_controller_data(chip, host);
	chip->controller = &host->controller;
	chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_BROKEN_XD;
	/* Controller requires 16-byte aligned buffers and lengths. */
	chip->buf_align = 16;
	mtd->dev.parent = dev;
	mtd->owner = THIS_MODULE;

	ret = nand_scan(chip, 1);
	if (ret)
		return dev_err_probe(dev, ret, "failed to scan NAND chip\n");

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		nand_cleanup(chip);
		return dev_err_probe(dev, ret, "failed to register MTD device\n");
	}

	return 0;
}
815 
/*
 * Probe: map registers and the DMA data port, initialize the controller
 * and attach the NAND chip.
 */
static int loongson_nand_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct loongson_nand_data *data;
	struct loongson_nand_host *host;
	struct resource *res;
	int ret;

	data = of_device_get_match_data(dev);
	if (!data)
		return -ENODEV;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(host->reg_base))
		return PTR_ERR(host->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-dma");
	if (!res)
		return dev_err_probe(dev, -EINVAL, "Missing 'nand-dma' in reg-names property\n");

	/*
	 * NOTE(review): this mapping is never dma_unmap_resource()'d, neither
	 * on the error path below nor in remove — consider adding the unmap.
	 */
	host->dma_base = dma_map_resource(dev, res->start, resource_size(res),
					  DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, host->dma_base))
		return -ENXIO;

	host->dev = dev;
	host->data = data;
	host->controller.ops = &loongson_nand_controller_ops;

	nand_controller_init(&host->controller);

	ret = loongson_nand_controller_init(host);
	if (ret)
		goto err;

	ret = loongson_nand_chip_init(host);
	if (ret)
		goto err;

	platform_set_drvdata(pdev, host);

	return 0;
err:
	loongson_nand_controller_cleanup(host);

	return ret;
}
867 
/* Remove: unregister the MTD device and tear down chip and controller. */
static void loongson_nand_remove(struct platform_device *pdev)
{
	struct loongson_nand_host *host = platform_get_drvdata(pdev);
	struct nand_chip *chip = &host->chip;
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);
	loongson_nand_controller_cleanup(host);
}
879 
/* LS1B: 5 ID bytes, status at IDH_STATUS bits 15:8, no scope/CS fields. */
static const struct loongson_nand_data ls1b_nand_data = {
	.max_id_cycle = 5,
	.status_field = GENMASK(15, 8),
	.hold_cycle = 0x2,
	.wait_cycle = 0xc,
	.set_addr = ls1b_nand_set_addr,
};
887 
/* LS1C: 6 ID bytes, configurable ID cycle count, status at bits 23:16. */
static const struct loongson_nand_data ls1c_nand_data = {
	.max_id_cycle = 6,
	.id_cycle_field = GENMASK(14, 12),
	.status_field = GENMASK(23, 16),
	.op_scope_field = GENMASK(29, 16),
	.hold_cycle = 0x2,
	.wait_cycle = 0xc,
	.set_addr = ls1c_nand_set_addr,
};
897 
/* Device-tree match table, selecting the SoC-specific data above. */
static const struct of_device_id loongson_nand_match[] = {
	{
		.compatible = "loongson,ls1b-nand-controller",
		.data = &ls1b_nand_data,
	},
	{
		.compatible = "loongson,ls1c-nand-controller",
		.data = &ls1c_nand_data,
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, loongson_nand_match);
910 
/* Platform driver glue. */
static struct platform_driver loongson_nand_driver = {
	.probe = loongson_nand_probe,
	.remove = loongson_nand_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = loongson_nand_match,
	},
};

module_platform_driver(loongson_nand_driver);

MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
MODULE_DESCRIPTION("Loongson NAND Controller Driver");
MODULE_LICENSE("GPL");
925