// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NAND Controller Driver for Loongson-1 SoC
 *
 * Copyright (C) 2015-2025 Keguang Zhang <keguang.zhang@gmail.com>
 */

#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sizes.h>

/* Loongson-1 NAND Controller Registers */
#define LS1X_NAND_CMD		0x0
#define LS1X_NAND_ADDR1		0x4
#define LS1X_NAND_ADDR2		0x8
#define LS1X_NAND_TIMING	0xc
#define LS1X_NAND_IDL		0x10
#define LS1X_NAND_IDH_STATUS	0x14
#define LS1X_NAND_PARAM		0x18
#define LS1X_NAND_OP_NUM	0x1c

/* NAND Command Register Bits */
#define LS1X_NAND_CMD_OP_DONE		BIT(10)
#define LS1X_NAND_CMD_OP_SPARE		BIT(9)
#define LS1X_NAND_CMD_OP_MAIN		BIT(8)
#define LS1X_NAND_CMD_STATUS		BIT(7)
#define LS1X_NAND_CMD_RESET		BIT(6)
#define LS1X_NAND_CMD_READID		BIT(5)
#define LS1X_NAND_CMD_BLOCKS_ERASE	BIT(4)
#define LS1X_NAND_CMD_ERASE		BIT(3)
#define LS1X_NAND_CMD_WRITE		BIT(2)
#define LS1X_NAND_CMD_READ		BIT(1)
#define LS1X_NAND_CMD_VALID		BIT(0)

/* NAND Timing Register Fields */
#define LS1X_NAND_WAIT_CYCLE_MASK	GENMASK(7, 0)
#define LS1X_NAND_HOLD_CYCLE_MASK	GENMASK(15, 8)
/* NAND Parameter Register Fields */
#define LS1X_NAND_CELL_SIZE_MASK	GENMASK(11, 8)

#define LS1X_NAND_COL_ADDR_CYC		2U
#define LS1X_NAND_MAX_ADDR_CYC		5U

#define BITS_PER_WORD		(4 * BITS_PER_BYTE)

struct ls1x_nand_host;

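/*
 * ls1x_nand_op - per-operation state decoded from a generic NAND subop:
 * address cycles, the composed command-register value, the data buffer
 * and lengths, plus flags telling the trigger and DMA paths how to behave.
 */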
struct ls1x_nand_op {
	char addrs[LS1X_NAND_MAX_ADDR_CYC];
	unsigned int naddrs;
	unsigned int addrs_offset;
	unsigned int aligned_offset;
	unsigned int cmd_reg;
	unsigned int row_start;
	unsigned int rdy_timeout_ms;
	unsigned int orig_len;
	bool is_readid;
	bool is_erase;
	bool is_write;
	bool is_read;
	bool is_change_column;
	size_t len;
	char *buf;
};

struct ls1x_nand_data {
	unsigned int status_field;
	unsigned int op_scope_field;
	unsigned int hold_cycle;
	unsigned int wait_cycle;
	void (*set_addr)(struct ls1x_nand_host *host, struct ls1x_nand_op *op);
};

struct ls1x_nand_host {
	struct device *dev;
	struct nand_chip chip;
	struct nand_controller controller;
	const struct ls1x_nand_data *data;
	void __iomem *reg_base;
	struct regmap *regmap;
	/* DMA Engine stuff */
	dma_addr_t dma_base;
	struct dma_chan *dma_chan;
	dma_cookie_t dma_cookie;
	struct completion dma_complete;
};

static const struct regmap_config ls1x_nand_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

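/*
 * Map a raw NAND opcode onto the command-register bit of the matching
 * controller-level operation. Two-part sequences (ERASE1/ERASE2,
 * SEQIN/PAGEPROG, READ0/READSTART, RNDOUT/RNDOUTSTART) only become
 * valid once their second opcode is seen.
 */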
static int ls1x_nand_op_cmd_mapping(struct nand_chip *chip, struct ls1x_nand_op *op, u8 opcode)
{
	struct ls1x_nand_host *host = nand_get_controller_data(chip);

	op->row_start = chip->page_shift + 1;

	/* The controller abstracts the following NAND operations. */
	switch (opcode) {
	case NAND_CMD_STATUS:
		op->cmd_reg = LS1X_NAND_CMD_STATUS;
		break;
	case NAND_CMD_RESET:
		op->cmd_reg = LS1X_NAND_CMD_RESET;
		break;
	case NAND_CMD_READID:
		op->is_readid = true;
		op->cmd_reg = LS1X_NAND_CMD_READID;
		break;
	case NAND_CMD_ERASE1:
		op->is_erase = true;
		op->addrs_offset = LS1X_NAND_COL_ADDR_CYC;
		break;
	case NAND_CMD_ERASE2:
		if (!op->is_erase)
			return -EOPNOTSUPP;
		/* During erasing, row_start differs from the default value. */
		op->row_start = chip->page_shift;
		op->cmd_reg = LS1X_NAND_CMD_ERASE;
		break;
	case NAND_CMD_SEQIN:
		op->is_write = true;
		break;
	case NAND_CMD_PAGEPROG:
		if (!op->is_write)
			return -EOPNOTSUPP;
		op->cmd_reg = LS1X_NAND_CMD_WRITE;
		break;
	case NAND_CMD_READ0:
		op->is_read = true;
		break;
	case NAND_CMD_READSTART:
		if (!op->is_read)
			return -EOPNOTSUPP;
		op->cmd_reg = LS1X_NAND_CMD_READ;
		break;
	case NAND_CMD_RNDOUT:
		op->is_change_column = true;
		break;
	case NAND_CMD_RNDOUTSTART:
		if (!op->is_change_column)
			return -EOPNOTSUPP;
		op->cmd_reg = LS1X_NAND_CMD_READ;
		break;
	default:
		dev_dbg(host->dev, "unsupported opcode: %u\n", opcode);
		return -EOPNOTSUPP;
	}

	return 0;
}

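/*
 * Walk the instructions of a subop and collect the address bytes, the
 * data buffer/length and the ready-wait timeout into a struct ls1x_nand_op.
 */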
static int ls1x_nand_parse_instructions(struct nand_chip *chip,
					const struct nand_subop *subop, struct ls1x_nand_op *op)
{
	unsigned int op_id;
	int ret;

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		const struct nand_op_instr *instr = &subop->instrs[op_id];
		unsigned int offset, naddrs;
		const u8 *addrs;

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			ret = ls1x_nand_op_cmd_mapping(chip, op, instr->ctx.cmd.opcode);
			if (ret < 0)
				return ret;

			break;
		case NAND_OP_ADDR_INSTR:
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			if (naddrs > LS1X_NAND_MAX_ADDR_CYC)
				return -EOPNOTSUPP;
			op->naddrs = naddrs;
			offset = nand_subop_get_addr_start_off(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];
			memcpy(op->addrs + op->addrs_offset, addrs, naddrs);
			break;
		case NAND_OP_DATA_IN_INSTR:
		case NAND_OP_DATA_OUT_INSTR:
			offset = nand_subop_get_data_start_off(subop, op_id);
			op->orig_len = nand_subop_get_data_len(subop, op_id);
			if (instr->type == NAND_OP_DATA_IN_INSTR)
				op->buf = instr->ctx.data.buf.in + offset;
			else if (instr->type == NAND_OP_DATA_OUT_INSTR)
				op->buf = (void *)instr->ctx.data.buf.out + offset;

			break;
		case NAND_OP_WAITRDY_INSTR:
			op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
			break;
		default:
			break;
		}
	}

	return 0;
}

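/*
 * On the LS1B, column and row addresses share the ADDR1 register: the
 * column occupies the low bits up to page_shift and the row starts at
 * op->row_start. The last row-address cycle may not fit entirely into
 * ADDR1, so its upper bits spill over into ADDR2.
 */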
static void ls1b_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
{
	struct nand_chip *chip = &host->chip;
	int i;

	for (i = 0; i < LS1X_NAND_MAX_ADDR_CYC; i++) {
		int shift, mask, val;

		if (i < LS1X_NAND_COL_ADDR_CYC) {
			shift = i * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			mask &= GENMASK(chip->page_shift, 0);
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val);
		} else if (!op->is_change_column) {
			shift = op->row_start + (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val);

			if (i == LS1X_NAND_MAX_ADDR_CYC - 1) {
				mask = (u32)0xff >> (BITS_PER_WORD - shift);
				val = (u32)op->addrs[i] >> (BITS_PER_WORD - shift);
				regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val);
			}
		}
	}
}

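/*
 * On the LS1C, the split is simpler: column cycles go to ADDR1 and row
 * cycles to ADDR2, each packed one byte per address cycle.
 */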
static void ls1c_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
{
	int i;

	for (i = 0; i < LS1X_NAND_MAX_ADDR_CYC; i++) {
		int shift, mask, val;

		if (i < LS1X_NAND_COL_ADDR_CYC) {
			shift = i * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val);
		} else if (!op->is_change_column) {
			shift = (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
			mask = (u32)0xff << shift;
			val = (u32)op->addrs[i] << shift;
			regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val);
		}
	}
}

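/*
 * Program address, length, operation scope and command, then kick off
 * the operation by setting the VALID bit. Unaligned column addresses
 * are aligned down to chip->buf_align; the difference is recorded in
 * op->aligned_offset and replayed later when copying out of the DMA
 * bounce buffer.
 */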
static void ls1x_nand_trigger_op(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int col0 = op->addrs[0];
	short col;

	if (!IS_ALIGNED(col0, chip->buf_align)) {
		col0 = ALIGN_DOWN(op->addrs[0], chip->buf_align);
		op->aligned_offset = op->addrs[0] - col0;
		op->addrs[0] = col0;
	}

	if (host->data->set_addr)
		host->data->set_addr(host, op);

	/* set operation length */
	if (op->is_write || op->is_read || op->is_change_column)
		op->len = ALIGN(op->orig_len + op->aligned_offset, chip->buf_align);
	else if (op->is_erase)
		op->len = 1;
	else
		op->len = op->orig_len;

	writel(op->len, host->reg_base + LS1X_NAND_OP_NUM);

	/* set operation area and scope */
	col = op->addrs[1] << BITS_PER_BYTE | op->addrs[0];
	if (op->orig_len && !op->is_readid) {
		unsigned int op_scope = 0;

		if (col < mtd->writesize) {
			op->cmd_reg |= LS1X_NAND_CMD_OP_MAIN;
			op_scope = mtd->writesize;
		}

		op->cmd_reg |= LS1X_NAND_CMD_OP_SPARE;
		op_scope += mtd->oobsize;

		/* Not all variants have the op-scope field (__ffs(0) is undefined). */
		if (host->data->op_scope_field) {
			op_scope <<= __ffs(host->data->op_scope_field);
			regmap_update_bits(host->regmap, LS1X_NAND_PARAM,
					   host->data->op_scope_field, op_scope);
		}
	}

	/* set command */
	writel(op->cmd_reg, host->reg_base + LS1X_NAND_CMD);

	/* trigger operation */
	regmap_write_bits(host->regmap, LS1X_NAND_CMD, LS1X_NAND_CMD_VALID, LS1X_NAND_CMD_VALID);
}

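/*
 * Poll the command register for OP_DONE. Only operations that carried
 * a WAITRDY instruction (nonzero timeout) are polled.
 */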
static int ls1x_nand_wait_for_op_done(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
{
	unsigned int val;
	int ret = 0;

	if (op->rdy_timeout_ms) {
		ret = regmap_read_poll_timeout(host->regmap, LS1X_NAND_CMD,
					       val, val & LS1X_NAND_CMD_OP_DONE,
					       0, op->rdy_timeout_ms * USEC_PER_MSEC);
		if (ret)
			dev_err(host->dev, "operation timed out\n");
	}

	return ret;
}

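/* Completion callback: only signal waiters if the descriptor really finished. */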
static void ls1x_nand_dma_callback(void *data)
{
	struct ls1x_nand_host *host = (struct ls1x_nand_host *)data;
	struct dma_chan *chan = host->dma_chan;
	struct device *dev = chan->device->dev;
	enum dma_status status;

	status = dmaengine_tx_status(chan, host->dma_cookie, NULL);
	if (likely(status == DMA_COMPLETE)) {
		dev_dbg(dev, "DMA complete with cookie=%d\n", host->dma_cookie);
		complete(&host->dma_complete);
	} else {
		dev_err(dev, "DMA error with cookie=%d\n", host->dma_cookie);
	}
}

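/*
 * Run the data phase through the DMA engine. Properly aligned buffers
 * are mapped directly; unaligned reads fall back to a coherent bounce
 * buffer, and unaligned writes are rejected since the controller
 * cannot do subpage writes.
 */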
static int ls1x_nand_dma_transfer(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
{
	struct nand_chip *chip = &host->chip;
	struct dma_chan *chan = host->dma_chan;
	struct device *dev = chan->device->dev;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction data_dir = op->is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	enum dma_transfer_direction xfer_dir = op->is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	void *buf = op->buf;
	char *dma_buf = NULL;
	dma_addr_t dma_addr;
	int ret;

	if (IS_ALIGNED((uintptr_t)buf, chip->buf_align) &&
	    IS_ALIGNED(op->orig_len, chip->buf_align)) {
		dma_addr = dma_map_single(dev, buf, op->orig_len, data_dir);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "failed to map DMA buffer\n");
			return -ENXIO;
		}
	} else if (!op->is_write) {
		dma_buf = dma_alloc_coherent(dev, op->len, &dma_addr, GFP_KERNEL);
		if (!dma_buf)
			return -ENOMEM;
	} else {
		dev_err(dev, "subpage writing not supported\n");
		return -EOPNOTSUPP;
	}

	desc = dmaengine_prep_slave_single(chan, dma_addr, op->len, xfer_dir, DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "failed to prepare DMA descriptor\n");
		ret = -ENOMEM;
		goto err;
	}
	desc->callback = ls1x_nand_dma_callback;
	desc->callback_param = host;

	host->dma_cookie = dmaengine_submit(desc);
	ret = dma_submit_error(host->dma_cookie);
	if (ret) {
		dev_err(dev, "failed to submit DMA descriptor\n");
		goto err;
	}

	dev_dbg(dev, "issue DMA with cookie=%d\n", host->dma_cookie);
	dma_async_issue_pending(chan);

	if (!wait_for_completion_timeout(&host->dma_complete, msecs_to_jiffies(1000))) {
		dmaengine_terminate_sync(chan);
		reinit_completion(&host->dma_complete);
		ret = -ETIMEDOUT;
		goto err;
	}

	if (dma_buf)
		memcpy(buf, dma_buf + op->aligned_offset, op->orig_len);
err:
	if (dma_buf)
		dma_free_coherent(dev, op->len, dma_buf, dma_addr);
	else
		dma_unmap_single(dev, dma_addr, op->orig_len, data_dir);

	return ret;
}

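/* Execute a parsed subop that moves data: trigger, DMA, then wait for OP_DONE. */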
static int ls1x_nand_data_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
	struct ls1x_nand_host *host = nand_get_controller_data(chip);
	struct ls1x_nand_op op = {};
	int ret;

	ret = ls1x_nand_parse_instructions(chip, subop, &op);
	if (ret)
		return ret;

	ls1x_nand_trigger_op(host, &op);

	ret = ls1x_nand_dma_transfer(host, &op);
	if (ret)
		return ret;

	return ls1x_nand_wait_for_op_done(host, &op);
}

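/* Execute a parsed subop with no DMA data phase (reset, erase, status, ID). */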
static int ls1x_nand_misc_type_exec(struct nand_chip *chip,
				    const struct nand_subop *subop, struct ls1x_nand_op *op)
{
	struct ls1x_nand_host *host = nand_get_controller_data(chip);
	int ret;

	ret = ls1x_nand_parse_instructions(chip, subop, op);
	if (ret)
		return ret;

	ls1x_nand_trigger_op(host, op);

	return ls1x_nand_wait_for_op_done(host, op);
}

static int ls1x_nand_zerolen_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
	struct ls1x_nand_op op = {};

	return ls1x_nand_misc_type_exec(chip, subop, &op);
}

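/*
 * READID: the controller latches the ID bytes into IDL (four bytes) and
 * the low byte of IDH_STATUS. The union reassembles them so the byte
 * from IDH_STATUS comes out first (a little-endian host is assumed, as
 * on Loongson-1).
 */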
static int ls1x_nand_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
	struct ls1x_nand_host *host = nand_get_controller_data(chip);
	struct ls1x_nand_op op = {};
	int i, ret;
	union {
		char ids[5];
		struct {
			int idl;
			char idh;
		};
	} nand_id;

	ret = ls1x_nand_misc_type_exec(chip, subop, &op);
	if (ret)
		return ret;

	nand_id.idl = readl(host->reg_base + LS1X_NAND_IDL);
	nand_id.idh = readb(host->reg_base + LS1X_NAND_IDH_STATUS);

	for (i = 0; i < min(sizeof(nand_id.ids), op.orig_len); i++)
		op.buf[i] = nand_id.ids[sizeof(nand_id.ids) - 1 - i];

	return ret;
}

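/*
 * READ STATUS: the chip status byte sits in a variant-specific field of
 * the IDH_STATUS register (bits 15:8 on LS1B, bits 23:16 on LS1C).
 */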
static int ls1x_nand_read_status_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
	struct ls1x_nand_host *host = nand_get_controller_data(chip);
	struct ls1x_nand_op op = {};
	int val, ret;

	ret = ls1x_nand_misc_type_exec(chip, subop, &op);
	if (ret)
		return ret;

	/* Extract the status byte from its field; the original masked it out. */
	val = readl(host->reg_base + LS1X_NAND_IDH_STATUS);
	val &= host->data->status_field;
	op.buf[0] = val >> __ffs(host->data->status_field);

	return ret;
}

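/*
 * Pattern table for the generic op parser: READID and READ STATUS get
 * dedicated handlers, command-only and erase sequences take the
 * zero-length path, and page reads/writes go through the DMA handler.
 */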
static const struct nand_op_parser ls1x_nand_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(
		ls1x_nand_read_id_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
	NAND_OP_PARSER_PATTERN(
		ls1x_nand_read_status_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
	NAND_OP_PARSER_PATTERN(
		ls1x_nand_zerolen_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		ls1x_nand_zerolen_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		ls1x_nand_data_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
	NAND_OP_PARSER_PATTERN(
		ls1x_nand_data_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	);

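/*
 * check_only path: accept only the single commands and two-command
 * sequences that map onto the controller-level operations above.
 */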
static int ls1x_nand_is_valid_cmd(u8 opcode)
{
	if (opcode == NAND_CMD_STATUS || opcode == NAND_CMD_RESET || opcode == NAND_CMD_READID)
		return 0;

	return -EOPNOTSUPP;
}

static int ls1x_nand_is_valid_cmd_seq(u8 opcode1, u8 opcode2)
{
	if (opcode1 == NAND_CMD_RNDOUT && opcode2 == NAND_CMD_RNDOUTSTART)
		return 0;

	if (opcode1 == NAND_CMD_READ0 && opcode2 == NAND_CMD_READSTART)
		return 0;

	if (opcode1 == NAND_CMD_ERASE1 && opcode2 == NAND_CMD_ERASE2)
		return 0;

	if (opcode1 == NAND_CMD_SEQIN && opcode2 == NAND_CMD_PAGEPROG)
		return 0;

	return -EOPNOTSUPP;
}

static int ls1x_nand_check_op(struct nand_chip *chip, const struct nand_operation *op)
{
	const struct nand_op_instr *instr1 = NULL, *instr2 = NULL;
	int op_id;

	for (op_id = 0; op_id < op->ninstrs; op_id++) {
		const struct nand_op_instr *instr = &op->instrs[op_id];

		if (instr->type == NAND_OP_CMD_INSTR) {
			if (!instr1)
				instr1 = instr;
			else if (!instr2)
				instr2 = instr;
			else
				break;
		}
	}

	if (!instr1)
		return -EOPNOTSUPP;

	if (!instr2)
		return ls1x_nand_is_valid_cmd(instr1->ctx.cmd.opcode);

	return ls1x_nand_is_valid_cmd_seq(instr1->ctx.cmd.opcode, instr2->ctx.cmd.opcode);
}

static int ls1x_nand_exec_op(struct nand_chip *chip,
			     const struct nand_operation *op, bool check_only)
{
	if (check_only)
		return ls1x_nand_check_op(chip, op);

	return nand_op_parser_exec_op(chip, &ls1x_nand_op_parser, op, check_only);
}

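/*
 * Late chip setup: encode the chip size into the PARAM cell-size field
 * (0 = 128M ... 7 = 16G) and program the variant's hold/wait timing cycles.
 */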
static int ls1x_nand_attach_chip(struct nand_chip *chip)
{
	struct ls1x_nand_host *host = nand_get_controller_data(chip);
	u64 chipsize = nanddev_target_size(&chip->base);
	int cell_size = 0;

	switch (chipsize) {
	case SZ_128M:
		cell_size = 0x0;
		break;
	case SZ_256M:
		cell_size = 0x1;
		break;
	case SZ_512M:
		cell_size = 0x2;
		break;
	case SZ_1G:
		cell_size = 0x3;
		break;
	case SZ_2G:
		cell_size = 0x4;
		break;
	case SZ_4G:
		cell_size = 0x5;
		break;
	case SZ_8G:
		cell_size = 0x6;
		break;
	case SZ_16G:
		cell_size = 0x7;
		break;
	default:
		dev_err(host->dev, "unsupported chip size: %llu MB\n", chipsize / SZ_1M);
		return -EINVAL;
	}

	switch (chip->ecc.engine_type) {
	case NAND_ECC_ENGINE_TYPE_NONE:
	case NAND_ECC_ENGINE_TYPE_SOFT:
		break;
	default:
		return -EINVAL;
	}

	/* set cell size */
	regmap_update_bits(host->regmap, LS1X_NAND_PARAM, LS1X_NAND_CELL_SIZE_MASK,
			   FIELD_PREP(LS1X_NAND_CELL_SIZE_MASK, cell_size));

	regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_HOLD_CYCLE_MASK,
			   FIELD_PREP(LS1X_NAND_HOLD_CYCLE_MASK, host->data->hold_cycle));

	regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_WAIT_CYCLE_MASK,
			   FIELD_PREP(LS1X_NAND_WAIT_CYCLE_MASK, host->data->wait_cycle));

	chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
	chip->ecc.write_page_raw = nand_monolithic_write_page_raw;

	return 0;
}

static const struct nand_controller_ops ls1x_nand_controller_ops = {
	.exec_op = ls1x_nand_exec_op,
	.attach_chip = ls1x_nand_attach_chip,
};

static void ls1x_nand_controller_cleanup(struct ls1x_nand_host *host)
{
	if (host->dma_chan)
		dma_release_channel(host->dma_chan);
}

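/*
 * Controller setup: MMIO regmap plus one "rxtx" DMA channel. The
 * controller exposes a single DMA data port, so the same address is
 * configured for both transfer directions.
 */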
static int ls1x_nand_controller_init(struct ls1x_nand_host *host)
{
	struct device *dev = host->dev;
	struct dma_chan *chan;
	struct dma_slave_config cfg = {};
	int ret;

	host->regmap = devm_regmap_init_mmio(dev, host->reg_base, &ls1x_nand_regmap_config);
	if (IS_ERR(host->regmap))
		return dev_err_probe(dev, PTR_ERR(host->regmap), "failed to init regmap\n");

	chan = dma_request_chan(dev, "rxtx");
	if (IS_ERR(chan))
		return dev_err_probe(dev, PTR_ERR(chan), "failed to request DMA channel\n");
	host->dma_chan = chan;

	cfg.src_addr = host->dma_base;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr = host->dma_base;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	ret = dmaengine_slave_config(host->dma_chan, &cfg);
	if (ret)
		return dev_err_probe(dev, ret, "failed to config DMA channel\n");

	init_completion(&host->dma_complete);

	return 0;
}

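/*
 * Single-chip setup from the device tree child node. The controller
 * does its own data transfers, so the chip is marked NAND_USES_DMA with
 * no subpage writes; buf_align matches the 16-byte alignment the
 * trigger and DMA paths rely on.
 */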
static int ls1x_nand_chip_init(struct ls1x_nand_host *host)
{
	struct device *dev = host->dev;
	int nchips = of_get_child_count(dev->of_node);
	struct device_node *chip_np;
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	if (nchips != 1)
		return dev_err_probe(dev, -EINVAL, "only one NAND chip is currently supported\n");

	chip_np = of_get_next_child(dev->of_node, NULL);
	if (!chip_np)
		return dev_err_probe(dev, -ENODEV, "failed to get child node for NAND chip\n");

	nand_set_flash_node(chip, chip_np);
	of_node_put(chip_np);
	if (!mtd->name)
		return dev_err_probe(dev, -EINVAL, "Missing MTD label\n");

	nand_set_controller_data(chip, host);
	chip->controller = &host->controller;
	chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_BROKEN_XD;
	chip->buf_align = 16;
	mtd->dev.parent = dev;
	mtd->owner = THIS_MODULE;

	ret = nand_scan(chip, 1);
	if (ret)
		return dev_err_probe(dev, ret, "failed to scan NAND chip\n");

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		nand_cleanup(chip);
		return dev_err_probe(dev, ret, "failed to register MTD device\n");
	}

	return 0;
}

static int ls1x_nand_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct ls1x_nand_data *data;
	struct ls1x_nand_host *host;
	struct resource *res;
	int ret;

	data = of_device_get_match_data(dev);
	if (!data)
		return -ENODEV;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(host->reg_base))
		return PTR_ERR(host->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-dma");
	if (!res)
		return dev_err_probe(dev, -EINVAL, "Missing 'nand-dma' in reg-names property\n");

	host->dma_base = dma_map_resource(dev, res->start, resource_size(res),
					  DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, host->dma_base))
		return -ENXIO;

	host->dev = dev;
	host->data = data;
	host->controller.ops = &ls1x_nand_controller_ops;

	nand_controller_init(&host->controller);

	ret = ls1x_nand_controller_init(host);
	if (ret)
		goto err;

	ret = ls1x_nand_chip_init(host);
	if (ret)
		goto err;

	platform_set_drvdata(pdev, host);

	return 0;
err:
	ls1x_nand_controller_cleanup(host);

	return ret;
}

static void ls1x_nand_remove(struct platform_device *pdev)
{
	struct ls1x_nand_host *host = platform_get_drvdata(pdev);
	struct nand_chip *chip = &host->chip;
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);
	ls1x_nand_controller_cleanup(host);
}

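/*
 * Variant data: LS1B and LS1C place the status byte in different bits
 * of IDH_STATUS, and only the LS1C has the op-scope field in PARAM.
 */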
static const struct ls1x_nand_data ls1b_nand_data = {
	.status_field = GENMASK(15, 8),
	.hold_cycle = 0x2,
	.wait_cycle = 0xc,
	.set_addr = ls1b_nand_set_addr,
};

static const struct ls1x_nand_data ls1c_nand_data = {
	.status_field = GENMASK(23, 16),
	.op_scope_field = GENMASK(29, 16),
	.hold_cycle = 0x2,
	.wait_cycle = 0xc,
	.set_addr = ls1c_nand_set_addr,
};

static const struct of_device_id ls1x_nand_match[] = {
	{
		.compatible = "loongson,ls1b-nand-controller",
		.data = &ls1b_nand_data,
	},
	{
		.compatible = "loongson,ls1c-nand-controller",
		.data = &ls1c_nand_data,
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ls1x_nand_match);

static struct platform_driver ls1x_nand_driver = {
	.probe = ls1x_nand_probe,
	.remove = ls1x_nand_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = ls1x_nand_match,
	},
};

module_platform_driver(ls1x_nand_driver);

MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
MODULE_DESCRIPTION("Loongson-1 NAND Controller Driver");
MODULE_LICENSE("GPL");