// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom_adm.h>
#include <linux/dma/qcom_bam_dma.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mtd/nand-qpic-common.h>

/**
 * qcom_free_bam_transaction() - free the BAM transaction memory
 * @nandc: qpic nand controller
 *
 * This function frees the BAM transaction memory.
 */
void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	kfree(bam_txn);
}
EXPORT_SYMBOL(qcom_free_bam_transaction);

/**
 * qcom_alloc_bam_transaction() - allocate BAM transaction
 * @nandc: qpic nand controller
 *
 * This function will allocate and initialize the BAM transaction structure.
 */
struct bam_transaction *
qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn;
	size_t bam_txn_size;
	unsigned int num_cw = nandc->max_cwperpage;
	void *bam_txn_buf;

	bam_txn_size =
		sizeof(*bam_txn) + num_cw *
		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
		(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
		(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));

	bam_txn_buf = kzalloc(bam_txn_size, GFP_KERNEL);
	if (!bam_txn_buf)
		return NULL;

	bam_txn = bam_txn_buf;
	bam_txn_buf += sizeof(*bam_txn);

	bam_txn->bam_ce = bam_txn_buf;
	bam_txn->bam_ce_nitems = QPIC_PER_CW_CMD_ELEMENTS * num_cw;
	bam_txn_buf += sizeof(*bam_txn->bam_ce) * bam_txn->bam_ce_nitems;

	bam_txn->cmd_sgl = bam_txn_buf;
	bam_txn->cmd_sgl_nitems = QPIC_PER_CW_CMD_SGL * num_cw;
	bam_txn_buf += sizeof(*bam_txn->cmd_sgl) * bam_txn->cmd_sgl_nitems;

	bam_txn->data_sgl = bam_txn_buf;
	bam_txn->data_sgl_nitems = QPIC_PER_CW_DATA_SGL * num_cw;

	init_completion(&bam_txn->txn_done);

	return bam_txn;
}
EXPORT_SYMBOL(qcom_alloc_bam_transaction);
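
/*
 * Usage sketch (illustrative, not called from this file): the transaction
 * is sized for nandc->max_cwperpage, so a caller that changes the per-page
 * codeword count is expected to reallocate it:
 *
 *	qcom_free_bam_transaction(nandc);
 *	nandc->max_cwperpage = cwperpage;
 *	nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
 *	if (!nandc->bam_txn)
 *		return -ENOMEM;
 *
 * Here cwperpage is the caller's new codeword count (hypothetical name).
 */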

/**
 * qcom_clear_bam_transaction() - Clears the BAM transaction
 * @nandc: qpic nand controller
 *
 * This function will clear the BAM transaction indexes.
 */
void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (!nandc->props->supports_bam)
		return;

	memset(&bam_txn->bam_positions, 0, sizeof(bam_txn->bam_positions));
	bam_txn->last_data_desc = NULL;

	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_CMD_SGL);
	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_DATA_SGL);

	reinit_completion(&bam_txn->txn_done);
}
EXPORT_SYMBOL(qcom_clear_bam_transaction);

/**
 * qcom_qpic_bam_dma_done() - Callback for DMA descriptor completion
 * @data: data pointer
 *
 * This function is a callback for DMA descriptor completion.
 */
void qcom_qpic_bam_dma_done(void *data)
{
	struct bam_transaction *bam_txn = data;

	complete(&bam_txn->txn_done);
}
EXPORT_SYMBOL(qcom_qpic_bam_dma_done);

/**
 * qcom_nandc_dev_to_mem() - sync the register read buffer for CPU or device
 * @nandc: qpic nand controller
 * @is_cpu: true to sync for the CPU, false to sync for the device
 *
 * This function performs a DMA sync of the register read buffer, either
 * for the CPU or for the device.
 */
inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
{
	if (!nandc->props->supports_bam)
		return;

	if (is_cpu)
		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
					MAX_REG_RD *
					sizeof(*nandc->reg_read_buf),
					DMA_FROM_DEVICE);
	else
		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
					   MAX_REG_RD *
					   sizeof(*nandc->reg_read_buf),
					   DMA_FROM_DEVICE);
}
EXPORT_SYMBOL(qcom_nandc_dev_to_mem);
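
/*
 * Usage sketch, assuming a caller that has just completed a register-read
 * transaction: sync for the CPU before parsing reg_read_buf, then hand the
 * buffer back to the device for the next transfer:
 *
 *	qcom_nandc_dev_to_mem(nandc, true);
 *	flash_status = le32_to_cpu(nandc->reg_read_buf[0]);
 *	qcom_nandc_dev_to_mem(nandc, false);
 *
 * flash_status is a hypothetical local holding the decoded register value.
 */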

/**
 * qcom_prepare_bam_async_desc() - Prepare DMA descriptor
 * @nandc: qpic nand controller
 * @chan: dma channel
 * @flags: flags to control DMA descriptor preparation
 *
 * This function maps the scatter gather list for DMA transfer and forms the
 * DMA descriptor for BAM. This descriptor will be added to the NAND DMA
 * descriptor queue which will be submitted to the DMA engine.
 */
int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
				struct dma_chan *chan, unsigned long flags)
{
	struct desc_info *desc;
	struct scatterlist *sgl;
	unsigned int sgl_cnt;
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	enum dma_transfer_direction dir_eng;
	struct dma_async_tx_descriptor *dma_desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	if (chan == nandc->cmd_chan) {
		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else if (chan == nandc->tx_chan) {
		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else {
		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	}

	sg_mark_end(sgl + sgl_cnt - 1);
	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
	if (ret == 0) {
		dev_err(nandc->dev, "failure in mapping desc\n");
		kfree(desc);
		return -ENOMEM;
	}

	desc->sgl_cnt = sgl_cnt;
	desc->bam_sgl = sgl;

	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
					   flags);

	if (!dma_desc) {
		dev_err(nandc->dev, "failure in prep desc\n");
		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
		kfree(desc);
		return -EINVAL;
	}

	desc->dma_desc = dma_desc;

	/* update last data/command descriptor */
	if (chan == nandc->cmd_chan)
		bam_txn->last_cmd_desc = dma_desc;
	else
		bam_txn->last_data_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
}
EXPORT_SYMBOL(qcom_prepare_bam_async_desc);
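
/*
 * Worked example (hypothetical numbers): each channel consumes its
 * scatterlist as a sliding window. With cmd_sgl_start == 2 and
 * cmd_sgl_pos == 5, a call for the command channel maps &cmd_sgl[2] with
 * sgl_cnt == 3 and advances cmd_sgl_start to 5, so the next descriptor
 * begins exactly where this one ended.
 */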

/**
 * qcom_prep_bam_dma_desc_cmd() - Prepares the command descriptor for BAM DMA
 * @nandc: qpic nand controller
 * @read: read or write type
 * @reg_off: offset within the controller's data buffer
 * @vaddr: virtual address of the buffer we want to write to
 * @size: DMA transaction size in bytes
 * @flags: flags to control DMA descriptor preparation
 *
 * This function will prepare the command descriptor for BAM DMA
 * which will be used for NAND register reads and writes.
 */
int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
			       int reg_off, const void *vaddr,
			       int size, unsigned int flags)
{
	int bam_ce_size;
	int i, ret;
	struct bam_cmd_element *bam_ce_buffer;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	u32 offset;

	if (bam_txn->bam_ce_pos + size > bam_txn->bam_ce_nitems) {
		dev_err(nandc->dev, "BAM %s array is full\n", "CE");
		return -EINVAL;
	}

	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];

	/* fill the command desc */
	for (i = 0; i < size; i++) {
		offset = nandc->props->bam_offset + reg_off + 4 * i;
		if (read)
			bam_prep_ce(&bam_ce_buffer[i],
				    offset, BAM_READ_COMMAND,
				    reg_buf_dma_addr(nandc,
						     (__le32 *)vaddr + i));
		else
			bam_prep_ce_le32(&bam_ce_buffer[i],
					 offset, BAM_WRITE_COMMAND,
					 *((__le32 *)vaddr + i));
	}

	bam_txn->bam_ce_pos += size;

	/* use the separate sgl after this command */
	if (flags & NAND_BAM_NEXT_SGL) {
		if (bam_txn->cmd_sgl_pos >= bam_txn->cmd_sgl_nitems) {
			dev_err(nandc->dev, "BAM %s array is full\n",
				"CMD sgl");
			return -EINVAL;
		}

		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
		bam_ce_size = (bam_txn->bam_ce_pos -
				bam_txn->bam_ce_start) *
				sizeof(struct bam_cmd_element);
		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
			   bam_ce_buffer, bam_ce_size);
		bam_txn->cmd_sgl_pos++;
		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

		if (flags & NAND_BAM_NWD) {
			ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
							  DMA_PREP_FENCE | DMA_PREP_CMD);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(qcom_prep_bam_dma_desc_cmd);
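
/*
 * A minimal sketch of the grouping flags (register names are from this
 * file; the register values are hypothetical): queue two command elements
 * and close the group so it becomes a single command SGL entry:
 *
 *	__le32 regs[2] = { cmd_val, exec_val };
 *
 *	qcom_prep_bam_dma_desc_cmd(nandc, false, NAND_FLASH_CMD,
 *				   &regs[0], 1, 0);
 *	qcom_prep_bam_dma_desc_cmd(nandc, false, NAND_EXEC_CMD,
 *				   &regs[1], 1, NAND_BAM_NEXT_SGL);
 *
 * With NAND_BAM_NWD also set, the closed group is additionally flushed
 * into a fenced BAM command descriptor right away.
 */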

/**
 * qcom_prep_bam_dma_desc_data() - Prepares the data descriptor for BAM DMA
 * @nandc: qpic nand controller
 * @read: read or write type
 * @vaddr: virtual address of the buffer we want to write to
 * @size: DMA transaction size in bytes
 * @flags: flags to control DMA descriptor preparation
 *
 * This function will prepare the data descriptor for BAM DMA which
 * will be used for NAND data reads and writes.
 */
int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
				const void *vaddr, int size, unsigned int flags)
{
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (read) {
		if (bam_txn->rx_sgl_pos >= bam_txn->data_sgl_nitems) {
			dev_err(nandc->dev, "BAM %s array is full\n", "RX sgl");
			return -EINVAL;
		}

		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
			   vaddr, size);
		bam_txn->rx_sgl_pos++;
	} else {
		if (bam_txn->tx_sgl_pos >= bam_txn->data_sgl_nitems) {
			dev_err(nandc->dev, "BAM %s array is full\n", "TX sgl");
			return -EINVAL;
		}

		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
			   vaddr, size);
		bam_txn->tx_sgl_pos++;

		/*
		 * BAM will only set EOT for DMA_PREP_INTERRUPT, so if this
		 * flag is not set, form the DMA descriptor now
		 */
		if (!(flags & NAND_BAM_NO_EOT)) {
			ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
							  DMA_PREP_INTERRUPT);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(qcom_prep_bam_dma_desc_data);

/**
 * qcom_prep_adm_dma_desc() - Prepare a descriptor for the ADM DMA engine
 * @nandc: qpic nand controller
 * @read: read or write type
 * @reg_off: offset within the controller's data buffer
 * @vaddr: virtual address of the buffer we want to write to
 * @size: ADM DMA transaction size in bytes
 * @flow_control: whether to use peripheral flow control
 *
 * This function will prepare a descriptor for the ADM DMA engine.
 */
int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
			   int reg_off, const void *vaddr, int size,
			   bool flow_control)
{
	struct qcom_adm_peripheral_config periph_conf = {};
	struct dma_async_tx_descriptor *dma_desc;
	struct dma_slave_config slave_conf = {0};
	enum dma_transfer_direction dir_eng;
	struct desc_info *desc;
	struct scatterlist *sgl;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	sgl = &desc->adm_sgl;

	sg_init_one(sgl, vaddr, size);

	if (read) {
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	} else {
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	}

	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
	if (!ret) {
		ret = -ENOMEM;
		goto err;
	}

	slave_conf.device_fc = flow_control;
	if (read) {
		slave_conf.src_maxburst = 16;
		slave_conf.src_addr = nandc->base_dma + reg_off;
		if (nandc->data_crci) {
			periph_conf.crci = nandc->data_crci;
			slave_conf.peripheral_config = &periph_conf;
			slave_conf.peripheral_size = sizeof(periph_conf);
		}
	} else {
		slave_conf.dst_maxburst = 16;
		slave_conf.dst_addr = nandc->base_dma + reg_off;
		if (nandc->cmd_crci) {
			periph_conf.crci = nandc->cmd_crci;
			slave_conf.peripheral_config = &periph_conf;
			slave_conf.peripheral_size = sizeof(periph_conf);
		}
	}

	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
	if (ret) {
		dev_err(nandc->dev, "failed to configure dma channel\n");
		goto err;
	}

	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
	if (!dma_desc) {
		dev_err(nandc->dev, "failed to prepare desc\n");
		ret = -EINVAL;
		goto err;
	}

	desc->dma_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
err:
	kfree(desc);

	return ret;
}
EXPORT_SYMBOL(qcom_prep_adm_dma_desc);

/**
 * qcom_read_reg_dma() - read a given number of registers to the reg_read_buf pointer
 * @nandc: qpic nand controller
 * @first: offset of the first register in the contiguous block
 * @num_regs: number of registers to read
 * @flags: flags to control DMA descriptor preparation
 *
 * This function will prepare a descriptor to read a given number of
 * contiguous registers to the reg_read_buf pointer.
 */
int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
		      int num_regs, unsigned int flags)
{
	bool flow_control = false;
	void *vaddr;

	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
	nandc->reg_read_pos += num_regs;

	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, first);

	if (nandc->props->supports_bam)
		return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
						  num_regs, flags);

	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
		flow_control = true;

	return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
				      num_regs * sizeof(u32), flow_control);
}
EXPORT_SYMBOL(qcom_read_reg_dma);
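
/*
 * Usage sketch (mirrors how a QPIC NAND driver would consume this API; the
 * surrounding operation and the ret/status locals are assumed): queue a
 * status read, submit, then parse reg_read_buf after syncing for the CPU:
 *
 *	qcom_clear_read_regs(nandc);
 *	qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
 *	ret = qcom_submit_descs(nandc);
 *	if (!ret) {
 *		qcom_nandc_dev_to_mem(nandc, true);
 *		status = le32_to_cpu(nandc->reg_read_buf[0]);
 *	}
 */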

/**
 * qcom_write_reg_dma() - write a given number of registers
 * @nandc: qpic nand controller
 * @vaddr: contiguous memory from where register values will
 *	   be written
 * @first: offset of the first register in the contiguous block
 * @num_regs: number of registers to write
 * @flags: flags to control DMA descriptor preparation
 *
 * This function will prepare a descriptor to write a given number of
 * contiguous registers.
 */
int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
		       int first, int num_regs, unsigned int flags)
{
	bool flow_control = false;

	if (first == NAND_EXEC_CMD)
		flags |= NAND_BAM_NWD;

	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

	if (nandc->props->supports_bam)
		return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
						  num_regs, flags);

	if (first == NAND_FLASH_CMD)
		flow_control = true;

	return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
				      num_regs * sizeof(u32), flow_control);
}
EXPORT_SYMBOL(qcom_write_reg_dma);

/**
 * qcom_read_data_dma() - transfer data
 * @nandc: qpic nand controller
 * @reg_off: offset within the controller's data buffer
 * @vaddr: virtual address of the buffer we want to write to
 * @size: DMA transaction size in bytes
 * @flags: flags to control DMA descriptor preparation
 *
 * This function will prepare a DMA descriptor to transfer data from the
 * controller's internal buffer to the buffer 'vaddr'.
 */
int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
		       const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->supports_bam)
		return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);

	return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}
EXPORT_SYMBOL(qcom_read_data_dma);

/**
 * qcom_write_data_dma() - transfer data
 * @nandc: qpic nand controller
 * @reg_off: offset within the controller's data buffer
 * @vaddr: virtual address of the buffer we want to read from
 * @size: DMA transaction size in bytes
 * @flags: flags to control DMA descriptor preparation
 *
 * This function will prepare a DMA descriptor to transfer data from
 * 'vaddr' to the controller's internal buffer.
 */
int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->supports_bam)
		return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);

	return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}
EXPORT_SYMBOL(qcom_write_data_dma);
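
/*
 * Data-path sketch (the FLASH_BUF_ACC offset and the chunk sizes are
 * illustrative assumptions): stage a codeword write in two chunks,
 * suppressing EOT on the first so both land in one BAM descriptor:
 *
 *	qcom_write_data_dma(nandc, FLASH_BUF_ACC, data_buf, 516,
 *			    NAND_BAM_NO_EOT);
 *	qcom_write_data_dma(nandc, FLASH_BUF_ACC + 516, oob_buf, 16, 0);
 *
 * data_buf and oob_buf are hypothetical caller-provided buffers.
 */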

/**
 * qcom_submit_descs() - submit DMA descriptors
 * @nandc: qpic nand controller
 *
 * This function will submit all the prepared DMA descriptors,
 * command or data.
 */
int qcom_submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc, *n;
	dma_cookie_t cookie = 0;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	int ret = 0;

	if (nandc->props->supports_bam) {
		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
			ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
			if (ret)
				goto err_unmap_free_desc;
		}

		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
			ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
							  DMA_PREP_INTERRUPT);
			if (ret)
				goto err_unmap_free_desc;
		}

		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
			ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
							  DMA_PREP_CMD);
			if (ret)
				goto err_unmap_free_desc;
		}
	}

	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	if (nandc->props->supports_bam) {
		bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
		bam_txn->last_cmd_desc->callback_param = bam_txn;

		dma_async_issue_pending(nandc->tx_chan);
		dma_async_issue_pending(nandc->rx_chan);
		dma_async_issue_pending(nandc->cmd_chan);

		if (!wait_for_completion_timeout(&bam_txn->txn_done,
						 QPIC_NAND_COMPLETION_TIMEOUT))
			ret = -ETIMEDOUT;
	} else {
		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
			ret = -ETIMEDOUT;
	}

err_unmap_free_desc:
	/*
	 * Unmap the dma sg_list and free the desc allocated by both
	 * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
	 */
	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
		list_del(&desc->node);

		if (nandc->props->supports_bam)
			dma_unmap_sg(nandc->dev, desc->bam_sgl,
				     desc->sgl_cnt, desc->dir);
		else
			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
				     desc->dir);

		kfree(desc);
	}

	return ret;
}
EXPORT_SYMBOL(qcom_submit_descs);
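
/*
 * End-to-end sketch of one NAND operation built from the helpers above
 * (the descriptor-queueing step is elided; this is not a complete
 * sequence):
 *
 *	qcom_clear_read_regs(nandc);
 *	qcom_clear_bam_transaction(nandc);
 *	... queue register and data descriptors ...
 *	ret = qcom_submit_descs(nandc);
 *	if (ret)
 *		dev_err(nandc->dev, "failure in NAND operation\n");
 */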

/**
 * qcom_clear_read_regs() - reset the read register buffer
 * @nandc: qpic nand controller
 *
 * This function resets the register read buffer for the next NAND operation.
 */
void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	qcom_nandc_dev_to_mem(nandc, false);
}
EXPORT_SYMBOL(qcom_clear_read_regs);

/**
 * qcom_nandc_unalloc() - free qpic nand controller resources
 * @nandc: qpic nand controller
 *
 * This function will free the memory and DMA resources allocated for the
 * qpic nand controller.
 */
void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
{
	if (nandc->props->supports_bam) {
		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
					 MAX_REG_RD *
					 sizeof(*nandc->reg_read_buf),
					 DMA_FROM_DEVICE);

		if (nandc->tx_chan)
			dma_release_channel(nandc->tx_chan);

		if (nandc->rx_chan)
			dma_release_channel(nandc->rx_chan);

		if (nandc->cmd_chan)
			dma_release_channel(nandc->cmd_chan);
	} else {
		if (nandc->chan)
			dma_release_channel(nandc->chan);
	}
}
EXPORT_SYMBOL(qcom_nandc_unalloc);

/**
 * qcom_nandc_alloc() - Allocate qpic nand controller resources
 * @nandc: qpic nand controller
 *
 * This function will allocate memory and request DMA channels for the
 * qpic nand controller.
 */
int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
	int ret;

	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(nandc->dev, "failed to set DMA mask\n");
		return ret;
	}

	/*
	 * we use the internal buffer for reading ONFI params, reading small
	 * data like ID and status, and performing read-copy-write operations
	 * when writing to a codeword partially. 532 is the maximum possible
	 * size of a codeword for our nand controller
	 */
	nandc->buf_size = 532;

	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
	if (!nandc->data_buffer)
		return -ENOMEM;

	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
	if (!nandc->regs)
		return -ENOMEM;

	nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
					   sizeof(*nandc->reg_read_buf),
					   GFP_KERNEL);
	if (!nandc->reg_read_buf)
		return -ENOMEM;

	if (nandc->props->supports_bam) {
		nandc->reg_read_dma =
			dma_map_single(nandc->dev, nandc->reg_read_buf,
				       MAX_REG_RD *
				       sizeof(*nandc->reg_read_buf),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
			return -EIO;
		}

		nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
		if (IS_ERR(nandc->tx_chan)) {
			ret = PTR_ERR(nandc->tx_chan);
			nandc->tx_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "tx DMA channel request failed\n");
			goto unalloc;
		}

		nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
		if (IS_ERR(nandc->rx_chan)) {
			ret = PTR_ERR(nandc->rx_chan);
			nandc->rx_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "rx DMA channel request failed\n");
			goto unalloc;
		}

		nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
		if (IS_ERR(nandc->cmd_chan)) {
			ret = PTR_ERR(nandc->cmd_chan);
			nandc->cmd_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "cmd DMA channel request failed\n");
			goto unalloc;
		}

		/*
		 * Initially allocate BAM transaction to read ONFI param page.
		 * After detecting all the devices, this BAM transaction will
		 * be freed and the next BAM transaction will be allocated with
		 * maximum codeword size
		 */
		nandc->max_cwperpage = 1;
		nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			ret = -ENOMEM;
			goto unalloc;
		}
	} else {
		nandc->chan = dma_request_chan(nandc->dev, "rxtx");
		if (IS_ERR(nandc->chan)) {
			ret = PTR_ERR(nandc->chan);
			nandc->chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "rxtx DMA channel request failed\n");
			return ret;
		}
	}

	INIT_LIST_HEAD(&nandc->desc_list);
	INIT_LIST_HEAD(&nandc->host_list);

	return 0;
unalloc:
	qcom_nandc_unalloc(nandc);
	return ret;
}
EXPORT_SYMBOL(qcom_nandc_alloc);
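
/*
 * Probe-time sketch (hypothetical caller): qcom_nandc_alloc() pairs with
 * qcom_nandc_unalloc() on the probe error path and at driver removal:
 *
 *	ret = qcom_nandc_alloc(nandc);
 *	if (ret)
 *		return ret;
 *	...
 *	qcom_nandc_unalloc(nandc);
 */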

MODULE_DESCRIPTION("QPIC controller common api");
MODULE_LICENSE("GPL");