xref: /linux/drivers/misc/xilinx_sdfec.c (revision 8e07e0e3964ca4e23ce7b68e2096fe660a888942)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Xilinx SDFEC
4  *
5  * Copyright (C) 2019 Xilinx, Inc.
6  *
7  * Description:
8  * This driver is developed for SDFEC16 (Soft Decision FEC 16nm)
9  * IP. It exposes a char device which supports file operations
10  * like  open(), close() and ioctl().
11  */
12 
13 #include <linux/miscdevice.h>
14 #include <linux/io.h>
15 #include <linux/interrupt.h>
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/of.h>
19 #include <linux/platform_device.h>
20 #include <linux/poll.h>
21 #include <linux/slab.h>
22 #include <linux/clk.h>
23 #include <linux/compat.h>
24 #include <linux/highmem.h>
25 
26 #include <uapi/misc/xilinx_sdfec.h>
27 
28 #define DEV_NAME_LEN 12
29 
30 static DEFINE_IDA(dev_nrs);
31 
32 /* Xilinx SDFEC Register Map */
33 /* CODE_WRI_PROTECT Register */
34 #define XSDFEC_CODE_WR_PROTECT_ADDR (0x4)
35 
36 /* ACTIVE Register */
37 #define XSDFEC_ACTIVE_ADDR (0x8)
38 #define XSDFEC_IS_ACTIVITY_SET (0x1)
39 
40 /* AXIS_WIDTH Register */
41 #define XSDFEC_AXIS_WIDTH_ADDR (0xC)
42 #define XSDFEC_AXIS_DOUT_WORDS_LSB (5)
43 #define XSDFEC_AXIS_DOUT_WIDTH_LSB (3)
44 #define XSDFEC_AXIS_DIN_WORDS_LSB (2)
45 #define XSDFEC_AXIS_DIN_WIDTH_LSB (0)
46 
47 /* AXIS_ENABLE Register */
48 #define XSDFEC_AXIS_ENABLE_ADDR (0x10)
49 #define XSDFEC_AXIS_OUT_ENABLE_MASK (0x38)
50 #define XSDFEC_AXIS_IN_ENABLE_MASK (0x7)
51 #define XSDFEC_AXIS_ENABLE_MASK                                                \
52 	(XSDFEC_AXIS_OUT_ENABLE_MASK | XSDFEC_AXIS_IN_ENABLE_MASK)
53 
54 /* FEC_CODE Register */
55 #define XSDFEC_FEC_CODE_ADDR (0x14)
56 
57 /* ORDER Register Map */
58 #define XSDFEC_ORDER_ADDR (0x18)
59 
60 /* Interrupt Status Register */
61 #define XSDFEC_ISR_ADDR (0x1C)
62 /* Interrupt Status Register Bit Mask */
63 #define XSDFEC_ISR_MASK (0x3F)
64 
65 /* Write Only - Interrupt Enable Register */
66 #define XSDFEC_IER_ADDR (0x20)
67 /* Write Only - Interrupt Disable Register */
68 #define XSDFEC_IDR_ADDR (0x24)
69 /* Read Only - Interrupt Mask Register */
70 #define XSDFEC_IMR_ADDR (0x28)
71 
72 /* ECC Interrupt Status Register */
73 #define XSDFEC_ECC_ISR_ADDR (0x2C)
74 /* Single Bit Errors */
75 #define XSDFEC_ECC_ISR_SBE_MASK (0x7FF)
76 /* PL Initialize Single Bit Errors */
77 #define XSDFEC_PL_INIT_ECC_ISR_SBE_MASK (0x3C00000)
78 /* Multi Bit Errors */
79 #define XSDFEC_ECC_ISR_MBE_MASK (0x3FF800)
80 /* PL Initialize Multi Bit Errors */
81 #define XSDFEC_PL_INIT_ECC_ISR_MBE_MASK (0x3C000000)
82 /* Multi Bit Error to Event Shift */
83 #define XSDFEC_ECC_ISR_MBE_TO_EVENT_SHIFT (11)
84 /* PL Initialize Multi Bit Error to Event Shift */
85 #define XSDFEC_PL_INIT_ECC_ISR_MBE_TO_EVENT_SHIFT (4)
86 /* ECC Interrupt Status Bit Mask */
87 #define XSDFEC_ECC_ISR_MASK (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_ECC_ISR_MBE_MASK)
88 /* ECC Interrupt Status PL Initialize Bit Mask */
89 #define XSDFEC_PL_INIT_ECC_ISR_MASK                                            \
90 	(XSDFEC_PL_INIT_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
91 /* ECC Interrupt Status All Bit Mask */
92 #define XSDFEC_ALL_ECC_ISR_MASK                                                \
93 	(XSDFEC_ECC_ISR_MASK | XSDFEC_PL_INIT_ECC_ISR_MASK)
94 /* ECC Interrupt Status Single Bit Errors Mask */
95 #define XSDFEC_ALL_ECC_ISR_SBE_MASK                                            \
96 	(XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_SBE_MASK)
97 /* ECC Interrupt Status Multi Bit Errors Mask */
98 #define XSDFEC_ALL_ECC_ISR_MBE_MASK                                            \
99 	(XSDFEC_ECC_ISR_MBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
100 
101 /* Write Only - ECC Interrupt Enable Register */
102 #define XSDFEC_ECC_IER_ADDR (0x30)
103 /* Write Only - ECC Interrupt Disable Register */
104 #define XSDFEC_ECC_IDR_ADDR (0x34)
105 /* Read Only - ECC Interrupt Mask Register */
106 #define XSDFEC_ECC_IMR_ADDR (0x38)
107 
108 /* BYPASS Register */
109 #define XSDFEC_BYPASS_ADDR (0x3C)
110 
111 /* Turbo Code Register */
112 #define XSDFEC_TURBO_ADDR (0x100)
113 #define XSDFEC_TURBO_SCALE_MASK (0xFFF)
114 #define XSDFEC_TURBO_SCALE_BIT_POS (8)
115 #define XSDFEC_TURBO_SCALE_MAX (15)
116 
117 /* REG0 Register */
118 #define XSDFEC_LDPC_CODE_REG0_ADDR_BASE (0x2000)
119 #define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH (0x27F0)
120 #define XSDFEC_REG0_N_MIN (4)
121 #define XSDFEC_REG0_N_MAX (32768)
122 #define XSDFEC_REG0_N_MUL_P (256)
123 #define XSDFEC_REG0_N_LSB (0)
124 #define XSDFEC_REG0_K_MIN (2)
125 #define XSDFEC_REG0_K_MAX (32766)
126 #define XSDFEC_REG0_K_MUL_P (256)
127 #define XSDFEC_REG0_K_LSB (16)
128 
129 /* REG1 Register */
130 #define XSDFEC_LDPC_CODE_REG1_ADDR_BASE (0x2004)
131 #define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH (0x27f4)
132 #define XSDFEC_REG1_PSIZE_MIN (2)
133 #define XSDFEC_REG1_PSIZE_MAX (512)
134 #define XSDFEC_REG1_NO_PACKING_MASK (0x400)
135 #define XSDFEC_REG1_NO_PACKING_LSB (10)
136 #define XSDFEC_REG1_NM_MASK (0xFF800)
137 #define XSDFEC_REG1_NM_LSB (11)
138 #define XSDFEC_REG1_BYPASS_MASK (0x100000)
139 
140 /* REG2 Register */
141 #define XSDFEC_LDPC_CODE_REG2_ADDR_BASE (0x2008)
142 #define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH (0x27f8)
143 #define XSDFEC_REG2_NLAYERS_MIN (1)
144 #define XSDFEC_REG2_NLAYERS_MAX (256)
145 #define XSDFEC_REG2_NNMQC_MASK (0xFFE00)
146 #define XSDFEC_REG2_NMQC_LSB (9)
147 #define XSDFEC_REG2_NORM_TYPE_MASK (0x100000)
148 #define XSDFEC_REG2_NORM_TYPE_LSB (20)
149 #define XSDFEC_REG2_SPECIAL_QC_MASK (0x200000)
150 #define XSDFEC_REG2_SPEICAL_QC_LSB (21)
151 #define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x400000)
152 #define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22)
153 #define XSDFEC_REG2_MAX_SCHEDULE_MASK (0x1800000)
154 #define XSDFEC_REG2_MAX_SCHEDULE_LSB (23)
155 
156 /* REG3 Register */
157 #define XSDFEC_LDPC_CODE_REG3_ADDR_BASE (0x200C)
158 #define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH (0x27FC)
159 #define XSDFEC_REG3_LA_OFF_LSB (8)
160 #define XSDFEC_REG3_QC_OFF_LSB (16)
161 
162 #define XSDFEC_LDPC_REG_JUMP (0x10)
163 #define XSDFEC_REG_WIDTH_JUMP (4)
164 
165 /* The maximum number of pinned pages */
166 #define MAX_NUM_PAGES ((XSDFEC_QC_TABLE_DEPTH / PAGE_SIZE) + 1)
167 
/**
 * struct xsdfec_clks - For managing SD-FEC clocks
 * @core_clk: Main processing clock for core
 * @axi_clk: AXI4-Lite memory-mapped clock
 * @din_words_clk: DIN Words AXI4-Stream Slave clock
 * @din_clk: DIN AXI4-Stream Slave clock
 * @dout_clk: DOUT AXI4-Stream Slave clock
 * @dout_words_clk: DOUT Words AXI4-Stream Slave clock
 * @ctrl_clk: Control AXI4-Stream Slave clock
 * @status_clk: Status AXI4-Stream Slave clock
 */
struct xsdfec_clks {
	struct clk *core_clk;
	struct clk *axi_clk;
	struct clk *din_words_clk;
	struct clk *din_clk;
	struct clk *dout_clk;
	struct clk *dout_words_clk;
	struct clk *ctrl_clk;
	struct clk *status_clk;
};
189 
/**
 * struct xsdfec_dev - Driver data for SDFEC
 * @miscdev: Misc device handle
 * @clks: Clocks managed by the SDFEC driver
 * @waitq: Driver wait queue
 * @config: Configuration of the SDFEC device
 * @dev_name: Device name
 * @flags: saved IRQ flags for @error_data_lock (NOTE(review): keeping the
 *         saved flags in the shared device struct means concurrent lockers
 *         can overwrite each other's value — a per-call local would be safer)
 * @regs: device physical base address
 * @dev: pointer to device struct
 * @state: State of the SDFEC device
 * @error_data_lock: Error counter and states spinlock
 * @dev_id: Device ID
 * @isr_err_count: Count of ISR errors
 * @cecc_count: Count of Correctable ECC errors (SBE)
 * @uecc_count: Count of Uncorrectable ECC errors (MBE)
 * @irq: IRQ number
 * @state_updated: indicates State updated by interrupt handler
 * @stats_updated: indicates Stats updated by interrupt handler
 * @intr_enabled: indicates IRQ enabled
 *
 * This structure contains necessary state for SDFEC driver to operate
 */
struct xsdfec_dev {
	struct miscdevice miscdev;
	struct xsdfec_clks clks;
	wait_queue_head_t waitq;
	struct xsdfec_config config;
	char dev_name[DEV_NAME_LEN];
	unsigned long flags;
	void __iomem *regs;
	struct device *dev;
	enum xsdfec_state state;
	/* Spinlock to protect state_updated and stats_updated */
	spinlock_t error_data_lock;
	int dev_id;
	u32 isr_err_count;
	u32 cecc_count;
	u32 uecc_count;
	int irq;
	bool state_updated;
	bool stats_updated;
	bool intr_enabled;
};
234 
/* Write @value to the device register at byte offset @addr (with debug trace) */
static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr,
				   u32 value)
{
	dev_dbg(xsdfec->dev, "Writing 0x%x to offset 0x%x", value, addr);
	iowrite32(value, xsdfec->regs + addr);
}
241 
/* Read and return the device register at byte offset @addr (with debug trace) */
static inline u32 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
{
	u32 rval;

	rval = ioread32(xsdfec->regs + addr);
	dev_dbg(xsdfec->dev, "Read value = 0x%x from offset 0x%x", rval, addr);
	return rval;
}
250 
251 static void update_bool_config_from_reg(struct xsdfec_dev *xsdfec,
252 					u32 reg_offset, u32 bit_num,
253 					char *config_value)
254 {
255 	u32 reg_val;
256 	u32 bit_mask = 1 << bit_num;
257 
258 	reg_val = xsdfec_regread(xsdfec, reg_offset);
259 	*config_value = (reg_val & bit_mask) > 0;
260 }
261 
262 static void update_config_from_hw(struct xsdfec_dev *xsdfec)
263 {
264 	u32 reg_value;
265 	bool sdfec_started;
266 
267 	/* Update the Order */
268 	reg_value = xsdfec_regread(xsdfec, XSDFEC_ORDER_ADDR);
269 	xsdfec->config.order = reg_value;
270 
271 	update_bool_config_from_reg(xsdfec, XSDFEC_BYPASS_ADDR,
272 				    0, /* Bit Number, maybe change to mask */
273 				    &xsdfec->config.bypass);
274 
275 	update_bool_config_from_reg(xsdfec, XSDFEC_CODE_WR_PROTECT_ADDR,
276 				    0, /* Bit Number */
277 				    &xsdfec->config.code_wr_protect);
278 
279 	reg_value = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
280 	xsdfec->config.irq.enable_isr = (reg_value & XSDFEC_ISR_MASK) > 0;
281 
282 	reg_value = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
283 	xsdfec->config.irq.enable_ecc_isr =
284 		(reg_value & XSDFEC_ECC_ISR_MASK) > 0;
285 
286 	reg_value = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
287 	sdfec_started = (reg_value & XSDFEC_AXIS_IN_ENABLE_MASK) > 0;
288 	if (sdfec_started)
289 		xsdfec->state = XSDFEC_STARTED;
290 	else
291 		xsdfec->state = XSDFEC_STOPPED;
292 }
293 
294 static int xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
295 {
296 	struct xsdfec_status status;
297 	int err;
298 
299 	memset(&status, 0, sizeof(status));
300 	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
301 	status.state = xsdfec->state;
302 	xsdfec->state_updated = false;
303 	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
304 	status.activity = (xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR) &
305 			   XSDFEC_IS_ACTIVITY_SET);
306 
307 	err = copy_to_user(arg, &status, sizeof(status));
308 	if (err)
309 		err = -EFAULT;
310 
311 	return err;
312 }
313 
314 static int xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
315 {
316 	int err;
317 
318 	err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config));
319 	if (err)
320 		err = -EFAULT;
321 
322 	return err;
323 }
324 
325 static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
326 {
327 	u32 mask_read;
328 
329 	if (enable) {
330 		/* Enable */
331 		xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK);
332 		mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
333 		if (mask_read & XSDFEC_ISR_MASK) {
334 			dev_dbg(xsdfec->dev,
335 				"SDFEC enabling irq with IER failed");
336 			return -EIO;
337 		}
338 	} else {
339 		/* Disable */
340 		xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK);
341 		mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
342 		if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
343 			dev_dbg(xsdfec->dev,
344 				"SDFEC disabling irq with IDR failed");
345 			return -EIO;
346 		}
347 	}
348 	return 0;
349 }
350 
/*
 * Enable or disable the ECC interrupts via ECC IER/IDR, then read the ECC
 * IMR back to verify.  Returns 0 on success, -EIO if the readback does not
 * reflect the requested state.
 */
static int xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
{
	u32 mask_read;

	if (enable) {
		/* Enable: after IER, no ECC bit may remain masked */
		xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
				XSDFEC_ALL_ECC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
		if (mask_read & XSDFEC_ALL_ECC_ISR_MASK) {
			dev_dbg(xsdfec->dev,
				"SDFEC enabling ECC irq with ECC IER failed");
			return -EIO;
		}
	} else {
		/* Disable */
		xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
				XSDFEC_ALL_ECC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
		/*
		 * Accept either the plain ECC mask or the PL-initialize
		 * ECC mask reading back as fully set.  NOTE(review):
		 * presumably which bank reads back depends on the core's
		 * build-time configuration — confirm against the SD-FEC
		 * register specification.
		 */
		if (!(((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
		       XSDFEC_ECC_ISR_MASK) ||
		      ((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
		       XSDFEC_PL_INIT_ECC_ISR_MASK))) {
			dev_dbg(xsdfec->dev,
				"SDFEC disable ECC irq with ECC IDR failed");
			return -EIO;
		}
	}
	return 0;
}
381 
382 static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
383 {
384 	struct xsdfec_irq irq;
385 	int err;
386 	int isr_err;
387 	int ecc_err;
388 
389 	err = copy_from_user(&irq, arg, sizeof(irq));
390 	if (err)
391 		return -EFAULT;
392 
393 	/* Setup tlast related IRQ */
394 	isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr);
395 	if (!isr_err)
396 		xsdfec->config.irq.enable_isr = irq.enable_isr;
397 
398 	/* Setup ECC related IRQ */
399 	ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr);
400 	if (!ecc_err)
401 		xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr;
402 
403 	if (isr_err < 0 || ecc_err < 0)
404 		err = -EIO;
405 
406 	return err;
407 }
408 
409 static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
410 {
411 	struct xsdfec_turbo turbo;
412 	int err;
413 	u32 turbo_write;
414 
415 	err = copy_from_user(&turbo, arg, sizeof(turbo));
416 	if (err)
417 		return -EFAULT;
418 
419 	if (turbo.alg >= XSDFEC_TURBO_ALG_MAX)
420 		return -EINVAL;
421 
422 	if (turbo.scale > XSDFEC_TURBO_SCALE_MAX)
423 		return -EINVAL;
424 
425 	/* Check to see what device tree says about the FEC codes */
426 	if (xsdfec->config.code == XSDFEC_LDPC_CODE)
427 		return -EIO;
428 
429 	turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK)
430 		       << XSDFEC_TURBO_SCALE_BIT_POS) |
431 		      turbo.alg;
432 	xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);
433 	return err;
434 }
435 
436 static int xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
437 {
438 	u32 reg_value;
439 	struct xsdfec_turbo turbo_params;
440 	int err;
441 
442 	if (xsdfec->config.code == XSDFEC_LDPC_CODE)
443 		return -EIO;
444 
445 	memset(&turbo_params, 0, sizeof(turbo_params));
446 	reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);
447 
448 	turbo_params.scale = (reg_value & XSDFEC_TURBO_SCALE_MASK) >>
449 			     XSDFEC_TURBO_SCALE_BIT_POS;
450 	turbo_params.alg = reg_value & 0x1;
451 
452 	err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
453 	if (err)
454 		err = -EFAULT;
455 
456 	return err;
457 }
458 
459 static int xsdfec_reg0_write(struct xsdfec_dev *xsdfec, u32 n, u32 k, u32 psize,
460 			     u32 offset)
461 {
462 	u32 wdata;
463 
464 	if (n < XSDFEC_REG0_N_MIN || n > XSDFEC_REG0_N_MAX || psize == 0 ||
465 	    (n > XSDFEC_REG0_N_MUL_P * psize) || n <= k || ((n % psize) != 0)) {
466 		dev_dbg(xsdfec->dev, "N value is not in range");
467 		return -EINVAL;
468 	}
469 	n <<= XSDFEC_REG0_N_LSB;
470 
471 	if (k < XSDFEC_REG0_K_MIN || k > XSDFEC_REG0_K_MAX ||
472 	    (k > XSDFEC_REG0_K_MUL_P * psize) || ((k % psize) != 0)) {
473 		dev_dbg(xsdfec->dev, "K value is not in range");
474 		return -EINVAL;
475 	}
476 	k = k << XSDFEC_REG0_K_LSB;
477 	wdata = k | n;
478 
479 	if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
480 	    XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
481 		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg0 space 0x%x",
482 			XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
483 				(offset * XSDFEC_LDPC_REG_JUMP));
484 		return -EINVAL;
485 	}
486 	xsdfec_regwrite(xsdfec,
487 			XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
488 				(offset * XSDFEC_LDPC_REG_JUMP),
489 			wdata);
490 	return 0;
491 }
492 
493 static int xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
494 			     u32 no_packing, u32 nm, u32 offset)
495 {
496 	u32 wdata;
497 
498 	if (psize < XSDFEC_REG1_PSIZE_MIN || psize > XSDFEC_REG1_PSIZE_MAX) {
499 		dev_dbg(xsdfec->dev, "Psize is not in range");
500 		return -EINVAL;
501 	}
502 
503 	if (no_packing != 0 && no_packing != 1)
504 		dev_dbg(xsdfec->dev, "No-packing bit register invalid");
505 	no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
506 		      XSDFEC_REG1_NO_PACKING_MASK);
507 
508 	if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
509 		dev_dbg(xsdfec->dev, "NM is beyond 10 bits");
510 	nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;
511 
512 	wdata = nm | no_packing | psize;
513 	if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
514 	    XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
515 		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg1 space 0x%x",
516 			XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
517 				(offset * XSDFEC_LDPC_REG_JUMP));
518 		return -EINVAL;
519 	}
520 	xsdfec_regwrite(xsdfec,
521 			XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
522 				(offset * XSDFEC_LDPC_REG_JUMP),
523 			wdata);
524 	return 0;
525 }
526 
/*
 * Validate and program LDPC REG2 (nlayers plus five packed option fields)
 * for code table slot @offset.  Only nlayers is a hard error; the other
 * out-of-range fields are logged via dev_dbg and then masked into their
 * register positions.  Returns 0 or -EINVAL.
 */
static int xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
			     u32 norm_type, u32 special_qc, u32 no_final_parity,
			     u32 max_schedule, u32 offset)
{
	u32 wdata;

	if (nlayers < XSDFEC_REG2_NLAYERS_MIN ||
	    nlayers > XSDFEC_REG2_NLAYERS_MAX) {
		dev_dbg(xsdfec->dev, "Nlayers is not in range");
		return -EINVAL;
	}

	/* NMQC occupies bits [19:9]; anything wider is truncated by the mask */
	if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
		dev_dbg(xsdfec->dev, "NMQC exceeds 11 bits");
	nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK;

	/* Single-bit option fields: norm type, special QC, no final parity */
	if (norm_type > 1)
		dev_dbg(xsdfec->dev, "Norm type is invalid");
	norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
		     XSDFEC_REG2_NORM_TYPE_MASK);
	if (special_qc > 1)
		dev_dbg(xsdfec->dev, "Special QC in invalid");
	special_qc = ((special_qc << XSDFEC_REG2_SPEICAL_QC_LSB) &
		      XSDFEC_REG2_SPECIAL_QC_MASK);

	if (no_final_parity > 1)
		dev_dbg(xsdfec->dev, "No final parity check invalid");
	no_final_parity =
		((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
		 XSDFEC_REG2_NO_FINAL_PARITY_MASK);
	/* Max schedule is a 2-bit field at bits [24:23] */
	if (max_schedule &
	    ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >> XSDFEC_REG2_MAX_SCHEDULE_LSB))
		dev_dbg(xsdfec->dev, "Max Schedule exceeds 2 bits");
	max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) &
			XSDFEC_REG2_MAX_SCHEDULE_MASK);

	wdata = (max_schedule | no_final_parity | special_qc | norm_type |
		 nmqc | nlayers);

	/* Bounds-check the target register before touching the hardware */
	if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg2 space 0x%x",
			XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}
579 
580 static int xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off, u8 la_off,
581 			     u16 qc_off, u32 offset)
582 {
583 	u32 wdata;
584 
585 	wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) |
586 		 (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off);
587 	if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
588 	    XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
589 		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg3 space 0x%x",
590 			XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
591 				(offset * XSDFEC_LDPC_REG_JUMP));
592 		return -EINVAL;
593 	}
594 	xsdfec_regwrite(xsdfec,
595 			XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
596 				(offset * XSDFEC_LDPC_REG_JUMP),
597 			wdata);
598 	return 0;
599 }
600 
/*
 * Copy @len 32-bit words from the user buffer @src_ptr into the device
 * table that starts at @base_addr, beginning @offset words in.  The user
 * buffer is accessed by pinning its pages (pin_user_pages_fast) and
 * kmapping them one at a time, so no intermediate kernel copy is needed.
 *
 * Returns 0 on success, -EINVAL when the write would exceed @depth or the
 * user pages cannot be pinned.
 */
static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
			      u32 *src_ptr, u32 len, const u32 base_addr,
			      const u32 depth)
{
	u32 reg = 0;
	int res, i, nr_pages;
	u32 n;
	u32 *addr = NULL;
	struct page *pages[MAX_NUM_PAGES];

	/*
	 * Writes that go beyond the length of
	 * Shared Scale(SC) table should fail
	 */
	if (offset > depth / XSDFEC_REG_WIDTH_JUMP ||
	    len > depth / XSDFEC_REG_WIDTH_JUMP ||
	    offset + len > depth / XSDFEC_REG_WIDTH_JUMP) {
		dev_dbg(xsdfec->dev, "Write exceeds SC table length");
		return -EINVAL;
	}

	/* Number of pages spanned by len words, rounded up */
	n = (len * XSDFEC_REG_WIDTH_JUMP) / PAGE_SIZE;
	if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)
		n += 1;

	if (WARN_ON_ONCE(n > INT_MAX))
		return -EINVAL;

	nr_pages = n;

	res = pin_user_pages_fast((unsigned long)src_ptr, nr_pages, 0, pages);
	if (res < nr_pages) {
		/* Partial pin: release what was pinned before failing */
		if (res > 0)
			unpin_user_pages(pages, res);

		return -EINVAL;
	}

	for (i = 0; i < nr_pages; i++) {
		addr = kmap_local_page(pages[i]);
		/* Write words until len is reached or a page boundary hit */
		do {
			xsdfec_regwrite(xsdfec,
					base_addr + ((offset + reg) *
						     XSDFEC_REG_WIDTH_JUMP),
					addr[reg]);
			reg++;
		} while ((reg < len) &&
			 ((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE));
		kunmap_local(addr);
		unpin_user_page(pages[i]);
	}
	return 0;
}
654 
/*
 * XSDFEC_ADD_LDPC_CODE_PARAMS ioctl: copy an LDPC parameter set from user
 * space and program registers REG0..REG3 plus the SC/LA/QC shared tables
 * for the requested code_id.  Rejected with -EIO when the core is Turbo,
 * the device is already started, or code write-protect is on.
 *
 * Return: 0 on success, negative errno from the copy or any register/table
 * write otherwise.
 */
static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_ldpc_params *ldpc;
	int ret, n;

	/* Params struct is large; duplicate it on the heap, freed at err_out */
	ldpc = memdup_user(arg, sizeof(*ldpc));
	if (IS_ERR(ldpc))
		return PTR_ERR(ldpc);

	if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
		ret = -EIO;
		goto err_out;
	}

	/* Verify Device has not started */
	if (xsdfec->state == XSDFEC_STARTED) {
		ret = -EIO;
		goto err_out;
	}

	if (xsdfec->config.code_wr_protect) {
		ret = -EIO;
		goto err_out;
	}

	/* Write Reg 0 */
	ret = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->psize,
				ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Reg 1 */
	ret = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing, ldpc->nm,
				ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Reg 2 */
	ret = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
				ldpc->norm_type, ldpc->special_qc,
				ldpc->no_final_parity, ldpc->max_schedule,
				ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Reg 3 */
	ret = xsdfec_reg3_write(xsdfec, ldpc->sc_off, ldpc->la_off,
				ldpc->qc_off, ldpc->code_id);
	if (ret)
		goto err_out;

	/* SC table holds one word per four layers, rounded up */
	n = ldpc->nlayers / 4;
	if (ldpc->nlayers % 4)
		n++;

	ret = xsdfec_table_write(xsdfec, ldpc->sc_off, ldpc->sc_table, n,
				 XSDFEC_LDPC_SC_TABLE_ADDR_BASE,
				 XSDFEC_SC_TABLE_DEPTH);
	if (ret < 0)
		goto err_out;

	/* LA/QC offsets are in units of 4 words, hence the 4x scaling */
	ret = xsdfec_table_write(xsdfec, 4 * ldpc->la_off, ldpc->la_table,
				 ldpc->nlayers, XSDFEC_LDPC_LA_TABLE_ADDR_BASE,
				 XSDFEC_LA_TABLE_DEPTH);
	if (ret < 0)
		goto err_out;

	ret = xsdfec_table_write(xsdfec, 4 * ldpc->qc_off, ldpc->qc_table,
				 ldpc->nqc, XSDFEC_LDPC_QC_TABLE_ADDR_BASE,
				 XSDFEC_QC_TABLE_DEPTH);
err_out:
	kfree(ldpc);
	return ret;
}
730 
731 static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
732 {
733 	bool order_invalid;
734 	enum xsdfec_order order;
735 	int err;
736 
737 	err = get_user(order, (enum xsdfec_order __user *)arg);
738 	if (err)
739 		return -EFAULT;
740 
741 	order_invalid = (order != XSDFEC_MAINTAIN_ORDER) &&
742 			(order != XSDFEC_OUT_OF_ORDER);
743 	if (order_invalid)
744 		return -EINVAL;
745 
746 	/* Verify Device has not started */
747 	if (xsdfec->state == XSDFEC_STARTED)
748 		return -EIO;
749 
750 	xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, order);
751 
752 	xsdfec->config.order = order;
753 
754 	return 0;
755 }
756 
757 static int xsdfec_set_bypass(struct xsdfec_dev *xsdfec, bool __user *arg)
758 {
759 	bool bypass;
760 	int err;
761 
762 	err = get_user(bypass, arg);
763 	if (err)
764 		return -EFAULT;
765 
766 	/* Verify Device has not started */
767 	if (xsdfec->state == XSDFEC_STARTED)
768 		return -EIO;
769 
770 	if (bypass)
771 		xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 1);
772 	else
773 		xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 0);
774 
775 	xsdfec->config.bypass = bypass;
776 
777 	return 0;
778 }
779 
780 static int xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *arg)
781 {
782 	u32 reg_value;
783 	bool is_active;
784 	int err;
785 
786 	reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
787 	/* using a double ! operator instead of casting */
788 	is_active = !!(reg_value & XSDFEC_IS_ACTIVITY_SET);
789 	err = put_user(is_active, arg);
790 	if (err)
791 		return -EFAULT;
792 
793 	return err;
794 }
795 
796 static u32
797 xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg)
798 {
799 	u32 axis_width_field = 0;
800 
801 	switch (axis_width_cfg) {
802 	case XSDFEC_1x128b:
803 		axis_width_field = 0;
804 		break;
805 	case XSDFEC_2x128b:
806 		axis_width_field = 1;
807 		break;
808 	case XSDFEC_4x128b:
809 		axis_width_field = 2;
810 		break;
811 	}
812 
813 	return axis_width_field;
814 }
815 
816 static u32 xsdfec_translate_axis_words_cfg_val(enum xsdfec_axis_word_include
817 	axis_word_inc_cfg)
818 {
819 	u32 axis_words_field = 0;
820 
821 	if (axis_word_inc_cfg == XSDFEC_FIXED_VALUE ||
822 	    axis_word_inc_cfg == XSDFEC_IN_BLOCK)
823 		axis_words_field = 0;
824 	else if (axis_word_inc_cfg == XSDFEC_PER_AXI_TRANSACTION)
825 		axis_words_field = 1;
826 
827 	return axis_words_field;
828 }
829 
830 static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec)
831 {
832 	u32 reg_value;
833 	u32 dout_words_field;
834 	u32 dout_width_field;
835 	u32 din_words_field;
836 	u32 din_width_field;
837 	struct xsdfec_config *config = &xsdfec->config;
838 
839 	/* translate config info to register values */
840 	dout_words_field =
841 		xsdfec_translate_axis_words_cfg_val(config->dout_word_include);
842 	dout_width_field =
843 		xsdfec_translate_axis_width_cfg_val(config->dout_width);
844 	din_words_field =
845 		xsdfec_translate_axis_words_cfg_val(config->din_word_include);
846 	din_width_field =
847 		xsdfec_translate_axis_width_cfg_val(config->din_width);
848 
849 	reg_value = dout_words_field << XSDFEC_AXIS_DOUT_WORDS_LSB;
850 	reg_value |= dout_width_field << XSDFEC_AXIS_DOUT_WIDTH_LSB;
851 	reg_value |= din_words_field << XSDFEC_AXIS_DIN_WORDS_LSB;
852 	reg_value |= din_width_field << XSDFEC_AXIS_DIN_WIDTH_LSB;
853 
854 	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value);
855 
856 	return 0;
857 }
858 
859 static int xsdfec_start(struct xsdfec_dev *xsdfec)
860 {
861 	u32 regread;
862 
863 	regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
864 	regread &= 0x1;
865 	if (regread != xsdfec->config.code) {
866 		dev_dbg(xsdfec->dev,
867 			"%s SDFEC HW code does not match driver code, reg %d, code %d",
868 			__func__, regread, xsdfec->config.code);
869 		return -EINVAL;
870 	}
871 
872 	/* Set AXIS enable */
873 	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR,
874 			XSDFEC_AXIS_ENABLE_MASK);
875 	/* Done */
876 	xsdfec->state = XSDFEC_STARTED;
877 	return 0;
878 }
879 
880 static int xsdfec_stop(struct xsdfec_dev *xsdfec)
881 {
882 	u32 regread;
883 
884 	if (xsdfec->state != XSDFEC_STARTED)
885 		dev_dbg(xsdfec->dev, "Device not started correctly");
886 	/* Disable AXIS_ENABLE Input interfaces only */
887 	regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
888 	regread &= (~XSDFEC_AXIS_IN_ENABLE_MASK);
889 	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
890 	/* Stop */
891 	xsdfec->state = XSDFEC_STOPPED;
892 	return 0;
893 }
894 
895 static int xsdfec_clear_stats(struct xsdfec_dev *xsdfec)
896 {
897 	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
898 	xsdfec->isr_err_count = 0;
899 	xsdfec->uecc_count = 0;
900 	xsdfec->cecc_count = 0;
901 	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
902 
903 	return 0;
904 }
905 
906 static int xsdfec_get_stats(struct xsdfec_dev *xsdfec, void __user *arg)
907 {
908 	int err;
909 	struct xsdfec_stats user_stats;
910 
911 	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
912 	user_stats.isr_err_count = xsdfec->isr_err_count;
913 	user_stats.cecc_count = xsdfec->cecc_count;
914 	user_stats.uecc_count = xsdfec->uecc_count;
915 	xsdfec->stats_updated = false;
916 	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
917 
918 	err = copy_to_user(arg, &user_stats, sizeof(user_stats));
919 	if (err)
920 		err = -EFAULT;
921 
922 	return err;
923 }
924 
/*
 * XSDFEC_SET_DEFAULT_CONFIG ioctl: push the driver's cached code and AXIS
 * settings into the hardware, then re-read the registers so the cached
 * config and state match what was actually programmed.  Always returns 0.
 */
static int xsdfec_set_default_config(struct xsdfec_dev *xsdfec)
{
	/* Ensure registers are aligned with core configuration */
	xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
	xsdfec_cfg_axi_streams(xsdfec);
	update_config_from_hw(xsdfec);

	return 0;
}
934 
935 static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
936 			     unsigned long data)
937 {
938 	struct xsdfec_dev *xsdfec;
939 	void __user *arg = (void __user *)data;
940 	int rval;
941 
942 	xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev);
943 
944 	/* In failed state allow only reset and get status IOCTLs */
945 	if (xsdfec->state == XSDFEC_NEEDS_RESET &&
946 	    (cmd != XSDFEC_SET_DEFAULT_CONFIG && cmd != XSDFEC_GET_STATUS &&
947 	     cmd != XSDFEC_GET_STATS && cmd != XSDFEC_CLEAR_STATS)) {
948 		return -EPERM;
949 	}
950 
951 	switch (cmd) {
952 	case XSDFEC_START_DEV:
953 		rval = xsdfec_start(xsdfec);
954 		break;
955 	case XSDFEC_STOP_DEV:
956 		rval = xsdfec_stop(xsdfec);
957 		break;
958 	case XSDFEC_CLEAR_STATS:
959 		rval = xsdfec_clear_stats(xsdfec);
960 		break;
961 	case XSDFEC_GET_STATS:
962 		rval = xsdfec_get_stats(xsdfec, arg);
963 		break;
964 	case XSDFEC_GET_STATUS:
965 		rval = xsdfec_get_status(xsdfec, arg);
966 		break;
967 	case XSDFEC_GET_CONFIG:
968 		rval = xsdfec_get_config(xsdfec, arg);
969 		break;
970 	case XSDFEC_SET_DEFAULT_CONFIG:
971 		rval = xsdfec_set_default_config(xsdfec);
972 		break;
973 	case XSDFEC_SET_IRQ:
974 		rval = xsdfec_set_irq(xsdfec, arg);
975 		break;
976 	case XSDFEC_SET_TURBO:
977 		rval = xsdfec_set_turbo(xsdfec, arg);
978 		break;
979 	case XSDFEC_GET_TURBO:
980 		rval = xsdfec_get_turbo(xsdfec, arg);
981 		break;
982 	case XSDFEC_ADD_LDPC_CODE_PARAMS:
983 		rval = xsdfec_add_ldpc(xsdfec, arg);
984 		break;
985 	case XSDFEC_SET_ORDER:
986 		rval = xsdfec_set_order(xsdfec, arg);
987 		break;
988 	case XSDFEC_SET_BYPASS:
989 		rval = xsdfec_set_bypass(xsdfec, arg);
990 		break;
991 	case XSDFEC_IS_ACTIVE:
992 		rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
993 		break;
994 	default:
995 		rval = -ENOTTY;
996 		break;
997 	}
998 	return rval;
999 }
1000 
1001 static __poll_t xsdfec_poll(struct file *file, poll_table *wait)
1002 {
1003 	__poll_t mask = 0;
1004 	struct xsdfec_dev *xsdfec;
1005 
1006 	xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);
1007 
1008 	poll_wait(file, &xsdfec->waitq, wait);
1009 
1010 	/* XSDFEC ISR detected an error */
1011 	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
1012 	if (xsdfec->state_updated)
1013 		mask |= EPOLLIN | EPOLLPRI;
1014 
1015 	if (xsdfec->stats_updated)
1016 		mask |= EPOLLIN | EPOLLRDNORM;
1017 	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
1018 
1019 	return mask;
1020 }
1021 
/* Char-device file operations: ioctl (with compat wrapper) and poll only */
static const struct file_operations xsdfec_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = xsdfec_dev_ioctl,
	.poll = xsdfec_poll,
	.compat_ioctl = compat_ptr_ioctl,
};
1028 
1029 static int xsdfec_parse_of(struct xsdfec_dev *xsdfec)
1030 {
1031 	struct device *dev = xsdfec->dev;
1032 	struct device_node *node = dev->of_node;
1033 	int rval;
1034 	const char *fec_code;
1035 	u32 din_width;
1036 	u32 din_word_include;
1037 	u32 dout_width;
1038 	u32 dout_word_include;
1039 
1040 	rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
1041 	if (rval < 0)
1042 		return rval;
1043 
1044 	if (!strcasecmp(fec_code, "ldpc"))
1045 		xsdfec->config.code = XSDFEC_LDPC_CODE;
1046 	else if (!strcasecmp(fec_code, "turbo"))
1047 		xsdfec->config.code = XSDFEC_TURBO_CODE;
1048 	else
1049 		return -EINVAL;
1050 
1051 	rval = of_property_read_u32(node, "xlnx,sdfec-din-words",
1052 				    &din_word_include);
1053 	if (rval < 0)
1054 		return rval;
1055 
1056 	if (din_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
1057 		xsdfec->config.din_word_include = din_word_include;
1058 	else
1059 		return -EINVAL;
1060 
1061 	rval = of_property_read_u32(node, "xlnx,sdfec-din-width", &din_width);
1062 	if (rval < 0)
1063 		return rval;
1064 
1065 	switch (din_width) {
1066 	/* Fall through and set for valid values */
1067 	case XSDFEC_1x128b:
1068 	case XSDFEC_2x128b:
1069 	case XSDFEC_4x128b:
1070 		xsdfec->config.din_width = din_width;
1071 		break;
1072 	default:
1073 		return -EINVAL;
1074 	}
1075 
1076 	rval = of_property_read_u32(node, "xlnx,sdfec-dout-words",
1077 				    &dout_word_include);
1078 	if (rval < 0)
1079 		return rval;
1080 
1081 	if (dout_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
1082 		xsdfec->config.dout_word_include = dout_word_include;
1083 	else
1084 		return -EINVAL;
1085 
1086 	rval = of_property_read_u32(node, "xlnx,sdfec-dout-width", &dout_width);
1087 	if (rval < 0)
1088 		return rval;
1089 
1090 	switch (dout_width) {
1091 	/* Fall through and set for valid values */
1092 	case XSDFEC_1x128b:
1093 	case XSDFEC_2x128b:
1094 	case XSDFEC_4x128b:
1095 		xsdfec->config.dout_width = dout_width;
1096 		break;
1097 	default:
1098 		return -EINVAL;
1099 	}
1100 
1101 	/* Write LDPC to CODE Register */
1102 	xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
1103 
1104 	xsdfec_cfg_axi_streams(xsdfec);
1105 
1106 	return 0;
1107 }
1108 
/*
 * Threaded IRQ handler: latches the ECC and core ISR status, folds new
 * error counts into the driver's accumulators under the error-data
 * lock, updates the device state, and wakes poll() waiters when there
 * is something new to report.
 */
static irqreturn_t xsdfec_irq_thread(int irq, void *dev_id)
{
	struct xsdfec_dev *xsdfec = dev_id;
	irqreturn_t ret = IRQ_HANDLED;
	u32 ecc_err;
	u32 isr_err;
	u32 uecc_count;
	u32 cecc_count;
	u32 isr_err_count;
	u32 aecc_count;
	u32 tmp;

	WARN_ON(xsdfec->irq != irq);

	/* Mask Interrupts while the status registers are sampled */
	xsdfec_isr_enable(xsdfec, false);
	xsdfec_ecc_isr_enable(xsdfec, false);
	/* Read ISR */
	ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
	isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
	/*
	 * Clear the interrupts by writing the latched bits back
	 * (presumably write-1-to-clear semantics - confirm against the
	 * SD-FEC register map)
	 */
	xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err);
	xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err);

	tmp = ecc_err & XSDFEC_ALL_ECC_ISR_MBE_MASK;
	/* Count uncorrectable 2-bit errors */
	uecc_count = hweight32(tmp);
	/* Count all ECC errors */
	aecc_count = hweight32(ecc_err);
	/*
	 * Number of correctable 1-bit ECC errors; assumes each
	 * uncorrectable error asserts two bits in the ECC ISR, hence
	 * the factor of two - TODO confirm against the IP spec
	 */
	cecc_count = aecc_count - 2 * uecc_count;
	/* Count ISR errors */
	isr_err_count = hweight32(isr_err);
	dev_dbg(xsdfec->dev, "tmp=%x, uecc=%x, aecc=%x, cecc=%x, isr=%x", tmp,
		uecc_count, aecc_count, cecc_count, isr_err_count);
	dev_dbg(xsdfec->dev, "uecc=%x, cecc=%x, isr=%x", xsdfec->uecc_count,
		xsdfec->cecc_count, xsdfec->isr_err_count);

	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	/* Add new uncorrectable (2-bit) errors to the running counter */
	if (uecc_count)
		xsdfec->uecc_count += uecc_count;
	/* Add new correctable (1-bit) errors to the running counter */
	if (cecc_count)
		xsdfec->cecc_count += cecc_count;
	/* Add new core ISR errors to the running counter */
	if (isr_err_count)
		xsdfec->isr_err_count += isr_err_count;

	/* Update state/stats flag */
	if (uecc_count) {
		/* Uncorrectable errors demand a reset or PL reconfigure */
		if (ecc_err & XSDFEC_ECC_ISR_MBE_MASK)
			xsdfec->state = XSDFEC_NEEDS_RESET;
		else if (ecc_err & XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
			xsdfec->state = XSDFEC_PL_RECONFIGURE;
		xsdfec->stats_updated = true;
		xsdfec->state_updated = true;
	}

	/* Correctable errors only refresh the statistics */
	if (cecc_count)
		xsdfec->stats_updated = true;

	if (isr_err_count) {
		xsdfec->state = XSDFEC_NEEDS_RESET;
		xsdfec->stats_updated = true;
		xsdfec->state_updated = true;
	}

	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
	dev_dbg(xsdfec->dev, "state=%x, stats=%x", xsdfec->state_updated,
		xsdfec->stats_updated);

	/* Wake poll() waiters; report IRQ_NONE if nothing actually changed */
	if (xsdfec->state_updated || xsdfec->stats_updated)
		wake_up_interruptible(&xsdfec->waitq);
	else
		ret = IRQ_NONE;

	/* Unmask Interrupts */
	xsdfec_isr_enable(xsdfec, true);
	xsdfec_ecc_isr_enable(xsdfec, true);

	return ret;
}
1193 
1194 static int xsdfec_clk_init(struct platform_device *pdev,
1195 			   struct xsdfec_clks *clks)
1196 {
1197 	int err;
1198 
1199 	clks->core_clk = devm_clk_get(&pdev->dev, "core_clk");
1200 	if (IS_ERR(clks->core_clk)) {
1201 		dev_err(&pdev->dev, "failed to get core_clk");
1202 		return PTR_ERR(clks->core_clk);
1203 	}
1204 
1205 	clks->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
1206 	if (IS_ERR(clks->axi_clk)) {
1207 		dev_err(&pdev->dev, "failed to get axi_clk");
1208 		return PTR_ERR(clks->axi_clk);
1209 	}
1210 
1211 	clks->din_words_clk = devm_clk_get(&pdev->dev, "s_axis_din_words_aclk");
1212 	if (IS_ERR(clks->din_words_clk)) {
1213 		if (PTR_ERR(clks->din_words_clk) != -ENOENT) {
1214 			err = PTR_ERR(clks->din_words_clk);
1215 			return err;
1216 		}
1217 		clks->din_words_clk = NULL;
1218 	}
1219 
1220 	clks->din_clk = devm_clk_get(&pdev->dev, "s_axis_din_aclk");
1221 	if (IS_ERR(clks->din_clk)) {
1222 		if (PTR_ERR(clks->din_clk) != -ENOENT) {
1223 			err = PTR_ERR(clks->din_clk);
1224 			return err;
1225 		}
1226 		clks->din_clk = NULL;
1227 	}
1228 
1229 	clks->dout_clk = devm_clk_get(&pdev->dev, "m_axis_dout_aclk");
1230 	if (IS_ERR(clks->dout_clk)) {
1231 		if (PTR_ERR(clks->dout_clk) != -ENOENT) {
1232 			err = PTR_ERR(clks->dout_clk);
1233 			return err;
1234 		}
1235 		clks->dout_clk = NULL;
1236 	}
1237 
1238 	clks->dout_words_clk =
1239 		devm_clk_get(&pdev->dev, "s_axis_dout_words_aclk");
1240 	if (IS_ERR(clks->dout_words_clk)) {
1241 		if (PTR_ERR(clks->dout_words_clk) != -ENOENT) {
1242 			err = PTR_ERR(clks->dout_words_clk);
1243 			return err;
1244 		}
1245 		clks->dout_words_clk = NULL;
1246 	}
1247 
1248 	clks->ctrl_clk = devm_clk_get(&pdev->dev, "s_axis_ctrl_aclk");
1249 	if (IS_ERR(clks->ctrl_clk)) {
1250 		if (PTR_ERR(clks->ctrl_clk) != -ENOENT) {
1251 			err = PTR_ERR(clks->ctrl_clk);
1252 			return err;
1253 		}
1254 		clks->ctrl_clk = NULL;
1255 	}
1256 
1257 	clks->status_clk = devm_clk_get(&pdev->dev, "m_axis_status_aclk");
1258 	if (IS_ERR(clks->status_clk)) {
1259 		if (PTR_ERR(clks->status_clk) != -ENOENT) {
1260 			err = PTR_ERR(clks->status_clk);
1261 			return err;
1262 		}
1263 		clks->status_clk = NULL;
1264 	}
1265 
1266 	err = clk_prepare_enable(clks->core_clk);
1267 	if (err) {
1268 		dev_err(&pdev->dev, "failed to enable core_clk (%d)", err);
1269 		return err;
1270 	}
1271 
1272 	err = clk_prepare_enable(clks->axi_clk);
1273 	if (err) {
1274 		dev_err(&pdev->dev, "failed to enable axi_clk (%d)", err);
1275 		goto err_disable_core_clk;
1276 	}
1277 
1278 	err = clk_prepare_enable(clks->din_clk);
1279 	if (err) {
1280 		dev_err(&pdev->dev, "failed to enable din_clk (%d)", err);
1281 		goto err_disable_axi_clk;
1282 	}
1283 
1284 	err = clk_prepare_enable(clks->din_words_clk);
1285 	if (err) {
1286 		dev_err(&pdev->dev, "failed to enable din_words_clk (%d)", err);
1287 		goto err_disable_din_clk;
1288 	}
1289 
1290 	err = clk_prepare_enable(clks->dout_clk);
1291 	if (err) {
1292 		dev_err(&pdev->dev, "failed to enable dout_clk (%d)", err);
1293 		goto err_disable_din_words_clk;
1294 	}
1295 
1296 	err = clk_prepare_enable(clks->dout_words_clk);
1297 	if (err) {
1298 		dev_err(&pdev->dev, "failed to enable dout_words_clk (%d)",
1299 			err);
1300 		goto err_disable_dout_clk;
1301 	}
1302 
1303 	err = clk_prepare_enable(clks->ctrl_clk);
1304 	if (err) {
1305 		dev_err(&pdev->dev, "failed to enable ctrl_clk (%d)", err);
1306 		goto err_disable_dout_words_clk;
1307 	}
1308 
1309 	err = clk_prepare_enable(clks->status_clk);
1310 	if (err) {
1311 		dev_err(&pdev->dev, "failed to enable status_clk (%d)\n", err);
1312 		goto err_disable_ctrl_clk;
1313 	}
1314 
1315 	return err;
1316 
1317 err_disable_ctrl_clk:
1318 	clk_disable_unprepare(clks->ctrl_clk);
1319 err_disable_dout_words_clk:
1320 	clk_disable_unprepare(clks->dout_words_clk);
1321 err_disable_dout_clk:
1322 	clk_disable_unprepare(clks->dout_clk);
1323 err_disable_din_words_clk:
1324 	clk_disable_unprepare(clks->din_words_clk);
1325 err_disable_din_clk:
1326 	clk_disable_unprepare(clks->din_clk);
1327 err_disable_axi_clk:
1328 	clk_disable_unprepare(clks->axi_clk);
1329 err_disable_core_clk:
1330 	clk_disable_unprepare(clks->core_clk);
1331 
1332 	return err;
1333 }
1334 
1335 static void xsdfec_disable_all_clks(struct xsdfec_clks *clks)
1336 {
1337 	clk_disable_unprepare(clks->status_clk);
1338 	clk_disable_unprepare(clks->ctrl_clk);
1339 	clk_disable_unprepare(clks->dout_words_clk);
1340 	clk_disable_unprepare(clks->dout_clk);
1341 	clk_disable_unprepare(clks->din_words_clk);
1342 	clk_disable_unprepare(clks->din_clk);
1343 	clk_disable_unprepare(clks->core_clk);
1344 	clk_disable_unprepare(clks->axi_clk);
1345 }
1346 
/*
 * Probe: enable clocks, map registers, parse the DT configuration,
 * optionally wire the IRQ thread, then expose the device as a misc
 * char device named "xsdfec<N>".
 */
static int xsdfec_probe(struct platform_device *pdev)
{
	struct xsdfec_dev *xsdfec;
	struct device *dev;
	int err;
	bool irq_enabled = true;

	xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
	if (!xsdfec)
		return -ENOMEM;

	xsdfec->dev = &pdev->dev;
	spin_lock_init(&xsdfec->error_data_lock);

	/* Clocks must be running before any register access below */
	err = xsdfec_clk_init(pdev, &xsdfec->clks);
	if (err)
		return err;

	dev = xsdfec->dev;
	xsdfec->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xsdfec->regs)) {
		err = PTR_ERR(xsdfec->regs);
		goto err_xsdfec_dev;
	}

	xsdfec->irq = platform_get_irq(pdev, 0);
	if (xsdfec->irq < 0) {
		/*
		 * NOTE(review): any failure here (including -EPROBE_DEFER)
		 * is treated as "no IRQ" and the device runs without
		 * interrupt support - confirm this is intended.
		 */
		dev_dbg(dev, "platform_get_irq failed");
		irq_enabled = false;
	}

	err = xsdfec_parse_of(xsdfec);
	if (err < 0)
		goto err_xsdfec_dev;

	update_config_from_hw(xsdfec);

	/* Save driver private data */
	platform_set_drvdata(pdev, xsdfec);

	if (irq_enabled) {
		init_waitqueue_head(&xsdfec->waitq);
		/* Register IRQ thread */
		err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
						xsdfec_irq_thread, IRQF_ONESHOT,
						"xilinx-sdfec16", xsdfec);
		if (err < 0) {
			dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
			goto err_xsdfec_dev;
		}
	}

	/* Allocate a unique device number for the "xsdfec<N>" name */
	err = ida_alloc(&dev_nrs, GFP_KERNEL);
	if (err < 0)
		goto err_xsdfec_dev;
	xsdfec->dev_id = err;

	snprintf(xsdfec->dev_name, DEV_NAME_LEN, "xsdfec%d", xsdfec->dev_id);
	xsdfec->miscdev.minor = MISC_DYNAMIC_MINOR;
	xsdfec->miscdev.name = xsdfec->dev_name;
	xsdfec->miscdev.fops = &xsdfec_fops;
	xsdfec->miscdev.parent = dev;
	err = misc_register(&xsdfec->miscdev);
	if (err) {
		dev_err(dev, "error:%d. Unable to register device", err);
		goto err_xsdfec_ida;
	}
	return 0;

err_xsdfec_ida:
	ida_free(&dev_nrs, xsdfec->dev_id);
err_xsdfec_dev:
	xsdfec_disable_all_clks(&xsdfec->clks);
	return err;
}
1422 
1423 static int xsdfec_remove(struct platform_device *pdev)
1424 {
1425 	struct xsdfec_dev *xsdfec;
1426 
1427 	xsdfec = platform_get_drvdata(pdev);
1428 	misc_deregister(&xsdfec->miscdev);
1429 	ida_free(&dev_nrs, xsdfec->dev_id);
1430 	xsdfec_disable_all_clks(&xsdfec->clks);
1431 	return 0;
1432 }
1433 
/* Device-tree compatible strings this driver binds to */
static const struct of_device_id xsdfec_of_match[] = {
	{
		.compatible = "xlnx,sd-fec-1.1",
	},
	{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, xsdfec_of_match);
1441 
1442 static struct platform_driver xsdfec_driver = {
1443 	.driver = {
1444 		.name = "xilinx-sdfec",
1445 		.of_match_table = xsdfec_of_match,
1446 	},
1447 	.probe = xsdfec_probe,
1448 	.remove =  xsdfec_remove,
1449 };
1450 
1451 module_platform_driver(xsdfec_driver);
1452 
1453 MODULE_AUTHOR("Xilinx, Inc");
1454 MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
1455 MODULE_LICENSE("GPL");
1456