/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qman_priv.h"

u16 qman_ip_rev;
EXPORT_SYMBOL(qman_ip_rev);
u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
EXPORT_SYMBOL(qm_channel_pool1);
u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
EXPORT_SYMBOL(qm_channel_caam);

/* Register offsets */
#define REG_QCSP_LIO_CFG(n)	(0x0000 + ((n) * 0x10))
#define REG_QCSP_IO_CFG(n)	(0x0004 + ((n) * 0x10))
#define REG_QCSP_DD_CFG(n)	(0x000c + ((n) * 0x10))
#define REG_DD_CFG		0x0200
#define REG_DCP_CFG(n)		(0x0300 + ((n) * 0x10))
#define REG_DCP_DD_CFG(n)	(0x0304 + ((n) * 0x10))
#define REG_DCP_DLM_AVG(n)	(0x030c + ((n) * 0x10))
#define REG_PFDR_FPC		0x0400
#define REG_PFDR_FP_HEAD	0x0404
#define REG_PFDR_FP_TAIL	0x0408
#define REG_PFDR_FP_LWIT	0x0410
#define REG_PFDR_CFG		0x0414
#define REG_SFDR_CFG		0x0500
#define REG_SFDR_IN_USE		0x0504
#define REG_WQ_CS_CFG(n)	(0x0600 + ((n) * 0x04))
#define REG_WQ_DEF_ENC_WQID	0x0630
#define REG_WQ_SC_DD_CFG(n)	(0x640 + ((n) * 0x04))
#define REG_WQ_PC_DD_CFG(n)	(0x680 + ((n) * 0x04))
#define REG_WQ_DC0_DD_CFG(n)	(0x6c0 + ((n) * 0x04))
#define REG_WQ_DC1_DD_CFG(n)	(0x700 + ((n) * 0x04))
#define REG_WQ_DCn_DD_CFG(n)	(0x6c0 + ((n) * 0x40)) /* n=2,3 */
#define REG_CM_CFG		0x0800
#define REG_ECSR		0x0a00
#define REG_ECIR		0x0a04
#define REG_EADR		0x0a08
#define REG_ECIR2		0x0a0c
#define REG_EDATA(n)		(0x0a10 + ((n) * 0x04))
#define REG_SBEC(n)		(0x0a80 + ((n) * 0x04))
#define REG_MCR			0x0b00
#define REG_MCP(n)		(0x0b04 + ((n) * 0x04))
#define REG_MISC_CFG		0x0be0
#define REG_HID_CFG		0x0bf0
#define REG_IDLE_STAT		0x0bf4
#define REG_IP_REV_1		0x0bf8
#define REG_IP_REV_2		0x0bfc
#define REG_FQD_BARE		0x0c00
#define REG_PFDR_BARE		0x0c20
#define REG_offset_BAR		0x0004	/* relative to REG_[FQD|PFDR]_BARE */
#define REG_offset_AR		0x0010	/* relative to REG_[FQD|PFDR]_BARE */
#define REG_QCSP_BARE		0x0c80
#define REG_QCSP_BAR		0x0c84
#define REG_CI_SCHED_CFG	0x0d00
#define REG_SRCIDR		0x0d04
#define REG_LIODNR		0x0d08
#define REG_CI_RLM_AVG		0x0d14
#define REG_ERR_ISR		0x0e00
#define REG_ERR_IER		0x0e04
#define REG_REV3_QCSP_LIO_CFG(n)	(0x1000 + ((n) * 0x10))
#define REG_REV3_QCSP_IO_CFG(n)	(0x1004 + ((n) * 0x10))
#define REG_REV3_QCSP_DD_CFG(n)	(0x100c + ((n) * 0x10))

/* Assists for QMAN_MCR */
#define MCR_INIT_PFDR		0x01000000
#define MCR_get_rslt(v)		(u8)((v) >> 24)
#define MCR_rslt_idle(r)	(!(r) || ((r) >= 0xf0))
#define MCR_rslt_ok(r)		((r) == 0xf0)
#define MCR_rslt_eaccess(r)	((r) == 0xf8)
#define MCR_rslt_inval(r)	((r) == 0xff)
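/*
 * Per the encodings above, a result byte of 0 (no command outstanding) or
 * anything in 0xf0-0xff (command complete) counts as idle; callers such as
 * qm_init_pfdr() spin until the top byte of QMAN_MCR leaves the 0x01-0xef
 * "busy" range.
 */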

/*
 * Corenet initiator settings. Stash request queues are 4-deep to match the
 * cores' ability to snarf. Stash priority is 3; other priorities are 2.
 */
#define QM_CI_SCHED_CFG_SRCCIV		4
#define QM_CI_SCHED_CFG_SRQ_W		3
#define QM_CI_SCHED_CFG_RW_W		2
#define QM_CI_SCHED_CFG_BMAN_W		2
/* write SRCCIV enable */
#define QM_CI_SCHED_CFG_SRCCIV_EN	BIT(31)
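/*
 * For illustration: qm_set_corenet_initiator() below assembles these into
 * BIT(31) | (4 << 24) | (3 << 8) | (2 << 4) | 2 == 0x84000322.
 */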

/* Follows WQ_CS_CFG0-5 */
enum qm_wq_class {
	qm_wq_portal = 0,
	qm_wq_pool = 1,
	qm_wq_fman0 = 2,
	qm_wq_fman1 = 3,
	qm_wq_caam = 4,
	qm_wq_pme = 5,
	qm_wq_first = qm_wq_portal,
	qm_wq_last = qm_wq_pme
};

/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
enum qm_memory {
	qm_memory_fqd,
	qm_memory_pfdr
};

/* Used by all error interrupt registers except 'inhibit' */
#define QM_EIRQ_CIDE	0x20000000	/* Corenet Initiator Data Error */
#define QM_EIRQ_CTDE	0x10000000	/* Corenet Target Data Error */
#define QM_EIRQ_CITT	0x08000000	/* Corenet Invalid Target Transaction */
#define QM_EIRQ_PLWI	0x04000000	/* PFDR Low Watermark */
#define QM_EIRQ_MBEI	0x02000000	/* Multi-bit ECC Error */
#define QM_EIRQ_SBEI	0x01000000	/* Single-bit ECC Error */
#define QM_EIRQ_PEBI	0x00800000	/* PFDR Enqueues Blocked Interrupt */
#define QM_EIRQ_IFSI	0x00020000	/* Invalid FQ Flow Control State */
#define QM_EIRQ_ICVI	0x00010000	/* Invalid Command Verb */
#define QM_EIRQ_IDDI	0x00000800	/* Invalid Dequeue (Direct-connect) */
#define QM_EIRQ_IDFI	0x00000400	/* Invalid Dequeue FQ */
#define QM_EIRQ_IDSI	0x00000200	/* Invalid Dequeue Source */
#define QM_EIRQ_IDQI	0x00000100	/* Invalid Dequeue Queue */
#define QM_EIRQ_IECE	0x00000010	/* Invalid Enqueue Configuration */
#define QM_EIRQ_IEOI	0x00000008	/* Invalid Enqueue Overflow */
#define QM_EIRQ_IESI	0x00000004	/* Invalid Enqueue State */
#define QM_EIRQ_IECI	0x00000002	/* Invalid Enqueue Channel */
#define QM_EIRQ_IEQI	0x00000001	/* Invalid Enqueue Queue */

/* QMAN_ECIR valid error bit */
#define PORTAL_ECSR_ERR	(QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
			 QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
			 QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
#define FQID_ECSR_ERR	(QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
			 QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
			 QM_EIRQ_IFSI)

struct qm_ecir {
	u32 info; /* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */
};
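
/*
 * Worked example for the decode helpers below: info == 0x21000005 reads as
 * a DCP error (ptyp, bit 29, set) from portal 1 (pnum) on FQID 5.
 */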

static bool qm_ecir_is_dcp(const struct qm_ecir *p)
{
	return p->info & BIT(29);
}

static int qm_ecir_get_pnum(const struct qm_ecir *p)
{
	return (p->info >> 24) & 0x1f;
}

static int qm_ecir_get_fqid(const struct qm_ecir *p)
{
	return p->info & (BIT(24) - 1);
}

struct qm_ecir2 {
	u32 info; /* ptyp[31], res[10-30], pnum[0-9] */
};

static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p)
{
	return p->info & BIT(31);
}

static int qm_ecir2_get_pnum(const struct qm_ecir2 *p)
{
	return p->info & (BIT(10) - 1);
}

struct qm_eadr {
	u32 info; /* memid[24-27], eadr[0-11] */
		  /* v3: memid[24-28], eadr[0-15] */
};

static int qm_eadr_get_memid(const struct qm_eadr *p)
{
	return (p->info >> 24) & 0xf;
}

static int qm_eadr_get_eadr(const struct qm_eadr *p)
{
	return p->info & (BIT(12) - 1);
}

static int qm_eadr_v3_get_memid(const struct qm_eadr *p)
{
	return (p->info >> 24) & 0x1f;
}

static int qm_eadr_v3_get_eadr(const struct qm_eadr *p)
{
	return p->info & (BIT(16) - 1);
}

struct qman_hwerr_txt {
	u32 mask;
	const char *txt;
};

static const struct qman_hwerr_txt qman_hwerr_txts[] = {
	{ QM_EIRQ_CIDE, "Corenet Initiator Data Error" },
	{ QM_EIRQ_CTDE, "Corenet Target Data Error" },
	{ QM_EIRQ_CITT, "Corenet Invalid Target Transaction" },
	{ QM_EIRQ_PLWI, "PFDR Low Watermark" },
	{ QM_EIRQ_MBEI, "Multi-bit ECC Error" },
	{ QM_EIRQ_SBEI, "Single-bit ECC Error" },
	{ QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" },
	{ QM_EIRQ_ICVI, "Invalid Command Verb" },
	{ QM_EIRQ_IFSI, "Invalid Flow Control State" },
	{ QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" },
	{ QM_EIRQ_IDFI, "Invalid Dequeue FQ" },
	{ QM_EIRQ_IDSI, "Invalid Dequeue Source" },
	{ QM_EIRQ_IDQI, "Invalid Dequeue Queue" },
	{ QM_EIRQ_IECE, "Invalid Enqueue Configuration" },
	{ QM_EIRQ_IEOI, "Invalid Enqueue Overflow" },
	{ QM_EIRQ_IESI, "Invalid Enqueue State" },
	{ QM_EIRQ_IECI, "Invalid Enqueue Channel" },
	{ QM_EIRQ_IEQI, "Invalid Enqueue Queue" },
};

struct qman_error_info_mdata {
	u16 addr_mask;
	u16 bits;
	const char *txt;
};

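/* Indexed by the memid field of QMAN_EADR (see qm_eadr*_get_memid()) */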
static const struct qman_error_info_mdata error_mdata[] = {
	{ 0x01FF, 24, "FQD cache tag memory 0" },
	{ 0x01FF, 24, "FQD cache tag memory 1" },
	{ 0x01FF, 24, "FQD cache tag memory 2" },
	{ 0x01FF, 24, "FQD cache tag memory 3" },
	{ 0x0FFF, 512, "FQD cache memory" },
	{ 0x07FF, 128, "SFDR memory" },
	{ 0x01FF, 72, "WQ context memory" },
	{ 0x00FF, 240, "CGR memory" },
	{ 0x00FF, 302, "Internal Order Restoration List memory" },
	{ 0x01FF, 256, "SW portal ring memory" },
};

#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)

/*
 * TODO: unimplemented registers
 *
 * Keeping a list here of QMan registers not yet covered:
 * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
 * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
 * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
 */

/* Pointer to the start of the QMan's CCSR space */
static u32 __iomem *qm_ccsr_start;
/* An SDQCR mask comprising all the available/visible pool channels */
static u32 qm_pools_sdqcr;

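/*
 * qm_ccsr_start is a u32 pointer, so the byte offsets defined above must be
 * scaled down by 4 before being added to it.
 */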
static inline u32 qm_ccsr_in(u32 offset)
{
	return ioread32be(qm_ccsr_start + offset/4);
}

static inline void qm_ccsr_out(u32 offset, u32 val)
{
	iowrite32be(val, qm_ccsr_start + offset/4);
}

u32 qm_get_pools_sdqcr(void)
{
	return qm_pools_sdqcr;
}

enum qm_dc_portal {
	qm_dc_portal_fman0 = 0,
	qm_dc_portal_fman1 = 1
};

static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd)
{
	DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 ||
		    portal == qm_dc_portal_fman1);
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		qm_ccsr_out(REG_DCP_CFG(portal),
			    (ed ? 0x1000 : 0) | (sernd & 0x3ff));
	else
		qm_ccsr_out(REG_DCP_CFG(portal),
			    (ed ? 0x100 : 0) | (sernd & 0x1f));
}

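/*
 * Pack the class-scheduler config into one WQ_CS_CFG word: cs_elev in bits
 * 31-24, then the six 3-bit weights csw2..csw7 in the low three bits of
 * successive nibbles, from bits 22-20 down to bits 2-0.
 */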
static void qm_set_wq_scheduling(enum qm_wq_class wq_class,
				 u8 cs_elev, u8 csw2, u8 csw3, u8 csw4,
				 u8 csw5, u8 csw6, u8 csw7)
{
	qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
		    ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
		    ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
		    ((csw6 & 0x7) << 4) | (csw7 & 0x7));
}

static void qm_set_hid(void)
{
	qm_ccsr_out(REG_HID_CFG, 0);
}

static void qm_set_corenet_initiator(void)
{
	qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN |
		    (QM_CI_SCHED_CFG_SRCCIV << 24) |
		    (QM_CI_SCHED_CFG_SRQ_W << 8) |
		    (QM_CI_SCHED_CFG_RW_W << 4) |
		    QM_CI_SCHED_CFG_BMAN_W);
}

static void qm_get_version(u16 *id, u8 *major, u8 *minor)
{
	u32 v = qm_ccsr_in(REG_IP_REV_1);

	*id = (v >> 16);
	*major = (v >> 8) & 0xff;
	*minor = v & 0xff;
}

#define PFDR_AR_EN		BIT(31)
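/*
 * For illustration: a 1 MiB region gives exp == 20, so the AR register is
 * written with PFDR_AR_EN | 19, i.e. the size field encodes 2^(field + 1)
 * bytes.
 */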
static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
{
	u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
	u32 exp = ilog2(size);

	/* choke if size isn't within range */
	DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
		    is_power_of_2(size));
	/* choke if 'ba' has lower-alignment than 'size' */
	DPAA_ASSERT(!(ba & (size - 1)));
	qm_ccsr_out(offset, upper_32_bits(ba));
	qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
	qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
}

static void qm_set_pfdr_threshold(u32 th, u8 k)
{
	qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff);
	qm_ccsr_out(REG_PFDR_CFG, k);
}

static void qm_set_sfdr_threshold(u16 th)
{
	qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff);
}

static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
{
	u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));

	DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
	/* Make sure the command interface is 'idle' */
	if (!MCR_rslt_idle(rslt)) {
		dev_crit(dev, "QMAN_MCR isn't idle");
		WARN_ON(1);
	}

	/* Write the MCR command params then the verb */
	qm_ccsr_out(REG_MCP(0), pfdr_start);
	/*
	 * TODO: remove this - it's a workaround for a model bug that is
	 * corrected in more recent versions. We use the workaround until
	 * everyone has upgraded.
	 */
	qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16);
	dma_wmb();
	qm_ccsr_out(REG_MCR, MCR_INIT_PFDR);
	/* Poll for the result */
	do {
		rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
	} while (!MCR_rslt_idle(rslt));
	if (MCR_rslt_ok(rslt))
		return 0;
	if (MCR_rslt_eaccess(rslt))
		return -EACCES;
	if (MCR_rslt_inval(rslt))
		return -EINVAL;
	dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
	return -ENODEV;
}

/*
 * Ideally we would use the DMA API to turn rmem->base into a DMA address
 * (especially if iommu translations ever get involved).  Unfortunately, the
 * DMA API currently does not allow mapping anything that is not backed with
 * a struct page.
 */
static dma_addr_t fqd_a, pfdr_a;
static size_t fqd_sz, pfdr_sz;

static int qman_fqd(struct reserved_mem *rmem)
{
	fqd_a = rmem->base;
	fqd_sz = rmem->size;

	WARN_ON(!(fqd_a && fqd_sz));

	return 0;
}
RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);

static int qman_pfdr(struct reserved_mem *rmem)
{
	pfdr_a = rmem->base;
	pfdr_sz = rmem->size;

	WARN_ON(!(pfdr_a && pfdr_sz));

	return 0;
}
RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);

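/* A frame queue descriptor occupies 64 bytes, hence the divide below */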
static unsigned int qm_get_fqid_maxcnt(void)
{
	return fqd_sz / 64;
}

/*
 * Flush this memory range from the data cache so that QMan-originated
 * transactions for this memory region can be marked non-coherent.
 */
static int zero_priv_mem(struct device *dev, struct device_node *node,
			 phys_addr_t addr, size_t sz)
{
	/* map as cacheable, non-guarded */
	void __iomem *tmpp = ioremap_prot(addr, sz, 0);

	if (!tmpp)
		return -ENOMEM;

	memset_io(tmpp, 0, sz);
	flush_dcache_range((unsigned long)tmpp,
			   (unsigned long)tmpp + sz);
	iounmap(tmpp);

	return 0;
}

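/*
 * Worked example: bit_count == 72 gives i == 3 words and an 8-bit mask, so
 * EDATA(13) is printed masked to 0xff, then EDATA(14) and EDATA(15) in full.
 */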
static void log_edata_bits(struct device *dev, u32 bit_count)
{
	u32 i, j, mask = 0xffffffff;

	dev_warn(dev, "ErrInt, EDATA:\n");
	i = bit_count / 32;
	if (bit_count % 32) {
		i++;
		mask = ~(mask << bit_count % 32);
	}
	j = 16 - i;
	dev_warn(dev, "  0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask);
	j++;
	for (; j < 16; j++)
		dev_warn(dev, "  0x%08x\n", qm_ccsr_in(REG_EDATA(j)));
}

static void log_additional_error_info(struct device *dev, u32 isr_val,
				      u32 ecsr_val)
{
	struct qm_ecir ecir_val;
	struct qm_eadr eadr_val;
	int memid;

	ecir_val.info = qm_ccsr_in(REG_ECIR);
	/* Is the portal info valid? */
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
		struct qm_ecir2 ecir2_val;

		ecir2_val.info = qm_ccsr_in(REG_ECIR2);
		if (ecsr_val & PORTAL_ECSR_ERR) {
			dev_warn(dev, "ErrInt: %s id %d\n",
				 qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP",
				 qm_ecir2_get_pnum(&ecir2_val));
		}
		if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
			dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
				 qm_ecir_get_fqid(&ecir_val));

		if (ecsr_val & (QM_EIRQ_SBEI | QM_EIRQ_MBEI)) {
			eadr_val.info = qm_ccsr_in(REG_EADR);
			memid = qm_eadr_v3_get_memid(&eadr_val);
			dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
				 error_mdata[memid].txt,
				 error_mdata[memid].addr_mask
					& qm_eadr_v3_get_eadr(&eadr_val));
			log_edata_bits(dev, error_mdata[memid].bits);
		}
	} else {
		if (ecsr_val & PORTAL_ECSR_ERR) {
			dev_warn(dev, "ErrInt: %s id %d\n",
				 qm_ecir_is_dcp(&ecir_val) ? "DCP" : "SWP",
				 qm_ecir_get_pnum(&ecir_val));
		}
		if (ecsr_val & FQID_ECSR_ERR)
			dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
				 qm_ecir_get_fqid(&ecir_val));

		if (ecsr_val & (QM_EIRQ_SBEI | QM_EIRQ_MBEI)) {
			eadr_val.info = qm_ccsr_in(REG_EADR);
			memid = qm_eadr_get_memid(&eadr_val);
			dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
				 error_mdata[memid].txt,
				 error_mdata[memid].addr_mask
					& qm_eadr_get_eadr(&eadr_val));
			log_edata_bits(dev, error_mdata[memid].bits);
		}
	}
}

static irqreturn_t qman_isr(int irq, void *ptr)
{
	u32 isr_val, ier_val, ecsr_val, isr_mask, i;
	struct device *dev = ptr;

	ier_val = qm_ccsr_in(REG_ERR_IER);
	isr_val = qm_ccsr_in(REG_ERR_ISR);
	ecsr_val = qm_ccsr_in(REG_ECSR);
	isr_mask = isr_val & ier_val;

	if (!isr_mask)
		return IRQ_NONE;

	for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) {
		if (qman_hwerr_txts[i].mask & isr_mask) {
			dev_err_ratelimited(dev, "ErrInt: %s\n",
					    qman_hwerr_txts[i].txt);
			if (qman_hwerr_txts[i].mask & ecsr_val) {
				log_additional_error_info(dev, isr_mask,
							  ecsr_val);
				/* Re-arm error capture registers */
				qm_ccsr_out(REG_ECSR, ecsr_val);
			}
			if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) {
				dev_dbg(dev, "Disabling error 0x%x\n",
					qman_hwerr_txts[i].mask);
				ier_val &= ~qman_hwerr_txts[i].mask;
				qm_ccsr_out(REG_ERR_IER, ier_val);
			}
		}
	}
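	/*
	 * ERR_ISR appears to be write-1-to-clear (probe() clears stale bits
	 * the same way), so ack every bit we observed.
	 */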
	qm_ccsr_out(REG_ERR_ISR, isr_val);

	return IRQ_HANDLED;
}

static int qman_init_ccsr(struct device *dev)
{
	int i, err;

	/* FQD memory */
	qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
	/* PFDR memory */
	qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
	err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
	if (err)
		return err;
	/* thresholds */
	qm_set_pfdr_threshold(512, 64);
	qm_set_sfdr_threshold(128);
	/* clear stale PEBI bit from interrupt status register */
	qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI);
	/* corenet initiator settings */
	qm_set_corenet_initiator();
	/* HID settings */
	qm_set_hid();
	/* Set scheduling weights to defaults */
	for (i = qm_wq_first; i <= qm_wq_last; i++)
		qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0);
	/* We are not prepared to accept ERNs for hardware enqueues */
	qm_set_dc(qm_dc_portal_fman0, 1, 0);
	qm_set_dc(qm_dc_portal_fman1, 1, 0);
	return 0;
}

#define LIO_CFG_LIODN_MASK 0x0fff0000
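/*
 * The first call records the LIODN offset found in the first portal's
 * LIO_CFG; every later call propagates that same offset to the other
 * portals.
 */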
void qman_liodn_fixup(u16 channel)
{
	static int done;
	static u32 liodn_offset;
	u32 before, after;
	int idx = channel - QM_CHANNEL_SWPORTAL0;

	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx));
	else
		before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx));
	if (!done) {
		liodn_offset = before & LIO_CFG_LIODN_MASK;
		done = 1;
		return;
	}
	after = (before & ~LIO_CFG_LIODN_MASK) | liodn_offset;
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
	else
		qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
}

#define IO_CFG_SDEST_MASK 0x00ff0000
void qman_set_sdest(u16 channel, unsigned int cpu_idx)
{
	int idx = channel - QM_CHANNEL_SWPORTAL0;
	u32 before, after;

	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
		before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx));
		/* Each pair of vCPUs shares the same SRQ (SDEST) */
		cpu_idx /= 2;
		after = (before & ~IO_CFG_SDEST_MASK) | (cpu_idx << 16);
		qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after);
	} else {
		before = qm_ccsr_in(REG_QCSP_IO_CFG(idx));
		after = (before & ~IO_CFG_SDEST_MASK) | (cpu_idx << 16);
		qm_ccsr_out(REG_QCSP_IO_CFG(idx), after);
	}
}

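/*
 * Seed the FQID, pool-channel and CGRID allocators. DPAA_GENALLOC_OFF biases
 * each range because gen_pool_alloc() returns 0 on failure, so 0 can never be
 * handed out as a valid id.
 */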
static int qman_resource_init(struct device *dev)
{
	int pool_chan_num, cgrid_num;
	int ret, i;

	switch (qman_ip_rev >> 8) {
	case 1:
		pool_chan_num = 15;
		cgrid_num = 256;
		break;
	case 2:
		pool_chan_num = 3;
		cgrid_num = 64;
		break;
	case 3:
		pool_chan_num = 15;
		cgrid_num = 256;
		break;
	default:
		return -ENODEV;
	}

	ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF,
			   pool_chan_num, -1);
	if (ret) {
		dev_err(dev, "Failed to seed pool channels (%d)\n", ret);
		return ret;
	}

	ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1);
	if (ret) {
		dev_err(dev, "Failed to seed CGRID range (%d)\n", ret);
		return ret;
	}

	/* parse pool channels into the SDQCR mask */
	for (i = 0; i < pool_chan_num; i++)
		qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i);

	ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF,
			   qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1);
	if (ret) {
		dev_err(dev, "Failed to seed FQID range (%d)\n", ret);
		return ret;
	}

	return 0;
}

static int fsl_qman_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct resource *res;
	int ret, err_irq;
	u16 id;
	u8 major, minor;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
			node->full_name);
		return -ENXIO;
	}
	qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
	if (!qm_ccsr_start)
		return -ENXIO;

	qm_get_version(&id, &major, &minor);
	if (major == 1 && minor == 0) {
		dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n");
		return -ENODEV;
	} else if (major == 1 && minor == 1)
		qman_ip_rev = QMAN_REV11;
	else if (major == 1 && minor == 2)
		qman_ip_rev = QMAN_REV12;
	else if (major == 2 && minor == 0)
		qman_ip_rev = QMAN_REV20;
	else if (major == 3 && minor == 0)
		qman_ip_rev = QMAN_REV30;
	else if (major == 3 && minor == 1)
		qman_ip_rev = QMAN_REV31;
	else {
		dev_err(dev, "Unknown QMan version\n");
		return -ENODEV;
	}

	if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
		qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
		qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
	}

	ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
	WARN_ON(ret);
	if (ret)
		return -ENODEV;

	ret = qman_init_ccsr(dev);
	if (ret) {
		dev_err(dev, "CCSR setup failed\n");
		return ret;
	}

	err_irq = platform_get_irq(pdev, 0);
	if (err_irq <= 0) {
		dev_info(dev, "Can't get %s property 'interrupts'\n",
			 node->full_name);
		return -ENODEV;
	}
	ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err",
			       dev);
	if (ret) {
		dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
			ret, node->full_name);
		return ret;
	}

	/*
	 * Write-to-clear any stale bits (e.g. starvation being asserted prior
	 * to resource allocation during driver init).
	 */
	qm_ccsr_out(REG_ERR_ISR, 0xffffffff);
	/* Enable Error Interrupts */
	qm_ccsr_out(REG_ERR_IER, 0xffffffff);

	qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc");
	if (IS_ERR(qm_fqalloc)) {
		ret = PTR_ERR(qm_fqalloc);
		dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret);
		return ret;
	}

	qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc");
	if (IS_ERR(qm_qpalloc)) {
		ret = PTR_ERR(qm_qpalloc);
		dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret);
		return ret;
	}

	qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc");
	if (IS_ERR(qm_cgralloc)) {
		ret = PTR_ERR(qm_cgralloc);
		dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret);
		return ret;
	}

	ret = qman_resource_init(dev);
	if (ret)
		return ret;

	ret = qman_alloc_fq_table(qm_get_fqid_maxcnt());
	if (ret)
		return ret;

	ret = qman_wq_alloc();
	if (ret)
		return ret;

	return 0;
}

static const struct of_device_id fsl_qman_ids[] = {
	{
		.compatible = "fsl,qman",
	},
	{}
};

static struct platform_driver fsl_qman_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = fsl_qman_ids,
		.suppress_bind_attrs = true,
	},
	.probe = fsl_qman_probe,
};

builtin_platform_driver(fsl_qman_driver);