/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_hw.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 */

#ifndef __EHEA_HW_H__
#define __EHEA_HW_H__

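/*
 * Queue "adder" value masks.  Bit positions use IBM (MSB = bit 0)
 * numbering, so EHEA_BMASK_IBM(48, 63) selects the low-order 16 bits
 * of the register, i.e. the WQE count written by the ehea_update_*()
 * helpers at the end of this file.  (Descriptive comment added here;
 * the mask semantics follow the EHEA_BMASK_* helpers in ehea.h.)
 */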
#define QPX_SQA_VALUE EHEA_BMASK_IBM(48, 63)
#define QPX_RQ1A_VALUE EHEA_BMASK_IBM(48, 63)
#define QPX_RQ2A_VALUE EHEA_BMASK_IBM(48, 63)
#define QPX_RQ3A_VALUE EHEA_BMASK_IBM(48, 63)

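/*
 * The ehea_*mm structs below mirror the layout of the adapter's
 * memory-mapped register areas (queue pair, memory region, completion
 * queue and event queue contexts).  The *_OFFSET() macros turn a
 * register name into its byte offset within the mapped area, which is
 * the form expected by the epa_load()/epa_store() accessors further
 * down in this file.
 */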
#define QPTEMM_OFFSET(x) offsetof(struct ehea_qptemm, x)

struct ehea_qptemm {
	u64 qpx_hcr;
	u64 qpx_c;
	u64 qpx_herr;
	u64 qpx_aer;
	u64 qpx_sqa;
	u64 qpx_sqc;
	u64 qpx_rq1a;
	u64 qpx_rq1c;
	u64 qpx_st;
	u64 qpx_aerr;
	u64 qpx_tenure;
	u64 qpx_reserved1[(0x098 - 0x058) / 8];
	u64 qpx_portp;
	u64 qpx_reserved2[(0x100 - 0x0A0) / 8];
	u64 qpx_t;
	u64 qpx_sqhp;
	u64 qpx_sqptp;
	u64 qpx_reserved3[(0x140 - 0x118) / 8];
	u64 qpx_sqwsize;
	u64 qpx_reserved4[(0x170 - 0x148) / 8];
	u64 qpx_sqsize;
	u64 qpx_reserved5[(0x1B0 - 0x178) / 8];
	u64 qpx_sigt;
	u64 qpx_wqecnt;
	u64 qpx_rq1hp;
	u64 qpx_rq1ptp;
	u64 qpx_rq1size;
	u64 qpx_reserved6[(0x220 - 0x1D8) / 8];
	u64 qpx_rq1wsize;
	u64 qpx_reserved7[(0x240 - 0x228) / 8];
	u64 qpx_pd;
	u64 qpx_scqn;
	u64 qpx_rcqn;
	u64 qpx_aeqn;
	u64 reserved49;
	u64 qpx_ram;
	u64 qpx_reserved8[(0x300 - 0x270) / 8];
	u64 qpx_rq2a;
	u64 qpx_rq2c;
	u64 qpx_rq2hp;
	u64 qpx_rq2ptp;
	u64 qpx_rq2size;
	u64 qpx_rq2wsize;
	u64 qpx_rq2th;
	u64 qpx_rq3a;
	u64 qpx_rq3c;
	u64 qpx_rq3hp;
	u64 qpx_rq3ptp;
	u64 qpx_rq3size;
	u64 qpx_rq3wsize;
	u64 qpx_rq3th;
	u64 qpx_lpn;
	u64 qpx_reserved9[(0x400 - 0x378) / 8];
	u64 reserved_ext[(0x500 - 0x400) / 8];
	u64 reserved2[(0x1000 - 0x500) / 8];
};

#define MRx_HCR_LPARID_VALID EHEA_BMASK_IBM(0, 0)

#define MRMWMM_OFFSET(x) offsetof(struct ehea_mrmwmm, x)

struct ehea_mrmwmm {
	u64 mrx_hcr;
	u64 mrx_c;
	u64 mrx_herr;
	u64 mrx_aer;
	u64 mrx_pp;
	u64 reserved1;
	u64 reserved2;
	u64 reserved3;
	u64 reserved4[(0x200 - 0x40) / 8];
	u64 mrx_ctl[64];
};

#define QPEDMM_OFFSET(x) offsetof(struct ehea_qpedmm, x)

struct ehea_qpedmm {

	u64 reserved0[(0x400) / 8];
	u64 qpedx_phh;
	u64 qpedx_ppsgp;
	u64 qpedx_ppsgu;
	u64 qpedx_ppdgp;
	u64 qpedx_ppdgu;
	u64 qpedx_aph;
	u64 qpedx_apsgp;
	u64 qpedx_apsgu;
	u64 qpedx_apdgp;
	u64 qpedx_apdgu;
	u64 qpedx_apav;
	u64 qpedx_apsav;
	u64 qpedx_hcr;
	u64 reserved1[4];
	u64 qpedx_rrl0;
	u64 qpedx_rrrkey0;
	u64 qpedx_rrva0;
	u64 reserved2;
	u64 qpedx_rrl1;
	u64 qpedx_rrrkey1;
	u64 qpedx_rrva1;
	u64 reserved3;
	u64 qpedx_rrl2;
	u64 qpedx_rrrkey2;
	u64 qpedx_rrva2;
	u64 reserved4;
	u64 qpedx_rrl3;
	u64 qpedx_rrrkey3;
	u64 qpedx_rrva3;
};

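/*
 * Completion queue register bits (description inferred from the names
 * and from their use in this file): CQX_FECADDER is the value added to
 * the free entry count via ehea_update_feca(), CQX_FEC_CQE_CNT the CQE
 * count field of the FEC register, CQX_N1_GENERATE_COMP_EVENT re-arms
 * completion event generation and CQX_EP_EVENT_PENDING flags a pending
 * event; see ehea_reset_cq_n1() and ehea_reset_cq_ep() below.
 */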
#define CQX_FECADDER EHEA_BMASK_IBM(32, 63)
#define CQX_FEC_CQE_CNT EHEA_BMASK_IBM(32, 63)
#define CQX_N1_GENERATE_COMP_EVENT EHEA_BMASK_IBM(0, 0)
#define CQX_EP_EVENT_PENDING EHEA_BMASK_IBM(0, 0)

#define CQTEMM_OFFSET(x) offsetof(struct ehea_cqtemm, x)

struct ehea_cqtemm {
	u64 cqx_hcr;
	u64 cqx_c;
	u64 cqx_herr;
	u64 cqx_aer;
	u64 cqx_ptp;
	u64 cqx_tp;
	u64 cqx_fec;
	u64 cqx_feca;
	u64 cqx_ep;
	u64 cqx_eq;
	u64 reserved1;
	u64 cqx_n0;
	u64 cqx_n1;
	u64 reserved2[(0x1000 - 0x60) / 8];
};

#define EQTEMM_OFFSET(x) offsetof(struct ehea_eqtemm, x)

struct ehea_eqtemm {
	u64 eqx_hcr;
	u64 eqx_c;
	u64 eqx_herr;
	u64 eqx_aer;
	u64 eqx_ptp;
	u64 eqx_tp;
	u64 eqx_ssba;
	u64 eqx_psba;
	u64 eqx_cec;
	u64 eqx_meql;
	u64 eqx_xisbi;
	u64 eqx_xisc;
	u64 eqx_it;
};

/*
 * These access functions will be changed when the discussion about
 * the new access methods for POWER has settled.
 */

static inline u64 epa_load(struct h_epa epa, u32 offset)
{
	return __raw_readq((void __iomem *)(epa.addr + offset));
}

static inline void epa_store(struct h_epa epa, u32 offset, u64 value)
{
	__raw_writeq(value, (void __iomem *)(epa.addr + offset));
	epa_load(epa, offset);	/* synchronize explicitly to eHEA */
}

static inline void epa_store_acc(struct h_epa epa, u32 offset, u64 value)
{
	__raw_writeq(value, (void __iomem *)(epa.addr + offset));
}
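
/*
 * epa_store() reads the register back so the write is known to have
 * reached the adapter before the caller continues; epa_store_acc()
 * issues the MMIO write without that read-back and is what the
 * doorbell/adder helpers below use.
 */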

#define epa_store_cq(epa, offset, value)\
	epa_store(epa, CQTEMM_OFFSET(offset), value)
#define epa_load_cq(epa, offset)\
	epa_load(epa, CQTEMM_OFFSET(offset))

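/*
 * Doorbell helpers: each ehea_update_*() call tells the adapter how
 * many new entries have been posted by writing nr_wqes (or nr_cqes for
 * ehea_update_feca()) to the corresponding adder register.
 *
 * Illustrative use (a sketch, not code from this header): after posting
 * 'count' new WQEs to receive queue 1 the driver would call
 *
 *	ehea_update_rq1a(qp, count);
 */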
static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes)
{
	struct h_epa epa = qp->epas.kernel;
	epa_store_acc(epa, QPTEMM_OFFSET(qpx_sqa),
		      EHEA_BMASK_SET(QPX_SQA_VALUE, nr_wqes));
}

static inline void ehea_update_rq3a(struct ehea_qp *qp, u16 nr_wqes)
{
	struct h_epa epa = qp->epas.kernel;
	epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq3a),
		      EHEA_BMASK_SET(QPX_RQ3A_VALUE, nr_wqes));
}

static inline void ehea_update_rq2a(struct ehea_qp *qp, u16 nr_wqes)
{
	struct h_epa epa = qp->epas.kernel;
	epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq2a),
		      EHEA_BMASK_SET(QPX_RQ2A_VALUE, nr_wqes));
}

static inline void ehea_update_rq1a(struct ehea_qp *qp, u16 nr_wqes)
{
	struct h_epa epa = qp->epas.kernel;
	epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq1a),
		      EHEA_BMASK_SET(QPX_RQ1A_VALUE, nr_wqes));
}

static inline void ehea_update_feca(struct ehea_cq *cq, u32 nr_cqes)
{
	struct h_epa epa = cq->epas.kernel;
	epa_store_acc(epa, CQTEMM_OFFSET(cqx_feca),
		      EHEA_BMASK_SET(CQX_FECADDER, nr_cqes));
}

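/*
 * CQ event helpers: ehea_reset_cq_n1() sets CQX_N1_GENERATE_COMP_EVENT
 * to re-arm completion event generation, ehea_reset_cq_ep() clears the
 * CQX_EP_EVENT_PENDING bit.  (Description inferred from the bit names
 * used below; see the callers in the driver for the exact usage.)
 */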
static inline void ehea_reset_cq_n1(struct ehea_cq *cq)
{
	struct h_epa epa = cq->epas.kernel;
	epa_store_cq(epa, cqx_n1,
		     EHEA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, 1));
}

static inline void ehea_reset_cq_ep(struct ehea_cq *my_cq)
{
	struct h_epa epa = my_cq->epas.kernel;
	epa_store_acc(epa, CQTEMM_OFFSET(cqx_ep),
		      EHEA_BMASK_SET(CQX_EP_EVENT_PENDING, 0));
}

#endif	/* __EHEA_HW_H__ */