// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2021 Marvell International Ltd.
 */

#include <linux/types.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_iro_hsi.h"
#include "qed_reg_addr.h"

#define CDU_VALIDATION_DEFAULT_CFG	CDU_CONTEXT_VALIDATION_DEFAULT_CFG

static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
	{400, 336, 352, 368, 304, 384, 416, 352},	/* region 3 offsets */
	{528, 496, 416, 512, 448, 512, 544, 480},	/* region 4 offsets */
	{608, 544, 496, 576, 576, 592, 624, 560}	/* region 5 offsets */
};

static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
	{240, 240, 112, 0, 0, 0, 0, 96}	/* region 1 offsets */
};

/* General constants */
#define QM_PQ_MEM_4KB(pq_size)	(pq_size ? DIV_ROUND_UP((pq_size + 1) *	\
							QM_PQ_ELEMENT_SIZE, \
							0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size)	(pq_size ? DIV_ROUND_UP(pq_size, \
								0x100) - 1 : 0)
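/* Worked example (illustrative; assumes QM_PQ_ELEMENT_SIZE is 4 bytes, as
 * defined in the HSI headers): for pq_size = 2048 CIDs,
 * QM_PQ_MEM_4KB(2048) = DIV_ROUND_UP(2049 * 4, 0x1000) = 3 pages of 4KB, and
 * QM_PQ_SIZE_256B(2048) = DIV_ROUND_UP(2048, 0x100) - 1 = 7 (the size field
 * is zero-based). A pq_size of 0 yields 0 in both macros.
 */
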
#define QM_INVALID_PQ_ID		0xffff

/* Max link speed (in Mbps) */
#define QM_MAX_LINK_SPEED		100000

/* Feature enable */
#define QM_BYPASS_EN			1
#define QM_BYTE_CRD_EN			1

/* Initial VOQ byte credit */
#define QM_INITIAL_VOQ_BYTE_CRD		98304
/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF		4

/* VOQ constants */
#define MAX_NUM_VOQS			(MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2)
#define VOQS_BIT_MASK			(BIT(MAX_NUM_VOQS) - 1)

/* WFQ constants */

/* PF WFQ increment value, 0x9000 = 4*9*1024 */
#define QM_PF_WFQ_INC_VAL(weight)	((weight) * 0x9000)

/* PF WFQ Upper bound, in MB, 10 * burst size of 1ms in 50Gbps */
#define QM_PF_WFQ_UPPER_BOUND		62500000

/* PF WFQ max increment value, 0.7 * upper bound */
#define QM_PF_WFQ_MAX_INC_VAL		((QM_PF_WFQ_UPPER_BOUND * 7) / 10)

/* Number of VOQs in E5 PF WFQ credit register (QmWfqCrd) */
#define QM_PF_WFQ_CRD_E5_NUM_VOQS	16

/* VP WFQ increment value */
#define QM_VP_WFQ_INC_VAL(weight)	((weight) * QM_VP_WFQ_MIN_INC_VAL)

/* VP WFQ min increment value */
#define QM_VP_WFQ_MIN_INC_VAL		10800

/* VP WFQ max increment value, 2^30 */
#define QM_VP_WFQ_MAX_INC_VAL		0x40000000

/* VP WFQ bypass threshold */
#define QM_VP_WFQ_BYPASS_THRESH		(QM_VP_WFQ_MIN_INC_VAL - 100)

/* VP RL credit task cost */
#define QM_VP_RL_CRD_TASK_COST		9700

/* Bit of VOQ in VP WFQ PQ map */
#define QM_VP_WFQ_PQ_VOQ_SHIFT		0

/* Bit of PF in VP WFQ PQ map */
#define QM_VP_WFQ_PQ_PF_SHIFT		5

/* RL constants */

/* Period in us */
#define QM_RL_PERIOD			5

/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M		(25 * QM_RL_PERIOD)

/* RL increment value - rate is specified in mbps */
#define QM_RL_INC_VAL(rate) ({	\
	typeof(rate) __rate = (rate); \
	max_t(u32, \
	      (u32)(((__rate ? __rate : 100000) * \
		     QM_RL_PERIOD * 101) / (8 * 100)), 1); })
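/* Worked example (illustrative): for rate = 25000 Mbps,
 * QM_RL_INC_VAL(25000) = max((25000 * 5 * 101) / 800, 1) = 15781 credits per
 * RL period. A rate of 0 falls back to 100000 Mbps (the max link speed) and
 * yields 63125.
 */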

/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_PF_RL_UPPER_BOUND		62500000

/* Max PF RL increment value is 0.7 * upper bound */
#define QM_PF_RL_MAX_INC_VAL		((QM_PF_RL_UPPER_BOUND * 7) / 10)

/* QCN RL Upper bound, speed is in Mbps */
#define QM_GLOBAL_RL_UPPER_BOUND(speed)	((u32)max_t( \
	u32, \
	(u32)(((speed) * QM_RL_PERIOD * 101) / (8 * 100)), \
	QM_VP_RL_CRD_TASK_COST + 1000))

/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF		1
#define QM_OPPOR_FW_STOP_DEF		0
#define QM_OPPOR_PQ_EMPTY_DEF		1

/* Command Queue constants */

/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES		150

#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
		      PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))

#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
		      PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))

/* Returns the VOQ line credit for the specified number of PBF command lines.
 * PBF lines are specified in 256b units.
 */
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
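/* Worked example (illustrative): for the pure LB allocation of
 * PBF_CMDQ_PURE_LB_LINES = 150 lines,
 * QM_VOQ_LINE_CRD(150) = ((150 - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT,
 * i.e. 292 credits with the sign bit set, as the credit registers expect.
 */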

/* BTB: blocks constants (block size = 256B) */

/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS		38

/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS		BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR		10

/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO		7

/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH		32
#define QM_STOP_CMD_ADDR		2
#define QM_STOP_CMD_STRUCT_SIZE		2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
#define QM_STOP_CMD_PAUSE_MASK_MASK	-1
#define QM_STOP_CMD_GROUP_ID_OFFSET	1
#define QM_STOP_CMD_GROUP_ID_SHIFT	16
#define QM_STOP_CMD_GROUP_ID_MASK	15
#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
#define QM_STOP_CMD_PQ_TYPE_MASK	1
#define QM_STOP_CMD_MAX_POLL_COUNT	100
#define QM_STOP_CMD_POLL_PERIOD_US	500

/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd)	cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
		  cmd ## _ ## field, \
		  value)

#define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, rl_valid, \
			  rl_id, ext_voq, wrr) \
	do { \
		u32 __reg = 0; \
		\
		BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg)); \
		memset(&(map), 0, sizeof(map)); \
		SET_FIELD(__reg, QM_RF_PQ_MAP_PQ_VALID, 1); \
		SET_FIELD(__reg, QM_RF_PQ_MAP_RL_VALID, \
			  !!(rl_valid)); \
		SET_FIELD(__reg, QM_RF_PQ_MAP_VP_PQ_ID, (vp_pq_id)); \
		SET_FIELD(__reg, QM_RF_PQ_MAP_RL_ID, (rl_id)); \
		SET_FIELD(__reg, QM_RF_PQ_MAP_VOQ, (ext_voq)); \
		SET_FIELD(__reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, \
			  (wrr)); \
		\
		STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
			     __reg); \
		(map).reg = cpu_to_le32(__reg); \
	} while (0)

#define WRITE_PQ_INFO_TO_RAM	1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
	 ((rl_valid ? 1 : 0) << 22) | (((rl) & 255) << 24) | \
	 (((rl) >> 8) << 9))
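/* Field layout packed by PQ_INFO_ELEMENT (mirrors the macro above):
 * VP PQ ID in the low bits, PF ID at bit 12, TC at bit 16, port at bit 20,
 * RL-valid flag at bit 22, the low 8 bits of the RL ID at bit 24, and any
 * RL ID bits above 8 folded in starting at bit 9. Illustrative example:
 * PQ_INFO_ELEMENT(5, 2, 3, 1, 1, 0x12) =
 * (5 << 0) | (2 << 12) | (3 << 16) | (1 << 20) | (1 << 22) | (0x12 << 24).
 */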

#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
	(XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
	 XSTORM_PQ_INFO_OFFSET(pq_id))

static const char * const s_protocol_types[] = {
	"PROTOCOLID_ISCSI", "PROTOCOLID_FCOE", "PROTOCOLID_ROCE",
	"PROTOCOLID_CORE", "PROTOCOLID_ETH", "PROTOCOLID_IWARP",
	"PROTOCOLID_TOE", "PROTOCOLID_PREROCE", "PROTOCOLID_COMMON",
	"PROTOCOLID_TCP", "PROTOCOLID_RDMA", "PROTOCOLID_SCSI",
};

static const char *s_ramrod_cmd_ids[][28] = {
	{
	"ISCSI_RAMROD_CMD_ID_UNUSED", "ISCSI_RAMROD_CMD_ID_INIT_FUNC",
	 "ISCSI_RAMROD_CMD_ID_DESTROY_FUNC",
	 "ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN",
	 "ISCSI_RAMROD_CMD_ID_UPDATE_CONN",
	 "ISCSI_RAMROD_CMD_ID_TERMINATION_CONN",
	 "ISCSI_RAMROD_CMD_ID_CLEAR_SQ", "ISCSI_RAMROD_CMD_ID_MAC_UPDATE",
	 "ISCSI_RAMROD_CMD_ID_CONN_STATS", },
	{ "FCOE_RAMROD_CMD_ID_INIT_FUNC", "FCOE_RAMROD_CMD_ID_DESTROY_FUNC",
	 "FCOE_RAMROD_CMD_ID_STAT_FUNC",
	 "FCOE_RAMROD_CMD_ID_OFFLOAD_CONN",
	 "FCOE_RAMROD_CMD_ID_TERMINATE_CONN", },
	{ "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT",
	 "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR",
	 "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ",
	 "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ",
	 "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ",
	 "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING",
	 "RDMA_RAMROD_STOP_NS_TRACKING", "ROCE_RAMROD_CREATE_QP",
	 "ROCE_RAMROD_MODIFY_QP", "ROCE_RAMROD_QUERY_QP",
	 "ROCE_RAMROD_DESTROY_QP", "ROCE_RAMROD_CREATE_UD_QP",
	 "ROCE_RAMROD_DESTROY_UD_QP", "ROCE_RAMROD_FUNC_UPDATE",
	 "ROCE_RAMROD_SUSPEND_QP", "ROCE_RAMROD_QUERY_SUSPENDED_QP",
	 "ROCE_RAMROD_CREATE_SUSPENDED_QP", "ROCE_RAMROD_RESUME_QP",
	 "ROCE_RAMROD_SUSPEND_UD_QP", "ROCE_RAMROD_RESUME_UD_QP",
	 "ROCE_RAMROD_CREATE_SUSPENDED_UD_QP", "ROCE_RAMROD_FLUSH_DPT_QP", },
	{ "CORE_RAMROD_UNUSED", "CORE_RAMROD_RX_QUEUE_START",
	 "CORE_RAMROD_TX_QUEUE_START", "CORE_RAMROD_RX_QUEUE_STOP",
	 "CORE_RAMROD_TX_QUEUE_STOP",
	 "CORE_RAMROD_RX_QUEUE_FLUSH",
	 "CORE_RAMROD_TX_QUEUE_UPDATE", "CORE_RAMROD_QUEUE_STATS_QUERY", },
	{ "ETH_RAMROD_UNUSED", "ETH_RAMROD_VPORT_START",
	 "ETH_RAMROD_VPORT_UPDATE", "ETH_RAMROD_VPORT_STOP",
	 "ETH_RAMROD_RX_QUEUE_START", "ETH_RAMROD_RX_QUEUE_STOP",
	 "ETH_RAMROD_TX_QUEUE_START", "ETH_RAMROD_TX_QUEUE_STOP",
	 "ETH_RAMROD_FILTERS_UPDATE", "ETH_RAMROD_RX_QUEUE_UPDATE",
	 "ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION",
	 "ETH_RAMROD_RX_ADD_OPENFLOW_FILTER",
	 "ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER",
	 "ETH_RAMROD_RX_ADD_UDP_FILTER",
	 "ETH_RAMROD_RX_DELETE_UDP_FILTER",
	 "ETH_RAMROD_RX_CREATE_GFT_ACTION",
	 "ETH_RAMROD_RX_UPDATE_GFT_FILTER", "ETH_RAMROD_TX_QUEUE_UPDATE",
	 "ETH_RAMROD_RGFS_FILTER_ADD", "ETH_RAMROD_RGFS_FILTER_DEL",
	 "ETH_RAMROD_TGFS_FILTER_ADD", "ETH_RAMROD_TGFS_FILTER_DEL",
	 "ETH_RAMROD_GFS_COUNTERS_REPORT_REQUEST", },
	{ "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT",
	 "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR",
	 "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ",
	 "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ",
	 "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ",
	 "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING",
	 "RDMA_RAMROD_STOP_NS_TRACKING",
	 "IWARP_RAMROD_CMD_ID_TCP_OFFLOAD",
	 "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD",
	 "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR",
	 "IWARP_RAMROD_CMD_ID_CREATE_QP", "IWARP_RAMROD_CMD_ID_QUERY_QP",
	 "IWARP_RAMROD_CMD_ID_MODIFY_QP",
	 "IWARP_RAMROD_CMD_ID_DESTROY_QP",
	 "IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD", },
	{ NULL }, /*TOE*/
	{ NULL }, /*PREROCE*/
	{ "COMMON_RAMROD_UNUSED", "COMMON_RAMROD_PF_START",
	 "COMMON_RAMROD_PF_STOP", "COMMON_RAMROD_VF_START",
	 "COMMON_RAMROD_VF_STOP", "COMMON_RAMROD_PF_UPDATE",
	 "COMMON_RAMROD_RL_UPDATE", "COMMON_RAMROD_EMPTY", }
};

/******************** INTERNAL IMPLEMENTATION *********************/

/* Returns the external VOQ number */
static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn,
			  u8 port_id, u8 tc, u8 max_phys_tcs_per_port)
{
	if (tc == PURE_LB_TC)
		return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id;
	else
		return port_id * max_phys_tcs_per_port + tc;
}
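
/* Illustrative mapping (values depend on the HSI constants): with 4 physical
 * TCs per port, port 1 / TC 2 maps to VOQ 1 * 4 + 2 = 6, while the pure LB TC
 * of any port maps past all physical VOQs, at
 * NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id.
 */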

/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		u8 num_ext_voqs = MAX_NUM_VOQS;
		u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;

		/* Enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (u32)voq_bit_mask);

		/* Write RL period */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_PF_RL_UPPER_BOUND);
	}
}

/* Prepare PF WFQ enable/disable runtime init values */
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_PF_WFQ_UPPER_BOUND);
}

/* Prepare global RL enable/disable runtime init values */
static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     global_rl_en ? 1 : 0);
	if (global_rl_en) {
		/* Write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_GLOBAL_RL_UPPER_BOUND(10000) - 1);
	}
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_VP_WFQ_BYPASS_THRESH);
}

/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
				       u8 ext_voq, u16 cmdq_lines)
{
	u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);

	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
}

/* Prepare runtime init values to allocate PBF command queue lines. */
static void
qed_cmdq_lines_rt_init(struct qed_hwfn *p_hwfn,
		       u8 max_ports_per_engine,
		       u8 max_phys_tcs_per_port,
		       struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u8 tc, ext_voq, port_id, num_tcs_in_port;
	u8 num_ext_voqs = MAX_NUM_VOQS;

	/* Clear PBF lines of all VOQs */
	for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u16 phys_lines, phys_lines_per_tc;

		if (!port_params[port_id].active)
			continue;

		/* Find number of command queue lines to divide between the
		 * active physical TCs.
		 */
		phys_lines = port_params[port_id].num_pbf_cmd_lines;
		phys_lines -= PBF_CMDQ_PURE_LB_LINES;

		/* Find #lines per active physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < max_phys_tcs_per_port; tc++)
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;
		phys_lines_per_tc = phys_lines / num_tcs_in_port;

		/* Init registers per active TC */
		for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
			ext_voq = qed_get_ext_voq(p_hwfn,
						  port_id,
						  tc, max_phys_tcs_per_port);
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				qed_cmdq_lines_voq_rt_init(p_hwfn,
							   ext_voq,
							   phys_lines_per_tc);
		}

		/* Init registers for pure LB TC */
		ext_voq = qed_get_ext_voq(p_hwfn,
					  port_id,
					  PURE_LB_TC, max_phys_tcs_per_port);
		qed_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
					   PBF_CMDQ_PURE_LB_LINES);
	}
}

/* Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *    B - BTB blocks for this port
 *    C - Number of physical TCs for this port
 * 2. Calculation:
 *    a. 38 blocks (9700B jumbo frame) are allocated for global per port
 *       headroom.
 *    b. B = B - 38 (remainder after global headroom allocation).
 *    c. MAX(38, B / (C + 0.7)) blocks are allocated for the pure LB VOQ.
 *    d. B = B - MAX(38, B / (C + 0.7)) (remainder after pure LB allocation).
 *    e. B / C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
static void
qed_btb_blocks_rt_init(struct qed_hwfn *p_hwfn,
		       u8 max_ports_per_engine,
		       u8 max_phys_tcs_per_port,
		       struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, ext_voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (!port_params[port_id].active)
			continue;

		/* Subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks -
				BTB_HEADROOM_BLOCKS;

		/* Find blocks per physical TC. Use factor to avoid floating
		 * arithmetic.
		 */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;

		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
				  BTB_PURE_LB_RATIO);
		pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) /
			      num_tcs_in_port;

		/* Init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1) {
				ext_voq =
					qed_get_ext_voq(p_hwfn,
							port_id,
							tc,
							max_phys_tcs_per_port);
				STORE_RT_REG(p_hwfn,
					     PBF_BTB_GUARANTEED_RT_OFFSET
					     (ext_voq), phys_blocks);
			}
		}

		/* Init pure LB TC */
		ext_voq = qed_get_ext_voq(p_hwfn,
					  port_id,
					  PURE_LB_TC, max_phys_tcs_per_port);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
			     pure_lb_blocks);
	}
}
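
/* Worked example tracing the integer arithmetic above (hypothetical values):
 * a port with num_btb_blocks = 1100 and two active physical TCs gives
 * usable_blocks = 1100 - 38 = 1062,
 * pure_lb_blocks = max(38, ((1062 * 10) / (2 * 10 + 7)) / 10) = max(38, 39)
 * = 39, and phys_blocks = (1062 - 39) / 2 = 511 guaranteed blocks per
 * physical TC.
 */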

/* Prepare runtime init values for the specified RL.
 * Set max link speed (100Gbps) per rate limiter.
 * Return -1 on error.
 */
static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
{
	u32 upper_bound = QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
			  (u32)QM_RL_CRD_REG_SIGN_BIT;
	u32 inc_val;
	u16 rl_id;

	/* Go over all global RLs */
	for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) {
		inc_val = QM_RL_INC_VAL(QM_MAX_LINK_SPEED);

		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
			     upper_bound);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, inc_val);
	}

	return 0;
}

/* Returns the upper bound for the specified Vport RL parameters.
 * link_speed is in Mbps.
 * Returns 0 in case of error.
 */
static u32 qed_get_vport_rl_upper_bound(enum init_qm_rl_type vport_rl_type,
					u32 link_speed)
{
	switch (vport_rl_type) {
	case QM_RL_TYPE_NORMAL:
		return QM_INITIAL_VOQ_BYTE_CRD;
	case QM_RL_TYPE_QCN:
		return QM_GLOBAL_RL_UPPER_BOUND(link_speed);
	default:
		return 0;
	}
}

/* Prepare VPORT RL runtime init values.
 * Return -1 on error.
 */
static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
				u16 start_rl,
				u16 num_rls,
				u32 link_speed,
				struct init_qm_rl_params *rl_params)
{
	u16 i, rl_id;

	if (num_rls && start_rl + num_rls >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn, "Invalid rate limiter configuration\n");
		return -1;
	}

	/* Go over all PF VPORTs */
	for (i = 0, rl_id = start_rl; i < num_rls; i++, rl_id++) {
		u32 upper_bound, inc_val;

		upper_bound =
			qed_get_vport_rl_upper_bound((enum init_qm_rl_type)
						     rl_params[i].vport_rl_type,
						     link_speed);

		inc_val =
			QM_RL_INC_VAL(rl_params[i].vport_rl ?
				      rl_params[i].vport_rl : link_speed);
		if (inc_val > upper_bound) {
			DP_NOTICE(p_hwfn,
				  "Invalid RL rate limit configuration\n");
			return -1;
		}

		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
			     upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id,
			     inc_val);
	}

	return 0;
}

/* Prepare Tx PQ mapping runtime init values for the specified PF */
static int qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_qm_pf_rt_init_params *p_params,
				 u32 base_mem_addr_4kb)
{
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
	u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
	struct init_qm_pq_params *pq_params = p_params->pq_params;
	u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;

	num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;

	first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
	last_pq_group = (p_params->start_pq + num_pqs - 1) /
			QM_PF_QUEUE_GROUP_SIZE;

	pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
	vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(p_params->pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_vf_cids));

	/* Go over all Tx PQs */
	for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
		u16 *p_first_tx_pq_id, vport_id_in_pf;
		struct qm_rf_pq_map tx_pq_map;
		u8 tc_id = pq_params[i].tc_id;
		bool is_vf_pq;
		u8 ext_voq;

		ext_voq = qed_get_ext_voq(p_hwfn,
					  pq_params[i].port_id,
					  tc_id,
					  p_params->max_phys_tcs_per_port);
		is_vf_pq = (i >= p_params->num_pf_pqs);

		/* Update first Tx PQ of VPORT/TC */
		vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
		p_first_tx_pq_id =
			&vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
		if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
			u32 map_val =
				(ext_voq << QM_VP_WFQ_PQ_VOQ_SHIFT) |
				(p_params->pf_id << QM_VP_WFQ_PQ_PF_SHIFT);

			/* Create new VP PQ */
			*p_first_tx_pq_id = pq_id;

			/* Map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET +
				     *p_first_tx_pq_id,
				     map_val);
		}

		/* Prepare PQ map entry */
		QM_INIT_TX_PQ_MAP(p_hwfn,
				  tx_pq_map,
				  pq_id,
				  *p_first_tx_pq_id,
				  pq_params[i].rl_valid,
				  pq_params[i].rl_id,
				  ext_voq, pq_params[i].wrr_group);

		/* Set PQ base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry (64 bit) */
		if (p_params->is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn,
					     QM_REG_PTRTBLTX_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		/* Write PQ info to RAM */
		if (WRITE_PQ_INFO_TO_RAM != 0) {
			u32 pq_info = 0;

			pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id,
						  p_params->pf_id,
						  tc_id,
						  pq_params[i].port_id,
						  pq_params[i].rl_valid,
						  pq_params[i].rl_id);
			qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
			       pq_info);
		}

		/* If VF PQ, add indication to PQ VF mask */
		if (is_vf_pq) {
			tx_pq_vf_mask[pq_id /
				      QM_PF_QUEUE_GROUP_SIZE] |=
				BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* Store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++)
		if (tx_pq_vf_mask[i])
			STORE_RT_REG(p_hwfn,
				     QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
				     tx_pq_vf_mask[i]);

	return 0;
}

/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				     u8 pf_id,
				     bool is_pf_loading,
				     u32 num_pf_cids,
				     u32 num_tids, u32 base_mem_addr_4kb)
{
	u32 pq_size, pq_mem_4kb, mem_addr_4kb;
	u16 i, j, pq_id, pq_group;

	/* A single other PQ group is used in each PF, where PQ group i is used
	 * in PF i.
	 */
	pq_group = pf_id;
	pq_size = num_pf_cids + num_tids;
	pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)(pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));

	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		/* Set PQ base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry */
		if (is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn,
					     QM_REG_PTRTBLOTHER_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		mem_addr_4kb += pq_mem_4kb;
	}
}

/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      struct qed_qm_pf_rt_init_params *p_params)
{
	u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	struct init_qm_pq_params *pq_params = p_params->pq_params;
	u32 inc_val, crd_reg_offset;
	u8 ext_voq;
	u16 i;

	inc_val = QM_PF_WFQ_INC_VAL(p_params->pf_wfq);
	if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	for (i = 0; i < num_tx_pqs; i++) {
		ext_voq = qed_get_ext_voq(p_hwfn,
					  pq_params[i].port_id,
					  pq_params[i].tc_id,
					  p_params->max_phys_tcs_per_port);
		crd_reg_offset =
			(p_params->pf_id < MAX_NUM_PFS_BB ?
			 QM_REG_WFQPFCRD_RT_OFFSET :
			 QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
			ext_voq * MAX_NUM_PFS_BB +
			(p_params->pf_id % MAX_NUM_PFS_BB);
		OVERWRITE_RT_REG(p_hwfn,
				 crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	}

	STORE_RT_REG(p_hwfn,
		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
		     QM_PF_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
		     inc_val);

	return 0;
}

/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}

	STORE_RT_REG(p_hwfn,
		     QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn,
		     QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);

	return 0;
}

/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      u16 num_vports,
			      struct init_qm_vport_params *vport_params)
{
	u16 vport_pq_id, wfq, i;
	u32 inc_val;
	u8 tc;

	/* Go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		/* Each VPORT can have several VPORT PQ IDs for various TCs */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			/* Check if VPORT/TC is valid */
			vport_pq_id = vport_params[i].first_tx_pq_id[tc];
			if (vport_pq_id == QM_INVALID_PQ_ID)
				continue;

			/* Find WFQ weight (per VPORT or per VPORT+TC) */
			wfq = vport_params[i].wfq;
			wfq = wfq ? wfq : vport_params[i].tc_wfq[tc];
			inc_val = QM_VP_WFQ_INC_VAL(wfq);
			if (inc_val > QM_VP_WFQ_MAX_INC_VAL) {
				DP_NOTICE(p_hwfn,
					  "Invalid VPORT WFQ weight configuration\n");
				return -1;
			}

			/* Config registers */
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
				     vport_pq_id,
				     (u32)QM_WFQ_CRD_REG_SIGN_BIT);
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPUPPERBOUND_RT_OFFSET +
				     vport_pq_id,
				     inc_val | QM_WFQ_CRD_REG_SIGN_BIT);
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET +
				     vport_pq_id, inc_val);
		}
	}

	return 0;
}

static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
	     i++) {
		udelay(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* Check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Timeout when waiting for QM SDM command ready signal\n");
		return false;
	}

	return true;
}

static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
	if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}

/******************** INTERFACE IMPLEMENTATION *********************/

u32 qed_qm_pf_mem_size(u32 num_pf_cids,
		       u32 num_vf_cids,
		       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
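
/* Worked example (illustrative; assumes QM_PQ_ELEMENT_SIZE is 4 bytes):
 * num_pf_cids = 2048, num_vf_cids = 1024, num_tids = 4096, num_pf_pqs = 8,
 * num_vf_pqs = 16 gives QM_PQ_MEM_4KB(2048) = 3, QM_PQ_MEM_4KB(1024) = 2 and
 * QM_PQ_MEM_4KB(6144) = 7, so the PF needs 3 * 8 + 2 * 16 + 7 * 4 = 84
 * pages of 4KB for its PQ memory.
 */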

int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
			  struct qed_qm_common_rt_init_params *p_params)
{
	u32 mask = 0;

	/* Init AFullOprtnstcCrdMask */
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,
		  QM_OPPOR_LINE_VOQ_DEF);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ,
		  p_params->pf_wfq_en ? 1 : 0);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ,
		  p_params->vport_wfq_en ? 1 : 0);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL,
		  p_params->pf_rl_en ? 1 : 0);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL,
		  p_params->global_rl_en ? 1 : 0);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);
	SET_FIELD(mask,
		  QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF);
	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);

	/* Enable/disable PF RL */
	qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);

	/* Enable/disable PF WFQ */
	qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);

	/* Enable/disable global RL */
	qed_enable_global_rl(p_hwfn, p_params->global_rl_en);

	/* Enable/disable VPORT WFQ */
	qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);

	/* Init PBF CMDQ line credit */
	qed_cmdq_lines_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);

	/* Init BTB blocks in PBF */
	qed_btb_blocks_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);

	qed_global_rl_rt_init(p_hwfn);

	return 0;
}

int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      struct qed_qm_pf_rt_init_params *p_params)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
					       p_params->num_tids) *
				 QM_OTHER_PQS_PER_PF;
	u16 i;
	u8 tc;

	/* Clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < p_params->num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* Map Other PQs (if any) */
	qed_other_pq_map_rt_init(p_hwfn,
				 p_params->pf_id,
				 p_params->is_pf_loading, p_params->num_pf_cids,
				 p_params->num_tids, 0);

	/* Map Tx PQs */
	if (qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb))
		return -1;

	/* Init PF WFQ */
	if (p_params->pf_wfq)
		if (qed_pf_wfq_rt_init(p_hwfn, p_params))
			return -1;

	/* Init PF RL */
	if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
		return -1;

	/* Init VPORT WFQ */
	if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
		return -1;

	/* Set VPORT RL */
	if (qed_vport_rl_rt_init(p_hwfn, p_params->start_rl,
				 p_params->num_rls, p_params->link_speed,
				 p_params->rl_params))
		return -1;

	return 0;
}

int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val = QM_PF_WFQ_INC_VAL(pf_wfq);

	if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);

	return 0;
}

int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}

	qed_wr(p_hwfn,
	       p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}

int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq)
{
	int result = 0;
	u16 vport_pq_id;
	u8 tc;

	for (tc = 0; tc < NUM_OF_TCS && !result; tc++) {
		vport_pq_id = first_tx_pq_id[tc];
		if (vport_pq_id != QM_INVALID_PQ_ID)
			result = qed_init_vport_tc_wfq(p_hwfn, p_ptt,
						       vport_pq_id, wfq);
	}

	return result;
}

int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			  u16 first_tx_pq_id, u16 wfq)
{
	u32 inc_val;

	if (first_tx_pq_id == QM_INVALID_PQ_ID)
		return -1;

	inc_val = QM_VP_WFQ_INC_VAL(wfq);
	if (!inc_val || inc_val > QM_VP_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPCRD + first_tx_pq_id * 4,
	       (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPUPPERBOUND + first_tx_pq_id * 4,
	       inc_val | QM_WFQ_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + first_tx_pq_id * 4,
	       inc_val);

	return 0;
}

int qed_init_global_rl(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit,
		       enum init_qm_rl_type vport_rl_type)
{
	u32 inc_val, upper_bound;

	upper_bound =
		(vport_rl_type == QM_RL_TYPE_QCN) ?
		QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) :
		QM_INITIAL_VOQ_BYTE_CRD;
	inc_val = QM_RL_INC_VAL(rate_limit);
	if (inc_val > upper_bound) {
		DP_NOTICE(p_hwfn, "Invalid VPORT rate limit configuration.\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn,
	       p_ptt,
	       QM_REG_RLGLBLUPPERBOUND + rl_id * 4,
	       upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);

	return 0;
}

bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  bool is_release_cmd,
			  bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq, pq_id;

	last_pq = start_pq + num_pqs - 1;

	/* Set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	/* Go over requested PQs */
	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* Set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= BIT((pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* If last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr,
					 QM_STOP_CMD, PAUSE_MASK, pq_mask);
			QM_CMD_SET_FIELD(cmd_arr,
					 QM_STOP_CMD,
					 GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
					     cmd_arr[0], cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}

	return true;
}
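
/* Illustrative usage sketch (not taken from this file; the PQ range is a
 * hypothetical example): pause Tx PQs 0..63, then release them once the
 * relevant queues have been drained:
 *
 *	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 0, 64))
 *		return -EBUSY;
 *	...
 *	qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 0, 64);
 */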

#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
	do { \
		typeof(var) *__p_var = &(var); \
		typeof(offset) __offset = offset; \
		*__p_var = (*__p_var & ~BIT(__offset)) | \
			   ((enable) ? BIT(__offset) : 0); \
	} while (0)
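
/* Illustrative example: with var = 0x7 (bits 0..2 set),
 * SET_TUNNEL_TYPE_ENABLE_BIT(var, 1, 0) clears bit 1 and leaves var = 0x5,
 * while SET_TUNNEL_TYPE_ENABLE_BIT(var, 3, 1) would set bit 3 instead.
 */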

#define PRS_ETH_TUNN_OUTPUT_FORMAT	0xF4DAB910
#define PRS_ETH_OUTPUT_FORMAT		0xFFFF4910

#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
	do { \
		u32 i; \
		\
		for (i = 0; i < (arr_size); i++) \
			qed_wr(dev, ptt, \
			       ((addr) + (4 * i)), \
			       ((u32 *)&(arr))[i]); \
	} while (0)

/**
 * qed_dmae_to_grc() - Internal function for writing from host to
 * wide-bus registers (split registers are not supported yet).
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window used for writing the registers.
 * @p_data: Pointer to source data.
 * @addr: Destination register address.
 * @len_in_dwords: Data length in dwords (u32).
 *
 * Return: Length of the written data in dwords (u32) or -1 on invalid
 * input.
 */
static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			   __le32 *p_data, u32 addr, u32 len_in_dwords)
{
	struct qed_dmae_params params = { 0 };
	u32 *data_cpu;
	int rc;

	if (!p_data)
		return -1;

	/* Set DMAE params */
	SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);

	/* Execute DMAE command */
	rc = qed_dmae_host2grc(p_hwfn, p_ptt,
			       (u64)(uintptr_t)(p_data),
			       addr, len_in_dwords, &params);

	/* If not read using DMAE, read using GRC */
	if (rc) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_DEBUG,
			   "Failed writing to chip using DMAE, using GRC instead\n");

		/* Swap to CPU byteorder and write to registers using GRC */
		data_cpu = (__force u32 *)p_data;
		le32_to_cpu_array(data_cpu, len_in_dwords);

		ARR_REG_WR(p_hwfn, p_ptt, addr, data_cpu, len_in_dwords);
		cpu_to_le32_array(data_cpu, len_in_dwords);
	}

	return len_in_dwords;
}

void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);

	/* Update NIG register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);

	/* Update PBF register */
	qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}

void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, bool vxlan_enable)
{
	u32 reg_val;
	u8 shift;

	/* Update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE, vxlan_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ register */
	qed_wr(p_hwfn,
	       p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
}

void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			bool eth_gre_enable, bool ip_gre_enable)
{
	u32 reg_val;
	u8 shift;

	/* Update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE,
		  eth_gre_enable);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE,
		  ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
	shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ registers */
	qed_wr(p_hwfn,
	       p_ptt,
	       DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
	qed_wr(p_hwfn,
	       p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
}

void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);

	/* Update NIG register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);

	/* Update PBF register */
	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}

void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   bool eth_geneve_enable, bool ip_geneve_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE,
		  eth_geneve_enable);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE,
		  ip_geneve_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

	/* EDPM with geneve tunnel not supported in BB */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		return;

	/* Update DORQ registers */
	qed_wr(p_hwfn,
	       p_ptt,
	       DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn,
	       p_ptt,
	       DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2,
	       ip_geneve_enable ? 1 : 0);
}

#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET	3
#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT	0xC8DAB910

void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt, bool enable)
{
	u32 reg_val, cfg_mask;

	/* read PRS config register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);

	/* set VXLAN_NO_L2_ENABLE mask */
	cfg_mask = BIT(PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);

	if (enable) {
		/* set VXLAN_NO_L2_ENABLE flag */
		reg_val |= cfg_mask;

		/* update PRS FIC register */
		qed_wr(p_hwfn,
		       p_ptt,
		       PRS_REG_OUTPUT_FORMAT_4_0,
		       (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
	} else {
		/* clear VXLAN_NO_L2_ENABLE flag */
		reg_val &= ~cfg_mask;
	}

	/* write PRS config register */
	qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
}

#define T_ETH_PACKET_ACTION_GFT_EVENTID		23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR	272
#define T_ETH_PACKET_MATCH_RFS_EVENTID		25
#define PARSER_ETH_CONN_CM_HDR			0
#define CAM_LINE_SIZE				sizeof(u32)
#define RAM_LINE_SIZE				sizeof(u64)
#define REG_SIZE				sizeof(u32)

void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
{
	struct regpair ram_line = { 0 };

	/* Disable gft search for PF */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);

	/* Clean ram & cam for next gft session */

	/* Zero camline */
	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);

	/* Zero ramline */
	qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo,
			PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
			sizeof(ram_line) / REG_SIZE);
}

void qed_gft_config(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    u16 pf_id,
		    bool tcp,
		    bool udp,
		    bool ipv4, bool ipv6, enum gft_profile_type profile_type)
{
	struct regpair ram_line;
	u32 search_non_ip_as_gft;
	u32 reg_val, cam_line;
	u32 lo = 0, hi = 0;

	if (!ipv6 && !ipv4)
		DP_NOTICE(p_hwfn,
			  "gft_config: must accept at least one of - ipv4 or ipv6\n");
	if (!tcp && !udp)
		DP_NOTICE(p_hwfn,
			  "gft_config: must accept at least one of - udp or tcp\n");
	if (profile_type >= MAX_GFT_PROFILE_TYPE)
		DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n");

	/* Set RFS event ID to be awakened in Tstorm by PRS */
	reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
		  PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);

	/* Do not load context only cid in PRS on match. */
	qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);

	/* Do not use tenant ID exist bit for gft search */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);

	/* Set Cam */
	cam_line = 0;
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);

	/* Filters are per PF!! */
	SET_FIELD(cam_line,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);

	if (!(tcp && udp)) {
		SET_FIELD(cam_line,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
		if (tcp)
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_TCP_PROTOCOL);
		else
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_UDP_PROTOCOL);
	}

	if (!(ipv4 && ipv6)) {
		SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
		if (ipv4)
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV4);
		else
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV6);
	}

	/* Write characteristics to cam */
	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
	       cam_line);
	cam_line =
		qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);

	/* Write line to RAM - compare to filter 4 tuple */

	/* Search no IP as GFT */
	search_non_ip_as_gft = 0;

	/* Tunnel type */
	SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
	SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);

	if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
		SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(lo, GFT_RAM_LINE_SRC_PORT, 1);
		SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
		SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
		SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
		SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
		SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);

		/* Allow tunneled traffic without inner IP */
		search_non_ip_as_gft = 1;
	}

	ram_line.lo = cpu_to_le32(lo);
	ram_line.hi = cpu_to_le32(hi);

	qed_wr(p_hwfn,
	       p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft);
	qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo,
			PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
			sizeof(ram_line) / REG_SIZE);

	/* Set default profile so that no filter match will happen */
	ram_line.lo = cpu_to_le32(0xffffffff);
	ram_line.hi = cpu_to_le32(0x3ff);
	qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo,
			PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
			PRS_GFT_CAM_LINES_NO_MATCH,
			sizeof(ram_line) / REG_SIZE);

	/* Enable gft search */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}

DECLARE_CRC8_TABLE(cdu_crc8_table);

/* Calculate and return CDU validation byte per connection type/region/cid */
static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
	const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
	u8 crc, validation_byte = 0;
	static u8 crc8_table_valid; /* automatically initialized to 0 */
	u32 validation_string = 0;
	__be32 data_to_crc;

	if (!crc8_table_valid) {
		crc8_populate_msb(cdu_crc8_table, 0x07);
		crc8_table_valid = 1;
	}

	/* The CRC is calculated on the String-to-compress:
	 * [31:8]  = {CID[31:20],CID[11:0]}
	 * [7:4]   = Region
	 * [3:0]   = Type
	 */
	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
		validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
		validation_string |= ((region & 0xF) << 4);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
		validation_string |= (conn_type & 0xF);

	/* Convert to big-endian and calculate CRC8 */
	data_to_crc = cpu_to_be32(validation_string);
	crc = crc8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
		   CRC8_INIT_VALUE);

	/* The validation byte [7:0] is composed:
	 * for type A validation
	 * [7]   = active configuration bit
	 * [6:0] = crc[6:0]
	 *
	 * for type B validation
	 * [7]   = active configuration bit
	 * [6:3] = connection_type[3:0]
	 * [2:0] = crc[2:0]
	 */
	validation_byte |=
		((validation_cfg >>
		  CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;

	if ((validation_cfg >>
	     CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
		validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
	else
		validation_byte |= crc & 0x7F;

	return validation_byte;
}
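
/* Worked example of the string-to-compress packing above (illustrative):
 * cid = 0x12345678, region = 3, type = 4 gives
 * validation_string = 0x12300000 | (0x678 << 8) | (3 << 4) | 4 = 0x12367834,
 * which is converted to big-endian and run through CRC8 (polynomial 0x07).
 */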
1636
1637 /* Calcualte and set validation bytes for session context */
qed_calc_session_ctx_validation(void * p_ctx_mem,u16 ctx_size,u8 ctx_type,u32 cid)1638 void qed_calc_session_ctx_validation(void *p_ctx_mem,
1639 u16 ctx_size, u8 ctx_type, u32 cid)
1640 {
1641 u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
1642
1643 p_ctx = (u8 * const)p_ctx_mem;
1644 x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
1645 t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
1646 u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
1647
1648 memset(p_ctx, 0, ctx_size);
1649
1650 *x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
1651 *t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
1652 *u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
1653 }
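
/* Example usage (a sketch; "ctx", "conn_type" and "cid" stand in for a
 * real caller's context buffer, connection type and connection ID):
 *
 *	qed_calc_session_ctx_validation(ctx, sizeof(*ctx), conn_type, cid);
 *
 * This zeroes the whole context and then stamps the X/T/U-Storm
 * validation bytes at the region 3/4/5 offsets for that connection type.
 */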

/* Calculate and set validation bytes for task context */
void qed_calc_task_ctx_validation(void *p_ctx_mem,
				  u16 ctx_size, u8 ctx_type, u32 tid)
{
	u8 *p_ctx, *region1_val_ptr;

	p_ctx = (u8 * const)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	memset(p_ctx, 0, ctx_size);

	*region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
}

/* Memset session context to 0 while preserving validation bytes */
void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
	u8 x_val, t_val, u_val;

	p_ctx = (u8 * const)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	x_val = *x_val_ptr;
	t_val = *t_val_ptr;
	u_val = *u_val_ptr;

	memset(p_ctx, 0, ctx_size);

	*x_val_ptr = x_val;
	*t_val_ptr = t_val;
	*u_val_ptr = u_val;
}

/* Memset task context to 0 while preserving validation bytes */
void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *p_ctx, *region1_val_ptr;
	u8 region1_val;

	p_ctx = (u8 * const)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	region1_val = *region1_val_ptr;

	memset(p_ctx, 0, ctx_size);

	*region1_val_ptr = region1_val;
}
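
/* The two memset helpers above preserve the validation bytes because the
 * CDU checks them when a context is loaded: a plain memset() would wipe
 * them and make later context accesses fail validation (when validation
 * is enabled; see qed_enable_context_validation() below).
 */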

/* Enable and configure context validation */
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 ctx_validation;

	/* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
	qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);

	/* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);

	/* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}

const char *qed_get_protocol_type_str(u32 protocol_type)
{
	if (protocol_type >= ARRAY_SIZE(s_protocol_types))
		return "Invalid protocol type";

	return s_protocol_types[protocol_type];
}

const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id)
{
	const char *ramrod_cmd_id_str;

	if (protocol_type >= ARRAY_SIZE(s_ramrod_cmd_ids))
		return "Invalid protocol type";

	if (ramrod_cmd_id >= ARRAY_SIZE(s_ramrod_cmd_ids[0]))
		return "Invalid Ramrod command ID";

	ramrod_cmd_id_str = s_ramrod_cmd_ids[protocol_type][ramrod_cmd_id];

	if (!ramrod_cmd_id_str)
		return "Invalid Ramrod command ID";

	return ramrod_cmd_id_str;
}

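/* Return the GRC address of the Storm's RDMA assert level in internal
 * RAM for the current PF. The Storm order used here (and by
 * qed_get_overlay_addr_ram_addr() below) is fixed:
 * 0 = TSTORM, 1 = MSTORM, 2 = USTORM, 3 = XSTORM, 4 = YSTORM, 5 = PSTORM.
 */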
static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
{
	switch (storm_id) {
	case 0:
		return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 1:
		return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 2:
		return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 3:
		return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 4:
		return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 5:
		return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	default:
		return 0;
	}
}

void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      u8 assert_level[NUM_STORMS])
{
	u8 storm_id;

	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
		u32 ram_addr = qed_get_rdma_assert_ram_addr(p_hwfn, storm_id);

		qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
	}
}

#define PHYS_ADDR_DWORDS	DIV_ROUND_UP(sizeof(dma_addr_t), 4)
#define OVERLAY_HDR_SIZE_DWORDS	(sizeof(struct fw_overlay_buf_hdr) / 4)
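
/* Note: with a 64-bit dma_addr_t (the common configuration for these
 * devices), PHYS_ADDR_DWORDS evaluates to 2, i.e. each per-PF overlay
 * buffer address occupies two consecutive dwords in Storm RAM.
 */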

static u32 qed_get_overlay_addr_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
{
	switch (storm_id) {
	case 0:
		return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       TSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 1:
		return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       MSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 2:
		return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       USTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 3:
		return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       XSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 4:
		return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       YSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 5:
		return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       PSTORM_OVERLAY_BUF_ADDR_OFFSET;
	default:
		return 0;
	}
}

struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
					       const u32 * const fw_overlay_in_buf,
					       u32 buf_size_in_bytes)
{
	u32 buf_size = buf_size_in_bytes / sizeof(u32), buf_offset = 0;
	struct phys_mem_desc *allocated_mem;

	if (!buf_size)
		return NULL;

	allocated_mem = kcalloc(NUM_STORMS, sizeof(struct phys_mem_desc),
				GFP_KERNEL);
	if (!allocated_mem)
		return NULL;

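	/* The input buffer is a sequence of per-Storm chunks: each chunk
	 * is an OVERLAY_HDR_SIZE_DWORDS header holding the Storm ID and
	 * the buffer size in dwords, followed by the overlay data itself:
	 *
	 *   [hdr][data ...][hdr][data ...]...
	 */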
	/* For each Storm, set the physical address in RAM */
	while (buf_offset < buf_size) {
		struct phys_mem_desc *storm_mem_desc;
		struct fw_overlay_buf_hdr *hdr;
		u32 storm_buf_size;
		u8 storm_id;

		hdr =
		    (struct fw_overlay_buf_hdr *)&fw_overlay_in_buf[buf_offset];
		storm_buf_size = GET_FIELD(hdr->data,
					   FW_OVERLAY_BUF_HDR_BUF_SIZE);
		storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID);
		if (storm_id >= NUM_STORMS)
			break;
		storm_mem_desc = allocated_mem + storm_id;
		storm_mem_desc->size = storm_buf_size * sizeof(u32);

		/* Allocate physical memory for the Storm's overlays buffer */
		storm_mem_desc->virt_addr =
		    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       storm_mem_desc->size,
				       &storm_mem_desc->phys_addr, GFP_KERNEL);
		if (!storm_mem_desc->virt_addr)
			break;

		/* Skip the overlays buffer header */
		buf_offset += OVERLAY_HDR_SIZE_DWORDS;

		/* Copy the Storm's overlays buffer to the allocated memory */
		memcpy(storm_mem_desc->virt_addr,
		       &fw_overlay_in_buf[buf_offset], storm_mem_desc->size);

		/* Advance to the next Storm */
		buf_offset += storm_buf_size;
	}

	/* If a memory allocation failed, free all allocated memory */
	if (buf_offset < buf_size) {
		qed_fw_overlay_mem_free(p_hwfn, &allocated_mem);
		return NULL;
	}

	return allocated_mem;
}

void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt,
			     struct phys_mem_desc *fw_overlay_mem)
{
	u8 storm_id;

	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
		struct phys_mem_desc *storm_mem_desc =
		    (struct phys_mem_desc *)fw_overlay_mem + storm_id;
		u32 ram_addr, i;

		/* Skip Storms with no FW overlays */
		if (!storm_mem_desc->virt_addr)
			continue;

		/* Calculate the overlay RAM GRC address for the current PF */
		ram_addr = qed_get_overlay_addr_ram_addr(p_hwfn, storm_id) +
			   sizeof(dma_addr_t) * p_hwfn->rel_pf_id;

		/* Write the Storm's overlay physical address to RAM */
		for (i = 0; i < PHYS_ADDR_DWORDS; i++, ram_addr += sizeof(u32))
			qed_wr(p_hwfn, p_ptt, ram_addr,
			       ((u32 *)&storm_mem_desc->phys_addr)[i]);
	}
}

void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
			     struct phys_mem_desc **fw_overlay_mem)
{
	u8 storm_id;

	if (!fw_overlay_mem || !(*fw_overlay_mem))
		return;

	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
		struct phys_mem_desc *storm_mem_desc =
		    (struct phys_mem_desc *)*fw_overlay_mem + storm_id;

		/* Free the Storm's physical memory */
		if (storm_mem_desc->virt_addr)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  storm_mem_desc->size,
					  storm_mem_desc->virt_addr,
					  storm_mem_desc->phys_addr);
	}

	/* Free the allocated virtual memory */
	kfree(*fw_overlay_mem);
	*fw_overlay_mem = NULL;
}
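
/* Typical lifecycle of the overlay memory (a sketch; the actual call
 * sites live elsewhere in the driver):
 *
 *	mem = qed_fw_overlay_mem_alloc(p_hwfn, buf, buf_size_in_bytes);
 *	if (mem)
 *		qed_fw_overlay_init_ram(p_hwfn, p_ptt, mem);
 *	...
 *	qed_fw_overlay_mem_free(p_hwfn, &mem);
 */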