/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#ifndef _MLXSW_CMD_H
#define _MLXSW_CMD_H

#include "item.h"

#define MLXSW_CMD_MBOX_SIZE	4096

static inline char *mlxsw_cmd_mbox_alloc(void)
{
	return kzalloc(MLXSW_CMD_MBOX_SIZE, GFP_KERNEL);
}

static inline void mlxsw_cmd_mbox_free(char *mbox)
{
	kfree(mbox);
}

static inline void mlxsw_cmd_mbox_zero(char *mbox)
{
	memset(mbox, 0, MLXSW_CMD_MBOX_SIZE);
}

struct mlxsw_core;

int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct, bool reset_ok,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size);

static inline int mlxsw_cmd_exec_in(struct mlxsw_core *mlxsw_core, u16 opcode,
				    u8 opcode_mod, u32 in_mod, char *in_mbox,
				    size_t in_mbox_size)
{
	return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
			      false, in_mbox, in_mbox_size, NULL, 0);
}

static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode,
				     u8 opcode_mod, u32 in_mod,
				     bool out_mbox_direct,
				     char *out_mbox, size_t out_mbox_size)
{
	return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod,
			      out_mbox_direct, false, NULL, 0,
			      out_mbox, out_mbox_size);
}

static inline int mlxsw_cmd_exec_none(struct mlxsw_core *mlxsw_core, u16 opcode,
				      u8 opcode_mod, u32 in_mod)
{
	return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
			      false, NULL, 0, NULL, 0);
}

enum mlxsw_cmd_opcode {
	MLXSW_CMD_OPCODE_QUERY_FW		= 0x004,
	MLXSW_CMD_OPCODE_QUERY_BOARDINFO	= 0x006,
	MLXSW_CMD_OPCODE_QUERY_AQ_CAP		= 0x003,
	MLXSW_CMD_OPCODE_MAP_FA			= 0xFFF,
	MLXSW_CMD_OPCODE_UNMAP_FA		= 0xFFE,
	MLXSW_CMD_OPCODE_CONFIG_PROFILE		= 0x100,
	MLXSW_CMD_OPCODE_ACCESS_REG		= 0x040,
	MLXSW_CMD_OPCODE_SW2HW_DQ		= 0x201,
	MLXSW_CMD_OPCODE_HW2SW_DQ		= 0x202,
	MLXSW_CMD_OPCODE_2ERR_DQ		= 0x01E,
	MLXSW_CMD_OPCODE_QUERY_DQ		= 0x022,
	MLXSW_CMD_OPCODE_SW2HW_CQ		= 0x016,
	MLXSW_CMD_OPCODE_HW2SW_CQ		= 0x017,
	MLXSW_CMD_OPCODE_QUERY_CQ		= 0x018,
	MLXSW_CMD_OPCODE_SW2HW_EQ		= 0x013,
	MLXSW_CMD_OPCODE_HW2SW_EQ		= 0x014,
	MLXSW_CMD_OPCODE_QUERY_EQ		= 0x015,
	MLXSW_CMD_OPCODE_QUERY_RESOURCES	= 0x101,
};

static inline const char *mlxsw_cmd_opcode_str(u16 opcode)
{
	switch (opcode) {
	case MLXSW_CMD_OPCODE_QUERY_FW:
		return "QUERY_FW";
	case MLXSW_CMD_OPCODE_QUERY_BOARDINFO:
		return "QUERY_BOARDINFO";
	case MLXSW_CMD_OPCODE_QUERY_AQ_CAP:
		return "QUERY_AQ_CAP";
	case MLXSW_CMD_OPCODE_MAP_FA:
		return "MAP_FA";
	case MLXSW_CMD_OPCODE_UNMAP_FA:
		return "UNMAP_FA";
	case MLXSW_CMD_OPCODE_CONFIG_PROFILE:
		return "CONFIG_PROFILE";
	case MLXSW_CMD_OPCODE_ACCESS_REG:
		return "ACCESS_REG";
	case MLXSW_CMD_OPCODE_SW2HW_DQ:
		return "SW2HW_DQ";
	case MLXSW_CMD_OPCODE_HW2SW_DQ:
		return "HW2SW_DQ";
	case MLXSW_CMD_OPCODE_2ERR_DQ:
		return "2ERR_DQ";
	case MLXSW_CMD_OPCODE_QUERY_DQ:
		return "QUERY_DQ";
	case MLXSW_CMD_OPCODE_SW2HW_CQ:
		return "SW2HW_CQ";
	case MLXSW_CMD_OPCODE_HW2SW_CQ:
		return "HW2SW_CQ";
	case MLXSW_CMD_OPCODE_QUERY_CQ:
		return "QUERY_CQ";
	case MLXSW_CMD_OPCODE_SW2HW_EQ:
		return "SW2HW_EQ";
	case MLXSW_CMD_OPCODE_HW2SW_EQ:
		return "HW2SW_EQ";
	case MLXSW_CMD_OPCODE_QUERY_EQ:
		return "QUERY_EQ";
	case MLXSW_CMD_OPCODE_QUERY_RESOURCES:
		return "QUERY_RESOURCES";
	default:
		return "*UNKNOWN*";
	}
}

enum mlxsw_cmd_status {
	/* Command execution succeeded. */
	MLXSW_CMD_STATUS_OK		= 0x00,
	/* Internal error (e.g. bus error) occurred while processing command. */
	MLXSW_CMD_STATUS_INTERNAL_ERR	= 0x01,
	/* Operation/command not supported or opcode modifier not supported. */
	MLXSW_CMD_STATUS_BAD_OP		= 0x02,
	/* Parameter not supported, parameter out of range. */
	MLXSW_CMD_STATUS_BAD_PARAM	= 0x03,
	/* System was not enabled or bad system state. */
	MLXSW_CMD_STATUS_BAD_SYS_STATE	= 0x04,
	/* Attempt to access reserved or unallocated resource, or resource in
	 * inappropriate ownership.
	 */
	MLXSW_CMD_STATUS_BAD_RESOURCE	= 0x05,
	/* Requested resource is currently executing a command. */
	MLXSW_CMD_STATUS_RESOURCE_BUSY	= 0x06,
	/* Required capability exceeds device limits. */
	MLXSW_CMD_STATUS_EXCEED_LIM	= 0x08,
	/* Resource is not in the appropriate state or ownership. */
	MLXSW_CMD_STATUS_BAD_RES_STATE	= 0x09,
	/* Index out of range (might be beyond table size or attempt to
	 * access a reserved resource).
	 */
	MLXSW_CMD_STATUS_BAD_INDEX	= 0x0A,
	/* NVMEM checksum/CRC failed. */
	MLXSW_CMD_STATUS_BAD_NVMEM	= 0x0B,
	/* Device is currently running reset */
	MLXSW_CMD_STATUS_RUNNING_RESET	= 0x26,
	/* Bad management packet (silently discarded). */
	MLXSW_CMD_STATUS_BAD_PKT	= 0x30,
};

static inline const char *mlxsw_cmd_status_str(u8 status)
{
	switch (status) {
	case MLXSW_CMD_STATUS_OK:
		return "OK";
	case MLXSW_CMD_STATUS_INTERNAL_ERR:
		return "INTERNAL_ERR";
	case MLXSW_CMD_STATUS_BAD_OP:
		return "BAD_OP";
	case MLXSW_CMD_STATUS_BAD_PARAM:
		return "BAD_PARAM";
	case MLXSW_CMD_STATUS_BAD_SYS_STATE:
		return "BAD_SYS_STATE";
	case MLXSW_CMD_STATUS_BAD_RESOURCE:
		return "BAD_RESOURCE";
	case MLXSW_CMD_STATUS_RESOURCE_BUSY:
		return "RESOURCE_BUSY";
	case MLXSW_CMD_STATUS_EXCEED_LIM:
		return "EXCEED_LIM";
	case MLXSW_CMD_STATUS_BAD_RES_STATE:
		return "BAD_RES_STATE";
	case MLXSW_CMD_STATUS_BAD_INDEX:
		return "BAD_INDEX";
	case MLXSW_CMD_STATUS_BAD_NVMEM:
		return "BAD_NVMEM";
	case MLXSW_CMD_STATUS_RUNNING_RESET:
		return "RUNNING_RESET";
	case MLXSW_CMD_STATUS_BAD_PKT:
		return "BAD_PKT";
	default:
		return "*UNKNOWN*";
	}
}

/* QUERY_FW - Query Firmware
 * -------------------------
 * OpMod == 0, INMmod == 0
 * -----------------------
 * The QUERY_FW command retrieves information related to firmware, command
 * interface version and the amount of resources that should be allocated to
 * the firmware.
 */

static inline int mlxsw_cmd_query_fw(struct mlxsw_core *mlxsw_core,
				     char *out_mbox)
{
	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_FW,
				  0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
}

/* cmd_mbox_query_fw_fw_pages
 * Amount of physical memory to be allocated for firmware usage in 4KB pages.
 */
MLXSW_ITEM32(cmd_mbox, query_fw, fw_pages, 0x00, 16, 16);

/* cmd_mbox_query_fw_fw_rev_major
 * Firmware Revision - Major
 */
MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_major, 0x00, 0, 16);

/* cmd_mbox_query_fw_fw_rev_subminor
 * Firmware Sub-minor version (Patch level)
 */
MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_subminor, 0x04, 16, 16);

/* cmd_mbox_query_fw_fw_rev_minor
 * Firmware Revision - Minor
 */
MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_minor, 0x04, 0, 16);

/* cmd_mbox_query_fw_core_clk
 * Internal Clock Frequency (in MHz)
 */
MLXSW_ITEM32(cmd_mbox, query_fw, core_clk, 0x08, 16, 16);

/* cmd_mbox_query_fw_cmd_interface_rev
 * Command Interface Interpreter Revision ID. This number is bumped up
 * every time a non-backward-compatible change is done for the command
 * interface. The current cmd_interface_rev is 1.
 */
MLXSW_ITEM32(cmd_mbox, query_fw, cmd_interface_rev, 0x08, 0, 16);

/* cmd_mbox_query_fw_dt
 * If set, Debug Trace is supported
 */
MLXSW_ITEM32(cmd_mbox, query_fw, dt, 0x0C, 31, 1);

/* cmd_mbox_query_fw_api_version
 * Indicates the version of the API, to enable software querying
 * for compatibility. The current api_version is 1.
 */
MLXSW_ITEM32(cmd_mbox, query_fw, api_version, 0x0C, 0, 16);

/* cmd_mbox_query_fw_fw_hour
 * Firmware timestamp - hour
 */
MLXSW_ITEM32(cmd_mbox, query_fw, fw_hour, 0x10, 24, 8);

/* cmd_mbox_query_fw_fw_minutes
 * Firmware timestamp - minutes
 */
MLXSW_ITEM32(cmd_mbox, query_fw, fw_minutes, 0x10, 16, 8);

/* cmd_mbox_query_fw_fw_seconds
 * Firmware timestamp - seconds
 */
MLXSW_ITEM32(cmd_mbox, query_fw, fw_seconds, 0x10, 8, 8);

/* cmd_mbox_query_fw_fw_year
 * Firmware timestamp - year
 */
MLXSW_ITEM32(cmd_mbox, query_fw, fw_year, 0x14, 16, 16);

/* cmd_mbox_query_fw_fw_month
 * Firmware timestamp - month
 */
MLXSW_ITEM32(cmd_mbox, query_fw, fw_month, 0x14, 8, 8);

/* cmd_mbox_query_fw_fw_day
 * Firmware timestamp - day
 */
MLXSW_ITEM32(cmd_mbox, query_fw, fw_day, 0x14, 0, 8);

/* cmd_mbox_query_fw_lag_mode_support
 * 0: CONFIG_PROFILE.lag_mode is not supported by FW
 * 1: CONFIG_PROFILE.lag_mode is supported by FW
 */
MLXSW_ITEM32(cmd_mbox, query_fw, lag_mode_support, 0x18, 1, 1);

/* cmd_mbox_query_fw_cff_support
 * 0: CONFIG_PROFILE.flood_mode = 5 (CFF) is not supported by FW
 * 1: CONFIG_PROFILE.flood_mode = 5 (CFF) is supported by FW
 */
MLXSW_ITEM32(cmd_mbox, query_fw, cff_support, 0x18, 2, 1);

/* cmd_mbox_query_fw_clr_int_base_offset
 * Clear Interrupt register's offset from clr_int_bar register
 * in PCI address space.
 */
MLXSW_ITEM64(cmd_mbox, query_fw, clr_int_base_offset, 0x20, 0, 64);

/* cmd_mbox_query_fw_clr_int_bar
 * PCI base address register (BAR) where clr_int register is located.
 * 00 - BAR 0-1 (64 bit BAR)
 */
MLXSW_ITEM32(cmd_mbox, query_fw, clr_int_bar, 0x28, 30, 2);

/* cmd_mbox_query_fw_error_buf_offset
 * Read Only buffer for internal error reports. Offset from the
 * error_buf_bar register in PCI address space.
 */
MLXSW_ITEM64(cmd_mbox, query_fw, error_buf_offset, 0x30, 0, 64);

/* cmd_mbox_query_fw_error_buf_size
 * Internal error buffer size in DWORDs
 */
MLXSW_ITEM32(cmd_mbox, query_fw, error_buf_size, 0x38, 0, 32);

/* cmd_mbox_query_fw_error_int_bar
 * PCI base address register (BAR) where error buffer
 * register is located.
 * 00 - BAR 0-1 (64 bit BAR)
 */
MLXSW_ITEM32(cmd_mbox, query_fw, error_int_bar, 0x3C, 30, 2);

/* cmd_mbox_query_fw_doorbell_page_offset
 * Offset of the doorbell page
 */
MLXSW_ITEM64(cmd_mbox, query_fw, doorbell_page_offset, 0x40, 0, 64);

/* cmd_mbox_query_fw_doorbell_page_bar
 * PCI base address register (BAR) of the doorbell page
 * 00 - BAR 0-1 (64 bit BAR)
 */
MLXSW_ITEM32(cmd_mbox, query_fw, doorbell_page_bar, 0x48, 30, 2);

/* cmd_mbox_query_fw_free_running_clock_offset
 * The offset of the free running clock page
 */
MLXSW_ITEM64(cmd_mbox, query_fw, free_running_clock_offset, 0x50, 0, 64);

/* cmd_mbox_query_fw_fr_rn_clk_bar
 * PCI base address register (BAR) of the free running clock page
 * 0: BAR 0
 * 1: 64 bit BAR
 */
MLXSW_ITEM32(cmd_mbox, query_fw, fr_rn_clk_bar, 0x58, 30, 2);

/* cmd_mbox_query_fw_utc_sec_offset
 * The offset of the UTC_Sec page
 */
MLXSW_ITEM64(cmd_mbox, query_fw, utc_sec_offset, 0x70, 0, 64);

/* cmd_mbox_query_fw_utc_sec_bar
 * PCI base address register (BAR) of the UTC_Sec page
 * 0: BAR 0
 * 1: 64 bit BAR
 * Reserved on SwitchX/-2, Switch-IB/2, Spectrum-1
 */
MLXSW_ITEM32(cmd_mbox, query_fw, utc_sec_bar, 0x78, 30, 2);

/* cmd_mbox_query_fw_utc_nsec_offset
 * The offset of the UTC_nSec page
 */
MLXSW_ITEM64(cmd_mbox, query_fw, utc_nsec_offset, 0x80, 0, 64);

/* cmd_mbox_query_fw_utc_nsec_bar
 * PCI base address register (BAR) of the UTC_nSec page
 * 0: BAR 0
 * 1: 64 bit BAR
 * Reserved on SwitchX/-2, Switch-IB/2, Spectrum-1
 */
MLXSW_ITEM32(cmd_mbox, query_fw, utc_nsec_bar, 0x88, 30, 2);
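
/* Example usage (illustrative sketch only, not part of the command
 * interface): the MLXSW_ITEM32()/MLXSW_ITEM64() definitions above generate
 * mlxsw_cmd_mbox_query_fw_<field>_get() accessors (see item.h). A caller
 * could issue QUERY_FW and decode the firmware revision as below.
 * mlxsw_cmd_example_query_fw_rev() is a hypothetical helper, not an
 * existing mlxsw function.
 */
static inline int mlxsw_cmd_example_query_fw_rev(struct mlxsw_core *mlxsw_core,
						 u16 *rev_major, u16 *rev_minor,
						 u16 *rev_subminor)
{
	char *mbox;
	int err;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;
	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	if (err)
		goto out;
	*rev_major = mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
	*rev_minor = mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
	*rev_subminor = mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);
out:
	mlxsw_cmd_mbox_free(mbox);
	return err;
}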

/* QUERY_BOARDINFO - Query Board Information
 * -----------------------------------------
 * OpMod == 0 (N/A), INMmod == 0 (N/A)
 * -----------------------------------
 * The QUERY_BOARDINFO command retrieves adapter specific parameters.
 */

static inline int mlxsw_cmd_boardinfo(struct mlxsw_core *mlxsw_core,
				      char *out_mbox)
{
	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_BOARDINFO,
				  0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
}

/* cmd_mbox_boardinfo_intapin
 * When PCIe interrupt messages are being used, this value is used for clearing
 * an interrupt. When using MSI-X, this register is not used.
 */
MLXSW_ITEM32(cmd_mbox, boardinfo, intapin, 0x10, 24, 8);

/* cmd_mbox_boardinfo_vsd_vendor_id
 * PCISIG Vendor ID (www.pcisig.com/membership/vid_search) of the vendor
 * specifying/formatting the VSD. The vsd_vendor_id identifies the management
 * domain of the VSD/PSID data. Different vendors may choose different VSD/PSID
 * format and encoding as long as they use their assigned vsd_vendor_id.
 */
MLXSW_ITEM32(cmd_mbox, boardinfo, vsd_vendor_id, 0x1C, 0, 16);

/* cmd_mbox_boardinfo_vsd
 * Vendor Specific Data. The VSD string that is burnt to the Flash
 * with the firmware.
 */
#define MLXSW_CMD_BOARDINFO_VSD_LEN 208
MLXSW_ITEM_BUF(cmd_mbox, boardinfo, vsd, 0x20, MLXSW_CMD_BOARDINFO_VSD_LEN);

/* cmd_mbox_boardinfo_psid
 * The PSID field is a 16-ascii (byte) character string which acts as
 * the board ID. The PSID format is used in conjunction with
 * Mellanox vsd_vendor_id (15B3h).
 */
#define MLXSW_CMD_BOARDINFO_PSID_LEN 16
MLXSW_ITEM_BUF(cmd_mbox, boardinfo, psid, 0xF0, MLXSW_CMD_BOARDINFO_PSID_LEN);
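
/* Example usage (illustrative sketch only): assuming MLXSW_ITEM_BUF() above
 * generates a mlxsw_cmd_mbox_boardinfo_psid_memcpy_from() accessor (as
 * defined in item.h), the board PSID can be copied out of the output
 * mailbox as follows. mlxsw_cmd_example_read_psid() is a hypothetical
 * helper, not an existing mlxsw function; psid must point to a buffer of
 * at least MLXSW_CMD_BOARDINFO_PSID_LEN bytes.
 */
static inline int mlxsw_cmd_example_read_psid(struct mlxsw_core *mlxsw_core,
					      char *psid)
{
	char *mbox;
	int err;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;
	err = mlxsw_cmd_boardinfo(mlxsw_core, mbox);
	if (!err)
		mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, psid);
	mlxsw_cmd_mbox_free(mbox);
	return err;
}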

/* QUERY_AQ_CAP - Query Asynchronous Queues Capabilities
 * -----------------------------------------------------
 * OpMod == 0 (N/A), INMmod == 0 (N/A)
 * -----------------------------------
 * The QUERY_AQ_CAP command returns the device asynchronous queues
 * capabilities supported.
 */

static inline int mlxsw_cmd_query_aq_cap(struct mlxsw_core *mlxsw_core,
					 char *out_mbox)
{
	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_AQ_CAP,
				  0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
}

/* cmd_mbox_query_aq_cap_log_max_sdq_sz
 * Log (base 2) of max WQEs allowed on SDQ.
 */
MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_sdq_sz, 0x00, 24, 8);

/* cmd_mbox_query_aq_cap_max_num_sdqs
 * Maximum number of SDQs.
 */
MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_sdqs, 0x00, 0, 8);

/* cmd_mbox_query_aq_cap_log_max_rdq_sz
 * Log (base 2) of max WQEs allowed on RDQ.
 */
MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8);

/* cmd_mbox_query_aq_cap_max_num_rdqs
 * Maximum number of RDQs.
 */
MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8);

/* cmd_mbox_query_aq_cap_log_max_cq_sz
 * Log (base 2) of the Maximum CQEs allowed in a CQ for CQEv0 and CQEv1.
 */
MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8);

/* cmd_mbox_query_aq_cap_log_max_cqv2_sz
 * Log (base 2) of the Maximum CQEs allowed in a CQ for CQEv2.
 */
MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cqv2_sz, 0x08, 16, 8);

/* cmd_mbox_query_aq_cap_max_num_cqs
 * Maximum number of CQs.
 */
MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_cqs, 0x08, 0, 8);

/* cmd_mbox_query_aq_cap_log_max_eq_sz
 * Log (base 2) of max EQEs allowed on EQ.
 */
MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_eq_sz, 0x0C, 24, 8);

/* cmd_mbox_query_aq_cap_max_num_eqs
 * Maximum number of EQs.
 */
MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_eqs, 0x0C, 0, 8);

/* cmd_mbox_query_aq_cap_max_sg_sq
 * The maximum S/G list elements in a DSQ. A DSQ must not contain
 * more S/G entries than indicated here.
 */
MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_sq, 0x10, 8, 8);

/* cmd_mbox_query_aq_cap_max_sg_rq
 * The maximum S/G list elements in a DRQ. A DRQ must not contain
 * more S/G entries than indicated here.
 */
MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8);
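
/* Example usage (illustrative sketch only): the log2 sizes reported by
 * QUERY_AQ_CAP are decoded with a shift. The hypothetical helper below
 * derives the maximum number of WQEs per SDQ from an already-queried
 * QUERY_AQ_CAP output mailbox.
 */
static inline u32 mlxsw_cmd_example_max_sdq_wqes(const char *aq_cap_mbox)
{
	return 1 << mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(aq_cap_mbox);
}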

/* MAP_FA - Map Firmware Area
 * --------------------------
 * OpMod == 0 (N/A), INMmod == Number of VPM entries
 * -------------------------------------------------
 * The MAP_FA command passes physical pages to the switch. These pages
 * are used to store the device firmware. MAP_FA can be executed multiple
 * times until all the firmware area is mapped (the size that should be
 * mapped is retrieved through the QUERY_FW command). All required pages
 * must be mapped to finish the initialization phase. Physical memory
 * passed in this command must be pinned.
 */

#define MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX 32

static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core,
				   char *in_mbox, u32 vpm_entries_count)
{
	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_MAP_FA,
				 0, vpm_entries_count,
				 in_mbox, MLXSW_CMD_MBOX_SIZE);
}

/* cmd_mbox_map_fa_pa
 * Physical Address.
 */
MLXSW_ITEM64_INDEXED(cmd_mbox, map_fa, pa, 0x00, 12, 52, 0x08, 0x00, true);

/* cmd_mbox_map_fa_log2size
 * Log (base 2) of the size in 4KB pages of the physical and contiguous memory
 * that starts at PA_L/H.
 */
MLXSW_ITEM32_INDEXED(cmd_mbox, map_fa, log2size, 0x00, 0, 5, 0x08, 0x04, false);
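
/* Example usage (illustrative sketch only): each VPM entry carries a 4KB
 * aligned physical address and the log2 size (in 4KB pages) of the
 * contiguous area, with up to MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX entries per
 * MAP_FA execution. mlxsw_cmd_example_map_fa_one() is a hypothetical
 * helper mapping a single one-page area, not an existing mlxsw function.
 */
static inline int mlxsw_cmd_example_map_fa_one(struct mlxsw_core *mlxsw_core,
					       char *in_mbox, u64 dma_addr)
{
	mlxsw_cmd_mbox_zero(in_mbox);
	mlxsw_cmd_mbox_map_fa_pa_set(in_mbox, 0, dma_addr);
	mlxsw_cmd_mbox_map_fa_log2size_set(in_mbox, 0, 0); /* 2^0 = one 4KB page */
	return mlxsw_cmd_map_fa(mlxsw_core, in_mbox, 1);
}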

/* UNMAP_FA - Unmap Firmware Area
 * ------------------------------
 * OpMod == 0 (N/A), INMmod == 0 (N/A)
 * -----------------------------------
 * The UNMAP_FA command unloads the firmware and unmaps all the
 * firmware area. After this command is completed the device will not access
 * the pages that were mapped to the firmware area. After executing UNMAP_FA
 * command, software reset must be done prior to execution of MAP_FA command.
 */

static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0);
}

/* QUERY_RESOURCES - Query chip resources
 * --------------------------------------
 * OpMod == 0 (N/A), INMmod is index
 * ----------------------------------
 * The QUERY_RESOURCES command retrieves information related to chip resources
 * by resource ID. Every command returns 32 entries. INMmod is used as the
 * base index; for example, index 1 returns entries 32-63. When the table ends
 * and there are no more resources, resource ID 0xFFFF is returned to
 * indicate it.
 */

#define MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID 0xffff
#define MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES 100
#define MLXSW_CMD_QUERY_RESOURCES_PER_QUERY 32

static inline int mlxsw_cmd_query_resources(struct mlxsw_core *mlxsw_core,
					    char *out_mbox, int index)
{
	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_RESOURCES,
				  0, index, false, out_mbox,
				  MLXSW_CMD_MBOX_SIZE);
}

/* cmd_mbox_query_resource_id
 * The resource id. 0xFFFF indicates table's end.
 */
MLXSW_ITEM32_INDEXED(cmd_mbox, query_resource, id, 0x00, 16, 16, 0x8, 0, false);

/* cmd_mbox_query_resource_data
 * The resource value.
 */
MLXSW_ITEM64_INDEXED(cmd_mbox, query_resource, data,
		     0x00, 0, 40, 0x8, 0, false);
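
/* Example usage (illustrative sketch only): resources are read in pages of
 * MLXSW_CMD_QUERY_RESOURCES_PER_QUERY entries until the table-end ID is
 * seen. The hypothetical helper below simply counts the valid entries.
 */
static inline int mlxsw_cmd_example_count_resources(struct mlxsw_core *mlxsw_core,
						    char *out_mbox)
{
	int count = 0;
	int index, i;
	u16 id;
	int err;

	for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES; index++) {
		err = mlxsw_cmd_query_resources(mlxsw_core, out_mbox, index);
		if (err)
			return err;
		for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
			id = mlxsw_cmd_mbox_query_resource_id_get(out_mbox, i);
			if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
				return count;
			count++;
		}
	}
	return count;
}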

/* CONFIG_PROFILE (Set) - Configure Switch Profile
 * ------------------------------
 * OpMod == 1 (Set), INMmod == 0 (N/A)
 * -----------------------------------
 * The CONFIG_PROFILE command sets the switch profile. The command can be
 * executed on the device only once at startup in order to allocate and
 * configure all switch resources and prepare it for operational mode.
 * It is not possible to change the device profile after the chip is
 * in operational mode.
 * Failure of the CONFIG_PROFILE command leaves the hardware in an
 * indeterminate state; therefore, a software reset of the device is required
 * following an unsuccessful completion of the command. A software reset is
 * also required in order to change an existing profile.
 */

static inline int mlxsw_cmd_config_profile_set(struct mlxsw_core *mlxsw_core,
					       char *in_mbox)
{
	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_CONFIG_PROFILE,
				 1, 0, in_mbox, MLXSW_CMD_MBOX_SIZE);
}

/* cmd_mbox_config_profile_set_max_vepa_channels
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vepa_channels, 0x0C, 0, 1);

/* cmd_mbox_config_profile_set_max_lag
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_max_lag, 0x0C, 1, 1);

/* cmd_mbox_config_profile_set_max_port_per_lag
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_max_port_per_lag, 0x0C, 2, 1);

/* cmd_mbox_config_profile_set_max_mid
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_max_mid, 0x0C, 3, 1);

/* cmd_mbox_config_profile_set_max_pgt
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pgt, 0x0C, 4, 1);

/* cmd_mbox_config_profile_set_max_system_port
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_max_system_port, 0x0C, 5, 1);

/* cmd_mbox_config_profile_set_max_vlan_groups
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1);

/* cmd_mbox_config_profile_set_max_regions
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1);

/* cmd_mbox_config_profile_set_flood_mode
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_mode, 0x0C, 8, 1);

/* cmd_mbox_config_profile_set_flood_tables
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_tables, 0x0C, 9, 1);

/* cmd_mbox_config_profile_set_max_ib_mc
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_max_ib_mc, 0x0C, 12, 1);

/* cmd_mbox_config_profile_set_max_pkey
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pkey, 0x0C, 13, 1);

/* cmd_mbox_config_profile_set_adaptive_routing_group_cap
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile,
	     set_adaptive_routing_group_cap, 0x0C, 14, 1);

/* cmd_mbox_config_profile_set_ar_sec
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);

/* cmd_mbox_config_profile_set_ubridge
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_ubridge, 0x0C, 22, 1);

/* cmd_mbox_config_profile_set_kvd_linear_size
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_linear_size, 0x0C, 24, 1);

/* cmd_mbox_config_profile_set_kvd_hash_single_size
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_single_size, 0x0C, 25, 1);

/* cmd_mbox_config_profile_set_kvd_hash_double_size
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1);

/* cmd_mbox_config_profile_set_cqe_version
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_version, 0x08, 0, 1);

/* cmd_mbox_config_profile_set_cqe_time_stamp_type
 * Capability bit. Setting a bit to 1 configures the profile
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_time_stamp_type, 0x08, 2, 1);

/* cmd_mbox_config_profile_set_lag_mode
 * Capability bit. Setting a bit to 1 configures the lag_mode
 * according to the mailbox contents.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, set_lag_mode, 0x08, 7, 1);

/* cmd_mbox_config_profile_max_vepa_channels
 * Maximum number of VEPA channels per port (0 through 16)
 * 0 - multi-channel VEPA is disabled
 */
MLXSW_ITEM32(cmd_mbox, config_profile, max_vepa_channels, 0x10, 0, 8);

/* cmd_mbox_config_profile_max_lag
 * Maximum number of LAG IDs requested.
 * Reserved when Spectrum-1/2/3, supported from Spectrum-4 and above.
 * For Spectrum-4, firmware sets 128 for values between 1-128 and 256 for values
 * between 129-256.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, max_lag, 0x14, 0, 16);
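
/* Example usage (illustrative sketch only): a profile value is applied only
 * when its corresponding set_* capability bit is raised. Requesting a
 * maximum LAG count therefore sets both fields in the input mailbox before
 * CONFIG_PROFILE is executed; mlxsw_cmd_example_profile_max_lag() is a
 * hypothetical helper, not an existing mlxsw function.
 */
static inline void mlxsw_cmd_example_profile_max_lag(char *in_mbox, u16 max_lag)
{
	mlxsw_cmd_mbox_config_profile_set_max_lag_set(in_mbox, 1);
	mlxsw_cmd_mbox_config_profile_max_lag_set(in_mbox, max_lag);
}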

/* cmd_mbox_config_profile_max_port_per_lag
 * Maximum number of ports per LAG requested.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, max_port_per_lag, 0x18, 0, 16);

/* cmd_mbox_config_profile_max_mid
 * Maximum Multicast IDs.
 * Multicast IDs are allocated from 0 to max_mid-1
 */
MLXSW_ITEM32(cmd_mbox, config_profile, max_mid, 0x1C, 0, 16);

/* cmd_mbox_config_profile_max_pgt
 * Maximum records in the Port Group Table per Switch Partition.
 * Port Group Table indexes are from 0 to max_pgt-1
 */
MLXSW_ITEM32(cmd_mbox, config_profile, max_pgt, 0x20, 0, 16);

/* cmd_mbox_config_profile_max_system_port
 * The maximum number of system ports that can be allocated.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, max_system_port, 0x24, 0, 16);

/* cmd_mbox_config_profile_max_vlan_groups
 * Maximum number of VLAN Groups for VLAN binding.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12);

/* cmd_mbox_config_profile_max_regions
 * Maximum number of TCAM Regions.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16);

/* cmd_mbox_config_profile_max_flood_tables
 * Maximum number of single-entry flooding tables. Different flooding tables
 * can be associated with different packet types.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);

/* cmd_mbox_config_profile_max_vid_flood_tables
 * Maximum number of per-vid flooding tables. Flooding tables are associated
 * to the different packet types for the different switch partitions.
 * Table size is 4K entries covering all VID space.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);

enum mlxsw_cmd_mbox_config_profile_flood_mode {
	/* Mixed mode, where:
	 * max_flood_tables indicates the number of single-entry tables.
	 * max_vid_flood_tables indicates the number of per-VID tables.
	 * max_fid_offset_flood_tables indicates the number of FID-offset
	 * tables. max_fid_flood_tables indicates the number of per-FID tables.
	 * Reserved when unified bridge model is used.
	 */
	MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_MIXED = 3,
	/* Controlled flood tables. Reserved when legacy bridge model is
	 * used.
	 */
	MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED = 4,
	/* CFF - Compressed FID Flood (CFF) mode.
	 * Reserved when legacy bridge model is used.
	 * Supported only by Spectrum-2+.
	 */
	MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CFF = 5,
};

/* cmd_mbox_config_profile_flood_mode
 * Flooding mode to use.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 3);

/* cmd_mbox_config_profile_max_fid_offset_flood_tables
 * Maximum number of FID-offset flooding tables.
 */
MLXSW_ITEM32(cmd_mbox, config_profile,
	     max_fid_offset_flood_tables, 0x34, 24, 4);

/* cmd_mbox_config_profile_fid_offset_flood_table_size
 * The size (number of entries) of each FID-offset flood table.
 */
MLXSW_ITEM32(cmd_mbox, config_profile,
	     fid_offset_flood_table_size, 0x34, 0, 16);

/* cmd_mbox_config_profile_max_fid_flood_tables
 * Maximum number of per-FID flooding tables.
 *
 * Note: These flooding tables cover special FIDs only (vFIDs), starting at
 * FID value 4K and higher.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, max_fid_flood_tables, 0x38, 24, 4);

/* cmd_mbox_config_profile_fid_flood_table_size
 * The size (number of entries) of each per-FID table.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, fid_flood_table_size, 0x38, 0, 16);

/* cmd_mbox_config_profile_max_ib_mc
 * Maximum number of multicast FDB records for InfiniBand
 * FDB (in 512 chunks) per InfiniBand switch partition.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, max_ib_mc, 0x40, 0, 15);

/* cmd_mbox_config_profile_max_pkey
 * Maximum per port PKEY table size (for PKEY enforcement)
 */
MLXSW_ITEM32(cmd_mbox, config_profile, max_pkey, 0x44, 0, 15);

/* cmd_mbox_config_profile_ar_sec
 * Primary/secondary capability
 * Describes the number of adaptive routing sub-groups
 * 0 - disable primary/secondary (single group)
 * 1 - enable primary/secondary (2 sub-groups)
 * 2 - 3 sub-groups: Not supported in SwitchX, SwitchX-2
 * 3 - 4 sub-groups: Not supported in SwitchX, SwitchX-2
 */
MLXSW_ITEM32(cmd_mbox, config_profile, ar_sec, 0x4C, 24, 2);

/* cmd_mbox_config_profile_adaptive_routing_group_cap
 * Adaptive Routing Group Capability. Indicates the number of AR groups
 * supported. Note that when Primary/secondary is enabled, each
 * primary/secondary couple consumes 2 adaptive routing entries.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);

/* cmd_mbox_config_profile_arn
 * Adaptive Routing Notification Enable
 * Not supported in SwitchX, SwitchX-2
 */
MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);

/* cmd_mbox_config_profile_ubridge
 * Unified Bridge
 * 0 - non unified bridge
 * 1 - unified bridge
 */
MLXSW_ITEM32(cmd_mbox, config_profile, ubridge, 0x50, 4, 1);

enum mlxsw_cmd_mbox_config_profile_lag_mode {
	/* FW manages PGT LAG table */
	MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_FW,
	/* SW manages PGT LAG table */
	MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW,
};

/* cmd_mbox_config_profile_lag_mode
 * LAG mode
 * Configured if set_lag_mode is set
 * Supported from Spectrum-2 and above.
 * Supported only when ubridge = 1
 */
MLXSW_ITEM32(cmd_mbox, config_profile, lag_mode, 0x50, 3, 1);
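
/* Example usage (illustrative sketch only): lag_mode can only be configured
 * when QUERY_FW reported lag_mode_support. Given both mailboxes, a caller
 * might request software-managed LAG as below;
 * mlxsw_cmd_example_request_sw_lag_mode() is a hypothetical helper, not an
 * existing mlxsw function.
 */
static inline void
mlxsw_cmd_example_request_sw_lag_mode(const char *query_fw_mbox,
				      char *profile_mbox)
{
	if (!mlxsw_cmd_mbox_query_fw_lag_mode_support_get(query_fw_mbox))
		return;
	mlxsw_cmd_mbox_config_profile_set_lag_mode_set(profile_mbox, 1);
	mlxsw_cmd_mbox_config_profile_lag_mode_set(profile_mbox,
						   MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW);
}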

/* cmd_mbox_config_profile_kvd_linear_size
 * KVD Linear Size
 * Valid for Spectrum only
 * Allowed values are 128*N where N=0 or higher
 */
MLXSW_ITEM32(cmd_mbox, config_profile, kvd_linear_size, 0x54, 0, 24);

/* cmd_mbox_config_profile_kvd_hash_single_size
 * KVD Hash single-entries size
 * Valid for Spectrum only
 * Allowed values are 128*N where N=0 or higher
 * Must be greater or equal to cap_min_kvd_hash_single_size
 * Must be smaller or equal to cap_kvd_size - kvd_linear_size
 */
MLXSW_ITEM32(cmd_mbox, config_profile, kvd_hash_single_size, 0x58, 0, 24);

/* cmd_mbox_config_profile_kvd_hash_double_size
 * KVD Hash double-entries size (units of single-size entries)
 * Valid for Spectrum only
 * Allowed values are 128*N where N=0 or higher
 * Must be either 0 or greater or equal to cap_min_kvd_hash_double_size
 * Must be smaller or equal to cap_kvd_size - kvd_linear_size
 */
MLXSW_ITEM32(cmd_mbox, config_profile, kvd_hash_double_size, 0x5C, 0, 24);

/* cmd_mbox_config_profile_swid_config_mask
 * Modify Switch Partition Configuration mask. When set, the configuration
 * values for the Switch Partition are taken from the mailbox.
 * When clear, the current configuration values are used.
 * Bit 0 - set type
 * Bit 1 - properties
 * Other - reserved
 */
MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_mask,
		     0x60, 24, 8, 0x08, 0x00, false);

/* cmd_mbox_config_profile_swid_config_type
 * Switch Partition type.
 * 0000 - disabled (Switch Partition does not exist)
 * 0001 - InfiniBand
 * 0010 - Ethernet
 * 1000 - router port (SwitchX-2 only)
 * Other - reserved
 */
MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
		     0x60, 20, 4, 0x08, 0x00, false);

/* cmd_mbox_config_profile_swid_config_properties
 * Switch Partition properties.
 */
MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
		     0x60, 0, 8, 0x08, 0x00, false);

enum mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type {
	/* uSec - 1.024uSec (default). Only bits 15:0 are valid. */
	MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_USEC,
	/* FRC - Free Running Clock, units of 1nSec.
	 * Reserved when SwitchX/-2, Switch-IB/2 and Spectrum-1.
	 */
	MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_FRC,
	/* UTC. time_stamp[37:30] = Sec, time_stamp[29:0] = nSec.
	 * Reserved when SwitchX/2, Switch-IB/2 and Spectrum-1.
	 */
	MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
};

/* cmd_mbox_config_profile_cqe_time_stamp_type
 * CQE time_stamp_type for non-mirror-packets.
 * Configured if set_cqe_time_stamp_type is set.
 * Reserved when SwitchX/-2, Switch-IB/2 and Spectrum-1.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, cqe_time_stamp_type, 0xB0, 8, 2);

/* cmd_mbox_config_profile_cqe_version
 * CQE version:
 * 0: CQE version is 0
 * 1: CQE version is either 1 or 2
 * CQE ver 1 or 2 is configured by Completion Queue Context field cqe_ver.
 */
MLXSW_ITEM32(cmd_mbox, config_profile, cqe_version, 0xB0, 0, 8);

/* ACCESS_REG - Access EMAD Supported Register
 * ----------------------------------
 * OpMod == 0 (N/A), INMmod == 0 (N/A)
 * -------------------------------------
 * The ACCESS_REG command supports accessing device registers. This access
 * is mainly used for bootstrapping.
 */

static inline int mlxsw_cmd_access_reg(struct mlxsw_core *mlxsw_core,
				       bool reset_ok,
				       char *in_mbox, char *out_mbox)
{
	return mlxsw_cmd_exec(mlxsw_core, MLXSW_CMD_OPCODE_ACCESS_REG,
			      0, 0, false, reset_ok,
			      in_mbox, MLXSW_CMD_MBOX_SIZE,
			      out_mbox, MLXSW_CMD_MBOX_SIZE);
}

/* SW2HW_DQ - Software to Hardware DQ
 * ----------------------------------
 * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
 * INMmod == DQ number
 * ----------------------------------------------
 * The SW2HW_DQ command transitions a descriptor queue from software to
 * hardware ownership. The command enables posting WQEs and ringing DoorBells
 * on the descriptor queue.
 */

static inline int __mlxsw_cmd_sw2hw_dq(struct mlxsw_core *mlxsw_core,
				       char *in_mbox, u32 dq_number,
				       u8 opcode_mod)
{
	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_DQ,
				 opcode_mod, dq_number,
				 in_mbox, MLXSW_CMD_MBOX_SIZE);
}

enum {
	MLXSW_CMD_OPCODE_MOD_SDQ = 0,
	MLXSW_CMD_OPCODE_MOD_RDQ = 1,
};

static inline int mlxsw_cmd_sw2hw_sdq(struct mlxsw_core *mlxsw_core,
				      char *in_mbox, u32 dq_number)
{
	return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
				    MLXSW_CMD_OPCODE_MOD_SDQ);
}

static inline int mlxsw_cmd_sw2hw_rdq(struct mlxsw_core *mlxsw_core,
				      char *in_mbox, u32 dq_number)
{
	return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
				    MLXSW_CMD_OPCODE_MOD_RDQ);
}

/* cmd_mbox_sw2hw_dq_cq
 * Number of the CQ that this Descriptor Queue reports completions to.
 */
MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8);

enum mlxsw_cmd_mbox_sw2hw_dq_sdq_lp {
	MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE,
	MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE,
};

/* cmd_mbox_sw2hw_dq_sdq_lp
 * SDQ local Processing
 * 0: local processing by wqe.lp
 * 1: local processing (ignoring wqe.lp)
 */
MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_lp, 0x00, 23, 1);

/* cmd_mbox_sw2hw_dq_sdq_tclass
 * SDQ: CPU Egress TClass
 * RDQ: Reserved
 */
MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_tclass, 0x00, 16, 6);

/* cmd_mbox_sw2hw_dq_log2_dq_sz
 * Log (base 2) of the Descriptor Queue size in 4KB pages.
 */
MLXSW_ITEM32(cmd_mbox, sw2hw_dq, log2_dq_sz, 0x00, 0, 6);

/* cmd_mbox_sw2hw_dq_pa
 * Physical Address.
 */
MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_dq, pa, 0x10, 12, 52, 0x08, 0x00, true);
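
/* Example usage (illustrative sketch only): an SDQ mailbox typically carries
 * the reporting CQ, the egress traffic class, the log2 queue size and the
 * physical addresses of the queue pages before SW2HW_DQ is issued.
 * mlxsw_cmd_example_sdq_init() is a hypothetical helper for a single-page
 * queue, not an existing mlxsw function.
 */
static inline int mlxsw_cmd_example_sdq_init(struct mlxsw_core *mlxsw_core,
					     char *in_mbox, u32 sdq_number,
					     u8 cq_number, u8 tclass,
					     u64 page_dma_addr)
{
	mlxsw_cmd_mbox_zero(in_mbox);
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(in_mbox, cq_number);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(in_mbox, tclass);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(in_mbox, 0); /* one 4KB page */
	mlxsw_cmd_mbox_sw2hw_dq_pa_set(in_mbox, 0, page_dma_addr);
	return mlxsw_cmd_sw2hw_sdq(mlxsw_core, in_mbox, sdq_number);
}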

/* HW2SW_DQ - Hardware to Software DQ
 * ----------------------------------
 * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
 * INMmod == DQ number
 * ----------------------------------------------
 * The HW2SW_DQ command transitions a descriptor queue from hardware to
 * software ownership. Incoming packets on the DQ are silently discarded;
 * SW should not post descriptors on non-operational DQs.
 */

static inline int __mlxsw_cmd_hw2sw_dq(struct mlxsw_core *mlxsw_core,
				       u32 dq_number, u8 opcode_mod)
{
	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_DQ,
				   opcode_mod, dq_number);
}

static inline int mlxsw_cmd_hw2sw_sdq(struct mlxsw_core *mlxsw_core,
				      u32 dq_number)
{
	return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
				    MLXSW_CMD_OPCODE_MOD_SDQ);
}

static inline int mlxsw_cmd_hw2sw_rdq(struct mlxsw_core *mlxsw_core,
				      u32 dq_number)
{
	return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
				    MLXSW_CMD_OPCODE_MOD_RDQ);
}

/* 2ERR_DQ - To Error DQ
 * ---------------------
 * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
 * INMmod == DQ number
 * ----------------------------------------------
 * The 2ERR_DQ command transitions the DQ into the error state from the state
 * in which it has been. While the command is executed, some in-process
 * descriptors may complete. Once the DQ transitions into the error state,
 * if there are posted descriptors on the RDQ/SDQ, the hardware writes
 * a completion with error (flushed) for all descriptors posted in the RDQ/SDQ.
 * When the command is completed successfully, the DQ is already in
 * the error state.
 */

static inline int __mlxsw_cmd_2err_dq(struct mlxsw_core *mlxsw_core,
				      u32 dq_number, u8 opcode_mod)
{
	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_2ERR_DQ,
				   opcode_mod, dq_number);
}

static inline int mlxsw_cmd_2err_sdq(struct mlxsw_core *mlxsw_core,
				     u32 dq_number)
{
	return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
				   MLXSW_CMD_OPCODE_MOD_SDQ);
}

static inline int mlxsw_cmd_2err_rdq(struct mlxsw_core *mlxsw_core,
				     u32 dq_number)
{
	return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
				   MLXSW_CMD_OPCODE_MOD_RDQ);
}

/* QUERY_DQ - Query DQ
 * ---------------------
 * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
 * INMmod == DQ number
 * ----------------------------------------------
 * The QUERY_DQ command retrieves a snapshot of DQ parameters from the hardware.
 *
 * Note: Output mailbox has the same format as SW2HW_DQ.
 */

static inline int __mlxsw_cmd_query_dq(struct mlxsw_core *mlxsw_core,
				       char *out_mbox, u32 dq_number,
				       u8 opcode_mod)
{
	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_DQ,
				  opcode_mod, dq_number, false,
				  out_mbox, MLXSW_CMD_MBOX_SIZE);
}

static inline int mlxsw_cmd_query_sdq(struct mlxsw_core *mlxsw_core,
				      char *out_mbox, u32 dq_number)
{
	return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
				    MLXSW_CMD_OPCODE_MOD_SDQ);
}

static inline int mlxsw_cmd_query_rdq(struct mlxsw_core *mlxsw_core,
				      char *out_mbox, u32 dq_number)
{
	return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
				    MLXSW_CMD_OPCODE_MOD_RDQ);
}

/* SW2HW_CQ - Software to Hardware CQ
 * ----------------------------------
 * OpMod == 0 (N/A), INMmod == CQ number
 * -------------------------------------
 * The SW2HW_CQ command transfers ownership of a CQ context entry from software
 * to hardware. The command takes the CQ context entry from the input mailbox
 * and stores it in the CQC in the ownership of the hardware. The command fails
 * if the requested CQC entry is already in the ownership of the hardware.
 */

static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core,
				     char *in_mbox, u32 cq_number)
{
	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_CQ,
				 0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
}

enum mlxsw_cmd_mbox_sw2hw_cq_cqe_ver {
	MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1,
	MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2,
};

/* cmd_mbox_sw2hw_cq_cqe_ver
 * CQE Version.
 */
MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cqe_ver, 0x00, 28, 4);

/* cmd_mbox_sw2hw_cq_c_eqn
 * Event Queue this CQ reports completion events to.
 */
MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1);

/* cmd_mbox_sw2hw_cq_st
 * Event delivery state machine
 * 0x0 - FIRED
 * 0x1 - ARMED (Request for Notification)
 */
MLXSW_ITEM32(cmd_mbox, sw2hw_cq, st, 0x00, 8, 1);

/* cmd_mbox_sw2hw_cq_log_cq_size
 * Log (base 2) of the CQ size (in entries).
 */
MLXSW_ITEM32(cmd_mbox, sw2hw_cq, log_cq_size, 0x00, 0, 4);

/* cmd_mbox_sw2hw_cq_producer_counter
 * Producer Counter. The counter is incremented for each CQE that is
 * written by the HW to the CQ.
 * Maintained by HW (valid for the QUERY_CQ command only)
 */
MLXSW_ITEM32(cmd_mbox, sw2hw_cq, producer_counter, 0x04, 0, 16);

/* cmd_mbox_sw2hw_cq_pa
 * Physical Address.
 */
MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_cq, pa, 0x10, 11, 53, 0x08, 0x00, true);
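
/* Example usage (illustrative sketch only): a CQ mailbox carries the CQE
 * version, the reporting EQ, the log2 queue size and the queue page
 * addresses before SW2HW_CQ is issued. mlxsw_cmd_example_cq_init() is a
 * hypothetical helper for a single-page CQ using CQE version 1, not an
 * existing mlxsw function.
 */
static inline int mlxsw_cmd_example_cq_init(struct mlxsw_core *mlxsw_core,
					    char *in_mbox, u32 cq_number,
					    u8 eq_number, u8 log_cq_size,
					    u64 page_dma_addr)
{
	mlxsw_cmd_mbox_zero(in_mbox);
	mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(in_mbox,
					    MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(in_mbox, eq_number);
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(in_mbox, log_cq_size);
	mlxsw_cmd_mbox_sw2hw_cq_pa_set(in_mbox, 0, page_dma_addr);
	return mlxsw_cmd_sw2hw_cq(mlxsw_core, in_mbox, cq_number);
}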

/* HW2SW_CQ - Hardware to Software CQ
 * ----------------------------------
 * OpMod == 0 (N/A), INMmod == CQ number
 * -------------------------------------
 * The HW2SW_CQ command transfers ownership of a CQ context entry from hardware
 * to software. The CQC entry is invalidated as a result of this command.
 */

static inline int mlxsw_cmd_hw2sw_cq(struct mlxsw_core *mlxsw_core,
				     u32 cq_number)
{
	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_CQ,
				   0, cq_number);
}

/* QUERY_CQ - Query CQ
 * ----------------------------------
 * OpMod == 0 (N/A), INMmod == CQ number
 * -------------------------------------
 * The QUERY_CQ command retrieves a snapshot of the current CQ context entry.
 * The command stores the snapshot in the output mailbox in the software format.
 * Note that the CQ context state and values are not affected by the QUERY_CQ
 * command. The QUERY_CQ command is for debug purposes only.
 *
 * Note: Output mailbox has the same format as SW2HW_CQ.
 */

static inline int mlxsw_cmd_query_cq(struct mlxsw_core *mlxsw_core,
				     char *out_mbox, u32 cq_number)
{
	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_CQ,
				  0, cq_number, false,
				  out_mbox, MLXSW_CMD_MBOX_SIZE);
}

/* SW2HW_EQ - Software to Hardware EQ
 * ----------------------------------
 * OpMod == 0 (N/A), INMmod == EQ number
 * -------------------------------------
 * The SW2HW_EQ command transfers ownership of an EQ context entry from software
 * to hardware. The command takes the EQ context entry from the input mailbox
 * and stores it in the EQC in the ownership of the hardware. The command fails
 * if the requested EQC entry is already in the ownership of the hardware.
 */

static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core,
				     char *in_mbox, u32 eq_number)
{
	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_EQ,
				 0, eq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
}

/* cmd_mbox_sw2hw_eq_int_msix
 * When set, MSI-X cycles will be generated by this EQ.
 * When cleared, an interrupt will be generated by this EQ.
 */
MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1);

/* cmd_mbox_sw2hw_eq_st
 * Event delivery state machine
 * 0x0 - FIRED
 * 0x1 - ARMED (Request for Notification)
 * 0x3 (binary 11) - Always ARMED
 * other - reserved
 */
MLXSW_ITEM32(cmd_mbox, sw2hw_eq, st, 0x00, 8, 2);

/* cmd_mbox_sw2hw_eq_log_eq_size
 * Log (base 2) of the EQ size (in entries).
 */
MLXSW_ITEM32(cmd_mbox, sw2hw_eq, log_eq_size, 0x00, 0, 4);

/* cmd_mbox_sw2hw_eq_producer_counter
 * Producer Counter. The counter is incremented for each EQE that is written
 * by the HW to the EQ.
 * Maintained by HW (valid for the QUERY_EQ command only)
 */
MLXSW_ITEM32(cmd_mbox, sw2hw_eq, producer_counter, 0x04, 0, 16);

/* cmd_mbox_sw2hw_eq_pa
 * Physical Address.
 */
MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_eq, pa, 0x10, 11, 53, 0x08, 0x00, true);

/* HW2SW_EQ - Hardware to Software EQ
 * ----------------------------------
 * OpMod == 0 (N/A), INMmod == EQ number
 * -------------------------------------
 */

static inline int mlxsw_cmd_hw2sw_eq(struct mlxsw_core *mlxsw_core,
				     u32 eq_number)
{
	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_EQ,
				   0, eq_number);
}

/* QUERY_EQ - Query EQ
 * ----------------------------------
 * OpMod == 0 (N/A), INMmod == EQ number
 * -------------------------------------
 *
 * Note: Output mailbox has the same format as SW2HW_EQ.
 */

static inline int mlxsw_cmd_query_eq(struct mlxsw_core *mlxsw_core,
				     char *out_mbox, u32 eq_number)
{
	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_EQ,
				  0, eq_number, false,
				  out_mbox, MLXSW_CMD_MBOX_SIZE);
}

#endif