1 /*-
2 * Copyright (c) 2017 Broadcom. All rights reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the copyright holder nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /**
33 * @defgroup sli SLI-4 Base APIs
34 */
35
36 /**
37 * @file
38 * All common (i.e. transport-independent) SLI-4 functions are implemented
39 * in this file.
40 */
41
42 #include "sli4.h"
43
44 #if defined(OCS_INCLUDE_DEBUG)
45 #include "ocs_utils.h"
46 #endif
47
48 #define SLI4_BMBX_DELAY_US 1000 /* 1 ms */
49 #define SLI4_INIT_PORT_DELAY_US 10000 /* 10 ms */
50
51 static int32_t sli_fw_init(sli4_t *);
52 static int32_t sli_fw_term(sli4_t *);
53 static int32_t sli_sliport_control(sli4_t *sli4, uint32_t endian);
54 static int32_t sli_cmd_fw_deinitialize(sli4_t *, void *, size_t);
55 static int32_t sli_cmd_fw_initialize(sli4_t *, void *, size_t);
56 static int32_t sli_queue_doorbell(sli4_t *, sli4_queue_t *);
57 static uint8_t sli_queue_entry_is_valid(sli4_queue_t *, uint8_t *, uint8_t);
58
59 const uint8_t sli4_fw_initialize[] = {
60 0xff, 0x12, 0x34, 0xff,
61 0xff, 0x56, 0x78, 0xff,
62 };
63
64 const uint8_t sli4_fw_deinitialize[] = {
65 0xff, 0xaa, 0xbb, 0xff,
66 0xff, 0xcc, 0xdd, 0xff,
67 };
68
69 typedef struct {
70 uint32_t rev_id;
71 uint32_t family; /* generation */
72 sli4_asic_type_e type;
73 sli4_asic_rev_e rev;
74 } sli4_asic_entry_t;
75
76 sli4_asic_entry_t sli4_asic_table[] = {
77 { 0x00, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_A0},
78 { 0x01, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_A1},
79 { 0x02, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_A2},
80 { 0x00, 4, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_A0},
81 { 0x00, 2, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_A0},
82 { 0x10, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_B0},
83 { 0x10, 0x04, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_B0},
84 { 0x11, 0x04, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_B1},
85 { 0x0, 0x0a, SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_A0},
86 { 0x10, 0x0b, SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_B0},
87 { 0x30, 0x0b, SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_D0},
88 { 0x3, 0x0b, SLI4_ASIC_TYPE_LANCERG6, SLI4_ASIC_REV_A3},
89 { 0x0, 0x0c, SLI4_ASIC_TYPE_LANCERG6, SLI4_ASIC_REV_A0},
90 { 0x1, 0x0c, SLI4_ASIC_TYPE_LANCERG6, SLI4_ASIC_REV_A1},
91 { 0x3, 0x0c, SLI4_ASIC_TYPE_LANCERG6, SLI4_ASIC_REV_A3},
92 { 0x1, 0x0d, SLI4_ASIC_TYPE_LANCERG7, SLI4_ASIC_REV_A1},
93 { 0x10, 0x0d, SLI4_ASIC_TYPE_LANCERG7, SLI4_ASIC_REV_B0},
94 { 0x00, 0x05, SLI4_ASIC_TYPE_CORSAIR, SLI4_ASIC_REV_A0},
95 };
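/*
 * Example (illustrative sketch, not part of the driver): resolving the ASIC
 * type and revision by matching a revision ID and SLI family against
 * sli4_asic_table. The rev_id, family, asic_type, and asic_rev variables are
 * hypothetical; the values would typically come from helpers such as
 * sli_pci_rev_id()/sli_asic_gen() and sli_intf_sli_family() defined later in
 * this file.
 *
 *        uint32_t i;
 *
 *        for (i = 0; i < sizeof(sli4_asic_table) / sizeof(sli4_asic_table[0]); i++) {
 *                if ((sli4_asic_table[i].rev_id == rev_id) &&
 *                    (sli4_asic_table[i].family == family)) {
 *                        asic_type = sli4_asic_table[i].type;
 *                        asic_rev = sli4_asic_table[i].rev;
 *                        break;
 *                }
 *        }
 */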
96
97 /**
98 * @brief Convert queue type enum (SLI_QTYPE_*) into a string.
99 */
100 const char *SLI_QNAME[] = {
101 "Event Queue",
102 "Completion Queue",
103 "Mailbox Queue",
104 "Work Queue",
105 "Receive Queue",
106 "Undefined"
107 };
108
109 /**
110 * @brief Define the mapping of registers to their BAR and offset.
111 *
112 * @par Description
113 * Although the SLI-4 specification defines a common set of registers, their locations
114 * (both BAR and offset) depend on the interface type. This array maps a register
115 * enum to an array of BAR/offset pairs indexed by the interface type. For
116 * example, to access the bootstrap mailbox register on an interface type 0
117 * device, code can refer to the offset using regmap[SLI4_REG_BMBX][0].offset.
118 *
119 * @b Note: A value of UINT32_MAX for either the register set (rset) or offset (off)
120 * indicates an invalid mapping.
121 */
122 const sli4_reg_t regmap[SLI4_REG_MAX][SLI4_MAX_IF_TYPES] = {
123 /* SLI4_REG_BMBX */
124 {
125 { 2, SLI4_BMBX_REG }, { 0, SLI4_BMBX_REG }, { 0, SLI4_BMBX_REG }, { 0, SLI4_BMBX_REG },
126 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX } , { 0, SLI4_BMBX_REG },
127 },
128 /* SLI4_REG_EQCQ_DOORBELL */
129 {
130 { 2, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG },
131 { 0, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG },
132 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
133 { 1, SLI4_IF6_EQ_DOORBELL_REG }
134 },
135 /* SLI4_REG_CQ_DOORBELL */
136 {
137 { 2, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG },
138 { 0, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG },
139 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
140 { 1, SLI4_IF6_CQ_DOORBELL_REG }
141 },
142 /* SLI4_REG_FCOE_RQ_DOORBELL */
143 {
144 { 2, SLI4_RQ_DOORBELL_REG }, { 0, SLI4_RQ_DOORBELL_REG },
145 { 0, SLI4_RQ_DOORBELL_REG }, { UINT32_MAX, UINT32_MAX },
146 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
147 { 1, SLI4_IF6_RQ_DOORBELL_REG }
148 },
149 /* SLI4_REG_IO_WQ_DOORBELL */
150 {
151 { 2, SLI4_IO_WQ_DOORBELL_REG }, { 0, SLI4_IO_WQ_DOORBELL_REG },
152 { 0, SLI4_IO_WQ_DOORBELL_REG }, { UINT32_MAX, UINT32_MAX },
153 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
154 { 1, SLI4_IF6_WQ_DOORBELL_REG }
155 },
156 /* SLI4_REG_MQ_DOORBELL */
157 {
158 { 2, SLI4_MQ_DOORBELL_REG }, { 0, SLI4_MQ_DOORBELL_REG },
159 { 0, SLI4_MQ_DOORBELL_REG }, { 0, SLI4_MQ_DOORBELL_REG },
160 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
161 { 1, SLI4_IF6_MQ_DOORBELL_REG }
162 },
163 /* SLI4_REG_PHYSDEV_CONTROL */
164 {
165 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
166 { 0, SLI4_PHSDEV_CONTROL_REG_236 }, { 0, SLI4_PHSDEV_CONTROL_REG_236 },
167 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
168 { 0, SLI4_PHSDEV_CONTROL_REG_236 }
169 },
170 /* SLI4_REG_SLIPORT_CONTROL */
171 {
172 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
173 { 0, SLI4_SLIPORT_CONTROL_REG }, { UINT32_MAX, UINT32_MAX },
174 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
175 { 0, SLI4_SLIPORT_CONTROL_REG },
176 },
177 /* SLI4_REG_SLIPORT_ERROR1 */
178 {
179 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
180 { 0, SLI4_SLIPORT_ERROR1 }, { UINT32_MAX, UINT32_MAX },
181 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
182 { 0, SLI4_SLIPORT_ERROR1 },
183 },
184 /* SLI4_REG_SLIPORT_ERROR2 */
185 {
186 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
187 { 0, SLI4_SLIPORT_ERROR2 }, { UINT32_MAX, UINT32_MAX },
188 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
189 { 0, SLI4_SLIPORT_ERROR2 },
190 },
191 /* SLI4_REG_SLIPORT_SEMAPHORE */
192 {
193 { 1, SLI4_PORT_SEMAPHORE_REG_0 }, { 0, SLI4_PORT_SEMAPHORE_REG_1 },
194 { 0, SLI4_PORT_SEMAPHORE_REG_236 }, { 0, SLI4_PORT_SEMAPHORE_REG_236 },
195 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
196 { 0, SLI4_PORT_SEMAPHORE_REG_236 },
197 },
198 /* SLI4_REG_SLIPORT_STATUS */
199 {
200 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
201 { 0, SLI4_PORT_STATUS_REG_236 }, { 0, SLI4_PORT_STATUS_REG_236 },
202 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
203 { 0, SLI4_PORT_STATUS_REG_236 },
204 },
205 /* SLI4_REG_UERR_MASK_HI */
206 {
207 { 0, SLI4_UERR_MASK_HIGH_REG }, { UINT32_MAX, UINT32_MAX },
208 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
209 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
210 { UINT32_MAX, UINT32_MAX }
211 },
212 /* SLI4_REG_UERR_MASK_LO */
213 {
214 { 0, SLI4_UERR_MASK_LOW_REG }, { UINT32_MAX, UINT32_MAX },
215 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
216 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
217 { UINT32_MAX, UINT32_MAX }
218 },
219 /* SLI4_REG_UERR_STATUS_HI */
220 {
221 { 0, SLI4_UERR_STATUS_HIGH_REG }, { UINT32_MAX, UINT32_MAX },
222 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
223 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
224 { UINT32_MAX, UINT32_MAX }
225 },
226 /* SLI4_REG_UERR_STATUS_LO */
227 {
228 { 0, SLI4_UERR_STATUS_LOW_REG }, { UINT32_MAX, UINT32_MAX },
229 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
230 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
231 { UINT32_MAX, UINT32_MAX }
232 },
233 /* SLI4_REG_SW_UE_CSR1 */
234 {
235 { 1, SLI4_SW_UE_CSR1}, { UINT32_MAX, UINT32_MAX },
236 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
237 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
238 { UINT32_MAX, UINT32_MAX }
239 },
240 /* SLI4_REG_SW_UE_CSR2 */
241 {
242 { 1, SLI4_SW_UE_CSR2}, { UINT32_MAX, UINT32_MAX },
243 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
244 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
245 { UINT32_MAX, UINT32_MAX }
246 },
247 };
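/*
 * Example (illustrative sketch): resolving a register's BAR (register set)
 * and offset through regmap, as described in the comment above the table.
 * This mirrors what sli_reg_read()/sli_reg_write() below do internally;
 * "sli" is assumed to be a pointer to an initialized sli4_t.
 *
 *        const sli4_reg_t *r = &regmap[SLI4_REG_BMBX][sli->if_type];
 *        uint32_t val;
 *
 *        if ((UINT32_MAX != r->rset) && (UINT32_MAX != r->off)) {
 *                val = ocs_reg_read32(sli->os, r->rset, r->off);
 *        }
 */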
248
249 /**
250 * @brief Read the given SLI register.
251 *
252 * @param sli Pointer to the SLI context.
253 * @param reg Register name enum.
254 *
255 * @return Returns the register value.
256 */
257 uint32_t
258 sli_reg_read(sli4_t *sli, sli4_regname_e reg)
259 {
260 const sli4_reg_t *r = &(regmap[reg][sli->if_type]);
261
262 if ((UINT32_MAX == r->rset) || (UINT32_MAX == r->off)) {
263 ocs_log_err(sli->os, "regname %d not defined for if_type %d\n", reg, sli->if_type);
264 return UINT32_MAX;
265 }
266
267 return ocs_reg_read32(sli->os, r->rset, r->off);
268 }
269
270 /**
271 * @brief Write the value to the given SLI register.
272 *
273 * @param sli Pointer to the SLI context.
274 * @param reg Register name enum.
275 * @param val Value to write.
276 *
277 * @return None.
278 */
279 void
280 sli_reg_write(sli4_t *sli, sli4_regname_e reg, uint32_t val)
281 {
282 const sli4_reg_t *r = &(regmap[reg][sli->if_type]);
283
284 if ((UINT32_MAX == r->rset) || (UINT32_MAX == r->off)) {
285 ocs_log_err(sli->os, "regname %d not defined for if_type %d\n", reg, sli->if_type);
286 return;
287 }
288
289 ocs_reg_write32(sli->os, r->rset, r->off, val);
290 }
291
292 /**
293 * @brief Check if the SLI_INTF register is valid.
294 *
295 * @param val 32-bit SLI_INTF register value.
296 *
297 * @return Returns 0 on success, or a non-zero value on failure.
298 */
299 static uint8_t
300 sli_intf_valid_check(uint32_t val)
301 {
302 return ((val >> SLI4_INTF_VALID_SHIFT) & SLI4_INTF_VALID_MASK) != SLI4_INTF_VALID;
303 }
304
305 /**
306 * @brief Retrieve the SLI revision level.
307 *
308 * @param val 32-bit SLI_INTF register value.
309 *
310 * @return Returns the SLI revision level.
311 */
312 static uint8_t
313 sli_intf_sli_revision(uint32_t val)
314 {
315 return ((val >> SLI4_INTF_SLI_REVISION_SHIFT) & SLI4_INTF_SLI_REVISION_MASK);
316 }
317
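/**
 * @brief Retrieve the SLI family.
 *
 * @param val 32-bit SLI_INTF register value.
 *
 * @return Returns the SLI family.
 */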
318 static uint8_t
319 sli_intf_sli_family(uint32_t val)
320 {
321 return ((val >> SLI4_INTF_SLI_FAMILY_SHIFT) & SLI4_INTF_SLI_FAMILY_MASK);
322 }
323
324 /**
325 * @brief Retrieve the SLI interface type.
326 *
327 * @param val 32-bit SLI_INTF register value.
328 *
329 * @return Returns the SLI interface type.
330 */
331 static uint8_t
332 sli_intf_if_type(uint32_t val)
333 {
334 return ((val >> SLI4_INTF_IF_TYPE_SHIFT) & SLI4_INTF_IF_TYPE_MASK);
335 }
336
337 /**
338 * @brief Retrieve PCI revision ID.
339 *
340 * @param val 32-bit PCI CLASS_REVISION register value.
341 *
342 * @return Returns the PCI revision ID.
343 */
344 static uint8_t
345 sli_pci_rev_id(uint32_t val)
346 {
347 return ((val >> SLI4_PCI_REV_ID_SHIFT) & SLI4_PCI_REV_ID_MASK);
348 }
349
350 /**
351 * @brief Retrieve the SLI ASIC generation.
352 *
353 * @param val 32-bit SLI_ASIC_ID register value.
354 *
355 * @return Returns the SLI ASIC generation.
356 */
357 static uint8_t
358 sli_asic_gen(uint32_t val)
359 {
360 return ((val >> SLI4_ASIC_GEN_SHIFT) & SLI4_ASIC_GEN_MASK);
361 }
362
363 /**
364 * @brief Wait for the bootstrap mailbox to report "ready".
365 *
366 * @param sli4 SLI context pointer.
367 * @param msec Number of milliseconds to wait.
368 *
369 * @return Returns 0 if the BMBX is ready, or non-zero otherwise (i.e., a timeout occurred).
370 */
371 static int32_t
372 sli_bmbx_wait(sli4_t *sli4, uint32_t msec)
373 {
374 uint32_t val = 0;
375
376 do {
377 ocs_udelay(SLI4_BMBX_DELAY_US);
378 val = sli_reg_read(sli4, SLI4_REG_BMBX);
379 msec--;
380 } while(msec && !(val & SLI4_BMBX_RDY));
381
382 return(!(val & SLI4_BMBX_RDY));
383 }
384
385 /**
386 * @brief Write bootstrap mailbox.
387 *
388 * @param sli4 SLI context pointer.
389 *
390 * @return Returns 0 if command succeeded, or non-zero otherwise.
391 */
392 static int32_t
393 sli_bmbx_write(sli4_t *sli4)
394 {
395 uint32_t val = 0;
396
397 /* write buffer location to bootstrap mailbox register */
398 ocs_dma_sync(&sli4->bmbx, OCS_DMASYNC_PREWRITE);
399 val = SLI4_BMBX_WRITE_HI(sli4->bmbx.phys);
400 sli_reg_write(sli4, SLI4_REG_BMBX, val);
401
402 if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) {
403 ocs_log_crit(sli4->os, "BMBX WRITE_HI failed\n");
404 return -1;
405 }
406 val = SLI4_BMBX_WRITE_LO(sli4->bmbx.phys);
407 sli_reg_write(sli4, SLI4_REG_BMBX, val);
408
409 /* wait for SLI Port to set ready bit */
410 return sli_bmbx_wait(sli4, SLI4_BMBX_TIMEOUT_MSEC/*XXX*/);
411 }
412
413 #if defined(OCS_INCLUDE_DEBUG)
414 /**
415 * @ingroup sli
416 * @brief Dump BMBX mailbox command.
417 *
418 * @par Description
419 * Convenience function for dumping BMBX mailbox commands. Takes
420 * into account which mailbox command is given since SLI_CONFIG
421 * commands are special.
422 *
423 * @b Note: This function takes advantage of
424 * the one-command-at-a-time nature of the BMBX to be able to
425 * display non-embedded SLI_CONFIG commands. This will not work
426 * for mailbox commands on the MQ. Luckily, all current non-emb
427 * mailbox commands go through the BMBX.
428 *
429 * @param sli4 SLI context pointer.
430 * @param mbx Pointer to mailbox command to dump.
431 * @param prefix Prefix for dump label.
432 *
433 * @return None.
434 */
435 static void
436 sli_dump_bmbx_command(sli4_t *sli4, void *mbx, const char *prefix)
437 {
438 uint32_t size = 0;
439 char label[64];
440 uint32_t i;
441 /* Mailbox diagnostic logging */
442 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mbx;
443
444 if (!ocs_debug_is_enabled(OCS_DEBUG_ENABLE_MQ_DUMP)) {
445 return;
446 }
447
448 if (hdr->command == SLI4_MBOX_COMMAND_SLI_CONFIG) {
449 sli4_cmd_sli_config_t *sli_config = (sli4_cmd_sli_config_t *)hdr;
450 sli4_req_hdr_t *sli_config_hdr;
451 if (sli_config->emb) {
452 ocs_snprintf(label, sizeof(label), "%s (emb)", prefix);
453
454 /* if embedded, dump entire command */
455 sli_config_hdr = (sli4_req_hdr_t *)sli_config->payload.embed;
456 size = sizeof(*sli_config) - sizeof(sli_config->payload) +
457 sli_config_hdr->request_length + (4*sizeof(uint32_t));
458 ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, label,
459 (uint8_t *)sli4->bmbx.virt, size);
460 } else {
461 sli4_sli_config_pmd_t *pmd;
462 ocs_snprintf(label, sizeof(label), "%s (non-emb hdr)", prefix);
463
464 /* if non-embedded, break up into two parts: SLI_CONFIG hdr
465 and the payload(s) */
466 size = sizeof(*sli_config) - sizeof(sli_config->payload) + (12 * sli_config->pmd_count);
467 ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, label,
468 (uint8_t *)sli4->bmbx.virt, size);
469
470 /* as sanity check, make sure first PMD matches what was saved */
471 pmd = &sli_config->payload.mem;
472 if ((pmd->address_high == ocs_addr32_hi(sli4->bmbx_non_emb_pmd->phys)) &&
473 (pmd->address_low == ocs_addr32_lo(sli4->bmbx_non_emb_pmd->phys))) {
474 for (i = 0; i < sli_config->pmd_count; i++, pmd++) {
475 sli_config_hdr = sli4->bmbx_non_emb_pmd->virt;
476 ocs_snprintf(label, sizeof(label), "%s (non-emb pay[%d])",
477 prefix, i);
478 ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, label,
479 (uint8_t *)sli4->bmbx_non_emb_pmd->virt,
480 sli_config_hdr->request_length + (4*sizeof(uint32_t)));
481 }
482 } else {
483 ocs_log_debug(sli4->os, "pmd addr does not match pmd:%x %x (%x %x)\n",
484 pmd->address_high, pmd->address_low,
485 ocs_addr32_hi(sli4->bmbx_non_emb_pmd->phys),
486 ocs_addr32_lo(sli4->bmbx_non_emb_pmd->phys));
487 }
488 }
489 } else {
490 /* not an SLI_CONFIG command, just display first 64 bytes, like we do
491 for MQEs */
492 size = 64;
493 ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, prefix,
494 (uint8_t *)mbx, size);
495 }
496 }
497 #endif
498
499 /**
500 * @ingroup sli
501 * @brief Submit a command to the bootstrap mailbox and check the status.
502 *
503 * @param sli4 SLI context pointer.
504 *
505 * @return Returns 0 on success, or a non-zero value on failure.
506 */
507 int32_t
508 sli_bmbx_command(sli4_t *sli4)
509 {
510 void *cqe = (uint8_t *)sli4->bmbx.virt + SLI4_BMBX_SIZE;
511
512 #if defined(OCS_INCLUDE_DEBUG)
513 sli_dump_bmbx_command(sli4, sli4->bmbx.virt, "bmbx cmd");
514 #endif
515
516 if (sli_fw_error_status(sli4) > 0) {
517 ocs_log_crit(sli4->os, "Chip is in an error state - Mailbox "
518 "command rejected status=%#x error1=%#x error2=%#x\n",
519 sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS),
520 sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR1),
521 sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR2));
522 return -1;
523 }
524
525 if (sli_bmbx_write(sli4)) {
526 ocs_log_crit(sli4->os, "bootstrap mailbox write fail phys=%p reg=%#x\n",
527 (void*)sli4->bmbx.phys,
528 sli_reg_read(sli4, SLI4_REG_BMBX));
529 return -1;
530 }
531
532 /* check completion queue entry status */
533 ocs_dma_sync(&sli4->bmbx, OCS_DMASYNC_POSTREAD);
534 if (((sli4_mcqe_t *)cqe)->val) {
535 #if defined(OCS_INCLUDE_DEBUG)
536 sli_dump_bmbx_command(sli4, sli4->bmbx.virt, "bmbx cmpl");
537 ocs_dump32(OCS_DEBUG_ENABLE_CQ_DUMP, sli4->os, "bmbx cqe", cqe, sizeof(sli4_mcqe_t));
538 #endif
539 return sli_cqe_mq(cqe);
540 } else {
541 ocs_log_err(sli4->os, "invalid or wrong type\n");
542 return -1;
543 }
544 }
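/*
 * Example (illustrative sketch): the typical calling pattern formats a
 * mailbox command directly into the bootstrap mailbox buffer and then
 * submits it with sli_bmbx_command(). On success, the completed command
 * (including any response fields) can be read back from sli4->bmbx.virt;
 * error handling is omitted here.
 *
 *        if (sli_cmd_read_nvparms(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE) &&
 *            (sli_bmbx_command(sli4) == 0)) {
 *                sli4_cmd_read_nvparms_t *rsp = sli4->bmbx.virt;
 *                ocs_log_debug(sli4->os, "mailbox command %#x completed\n", rsp->hdr.command);
 *        }
 */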
545
546 /****************************************************************************
547 * Messages
548 */
549
550 /**
551 * @ingroup sli
552 * @brief Write a CONFIG_LINK command to the provided buffer.
553 *
554 * @param sli4 SLI context pointer.
555 * @param buf Virtual pointer to the destination buffer.
556 * @param size Buffer size, in bytes.
557 *
558 * @return Returns the number of bytes written.
559 */
560 int32_t
561 sli_cmd_config_link(sli4_t *sli4, void *buf, size_t size)
562 {
563 sli4_cmd_config_link_t *config_link = buf;
564
565 ocs_memset(buf, 0, size);
566
567 config_link->hdr.command = SLI4_MBOX_COMMAND_CONFIG_LINK;
568
569 /* Port interprets zero in a field as "use default value" */
570
571 return sizeof(sli4_cmd_config_link_t);
572 }
573
574 /**
575 * @ingroup sli
576 * @brief Write a DOWN_LINK command to the provided buffer.
577 *
578 * @param sli4 SLI context pointer.
579 * @param buf Virtual pointer to the destination buffer.
580 * @param size Buffer size, in bytes.
581 *
582 * @return Returns the number of bytes written.
583 */
584 int32_t
585 sli_cmd_down_link(sli4_t *sli4, void *buf, size_t size)
586 {
587 sli4_mbox_command_header_t *hdr = buf;
588
589 ocs_memset(buf, 0, size);
590
591 hdr->command = SLI4_MBOX_COMMAND_DOWN_LINK;
592
593 /* Port interprets zero in a field as "use default value" */
594
595 return sizeof(sli4_mbox_command_header_t);
596 }
597
598 /**
599 * @ingroup sli
600 * @brief Write a DUMP Type 4 command to the provided buffer.
601 *
602 * @param sli4 SLI context pointer.
603 * @param buf Virtual pointer to the destination buffer.
604 * @param size Buffer size, in bytes.
605 * @param wki The well known item ID.
606 *
607 * @return Returns the number of bytes written.
608 */
609 int32_t
610 sli_cmd_dump_type4(sli4_t *sli4, void *buf, size_t size, uint16_t wki)
611 {
612 sli4_cmd_dump4_t *cmd = buf;
613
614 ocs_memset(buf, 0, size);
615
616 cmd->hdr.command = SLI4_MBOX_COMMAND_DUMP;
617 cmd->type = 4;
618 cmd->wki_selection = wki;
619 return sizeof(sli4_cmd_dump4_t);
620 }
621
622 /**
623 * @ingroup sli
624 * @brief Write a COMMON_READ_TRANSCEIVER_DATA command.
625 *
626 * @param sli4 SLI context.
627 * @param buf Destination buffer for the command.
628 * @param size Buffer size, in bytes.
629 * @param page_num The page of SFP data to retrieve (0xa0 or 0xa2).
630 * @param dma DMA structure from which the data will be copied.
631 *
632 * @note This creates a Version 0 message.
633 *
634 * @return Returns the number of bytes written.
635 */
636 int32_t
637 sli_cmd_common_read_transceiver_data(sli4_t *sli4, void *buf, size_t size, uint32_t page_num,
638 ocs_dma_t *dma)
639 {
640 sli4_req_common_read_transceiver_data_t *req = NULL;
641 uint32_t sli_config_off = 0;
642 uint32_t payload_size;
643
644 if (dma == NULL) {
645 /* Payload length must accommodate both request and response */
646 payload_size = max(sizeof(sli4_req_common_read_transceiver_data_t),
647 sizeof(sli4_res_common_read_transceiver_data_t));
648 } else {
649 payload_size = dma->size;
650 }
651
652 if (sli4->port_type == SLI4_PORT_TYPE_FC) {
653 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, dma);
654 }
655
656 if (dma == NULL) {
657 req = (sli4_req_common_read_transceiver_data_t *)((uint8_t *)buf + sli_config_off);
658 } else {
659 req = (sli4_req_common_read_transceiver_data_t *)dma->virt;
660 ocs_memset(req, 0, dma->size);
661 }
662
663 req->hdr.opcode = SLI4_OPC_COMMON_READ_TRANSCEIVER_DATA;
664 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
665 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
666
667 req->page_number = page_num;
668 req->port = sli4->physical_port;
669
670 return(sli_config_off + sizeof(sli4_req_common_read_transceiver_data_t));
671 }
672
673 /**
674 * @ingroup sli
675 * @brief Write a READ_LINK_STAT command to the provided buffer.
676 *
677 * @param sli4 SLI context pointer.
678 * @param buf Virtual pointer to the destination buffer.
679 * @param size Buffer size, in bytes.
680 * @param req_ext_counters If TRUE, then the extended counters will be requested.
681 * @param clear_overflow_flags If TRUE, then overflow flags will be cleared.
682 * @param clear_all_counters If TRUE, the counters will be cleared.
683 *
684 * @return Returns the number of bytes written.
685 */
686 int32_t
687 sli_cmd_read_link_stats(sli4_t *sli4, void *buf, size_t size,
688 uint8_t req_ext_counters,
689 uint8_t clear_overflow_flags,
690 uint8_t clear_all_counters)
691 {
692 sli4_cmd_read_link_stats_t *cmd = buf;
693
694 ocs_memset(buf, 0, size);
695
696 cmd->hdr.command = SLI4_MBOX_COMMAND_READ_LNK_STAT;
697 cmd->rec = req_ext_counters;
698 cmd->clrc = clear_all_counters;
699 cmd->clof = clear_overflow_flags;
700 return sizeof(sli4_cmd_read_link_stats_t);
701 }
702
703 /**
704 * @ingroup sli
705 * @brief Write a READ_STATUS command to the provided buffer.
706 *
707 * @param sli4 SLI context pointer.
708 * @param buf Virtual pointer to the destination buffer.
709 * @param size Buffer size, in bytes.
710 * @param clear_counters If TRUE, the counters will be cleared.
711 *
712 * @return Returns the number of bytes written.
713 */
714 int32_t
715 sli_cmd_read_status(sli4_t *sli4, void *buf, size_t size,
716 uint8_t clear_counters)
717 {
718 sli4_cmd_read_status_t *cmd = buf;
719
720 ocs_memset(buf, 0, size);
721
722 cmd->hdr.command = SLI4_MBOX_COMMAND_READ_STATUS;
723 cmd->cc = clear_counters;
724 return sizeof(sli4_cmd_read_status_t);
725 }
726
727 /**
728 * @brief Write a FW_DEINITIALIZE command to the provided buffer.
729 *
730 * @param sli4 SLI context pointer.
731 * @param buf Virtual pointer to the destination buffer.
732 * @param size Buffer size, in bytes.
733 *
734 * @return Returns the number of bytes written.
735 */
736 static int32_t
737 sli_cmd_fw_deinitialize(sli4_t *sli4, void *buf, size_t size)
738 {
739
740 ocs_memset(buf, 0, size);
741 ocs_memcpy(buf, sli4_fw_deinitialize, sizeof(sli4_fw_deinitialize));
742
743 return sizeof(sli4_fw_deinitialize);
744 }
745
746 /**
747 * @brief Write a FW_INITIALIZE command to the provided buffer.
748 *
749 * @param sli4 SLI context pointer.
750 * @param buf Virtual pointer to the destination buffer.
751 * @param size Buffer size, in bytes.
752 *
753 * @return Returns the number of bytes written.
754 */
755 static int32_t
756 sli_cmd_fw_initialize(sli4_t *sli4, void *buf, size_t size)
757 {
758
759 ocs_memset(buf, 0, size);
760 ocs_memcpy(buf, sli4_fw_initialize, sizeof(sli4_fw_initialize));
761
762 return sizeof(sli4_fw_initialize);
763 }
764
765 /**
766 * @ingroup sli
767 * @brief Update the INIT_LINK flags with the SLI config topology.
768 *
769 * @param sli4 SLI context pointer.
770 * @param init_link Pointer to the INIT_LINK command.
771 *
772 * @return Returns 0 on success, or -1 on failure.
773 */
774 static int32_t
775 sli4_set_link_flags_config_topo(sli4_t *sli4, sli4_cmd_init_link_t *init_link)
776 {
777
778 switch (sli4->config.topology) {
779 case SLI4_READ_CFG_TOPO_FC:
780 /* Attempt P2P but failover to FC-AL */
781 init_link->link_flags.enable_topology_failover = TRUE;
782 init_link->link_flags.topology = SLI4_INIT_LINK_F_P2P_FAIL_OVER;
783 break;
784 case SLI4_READ_CFG_TOPO_FC_AL:
785 init_link->link_flags.topology = SLI4_INIT_LINK_F_FCAL_ONLY;
786 return (!sli_fcal_is_speed_supported(init_link->link_speed_selection_code));
787
788 case SLI4_READ_CFG_TOPO_FC_DA:
789 init_link->link_flags.topology = FC_TOPOLOGY_P2P;
790 break;
791 default:
792 ocs_log_err(sli4->os, "unsupported topology %#x\n", sli4->config.topology);
793 return -1;
794 }
795
796 return 0;
797 }
798
799 /**
800 * @ingroup sli
801 * @brief Update the INIT_LINK flags with the persistent topology.
802 * The persistent topology (PT) is stored in a form compatible with link_flags, so it is assigned directly.
803 *
804 * @param sli4 SLI context pointer.
805 * @param init_link Pointer to the INIT_LINK command.
806 *
807 * @return Returns 0 on success, or -1 on failure.
808 */
809 static int32_t
810 sli4_set_link_flags_persistent_topo(sli4_t *sli4, sli4_cmd_init_link_t *init_link)
811 {
812 if ((sli4->config.pt == SLI4_INIT_LINK_F_FCAL_ONLY) &&
813 (!sli_fcal_is_speed_supported(init_link->link_speed_selection_code)))
814 return -1;
815
816 init_link->link_flags.enable_topology_failover = sli4->config.tf;
817 init_link->link_flags.topology = sli4->config.pt;
818
819 return 0;
820 }
821
822 /**
823 * @ingroup sli
824 * @brief Write an INIT_LINK command to the provided buffer.
825 *
826 * @param sli4 SLI context pointer.
827 * @param buf Virtual pointer to the destination buffer.
828 * @param size Buffer size, in bytes.
829 * @param speed Link speed.
830 * @param reset_alpa For native FC, this is the selective reset AL_PA.
831 *
832 * @return Returns the number of bytes written.
833 */
834 int32_t
835 sli_cmd_init_link(sli4_t *sli4, void *buf, size_t size, uint32_t speed, uint8_t reset_alpa)
836 {
837 sli4_cmd_init_link_t *init_link = buf;
838 int32_t rc = 0;
839
840 ocs_memset(buf, 0, size);
841
842 init_link->hdr.command = SLI4_MBOX_COMMAND_INIT_LINK;
843
844 /* Most fields only have meaning for FC links */
845 if (sli4->config.topology != SLI4_READ_CFG_TOPO_FCOE) {
846 init_link->selective_reset_al_pa = reset_alpa;
847 init_link->link_flags.loopback = FALSE;
848
849 init_link->link_speed_selection_code = speed;
850 switch (speed) {
851 case FC_LINK_SPEED_1G:
852 case FC_LINK_SPEED_2G:
853 case FC_LINK_SPEED_4G:
854 case FC_LINK_SPEED_8G:
855 case FC_LINK_SPEED_16G:
856 case FC_LINK_SPEED_32G:
857 init_link->link_flags.fixed_speed = TRUE;
858 break;
859 case FC_LINK_SPEED_10G:
860 ocs_log_test(sli4->os, "unsupported FC speed %d\n", speed);
861 return 0;
862 }
863
864 init_link->link_flags.unfair = FALSE;
865 init_link->link_flags.skip_lirp_lilp = FALSE;
866 init_link->link_flags.gen_loop_validity_check = FALSE;
867 init_link->link_flags.skip_lisa = FALSE;
868 init_link->link_flags.select_hightest_al_pa = FALSE;
869
870 /* Update the topology in the link flags for link bring-up */
871 ocs_log_info(sli4->os, "bring up link with topology: %d, PTV: %d, TF: %d, PT: %d\n",
872 sli4->config.topology, sli4->config.ptv, sli4->config.tf, sli4->config.pt);
873 if (sli4->config.ptv)
874 rc = sli4_set_link_flags_persistent_topo(sli4, init_link);
875 else
876 rc = sli4_set_link_flags_config_topo(sli4, init_link);
877
878 }
879
880 return rc ? 0 : sizeof(sli4_cmd_init_link_t);
881 }
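/*
 * Example (illustrative sketch): bringing the link up through the bootstrap
 * mailbox at a fixed speed. FC_LINK_SPEED_16G is one of the speed codes
 * handled above; passing 0 for reset_alpa (assumed here to mean "no
 * selective reset") and submitting via sli_bmbx_command().
 *
 *        if (sli_cmd_init_link(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *                              FC_LINK_SPEED_16G, 0) &&
 *            (sli_bmbx_command(sli4) == 0)) {
 *                ocs_log_debug(sli4->os, "INIT_LINK issued\n");
 *        }
 */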
882
883 /**
884 * @ingroup sli
885 * @brief Write an INIT_VFI command to the provided buffer.
886 *
887 * @param sli4 SLI context pointer.
888 * @param buf Virtual pointer to the destination buffer.
889 * @param size Buffer size, in bytes.
890 * @param vfi VFI
891 * @param fcfi FCFI
892 * @param vpi VPI (Set to -1 if unused.)
893 *
894 * @return Returns the number of bytes written.
895 */
896 int32_t
897 sli_cmd_init_vfi(sli4_t *sli4, void *buf, size_t size, uint16_t vfi,
898 uint16_t fcfi, uint16_t vpi)
899 {
900 sli4_cmd_init_vfi_t *init_vfi = buf;
901
902 ocs_memset(buf, 0, size);
903
904 init_vfi->hdr.command = SLI4_MBOX_COMMAND_INIT_VFI;
905
906 init_vfi->vfi = vfi;
907 init_vfi->fcfi = fcfi;
908
909 /*
910 * If the VPI is valid, initialize it at the same time as
911 * the VFI
912 */
913 if (0xffff != vpi) {
914 init_vfi->vp = TRUE;
915 init_vfi->vpi = vpi;
916 }
917
918 return sizeof(sli4_cmd_init_vfi_t);
919 }
920
921 /**
922 * @ingroup sli
923 * @brief Write an INIT_VPI command to the provided buffer.
924 *
925 * @param sli4 SLI context pointer.
926 * @param buf Virtual pointer to the destination buffer.
927 * @param size Buffer size, in bytes.
928 * @param vpi VPI allocated.
929 * @param vfi VFI associated with this VPI.
930 *
931 * @return Returns the number of bytes written.
932 */
933 int32_t
934 sli_cmd_init_vpi(sli4_t *sli4, void *buf, size_t size, uint16_t vpi, uint16_t vfi)
935 {
936 sli4_cmd_init_vpi_t *init_vpi = buf;
937
938 ocs_memset(buf, 0, size);
939
940 init_vpi->hdr.command = SLI4_MBOX_COMMAND_INIT_VPI;
941 init_vpi->vpi = vpi;
942 init_vpi->vfi = vfi;
943
944 return sizeof(sli4_cmd_init_vpi_t);
945 }
946
947 /**
948 * @ingroup sli
949 * @brief Write a POST_XRI command to the provided buffer.
950 *
951 * @param sli4 SLI context pointer.
952 * @param buf Virtual pointer to the destination buffer.
953 * @param size Buffer size, in bytes.
954 * @param xri_base Starting XRI value for range of XRI given to SLI Port.
955 * @param xri_count Number of XRIs provided to the SLI Port.
956 *
957 * @return Returns the number of bytes written.
958 */
959 int32_t
960 sli_cmd_post_xri(sli4_t *sli4, void *buf, size_t size, uint16_t xri_base, uint16_t xri_count)
961 {
962 sli4_cmd_post_xri_t *post_xri = buf;
963
964 ocs_memset(buf, 0, size);
965
966 post_xri->hdr.command = SLI4_MBOX_COMMAND_POST_XRI;
967 post_xri->xri_base = xri_base;
968 post_xri->xri_count = xri_count;
969
970 if (sli4->config.auto_xfer_rdy == 0) {
971 post_xri->enx = TRUE;
972 post_xri->val = TRUE;
973 }
974
975 return sizeof(sli4_cmd_post_xri_t);
976 }
977
978 /**
979 * @ingroup sli
980 * @brief Write a RELEASE_XRI command to the provided buffer.
981 *
982 * @param sli4 SLI context pointer.
983 * @param buf Virtual pointer to the destination buffer.
984 * @param size Buffer size, in bytes.
985 * @param num_xri The number of XRIs to be released.
986 *
987 * @return Returns the number of bytes written.
988 */
989 int32_t
990 sli_cmd_release_xri(sli4_t *sli4, void *buf, size_t size, uint8_t num_xri)
991 {
992 sli4_cmd_release_xri_t *release_xri = buf;
993
994 ocs_memset(buf, 0, size);
995
996 release_xri->hdr.command = SLI4_MBOX_COMMAND_RELEASE_XRI;
997 release_xri->xri_count = num_xri;
998
999 return sizeof(sli4_cmd_release_xri_t);
1000 }
1001
1002 /**
1003 * @brief Write a READ_CONFIG command to the provided buffer.
1004 *
1005 * @param sli4 SLI context pointer.
1006 * @param buf Virtual pointer to the destination buffer.
1007 * @param size Buffer size, in bytes
1008 *
1009 * @return Returns the number of bytes written.
1010 */
1011 static int32_t
1012 sli_cmd_read_config(sli4_t *sli4, void *buf, size_t size)
1013 {
1014 sli4_cmd_read_config_t *read_config = buf;
1015
1016 ocs_memset(buf, 0, size);
1017
1018 read_config->hdr.command = SLI4_MBOX_COMMAND_READ_CONFIG;
1019
1020 return sizeof(sli4_cmd_read_config_t);
1021 }
1022
1023 /**
1024 * @brief Write a READ_NVPARMS command to the provided buffer.
1025 *
1026 * @param sli4 SLI context pointer.
1027 * @param buf Virtual pointer to the destination buffer.
1028 * @param size Buffer size, in bytes.
1029 *
1030 * @return Returns the number of bytes written.
1031 */
1032 int32_t
1033 sli_cmd_read_nvparms(sli4_t *sli4, void *buf, size_t size)
1034 {
1035 sli4_cmd_read_nvparms_t *read_nvparms = buf;
1036
1037 ocs_memset(buf, 0, size);
1038
1039 read_nvparms->hdr.command = SLI4_MBOX_COMMAND_READ_NVPARMS;
1040
1041 return sizeof(sli4_cmd_read_nvparms_t);
1042 }
1043
1044 /**
1045 * @brief Write a WRITE_NVPARMS command to the provided buffer.
1046 *
1047 * @param sli4 SLI context pointer.
1048 * @param buf Virtual pointer to the destination buffer.
1049 * @param size Buffer size, in bytes.
1050 * @param wwpn WWPN to write - pointer to array of 8 uint8_t.
1051 * @param wwnn WWNN to write - pointer to array of 8 uint8_t.
1052 * @param hard_alpa Hard ALPA to write.
1053 * @param preferred_d_id Preferred D_ID to write.
1054 *
1055 * @return Returns the number of bytes written.
1056 */
1057 int32_t
1058 sli_cmd_write_nvparms(sli4_t *sli4, void *buf, size_t size, uint8_t *wwpn, uint8_t *wwnn, uint8_t hard_alpa,
1059 uint32_t preferred_d_id)
1060 {
1061 sli4_cmd_write_nvparms_t *write_nvparms = buf;
1062
1063 ocs_memset(buf, 0, size);
1064
1065 write_nvparms->hdr.command = SLI4_MBOX_COMMAND_WRITE_NVPARMS;
1066 ocs_memcpy(write_nvparms->wwpn, wwpn, 8);
1067 ocs_memcpy(write_nvparms->wwnn, wwnn, 8);
1068 write_nvparms->hard_alpa = hard_alpa;
1069 write_nvparms->preferred_d_id = preferred_d_id;
1070
1071 return sizeof(sli4_cmd_write_nvparms_t);
1072 }
1073
1074 /**
1075 * @brief Write a READ_REV command to the provided buffer.
1076 *
1077 * @param sli4 SLI context pointer.
1078 * @param buf Virtual pointer to the destination buffer.
1079 * @param size Buffer size, in bytes.
1080 * @param vpd Pointer to the buffer.
1081 *
1082 * @return Returns the number of bytes written.
1083 */
1084 static int32_t
1085 sli_cmd_read_rev(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *vpd)
1086 {
1087 sli4_cmd_read_rev_t *read_rev = buf;
1088
1089 ocs_memset(buf, 0, size);
1090
1091 read_rev->hdr.command = SLI4_MBOX_COMMAND_READ_REV;
1092
1093 if (vpd && vpd->size) {
1094 read_rev->vpd = TRUE;
1095
1096 read_rev->available_length = vpd->size;
1097
1098 read_rev->physical_address_low = ocs_addr32_lo(vpd->phys);
1099 read_rev->physical_address_high = ocs_addr32_hi(vpd->phys);
1100 }
1101
1102 return sizeof(sli4_cmd_read_rev_t);
1103 }
1104
1105 /**
1106 * @ingroup sli
1107 * @brief Write a READ_SPARM64 command to the provided buffer.
1108 *
1109 * @param sli4 SLI context pointer.
1110 * @param buf Virtual pointer to the destination buffer.
1111 * @param size Buffer size, in bytes.
1112 * @param dma DMA buffer for the service parameters.
1113 * @param vpi VPI used to determine the WWN.
1114 *
1115 * @return Returns the number of bytes written.
1116 */
1117 int32_t
1118 sli_cmd_read_sparm64(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma,
1119 uint16_t vpi)
1120 {
1121 sli4_cmd_read_sparm64_t *read_sparm64 = buf;
1122
1123 ocs_memset(buf, 0, size);
1124
1125 if (SLI4_READ_SPARM64_VPI_SPECIAL == vpi) {
1126 ocs_log_test(sli4->os, "special VPI not supported!!!\n");
1127 return -1;
1128 }
1129
1130 if (!dma || !dma->phys) {
1131 ocs_log_test(sli4->os, "bad DMA buffer\n");
1132 return -1;
1133 }
1134
1135 read_sparm64->hdr.command = SLI4_MBOX_COMMAND_READ_SPARM64;
1136
1137 read_sparm64->bde_64.bde_type = SLI4_BDE_TYPE_BDE_64;
1138 read_sparm64->bde_64.buffer_length = dma->size;
1139 read_sparm64->bde_64.u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
1140 read_sparm64->bde_64.u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
1141
1142 read_sparm64->vpi = vpi;
1143
1144 return sizeof(sli4_cmd_read_sparm64_t);
1145 }
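/*
 * Example (illustrative sketch): reading the port's service parameters into
 * a caller-allocated DMA buffer and submitting the command through the
 * bootstrap mailbox. The ocs_dma_alloc()/ocs_dma_free() signatures and the
 * size/alignment values are assumptions; "vpi" is a previously allocated
 * VPI.
 *
 *        ocs_dma_t sparm;
 *
 *        if (ocs_dma_alloc(sli4->os, &sparm, 0x100, 16) == 0) {
 *                if (sli_cmd_read_sparm64(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *                                         &sparm, vpi) > 0) {
 *                        sli_bmbx_command(sli4);
 *                }
 *                ocs_dma_free(sli4->os, &sparm);
 *        }
 */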
1146
1147 /**
1148 * @ingroup sli
1149 * @brief Write a READ_TOPOLOGY command to the provided buffer.
1150 *
1151 * @param sli4 SLI context pointer.
1152 * @param buf Virtual pointer to the destination buffer.
1153 * @param size Buffer size, in bytes.
1154 * @param dma DMA buffer for loop map (optional).
1155 *
1156 * @return Returns the number of bytes written.
1157 */
1158 int32_t
1159 sli_cmd_read_topology(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
1160 {
1161 sli4_cmd_read_topology_t *read_topo = buf;
1162
1163 ocs_memset(buf, 0, size);
1164
1165 read_topo->hdr.command = SLI4_MBOX_COMMAND_READ_TOPOLOGY;
1166
1167 if (dma && dma->size) {
1168 if (dma->size < SLI4_MIN_LOOP_MAP_BYTES) {
1169 ocs_log_test(sli4->os, "loop map buffer too small %jd\n",
1170 dma->size);
1171 return 0;
1172 }
1173
1174 ocs_memset(dma->virt, 0, dma->size);
1175
1176 read_topo->bde_loop_map.bde_type = SLI4_BDE_TYPE_BDE_64;
1177 read_topo->bde_loop_map.buffer_length = dma->size;
1178 read_topo->bde_loop_map.u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
1179 read_topo->bde_loop_map.u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
1180 }
1181
1182 return sizeof(sli4_cmd_read_topology_t);
1183 }
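/*
 * Example (illustrative sketch): requesting the current topology with an
 * optional loop map buffer (only meaningful when the link comes up in
 * FC-AL). The DMA allocation call and its alignment argument are
 * assumptions.
 *
 *        ocs_dma_t loop_map;
 *
 *        if (ocs_dma_alloc(sli4->os, &loop_map, SLI4_MIN_LOOP_MAP_BYTES, 4) == 0) {
 *                sli_cmd_read_topology(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *                                      &loop_map);
 *                sli_bmbx_command(sli4);
 *                ocs_dma_free(sli4->os, &loop_map);
 *        }
 */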
1184
1185 /**
1186 * @ingroup sli
1187 * @brief Write a REG_FCFI command to the provided buffer.
1188 *
1189 * @param sli4 SLI context pointer.
1190 * @param buf Virtual pointer to the destination buffer.
1191 * @param size Buffer size, in bytes.
1192 * @param index FCF index returned by READ_FCF_TABLE.
1193 * @param rq_cfg RQ_ID/R_CTL/TYPE routing information
1194 * @param vlan_id VLAN ID tag.
1195 *
1196 * @return Returns the number of bytes written.
1197 */
1198 int32_t
1199 sli_cmd_reg_fcfi(sli4_t *sli4, void *buf, size_t size, uint16_t index, sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG], uint16_t vlan_id)
1200 {
1201 sli4_cmd_reg_fcfi_t *reg_fcfi = buf;
1202 uint32_t i;
1203
1204 ocs_memset(buf, 0, size);
1205
1206 reg_fcfi->hdr.command = SLI4_MBOX_COMMAND_REG_FCFI;
1207
1208 reg_fcfi->fcf_index = index;
1209
1210 for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
1211 switch(i) {
1212 case 0: reg_fcfi->rq_id_0 = rq_cfg[0].rq_id; break;
1213 case 1: reg_fcfi->rq_id_1 = rq_cfg[1].rq_id; break;
1214 case 2: reg_fcfi->rq_id_2 = rq_cfg[2].rq_id; break;
1215 case 3: reg_fcfi->rq_id_3 = rq_cfg[3].rq_id; break;
1216 }
1217 reg_fcfi->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
1218 reg_fcfi->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
1219 reg_fcfi->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
1220 reg_fcfi->rq_cfg[i].type_match = rq_cfg[i].type_match;
1221 }
1222
1223 if (vlan_id) {
1224 reg_fcfi->vv = TRUE;
1225 reg_fcfi->vlan_tag = vlan_id;
1226 }
1227
1228 return sizeof(sli4_cmd_reg_fcfi_t);
1229 }
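/*
 * Example (illustrative sketch): registering the FCF with a single catch-all
 * RQ. A mask of zero is assumed to make the R_CTL/TYPE comparison a
 * "don't care", and an rq_id of 0xffff is assumed to mark an unused routing
 * entry; "rq_id" is the identifier of the receive queue that should get all
 * frames, and an FCF index of 0 with no VLAN is used for simplicity.
 *
 *        sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
 *        uint32_t i;
 *
 *        ocs_memset(rq_cfg, 0, sizeof(rq_cfg));
 *        for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
 *                rq_cfg[i].rq_id = 0xffff;
 *        }
 *        rq_cfg[0].rq_id = rq_id;
 *        sli_cmd_reg_fcfi(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, 0, rq_cfg, 0);
 *        sli_bmbx_command(sli4);
 */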
1230
1231 /**
1232 * @brief Write a REG_FCFI_MRQ command to the provided buffer.
1233 *
1234 * @param sli4 SLI context pointer.
1235 * @param buf Virtual pointer to the destination buffer.
1236 * @param size Buffer size, in bytes.
1237 * @param mode Registration mode (set-FCFI mode or MRQ configuration mode).
1238 * @param fcf_index FCF index returned by READ_FCF_TABLE.
1239 * @param vlan_id VLAN ID tag.
1240 * @param rq_selection_policy RQ selection policy.
1241 * @param mrq_bit_mask MRQ filter bit mask.
1242 * @param num_mrqs Number of MRQ pairs.
1243 * @param rq_cfg RQ_ID/R_CTL/TYPE routing information.
1244 *
1245 * @return Returns the number of bytes written.
1246 */
1247 int32_t
1248 sli_cmd_reg_fcfi_mrq(sli4_t *sli4, void *buf, size_t size, uint8_t mode,
1249 uint16_t fcf_index, uint16_t vlan_id, uint8_t rq_selection_policy,
1250 uint8_t mrq_bit_mask, uint16_t num_mrqs,
1251 sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG])
1252 {
1253 sli4_cmd_reg_fcfi_mrq_t *reg_fcfi_mrq = buf;
1254 uint32_t i;
1255
1256 ocs_memset(buf, 0, size);
1257
1258 reg_fcfi_mrq->hdr.command = SLI4_MBOX_COMMAND_REG_FCFI_MRQ;
1259 if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
1260 reg_fcfi_mrq->fcf_index = fcf_index;
1261 if (vlan_id) {
1262 reg_fcfi_mrq->vv = TRUE;
1263 reg_fcfi_mrq->vlan_tag = vlan_id;
1264 }
1265 goto done;
1266 }
1267
1268 reg_fcfi_mrq->mode = mode;
1269 for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
1270 reg_fcfi_mrq->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
1271 reg_fcfi_mrq->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
1272 reg_fcfi_mrq->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
1273 reg_fcfi_mrq->rq_cfg[i].type_match = rq_cfg[i].type_match;
1274
1275 switch(i) {
1276 case 3: reg_fcfi_mrq->rq_id_3 = rq_cfg[i].rq_id; break;
1277 case 2: reg_fcfi_mrq->rq_id_2 = rq_cfg[i].rq_id; break;
1278 case 1: reg_fcfi_mrq->rq_id_1 = rq_cfg[i].rq_id; break;
1279 case 0: reg_fcfi_mrq->rq_id_0 = rq_cfg[i].rq_id; break;
1280 }
1281 }
1282
1283 reg_fcfi_mrq->rq_selection_policy = rq_selection_policy;
1284 reg_fcfi_mrq->mrq_filter_bitmask = mrq_bit_mask;
1285 reg_fcfi_mrq->num_mrq_pairs = num_mrqs;
1286 done:
1287 return sizeof(sli4_cmd_reg_fcfi_mrq_t);
1288 }
1289
1290 /**
1291 * @ingroup sli
1292 * @brief Write a REG_RPI command to the provided buffer.
1293 *
1294 * @param sli4 SLI context pointer.
1295 * @param buf Virtual pointer to the destination buffer.
1296 * @param size Buffer size, in bytes.
1297 * @param nport_id Remote F/N_Port_ID.
1298 * @param rpi Previously-allocated Remote Port Indicator.
1299 * @param vpi Previously-allocated Virtual Port Indicator.
1300 * @param dma DMA buffer that contains the remote port's service parameters.
1301 * @param update Boolean indicating an update to an existing RPI (TRUE)
1302 * or a new registration (FALSE).
1303 *
1304 * @return Returns the number of bytes written.
1305 */
1306 int32_t
1307 sli_cmd_reg_rpi(sli4_t *sli4, void *buf, size_t size, uint32_t nport_id, uint16_t rpi,
1308 uint16_t vpi, ocs_dma_t *dma, uint8_t update, uint8_t enable_t10_pi)
1309 {
1310 sli4_cmd_reg_rpi_t *reg_rpi = buf;
1311
1312 ocs_memset(buf, 0, size);
1313
1314 reg_rpi->hdr.command = SLI4_MBOX_COMMAND_REG_RPI;
1315
1316 reg_rpi->rpi = rpi;
1317 reg_rpi->remote_n_port_id = nport_id;
1318 reg_rpi->upd = update;
1319 reg_rpi->etow = enable_t10_pi;
1320
1321 reg_rpi->bde_64.bde_type = SLI4_BDE_TYPE_BDE_64;
1322 reg_rpi->bde_64.buffer_length = SLI4_REG_RPI_BUF_LEN;
1323 reg_rpi->bde_64.u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
1324 reg_rpi->bde_64.u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
1325
1326 reg_rpi->vpi = vpi;
1327
1328 return sizeof(sli4_cmd_reg_rpi_t);
1329 }
1330
1331 /**
1332 * @ingroup sli
1333 * @brief Write a REG_VFI command to the provided buffer.
1334 *
1335 * @param sli4 SLI context pointer.
1336 * @param buf Virtual pointer to the destination buffer.
1337 * @param size Buffer size, in bytes.
1338 * @param domain Pointer to the domain object.
1339 *
1340 * @return Returns the number of bytes written.
1341 */
1342 int32_t
1343 sli_cmd_reg_vfi(sli4_t *sli4, void *buf, size_t size, ocs_domain_t *domain)
1344 {
1345 sli4_cmd_reg_vfi_t *reg_vfi = buf;
1346
1347 if (!sli4 || !buf || !domain) {
1348 return 0;
1349 }
1350
1351 ocs_memset(buf, 0, size);
1352
1353 reg_vfi->hdr.command = SLI4_MBOX_COMMAND_REG_VFI;
1354
1355 reg_vfi->vfi = domain->indicator;
1356
1357 reg_vfi->fcfi = domain->fcf_indicator;
1358
1359 /* TODO contents of domain->dma only valid if topo == FABRIC */
1360 reg_vfi->sparm.bde_type = SLI4_BDE_TYPE_BDE_64;
1361 reg_vfi->sparm.buffer_length = 0x70;
1362 reg_vfi->sparm.u.data.buffer_address_low = ocs_addr32_lo(domain->dma.phys);
1363 reg_vfi->sparm.u.data.buffer_address_high = ocs_addr32_hi(domain->dma.phys);
1364
1365 reg_vfi->e_d_tov = sli4->config.e_d_tov;
1366 reg_vfi->r_a_tov = sli4->config.r_a_tov;
1367
1368 reg_vfi->vp = TRUE;
1369 reg_vfi->vpi = domain->sport->indicator;
1370 ocs_memcpy(reg_vfi->wwpn, &domain->sport->sli_wwpn, sizeof(reg_vfi->wwpn));
1371 reg_vfi->local_n_port_id = domain->sport->fc_id;
1372
1373 return sizeof(sli4_cmd_reg_vfi_t);
1374 }
1375
1376 /**
1377 * @ingroup sli
1378 * @brief Write a REG_VPI command to the provided buffer.
1379 *
1380 * @param sli4 SLI context pointer.
1381 * @param buf Virtual pointer to the destination buffer.
1382 * @param size Buffer size, in bytes.
1383 * @param sport Pointer to the SLI port object.
1384 * @param update Boolean indicating whether to update the existing VPI (true)
1385 * or create a new VPI (false).
1386 *
1387 * @return Returns the number of bytes written.
1388 */
1389 int32_t
1390 sli_cmd_reg_vpi(sli4_t *sli4, void *buf, size_t size, ocs_sli_port_t *sport, uint8_t update)
1391 {
1392 sli4_cmd_reg_vpi_t *reg_vpi = buf;
1393
1394 if (!sli4 || !buf || !sport) {
1395 return 0;
1396 }
1397
1398 ocs_memset(buf, 0, size);
1399
1400 reg_vpi->hdr.command = SLI4_MBOX_COMMAND_REG_VPI;
1401
1402 reg_vpi->local_n_port_id = sport->fc_id;
1403 reg_vpi->upd = update != 0;
1404 ocs_memcpy(reg_vpi->wwpn, &sport->sli_wwpn, sizeof(reg_vpi->wwpn));
1405 reg_vpi->vpi = sport->indicator;
1406 reg_vpi->vfi = sport->domain->indicator;
1407
1408 return sizeof(sli4_cmd_reg_vpi_t);
1409 }
1410
1411 /**
1412 * @brief Write a REQUEST_FEATURES command to the provided buffer.
1413 *
1414 * @param sli4 SLI context pointer.
1415 * @param buf Virtual pointer to the destination buffer.
1416 * @param size Buffer size, in bytes.
1417 * @param mask Features to request.
1418 * @param query Use feature query mode (does not change FW).
1419 *
1420 * @return Returns the number of bytes written.
1421 */
1422 static int32_t
1423 sli_cmd_request_features(sli4_t *sli4, void *buf, size_t size, sli4_features_t mask, uint8_t query)
1424 {
1425 sli4_cmd_request_features_t *features = buf;
1426
1427 ocs_memset(buf, 0, size);
1428
1429 features->hdr.command = SLI4_MBOX_COMMAND_REQUEST_FEATURES;
1430
1431 if (query) {
1432 features->qry = TRUE;
1433 }
1434 features->command.dword = mask.dword;
1435
1436 return sizeof(sli4_cmd_request_features_t);
1437 }
1438
1439 /**
1440 * @ingroup sli
1441 * @brief Write a SLI_CONFIG command to the provided buffer.
1442 *
1443 * @param sli4 SLI context pointer.
1444 * @param buf Virtual pointer to the destination buffer.
1445 * @param size Buffer size, in bytes.
1446 * @param length Length in bytes of attached command.
1447 * @param dma DMA buffer for non-embedded commands.
1448 *
1449 * @return Returns the number of bytes written.
1450 */
1451 int32_t
1452 sli_cmd_sli_config(sli4_t *sli4, void *buf, size_t size, uint32_t length, ocs_dma_t *dma)
1453 {
1454 sli4_cmd_sli_config_t *sli_config = NULL;
1455
1456 if ((length > sizeof(sli_config->payload.embed)) && (dma == NULL)) {
1457 ocs_log_test(sli4->os, "length(%d) > payload(%ld)\n",
1458 length, sizeof(sli_config->payload.embed));
1459 return -1;
1460 }
1461
1462 sli_config = buf;
1463
1464 ocs_memset(buf, 0, size);
1465
1466 sli_config->hdr.command = SLI4_MBOX_COMMAND_SLI_CONFIG;
1467 if (NULL == dma) {
1468 sli_config->emb = TRUE;
1469 sli_config->payload_length = length;
1470 } else {
1471 sli_config->emb = FALSE;
1472
1473 sli_config->pmd_count = 1;
1474
1475 sli_config->payload.mem.address_low = ocs_addr32_lo(dma->phys);
1476 sli_config->payload.mem.address_high = ocs_addr32_hi(dma->phys);
1477 sli_config->payload.mem.length = dma->size;
1478 sli_config->payload_length = dma->size;
1479 #if defined(OCS_INCLUDE_DEBUG)
1480 /* save pointer to DMA for BMBX dumping purposes */
1481 sli4->bmbx_non_emb_pmd = dma;
1482 #endif
1483 }
1484
1485 return offsetof(sli4_cmd_sli_config_t, payload.embed);
1486 }
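/*
 * Example (illustrative sketch): building an embedded SLI_CONFIG request.
 * The wrapper returns the offset at which the common request should be
 * written, mirroring what sli_cmd_common_function_reset() below does (real
 * callers size the payload to cover both the request and the response). For
 * payloads larger than the embedded area, a DMA buffer is passed instead of
 * NULL and the request is written to dma->virt. "buf"/"size" are a
 * caller-provided command buffer and its length.
 *
 *        uint32_t off;
 *        sli4_req_common_function_reset_t *req;
 *
 *        off = sli_cmd_sli_config(sli4, buf, size, sizeof(*req), NULL);
 *        req = (sli4_req_common_function_reset_t *)((uint8_t *)buf + off);
 *        req->hdr.opcode = SLI4_OPC_COMMON_FUNCTION_RESET;
 *        req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
 */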
1487
1488 /**
1489 * @brief Initialize SLI Port control register.
1490 *
1491 * @param sli4 SLI context pointer.
1492 * @param endian Endian value to write.
1493 *
1494 * @return Returns 0 on success, or a negative error code value on failure.
1495 */
1496
1497 static int32_t
1498 sli_sliport_control(sli4_t *sli4, uint32_t endian)
1499 {
1500 uint32_t iter;
1501 int32_t rc;
1502
1503 rc = -1;
1504
1505 /* Initialize port, endian */
1506 sli_reg_write(sli4, SLI4_REG_SLIPORT_CONTROL, endian | SLI4_SLIPORT_CONTROL_IP);
1507
1508 for (iter = 0; iter < 3000; iter ++) {
1509 ocs_udelay(SLI4_INIT_PORT_DELAY_US);
1510 if (sli_fw_ready(sli4) == 1) {
1511 rc = 0;
1512 break;
1513 }
1514 }
1515
1516 if (rc != 0) {
1517 ocs_log_crit(sli4->os, "port failed to become ready after initialization\n");
1518 }
1519
1520 return rc;
1521 }
1522
1523 /**
1524 * @ingroup sli
1525 * @brief Write a UNREG_FCFI command to the provided buffer.
1526 *
1527 * @param sli4 SLI context pointer.
1528 * @param buf Virtual pointer to the destination buffer.
1529 * @param size Buffer size, in bytes.
1530 * @param indicator Indicator value.
1531 *
1532 * @return Returns the number of bytes written.
1533 */
1534 int32_t
1535 sli_cmd_unreg_fcfi(sli4_t *sli4, void *buf, size_t size, uint16_t indicator)
1536 {
1537 sli4_cmd_unreg_fcfi_t *unreg_fcfi = buf;
1538
1539 if (!sli4 || !buf) {
1540 return 0;
1541 }
1542
1543 ocs_memset(buf, 0, size);
1544
1545 unreg_fcfi->hdr.command = SLI4_MBOX_COMMAND_UNREG_FCFI;
1546
1547 unreg_fcfi->fcfi = indicator;
1548
1549 return sizeof(sli4_cmd_unreg_fcfi_t);
1550 }
1551
1552 /**
1553 * @ingroup sli
1554 * @brief Write an UNREG_RPI command to the provided buffer.
1555 *
1556 * @param sli4 SLI context pointer.
1557 * @param buf Virtual pointer to the destination buffer.
1558 * @param size Buffer size, in bytes.
1559 * @param indicator Indicator value.
1560 * @param which Type of unregister, such as node, port, domain, or FCF.
1561 * @param fc_id FC address.
1562 *
1563 * @return Returns the number of bytes written.
1564 */
1565 int32_t
1566 sli_cmd_unreg_rpi(sli4_t *sli4, void *buf, size_t size, uint16_t indicator, sli4_resource_e which,
1567 uint32_t fc_id)
1568 {
1569 sli4_cmd_unreg_rpi_t *unreg_rpi = buf;
1570 uint8_t index_indicator = 0;
1571
1572 if (!sli4 || !buf) {
1573 return 0;
1574 }
1575
1576 ocs_memset(buf, 0, size);
1577
1578 unreg_rpi->hdr.command = SLI4_MBOX_COMMAND_UNREG_RPI;
1579
1580 switch (which) {
1581 case SLI_RSRC_FCOE_RPI:
1582 index_indicator = SLI4_UNREG_RPI_II_RPI;
1583 if (fc_id != UINT32_MAX) {
1584 unreg_rpi->dp = TRUE;
1585 unreg_rpi->destination_n_port_id = fc_id & 0x00ffffff;
1586 }
1587 break;
1588 case SLI_RSRC_FCOE_VPI:
1589 index_indicator = SLI4_UNREG_RPI_II_VPI;
1590 break;
1591 case SLI_RSRC_FCOE_VFI:
1592 index_indicator = SLI4_UNREG_RPI_II_VFI;
1593 break;
1594 case SLI_RSRC_FCOE_FCFI:
1595 index_indicator = SLI4_UNREG_RPI_II_FCFI;
1596 break;
1597 default:
1598 ocs_log_test(sli4->os, "unknown type %#x\n", which);
1599 return 0;
1600 }
1601
1602 unreg_rpi->ii = index_indicator;
1603 unreg_rpi->index = indicator;
1604
1605 return sizeof(sli4_cmd_unreg_rpi_t);
1606 }
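/*
 * Example (illustrative sketch): unregistering a single RPI. Passing
 * UINT32_MAX for fc_id leaves the destination N_Port_ID ("dp") qualifier
 * clear, as handled above; "rpi" is a previously registered RPI.
 *
 *        if (sli_cmd_unreg_rpi(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, rpi,
 *                              SLI_RSRC_FCOE_RPI, UINT32_MAX) &&
 *            (sli_bmbx_command(sli4) == 0)) {
 *                ocs_log_debug(sli4->os, "UNREG_RPI issued\n");
 *        }
 */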
1607
1608 /**
1609 * @ingroup sli
1610 * @brief Write an UNREG_VFI command to the provided buffer.
1611 *
1612 * @param sli4 SLI context pointer.
1613 * @param buf Virtual pointer to the destination buffer.
1614 * @param size Buffer size, in bytes.
1615 * @param domain Pointer to the domain object
1616 * @param which Type of unregister, such as domain, FCFI, or everything.
1617 *
1618 * @return Returns the number of bytes written.
1619 */
1620 int32_t
1621 sli_cmd_unreg_vfi(sli4_t *sli4, void *buf, size_t size, ocs_domain_t *domain, uint32_t which)
1622 {
1623 sli4_cmd_unreg_vfi_t *unreg_vfi = buf;
1624
1625 if (!sli4 || !buf || !domain) {
1626 return 0;
1627 }
1628
1629 ocs_memset(buf, 0, size);
1630
1631 unreg_vfi->hdr.command = SLI4_MBOX_COMMAND_UNREG_VFI;
1632 switch (which) {
1633 case SLI4_UNREG_TYPE_DOMAIN:
1634 unreg_vfi->index = domain->indicator;
1635 break;
1636 case SLI4_UNREG_TYPE_FCF:
1637 unreg_vfi->index = domain->fcf_indicator;
1638 break;
1639 case SLI4_UNREG_TYPE_ALL:
1640 unreg_vfi->index = UINT16_MAX;
1641 break;
1642 default:
1643 return 0;
1644 }
1645
1646 if (SLI4_UNREG_TYPE_DOMAIN != which) {
1647 unreg_vfi->ii = SLI4_UNREG_VFI_II_FCFI;
1648 }
1649
1650 return sizeof(sli4_cmd_unreg_vfi_t);
1651 }
1652
1653 /**
1654 * @ingroup sli
1655 * @brief Write an UNREG_VPI command to the provided buffer.
1656 *
1657 * @param sli4 SLI context pointer.
1658 * @param buf Virtual pointer to the destination buffer.
1659 * @param size Buffer size, in bytes.
1660 * @param indicator Indicator value.
1661 * @param which Type of unregister: port, domain, FCFI, everything
1662 *
1663 * @return Returns the number of bytes written.
1664 */
1665 int32_t
1666 sli_cmd_unreg_vpi(sli4_t *sli4, void *buf, size_t size, uint16_t indicator, uint32_t which)
1667 {
1668 sli4_cmd_unreg_vpi_t *unreg_vpi = buf;
1669
1670 if (!sli4 || !buf) {
1671 return 0;
1672 }
1673
1674 ocs_memset(buf, 0, size);
1675
1676 unreg_vpi->hdr.command = SLI4_MBOX_COMMAND_UNREG_VPI;
1677 unreg_vpi->index = indicator;
1678 switch (which) {
1679 case SLI4_UNREG_TYPE_PORT:
1680 unreg_vpi->ii = SLI4_UNREG_VPI_II_VPI;
1681 break;
1682 case SLI4_UNREG_TYPE_DOMAIN:
1683 unreg_vpi->ii = SLI4_UNREG_VPI_II_VFI;
1684 break;
1685 case SLI4_UNREG_TYPE_FCF:
1686 unreg_vpi->ii = SLI4_UNREG_VPI_II_FCFI;
1687 break;
1688 case SLI4_UNREG_TYPE_ALL:
1689 unreg_vpi->index = UINT16_MAX; /* override indicator */
1690 unreg_vpi->ii = SLI4_UNREG_VPI_II_FCFI;
1691 break;
1692 default:
1693 return 0;
1694 }
1695
1696 return sizeof(sli4_cmd_unreg_vpi_t);
1697 }
1698
1699 /**
1700 * @ingroup sli
 * @brief Write a CONFIG_AUTO_XFER_RDY command to the provided buffer.
1702 *
1703 * @param sli4 SLI context pointer.
1704 * @param buf Virtual pointer to the destination buffer.
1705 * @param size Buffer size, in bytes.
1706 * @param max_burst_len if the write FCP_DL is less than this size,
1707 * then the SLI port will generate the auto XFER_RDY.
1708 *
1709 * @return Returns the number of bytes written.
1710 */
1711 int32_t
sli_cmd_config_auto_xfer_rdy(sli4_t *sli4, void *buf, size_t size, uint32_t max_burst_len)
1713 {
1714 sli4_cmd_config_auto_xfer_rdy_t *req = buf;
1715
1716 if (!sli4 || !buf) {
1717 return 0;
1718 }
1719
1720 ocs_memset(buf, 0, size);
1721
1722 req->hdr.command = SLI4_MBOX_COMMAND_CONFIG_AUTO_XFER_RDY;
1723 req->max_burst_len = max_burst_len;
1724
1725 return sizeof(sli4_cmd_config_auto_xfer_rdy_t);
1726 }
1727
1728 /**
1729 * @ingroup sli
 * @brief Write a CONFIG_AUTO_XFER_RDY_HP command to the provided buffer.
1731 *
1732 * @param sli4 SLI context pointer.
1733 * @param buf Virtual pointer to the destination buffer.
1734 * @param size Buffer size, in bytes.
 * @param max_burst_len If the write FCP_DL is less than this size,
 * then the SLI port will generate the auto XFER_RDY.
 * @param esoc Enable start offset computation.
 * @param block_size Block size.
1739 *
1740 * @return Returns the number of bytes written.
1741 */
1742 int32_t
sli_cmd_config_auto_xfer_rdy_hp(sli4_t *sli4, void *buf, size_t size, uint32_t max_burst_len,
		uint32_t esoc, uint32_t block_size)
1745 {
1746 sli4_cmd_config_auto_xfer_rdy_hp_t *req = buf;
1747
1748 if (!sli4 || !buf) {
1749 return 0;
1750 }
1751
1752 ocs_memset(buf, 0, size);
1753
1754 req->hdr.command = SLI4_MBOX_COMMAND_CONFIG_AUTO_XFER_RDY_HP;
1755 req->max_burst_len = max_burst_len;
1756 req->esoc = esoc;
1757 req->block_size = block_size;
1758 return sizeof(sli4_cmd_config_auto_xfer_rdy_hp_t);
1759 }
1760
1761 /**
1762 * @brief Write a COMMON_FUNCTION_RESET command.
1763 *
1764 * @param sli4 SLI context.
1765 * @param buf Destination buffer for the command.
1766 * @param size Buffer size, in bytes.
1767 *
1768 * @return Returns the number of bytes written.
1769 */
1770 static int32_t
sli_cmd_common_function_reset(sli4_t *sli4, void *buf, size_t size)
1772 {
1773 sli4_req_common_function_reset_t *reset = NULL;
1774 uint32_t sli_config_off = 0;
1775
1776 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
1777 uint32_t payload_size;
1778
1779 /* Payload length must accommodate both request and response */
1780 payload_size = max(sizeof(sli4_req_common_function_reset_t),
1781 sizeof(sli4_res_common_function_reset_t));
1782
1783 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
1784 NULL);
1785 }
1786 reset = (sli4_req_common_function_reset_t *)((uint8_t *)buf + sli_config_off);
1787
1788 reset->hdr.opcode = SLI4_OPC_COMMON_FUNCTION_RESET;
1789 reset->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
1790
1791 return(sli_config_off + sizeof(sli4_req_common_function_reset_t));
1792 }
1793
1794 /**
1795 * @brief Write a COMMON_CREATE_CQ command.
1796 *
1797 * @param sli4 SLI context.
1798 * @param buf Destination buffer for the command.
1799 * @param size Buffer size, in bytes.
1800 * @param qmem DMA memory for the queue.
1801 * @param eq_id Associated EQ_ID
1802 * @param ignored This parameter carries the ULP which is only used for WQ and RQs
1803 *
1804 * @note This creates a Version 0 message.
1805 *
1806 * @return Returns the number of bytes written.
1807 */
1808 static int32_t
sli_cmd_common_create_cq(sli4_t *sli4, void *buf, size_t size,
		ocs_dma_t *qmem, uint16_t eq_id, uint16_t ignored)
1811 {
1812 sli4_req_common_create_cq_v0_t *cqv0 = NULL;
1813 sli4_req_common_create_cq_v2_t *cqv2 = NULL;
1814 uint32_t sli_config_off = 0;
1815 uint32_t p;
1816 uintptr_t addr;
1817 uint32_t if_type = sli4->if_type;
1818 uint32_t page_bytes = 0;
1819 uint32_t num_pages = 0;
1820 uint32_t cmd_size = 0;
1821 uint32_t page_size = 0;
1822 uint32_t n_cqe = 0;
1823
1824 /* First calculate number of pages and the mailbox cmd length */
1825 switch (if_type)
1826 {
1827 case SLI4_IF_TYPE_BE3_SKH_PF:
1828 page_bytes = SLI_PAGE_SIZE;
1829 num_pages = sli_page_count(qmem->size, page_bytes);
1830 cmd_size = sizeof(sli4_req_common_create_cq_v0_t) + (8 * num_pages);
1831 break;
1832 case SLI4_IF_TYPE_LANCER_FC_ETH:
1833 case SLI4_IF_TYPE_LANCER_G7:
1834 n_cqe = qmem->size / SLI4_CQE_BYTES;
1835 switch (n_cqe) {
1836 case 256:
1837 case 512:
1838 case 1024:
1839 case 2048:
1840 page_size = 1;
1841 break;
1842 case 4096:
1843 page_size = 2;
1844 break;
1845 default:
1846 return 0;
1847 }
1848 page_bytes = page_size * SLI_PAGE_SIZE;
1849 num_pages = sli_page_count(qmem->size, page_bytes);
1850 cmd_size = sizeof(sli4_req_common_create_cq_v2_t) + (8 * num_pages);
1851 break;
1852 default:
1853 ocs_log_test(sli4->os, "unsupported IF_TYPE %d\n", if_type);
1854 return -1;
1855 }
1856
1857 /* now that we have the mailbox command size, we can set SLI_CONFIG fields */
1858 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
1859 uint32_t payload_size;
1860
1861 /* Payload length must accommodate both request and response */
1862 payload_size = max((size_t)cmd_size, sizeof(sli4_res_common_create_queue_t));
1863
1864 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
1865 NULL);
1866 }
1867
1868 switch (if_type)
1869 {
1870 case SLI4_IF_TYPE_BE3_SKH_PF:
1871 cqv0 = (sli4_req_common_create_cq_v0_t *)((uint8_t *)buf + sli_config_off);
1872 cqv0->hdr.opcode = SLI4_OPC_COMMON_CREATE_CQ;
1873 cqv0->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
1874 cqv0->hdr.version = 0;
1875 cqv0->hdr.request_length = cmd_size - sizeof(sli4_req_hdr_t);
1876
1877 /* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */
1878 cqv0->num_pages = num_pages;
1879 switch (cqv0->num_pages) {
1880 case 1:
1881 cqv0->cqecnt = SLI4_CQ_CNT_256;
1882 break;
1883 case 2:
1884 cqv0->cqecnt = SLI4_CQ_CNT_512;
1885 break;
1886 case 4:
1887 cqv0->cqecnt = SLI4_CQ_CNT_1024;
1888 break;
1889 default:
1890 ocs_log_test(sli4->os, "num_pages %d not valid\n", cqv0->num_pages);
1891 return -1;
1892 }
1893 cqv0->evt = TRUE;
1894 cqv0->valid = TRUE;
1895 /* TODO cq->nodelay = ???; */
1896 /* TODO cq->clswm = ???; */
1897 cqv0->arm = FALSE;
1898 cqv0->eq_id = eq_id;
1899
1900 for (p = 0, addr = qmem->phys;
1901 p < cqv0->num_pages;
1902 p++, addr += page_bytes) {
1903 cqv0->page_physical_address[p].low = ocs_addr32_lo(addr);
1904 cqv0->page_physical_address[p].high = ocs_addr32_hi(addr);
1905 }
1906
1907 break;
1908 case SLI4_IF_TYPE_LANCER_FC_ETH:
1909 case SLI4_IF_TYPE_LANCER_G7:
1910 {
1911 cqv2 = (sli4_req_common_create_cq_v2_t *)((uint8_t *)buf + sli_config_off);
1912 cqv2->hdr.opcode = SLI4_OPC_COMMON_CREATE_CQ;
1913 cqv2->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
1914 cqv2->hdr.version = 2;
1915 cqv2->hdr.request_length = cmd_size - sizeof(sli4_req_hdr_t);
1916
1917 if (if_type == SLI4_IF_TYPE_LANCER_G7)
1918 cqv2->autovalid = TRUE;
1919
1920 cqv2->page_size = page_size;
1921
1922 /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.3) */
1923 cqv2->num_pages = num_pages;
1924 if (!cqv2->num_pages || (cqv2->num_pages > SLI4_COMMON_CREATE_CQ_V2_MAX_PAGES)) {
1925 return 0;
1926 }
1927
1928 switch (cqv2->num_pages) {
1929 case 1:
1930 cqv2->cqecnt = SLI4_CQ_CNT_256;
1931 break;
1932 case 2:
1933 cqv2->cqecnt = SLI4_CQ_CNT_512;
1934 break;
1935 case 4:
1936 cqv2->cqecnt = SLI4_CQ_CNT_1024;
1937 break;
1938 case 8:
1939 cqv2->cqecnt = SLI4_CQ_CNT_LARGE;
1940 cqv2->cqe_count = n_cqe;
1941 break;
1942 default:
1943 ocs_log_test(sli4->os, "num_pages %d not valid\n", cqv2->num_pages);
1944 return -1;
1945 }
1946
1947 cqv2->evt = TRUE;
1948 cqv2->valid = TRUE;
1949 /* TODO cq->nodelay = ???; */
1950 /* TODO cq->clswm = ???; */
1951 cqv2->arm = FALSE;
1952 cqv2->eq_id = eq_id;
1953
1954 for (p = 0, addr = qmem->phys;
1955 p < cqv2->num_pages;
1956 p++, addr += page_bytes) {
1957 cqv2->page_physical_address[p].low = ocs_addr32_lo(addr);
1958 cqv2->page_physical_address[p].high = ocs_addr32_hi(addr);
1959 }
1960 }
1961 break;
1962 }
1963
1964 return (sli_config_off + cmd_size);
1965 }
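
/*
 * Worked example (illustrative): for the Lancer if_types above, the CQE
 * count is derived from the queue memory size. Assuming SLI4_CQE_BYTES is
 * 16 and SLI_PAGE_SIZE is 4 KiB, a 16 KiB qmem gives n_cqe = 1024, so
 * page_size = 1 (4 KiB pages) and num_pages = 4, selecting
 * SLI4_CQ_CNT_1024. A 64 KiB qmem gives n_cqe = 4096, page_size = 2 and
 * num_pages = 8, selecting SLI4_CQ_CNT_LARGE with cqe_count = 4096
 * (provided num_pages does not exceed SLI4_COMMON_CREATE_CQ_V2_MAX_PAGES).
 */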
1966
1967 /**
1968 * @brief Write a COMMON_DESTROY_CQ command.
1969 *
1970 * @param sli4 SLI context.
1971 * @param buf Destination buffer for the command.
1972 * @param size Buffer size, in bytes.
1973 * @param cq_id CQ ID
1974 *
1975 * @note This creates a Version 0 message.
1976 *
1977 * @return Returns the number of bytes written.
1978 */
1979 static int32_t
sli_cmd_common_destroy_cq(sli4_t *sli4, void *buf, size_t size, uint16_t cq_id)
1981 {
1982 sli4_req_common_destroy_cq_t *cq = NULL;
1983 uint32_t sli_config_off = 0;
1984
1985 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
1986 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
1987 /* Payload length must accommodate both request and response */
1988 max(sizeof(sli4_req_common_destroy_cq_t),
1989 sizeof(sli4_res_hdr_t)),
1990 NULL);
1991 }
1992 cq = (sli4_req_common_destroy_cq_t *)((uint8_t *)buf + sli_config_off);
1993
1994 cq->hdr.opcode = SLI4_OPC_COMMON_DESTROY_CQ;
1995 cq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
1996 cq->hdr.request_length = sizeof(sli4_req_common_destroy_cq_t) -
1997 sizeof(sli4_req_hdr_t);
1998 cq->cq_id = cq_id;
1999
2000 return(sli_config_off + sizeof(sli4_req_common_destroy_cq_t));
2001 }
2002
2003 /**
2004 * @brief Write a COMMON_MODIFY_EQ_DELAY command.
2005 *
2006 * @param sli4 SLI context.
2007 * @param buf Destination buffer for the command.
2008 * @param size Buffer size, in bytes.
2009 * @param q Queue object array.
2010 * @param num_q Queue object array count.
2011 * @param shift Phase shift for staggering interrupts.
2012 * @param delay_mult Delay multiplier for limiting interrupt frequency.
2013 *
2014 * @return Returns the number of bytes written.
2015 */
2016 static int32_t
sli_cmd_common_modify_eq_delay(sli4_t *sli4, void *buf, size_t size, sli4_queue_t *q, int num_q, uint32_t shift,
		uint32_t delay_mult)
2019 {
2020 sli4_req_common_modify_eq_delay_t *modify_delay = NULL;
2021 uint32_t sli_config_off = 0;
2022 int i;
2023
2024 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2025 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2026 /* Payload length must accommodate both request and response */
2027 max(sizeof(sli4_req_common_modify_eq_delay_t), sizeof(sli4_res_hdr_t)),
2028 NULL);
2029 }
2030
2031 modify_delay = (sli4_req_common_modify_eq_delay_t *)((uint8_t *)buf + sli_config_off);
2032
2033 modify_delay->hdr.opcode = SLI4_OPC_COMMON_MODIFY_EQ_DELAY;
2034 modify_delay->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2035 modify_delay->hdr.request_length = sizeof(sli4_req_common_modify_eq_delay_t) -
2036 sizeof(sli4_req_hdr_t);
2037
2038 modify_delay->num_eq = num_q;
2039
2040 for (i = 0; i<num_q; i++) {
2041 modify_delay->eq_delay_record[i].eq_id = q[i].id;
2042 modify_delay->eq_delay_record[i].phase = shift;
2043 modify_delay->eq_delay_record[i].delay_multiplier = delay_mult;
2044 }
2045
2046 return(sli_config_off + sizeof(sli4_req_common_modify_eq_delay_t));
2047 }
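
/*
 * Example (sketch): requesting interrupt coalescing for a caller-owned
 * array of EQs through the bootstrap mailbox. "eqs", "num_eqs", and the
 * delay multiplier of 8 are illustrative values, not fixed by this helper.
 *
 *	if (sli_cmd_common_modify_eq_delay(sli4, sli4->bmbx.virt,
 *					   SLI4_BMBX_SIZE, eqs, num_eqs,
 *					   0, 8) &&
 *	    (sli_bmbx_command(sli4) == 0)) {
 *		// on FC ports, check sli_res_sli_config(sli4->bmbx.virt)
 *	}
 */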
2048
2049 /**
2050 * @brief Write a COMMON_CREATE_EQ command.
2051 *
2052 * @param sli4 SLI context.
2053 * @param buf Destination buffer for the command.
2054 * @param size Buffer size, in bytes.
2055 * @param qmem DMA memory for the queue.
2056 * @param ignored1 Ignored (used for consistency among queue creation functions).
2057 * @param ignored2 Ignored (used for consistency among queue creation functions).
2058 *
2059 * @note Other queue creation routines use the last parameter to pass in
2060 * the associated Q_ID and ULP. EQ doesn't have an associated queue or ULP,
2061 * so these parameters are ignored
2062 *
2063 * @note This creates a Version 0 message
2064 *
2065 * @return Returns the number of bytes written.
2066 */
2067 static int32_t
sli_cmd_common_create_eq(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *qmem,
		uint16_t ignored1, uint16_t ignored2)
2070 {
2071 sli4_req_common_create_eq_t *eq = NULL;
2072 uint32_t sli_config_off = 0;
2073 uint32_t p;
2074 uintptr_t addr;
2075
2076 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2077 uint32_t payload_size;
2078
2079 /* Payload length must accommodate both request and response */
2080 payload_size = max(sizeof(sli4_req_common_create_eq_t),
2081 sizeof(sli4_res_common_create_queue_t));
2082
2083 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
2084 NULL);
2085 }
2086 eq = (sli4_req_common_create_eq_t *)((uint8_t *)buf + sli_config_off);
2087
2088 eq->hdr.opcode = SLI4_OPC_COMMON_CREATE_EQ;
2089 eq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2090 eq->hdr.request_length = sizeof(sli4_req_common_create_eq_t) -
2091 sizeof(sli4_req_hdr_t);
2092 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7) {
2093 eq->hdr.version = 2;
2094 eq->autovalid = TRUE;
2095 }
2096 /* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */
2097 eq->num_pages = qmem->size / SLI_PAGE_SIZE;
2098 switch (eq->num_pages) {
2099 case 1:
2100 eq->eqesz = SLI4_EQE_SIZE_4;
2101 eq->count = SLI4_EQ_CNT_1024;
2102 break;
2103 case 2:
2104 eq->eqesz = SLI4_EQE_SIZE_4;
2105 eq->count = SLI4_EQ_CNT_2048;
2106 break;
2107 case 4:
2108 eq->eqesz = SLI4_EQE_SIZE_4;
2109 eq->count = SLI4_EQ_CNT_4096;
2110 break;
2111 default:
2112 ocs_log_test(sli4->os, "num_pages %d not valid\n", eq->num_pages);
2113 return -1;
2114 }
2115 eq->valid = TRUE;
2116 eq->arm = FALSE;
2117 eq->delay_multiplier = 32;
2118
2119 for (p = 0, addr = qmem->phys;
2120 p < eq->num_pages;
2121 p++, addr += SLI_PAGE_SIZE) {
2122 eq->page_address[p].low = ocs_addr32_lo(addr);
2123 eq->page_address[p].high = ocs_addr32_hi(addr);
2124 }
2125
2126 return(sli_config_off + sizeof(sli4_req_common_create_eq_t));
2127 }
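
/*
 * Worked example (illustrative): assuming SLI_PAGE_SIZE is 4 KiB, an
 * 8 KiB qmem yields num_pages = 2 and therefore SLI4_EQ_CNT_2048
 * (8192 / 4-byte EQEs = 2048 entries), while a 16 KiB qmem yields
 * num_pages = 4 and SLI4_EQ_CNT_4096.
 */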
2128
2129 /**
2130 * @brief Write a COMMON_DESTROY_EQ command.
2131 *
2132 * @param sli4 SLI context.
2133 * @param buf Destination buffer for the command.
2134 * @param size Buffer size, in bytes.
2135 * @param eq_id Queue ID to destroy.
2140 *
2141 * @note This creates a Version 0 message.
2142 *
2143 * @return Returns the number of bytes written.
2144 */
2145 static int32_t
sli_cmd_common_destroy_eq(sli4_t *sli4, void *buf, size_t size, uint16_t eq_id)
2147 {
2148 sli4_req_common_destroy_eq_t *eq = NULL;
2149 uint32_t sli_config_off = 0;
2150
2151 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2152 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2153 /* Payload length must accommodate both request and response */
2154 max(sizeof(sli4_req_common_destroy_eq_t),
2155 sizeof(sli4_res_hdr_t)),
2156 NULL);
2157 }
2158 eq = (sli4_req_common_destroy_eq_t *)((uint8_t *)buf + sli_config_off);
2159
2160 eq->hdr.opcode = SLI4_OPC_COMMON_DESTROY_EQ;
2161 eq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2162 eq->hdr.request_length = sizeof(sli4_req_common_destroy_eq_t) -
2163 sizeof(sli4_req_hdr_t);
2164
2165 eq->eq_id = eq_id;
2166
2167 return(sli_config_off + sizeof(sli4_req_common_destroy_eq_t));
2168 }
2169
2170 /**
2171 * @brief Write a LOWLEVEL_SET_WATCHDOG command.
2172 *
2173 * @param sli4 SLI context.
2174 * @param buf Destination buffer for the command.
2175 * @param size Buffer size, in bytes.
 * @param timeout Watchdog timer timeout, in seconds.
2177 *
2178 * @return void
2179 */
2180 void
sli4_cmd_lowlevel_set_watchdog(sli4_t *sli4, void *buf, size_t size, uint16_t timeout)
2182 {
2183
2184 sli4_req_lowlevel_set_watchdog_t *req = NULL;
2185 uint32_t sli_config_off = 0;
2186
2187 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2188 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2189 /* Payload length must accommodate both request and response */
2190 max(sizeof(sli4_req_lowlevel_set_watchdog_t),
2191 sizeof(sli4_res_lowlevel_set_watchdog_t)),
2192 NULL);
2193 }
2194 req = (sli4_req_lowlevel_set_watchdog_t *)((uint8_t *)buf + sli_config_off);
2195
2196 req->hdr.opcode = SLI4_OPC_LOWLEVEL_SET_WATCHDOG;
2197 req->hdr.subsystem = SLI4_SUBSYSTEM_LOWLEVEL;
2198 req->hdr.request_length = sizeof(sli4_req_lowlevel_set_watchdog_t) - sizeof(sli4_req_hdr_t);
2199 req->watchdog_timeout = timeout;
2200
2201 return;
2202 }
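
/*
 * Example (sketch): arming the firmware watchdog through the bootstrap
 * mailbox. The 30-second timeout is illustrative; the command is posted
 * with sli_bmbx_command() just like the other mailbox helpers.
 *
 *	sli4_cmd_lowlevel_set_watchdog(sli4, sli4->bmbx.virt,
 *				       SLI4_BMBX_SIZE, 30);
 *	if (sli_bmbx_command(sli4)) {
 *		ocs_log_err(sli4->os, "SET_WATCHDOG write failed\n");
 *	}
 */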
2203
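/**
 * @brief Write a COMMON_GET_CNTL_ATTRIBUTES command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param dma DMA buffer that carries the non-embedded command and response payload.
 *
 * @return Returns the number of bytes written.
 */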
2204 static int32_t
sli_cmd_common_get_cntl_attributes(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
2206 {
2207 sli4_req_hdr_t *hdr = NULL;
2208 uint32_t sli_config_off = 0;
2209
2210 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2211 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2212 sizeof(sli4_req_hdr_t),
2213 dma);
2214 }
2215
2216 if (dma == NULL) {
2217 return 0;
2218 }
2219
2220 ocs_memset(dma->virt, 0, dma->size);
2221
2222 hdr = dma->virt;
2223
2224 hdr->opcode = SLI4_OPC_COMMON_GET_CNTL_ATTRIBUTES;
2225 hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
2226 hdr->request_length = dma->size;
2227
2228 return(sli_config_off + sizeof(sli4_req_hdr_t));
2229 }
2230
2231 /**
2232 * @brief Write a COMMON_GET_CNTL_ADDL_ATTRIBUTES command.
2233 *
2234 * @param sli4 SLI context.
2235 * @param buf Destination buffer for the command.
2236 * @param size Buffer size, in bytes.
 * @param dma DMA buffer that carries the non-embedded command and response payload.
2238 *
2239 * @note This creates a Version 0 message.
2240 *
2241 * @return Returns the number of bytes written.
2242 */
2243 static int32_t
sli_cmd_common_get_cntl_addl_attributes(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
2245 {
2246 sli4_req_hdr_t *hdr = NULL;
2247 uint32_t sli_config_off = 0;
2248
2249 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2250 sli_config_off = sli_cmd_sli_config(sli4, buf, size, sizeof(sli4_req_hdr_t), dma);
2251 }
2252
2253 if (dma == NULL) {
2254 return 0;
2255 }
2256
2257 ocs_memset(dma->virt, 0, dma->size);
2258
2259 hdr = dma->virt;
2260
2261 hdr->opcode = SLI4_OPC_COMMON_GET_CNTL_ADDL_ATTRIBUTES;
2262 hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
2263 hdr->request_length = dma->size;
2264
2265 return(sli_config_off + sizeof(sli4_req_hdr_t));
2266 }
2267
2268 /**
2269 * @brief Write a COMMON_CREATE_MQ_EXT command.
2270 *
2271 * @param sli4 SLI context.
2272 * @param buf Destination buffer for the command.
2273 * @param size Buffer size, in bytes.
2274 * @param qmem DMA memory for the queue.
2275 * @param cq_id Associated CQ_ID.
2276 * @param ignored This parameter carries the ULP which is only used for WQ and RQs
2277 *
2278 * @note This creates a Version 0 message.
2279 *
2280 * @return Returns the number of bytes written.
2281 */
2282 static int32_t
sli_cmd_common_create_mq_ext(sli4_t *sli4, void *buf, size_t size,
		ocs_dma_t *qmem, uint16_t cq_id, uint16_t ignored)
2285 {
2286 sli4_req_common_create_mq_ext_t *mq = NULL;
2287 uint32_t sli_config_off = 0;
2288 uint32_t p;
2289 uintptr_t addr;
2290
2291 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2292 uint32_t payload_size;
2293
2294 /* Payload length must accommodate both request and response */
2295 payload_size = max(sizeof(sli4_req_common_create_mq_ext_t),
2296 sizeof(sli4_res_common_create_queue_t));
2297
2298 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
2299 NULL);
2300 }
2301 mq = (sli4_req_common_create_mq_ext_t *)((uint8_t *)buf + sli_config_off);
2302
2303 mq->hdr.opcode = SLI4_OPC_COMMON_CREATE_MQ_EXT;
2304 mq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2305 mq->hdr.request_length = sizeof(sli4_req_common_create_mq_ext_t) -
2306 sizeof(sli4_req_hdr_t);
2307 /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.12) */
2308 mq->num_pages = qmem->size / SLI_PAGE_SIZE;
2309 switch (mq->num_pages) {
2310 case 1:
2311 mq->ring_size = SLI4_MQE_SIZE_16;
2312 break;
2313 case 2:
2314 mq->ring_size = SLI4_MQE_SIZE_32;
2315 break;
2316 case 4:
2317 mq->ring_size = SLI4_MQE_SIZE_64;
2318 break;
2319 case 8:
2320 mq->ring_size = SLI4_MQE_SIZE_128;
2321 break;
2322 default:
2323 ocs_log_test(sli4->os, "num_pages %d not valid\n", mq->num_pages);
2324 return -1;
2325 }
2326
2327 /* TODO break this down by sli4->config.topology */
2328 mq->async_event_bitmap = SLI4_ASYNC_EVT_FC_FCOE;
2329
2330 if (sli4->config.mq_create_version) {
2331 mq->cq_id_v1 = cq_id;
2332 mq->hdr.version = 1;
	} else {
2335 mq->cq_id_v0 = cq_id;
2336 }
2337 mq->val = TRUE;
2338
2339 for (p = 0, addr = qmem->phys;
2340 p < mq->num_pages;
2341 p++, addr += SLI_PAGE_SIZE) {
2342 mq->page_physical_address[p].low = ocs_addr32_lo(addr);
2343 mq->page_physical_address[p].high = ocs_addr32_hi(addr);
2344 }
2345
2346 return(sli_config_off + sizeof(sli4_req_common_create_mq_ext_t));
2347 }
2348
2349 /**
2350 * @brief Write a COMMON_DESTROY_MQ command.
2351 *
2352 * @param sli4 SLI context.
2353 * @param buf Destination buffer for the command.
2354 * @param size Buffer size, in bytes.
2355 * @param mq_id MQ ID
2356 *
2357 * @note This creates a Version 0 message.
2358 *
2359 * @return Returns the number of bytes written.
2360 */
2361 static int32_t
sli_cmd_common_destroy_mq(sli4_t *sli4, void *buf, size_t size, uint16_t mq_id)
2363 {
2364 sli4_req_common_destroy_mq_t *mq = NULL;
2365 uint32_t sli_config_off = 0;
2366
2367 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2368 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2369 /* Payload length must accommodate both request and response */
2370 max(sizeof(sli4_req_common_destroy_mq_t),
2371 sizeof(sli4_res_hdr_t)),
2372 NULL);
2373 }
2374 mq = (sli4_req_common_destroy_mq_t *)((uint8_t *)buf + sli_config_off);
2375
2376 mq->hdr.opcode = SLI4_OPC_COMMON_DESTROY_MQ;
2377 mq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2378 mq->hdr.request_length = sizeof(sli4_req_common_destroy_mq_t) -
2379 sizeof(sli4_req_hdr_t);
2380
2381 mq->mq_id = mq_id;
2382
2383 return(sli_config_off + sizeof(sli4_req_common_destroy_mq_t));
2384 }
2385
2386 /**
2387 * @ingroup sli
2388 * @brief Write a COMMON_NOP command
2389 *
2390 * @param sli4 SLI context.
2391 * @param buf Destination buffer for the command.
2392 * @param size Buffer size, in bytes.
2393 * @param context NOP context value (passed to response, except on FC/FCoE).
2394 *
2395 * @return Returns the number of bytes written.
2396 */
2397 int32_t
sli_cmd_common_nop(sli4_t *sli4, void *buf, size_t size, uint64_t context)
2399 {
2400 sli4_req_common_nop_t *nop = NULL;
2401 uint32_t sli_config_off = 0;
2402
2403 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2404 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2405 /* Payload length must accommodate both request and response */
2406 max(sizeof(sli4_req_common_nop_t), sizeof(sli4_res_common_nop_t)),
2407 NULL);
2408 }
2409
2410 nop = (sli4_req_common_nop_t *)((uint8_t *)buf + sli_config_off);
2411
2412 nop->hdr.opcode = SLI4_OPC_COMMON_NOP;
2413 nop->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2414 nop->hdr.request_length = 8;
2415
2416 ocs_memcpy(&nop->context, &context, sizeof(context));
2417
2418 return(sli_config_off + sizeof(sli4_req_common_nop_t));
2419 }
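
/*
 * Example (sketch): issuing a NOP to exercise the mailbox path on an FC
 * port. The 64-bit context value is arbitrary; the status check via
 * sli_res_sli_config() follows the pattern used elsewhere in this file.
 *
 *	if (sli_cmd_common_nop(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *			       0x1234abcdULL) &&
 *	    (sli_bmbx_command(sli4) == 0) &&
 *	    (sli_res_sli_config(sli4->bmbx.virt) == 0)) {
 *		// NOP completed successfully
 *	}
 */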
2420
2421 /**
2422 * @ingroup sli
2423 * @brief Write a COMMON_GET_RESOURCE_EXTENT_INFO command.
2424 *
2425 * @param sli4 SLI context.
2426 * @param buf Destination buffer for the command.
2427 * @param size Buffer size, in bytes.
2428 * @param rtype Resource type (for example, XRI, VFI, VPI, and RPI).
2429 *
2430 * @return Returns the number of bytes written.
2431 */
2432 int32_t
sli_cmd_common_get_resource_extent_info(sli4_t *sli4, void *buf, size_t size, uint16_t rtype)
2434 {
2435 sli4_req_common_get_resource_extent_info_t *extent = NULL;
2436 uint32_t sli_config_off = 0;
2437
2438 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2439 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2440 sizeof(sli4_req_common_get_resource_extent_info_t),
2441 NULL);
2442 }
2443
2444 extent = (sli4_req_common_get_resource_extent_info_t *)((uint8_t *)buf + sli_config_off);
2445
2446 extent->hdr.opcode = SLI4_OPC_COMMON_GET_RESOURCE_EXTENT_INFO;
2447 extent->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2448 extent->hdr.request_length = 4;
2449
2450 extent->resource_type = rtype;
2451
2452 return(sli_config_off + sizeof(sli4_req_common_get_resource_extent_info_t));
2453 }
2454
2455 /**
2456 * @ingroup sli
2457 * @brief Write a COMMON_GET_SLI4_PARAMETERS command.
2458 *
2459 * @param sli4 SLI context.
2460 * @param buf Destination buffer for the command.
2461 * @param size Buffer size, in bytes.
2462 *
2463 * @return Returns the number of bytes written.
2464 */
2465 int32_t
sli_cmd_common_get_sli4_parameters(sli4_t *sli4, void *buf, size_t size)
2467 {
2468 sli4_req_hdr_t *hdr = NULL;
2469 uint32_t sli_config_off = 0;
2470
2471 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2472 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2473 sizeof(sli4_res_common_get_sli4_parameters_t),
2474 NULL);
2475 }
2476
2477 hdr = (sli4_req_hdr_t *)((uint8_t *)buf + sli_config_off);
2478
2479 hdr->opcode = SLI4_OPC_COMMON_GET_SLI4_PARAMETERS;
2480 hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
2481 hdr->request_length = 0x50;
2482
2483 return(sli_config_off + sizeof(sli4_req_hdr_t));
2484 }
2485
2486 /**
2487 * @brief Write a COMMON_QUERY_FW_CONFIG command to the provided buffer.
2488 *
2489 * @param sli4 SLI context pointer.
2490 * @param buf Virtual pointer to destination buffer.
2491 * @param size Buffer size in bytes.
2492 *
2493 * @return Returns the number of bytes written
2494 */
2495 static int32_t
sli_cmd_common_query_fw_config(sli4_t *sli4, void *buf, size_t size)
2497 {
2498 sli4_req_common_query_fw_config_t *fw_config;
2499 uint32_t sli_config_off = 0;
2500 uint32_t payload_size;
2501
2502 /* Payload length must accommodate both request and response */
2503 payload_size = max(sizeof(sli4_req_common_query_fw_config_t),
2504 sizeof(sli4_res_common_query_fw_config_t));
2505
2506 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2507 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2508 payload_size,
2509 NULL);
2510 }
2511
2512 fw_config = (sli4_req_common_query_fw_config_t*)((uint8_t*)buf + sli_config_off);
2513 fw_config->hdr.opcode = SLI4_OPC_COMMON_QUERY_FW_CONFIG;
2514 fw_config->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2515 fw_config->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
2516 return sli_config_off + sizeof(sli4_req_common_query_fw_config_t);
2517 }
2518
2519 /**
2520 * @brief Write a COMMON_GET_PORT_NAME command to the provided buffer.
2521 *
2522 * @param sli4 SLI context pointer.
2523 * @param buf Virtual pointer to destination buffer.
2524 * @param size Buffer size in bytes.
2525 *
2526 * @note Function supports both version 0 and 1 forms of this command via
2527 * the IF_TYPE.
2528 *
2529 * @return Returns the number of bytes written.
2530 */
2531 static int32_t
sli_cmd_common_get_port_name(sli4_t *sli4, void *buf, size_t size)
2533 {
2534 sli4_req_common_get_port_name_t *port_name;
2535 uint32_t sli_config_off = 0;
2536 uint32_t payload_size;
2537 uint8_t version = 0;
2538 uint8_t pt = 0;
2539
2540 /* Select command version according to IF_TYPE */
2541 switch (sli4->if_type) {
2542 case SLI4_IF_TYPE_BE3_SKH_PF:
2543 case SLI4_IF_TYPE_BE3_SKH_VF:
2544 version = 0;
2545 break;
2546 case SLI4_IF_TYPE_LANCER_FC_ETH:
2547 case SLI4_IF_TYPE_LANCER_RDMA:
2548 case SLI4_IF_TYPE_LANCER_G7:
2549 version = 1;
2550 break;
2551 default:
2552 ocs_log_test(sli4->os, "unsupported IF_TYPE %d\n", sli4->if_type);
2553 return 0;
2554 }
2555
2556 /* Payload length must accommodate both request and response */
2557 payload_size = max(sizeof(sli4_req_common_get_port_name_t),
2558 sizeof(sli4_res_common_get_port_name_t));
2559
2560 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2561 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2562 payload_size,
2563 NULL);
2564
2565 pt = 1;
2566 }
2567
2568 port_name = (sli4_req_common_get_port_name_t *)((uint8_t *)buf + sli_config_off);
2569
2570 port_name->hdr.opcode = SLI4_OPC_COMMON_GET_PORT_NAME;
2571 port_name->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2572 port_name->hdr.request_length = sizeof(sli4_req_hdr_t) + (version * sizeof(uint32_t));
2573 port_name->hdr.version = version;
2574
2575 /* Set the port type value (ethernet=0, FC=1) for V1 commands */
2576 if (version == 1) {
2577 port_name->pt = pt;
2578 }
2579
2580 return sli_config_off + port_name->hdr.request_length;
2581 }
2582
2583 /**
2584 * @ingroup sli
2585 * @brief Write a COMMON_WRITE_OBJECT command.
2586 *
2587 * @param sli4 SLI context.
2588 * @param buf Destination buffer for the command.
2589 * @param size Buffer size, in bytes.
2590 * @param noc True if the object should be written but not committed to flash.
2591 * @param eof True if this is the last write for this object.
2592 * @param desired_write_length Number of bytes of data to write to the object.
2593 * @param offset Offset, in bytes, from the start of the object.
2594 * @param object_name Name of the object to write.
2595 * @param dma DMA structure from which the data will be copied.
2596 *
2597 * @return Returns the number of bytes written.
2598 */
2599 int32_t
sli_cmd_common_write_object(sli4_t *sli4, void *buf, size_t size,
		uint16_t noc, uint16_t eof, uint32_t desired_write_length,
		uint32_t offset,
		char *object_name,
		ocs_dma_t *dma)
2605 {
2606 sli4_req_common_write_object_t *wr_obj = NULL;
2607 uint32_t sli_config_off = 0;
2608 sli4_bde_t *host_buffer;
2609
2610 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2611 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2612 sizeof (sli4_req_common_write_object_t) + sizeof (sli4_bde_t),
2613 NULL);
2614 }
2615
2616 wr_obj = (sli4_req_common_write_object_t *)((uint8_t *)buf + sli_config_off);
2617
2618 wr_obj->hdr.opcode = SLI4_OPC_COMMON_WRITE_OBJECT;
2619 wr_obj->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2620 wr_obj->hdr.request_length = sizeof(*wr_obj) - 4*sizeof(uint32_t) + sizeof(sli4_bde_t);
2621 wr_obj->hdr.timeout = 0;
2622 wr_obj->hdr.version = 0;
2623
2624 wr_obj->noc = noc;
2625 wr_obj->eof = eof;
2626 wr_obj->desired_write_length = desired_write_length;
2627 wr_obj->write_offset = offset;
2628 ocs_strncpy(wr_obj->object_name, object_name, sizeof(wr_obj->object_name));
2629 wr_obj->host_buffer_descriptor_count = 1;
2630
2631 host_buffer = (sli4_bde_t *)wr_obj->host_buffer_descriptor;
2632
	/* Set up the BDE to transfer desired_write_length bytes to the device */
2634 host_buffer->bde_type = SLI4_BDE_TYPE_BDE_64;
2635 host_buffer->buffer_length = desired_write_length;
2636 host_buffer->u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
2637 host_buffer->u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
2638
2639 return(sli_config_off + sizeof(sli4_req_common_write_object_t) + sizeof (sli4_bde_t));
2640 }
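
/*
 * Example (sketch): writing an image to a flash object in DMA-buffer-sized
 * chunks. "image_len", "chunk", "obj_name", and "dma" are illustrative,
 * caller-provided names; "eof" is set only on the final chunk so the object
 * is committed once the last piece has been written.
 *
 *	for (offset = 0; offset < image_len; offset += chunk) {
 *		uint32_t len = ((image_len - offset) < chunk) ?
 *			       (image_len - offset) : chunk;
 *		uint16_t eof = ((offset + len) >= image_len);
 *
 *		// copy "len" bytes of the image into dma->virt here
 *		sli_cmd_common_write_object(sli4, sli4->bmbx.virt,
 *					    SLI4_BMBX_SIZE, 0, eof, len,
 *					    offset, obj_name, dma);
 *		if (sli_bmbx_command(sli4)) {
 *			break;
 *		}
 *	}
 */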
2641
2642 /**
2643 * @ingroup sli
2644 * @brief Write a COMMON_DELETE_OBJECT command.
2645 *
2646 * @param sli4 SLI context.
2647 * @param buf Destination buffer for the command.
2648 * @param size Buffer size, in bytes.
 * @param object_name Name of the object to delete.
2650 *
2651 * @return Returns the number of bytes written.
2652 */
2653 int32_t
sli_cmd_common_delete_object(sli4_t *sli4, void *buf, size_t size,
		char *object_name)
2656 {
2657 sli4_req_common_delete_object_t *del_obj = NULL;
2658 uint32_t sli_config_off = 0;
2659
2660 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2661 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2662 sizeof (sli4_req_common_delete_object_t),
2663 NULL);
2664 }
2665
2666 del_obj = (sli4_req_common_delete_object_t *)((uint8_t *)buf + sli_config_off);
2667
2668 del_obj->hdr.opcode = SLI4_OPC_COMMON_DELETE_OBJECT;
2669 del_obj->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2670 del_obj->hdr.request_length = sizeof(*del_obj);
2671 del_obj->hdr.timeout = 0;
2672 del_obj->hdr.version = 0;
2673
2674 ocs_strncpy(del_obj->object_name, object_name, sizeof(del_obj->object_name));
2675 return(sli_config_off + sizeof(sli4_req_common_delete_object_t));
2676 }
2677
2678 /**
2679 * @ingroup sli
2680 * @brief Write a COMMON_READ_OBJECT command.
2681 *
2682 * @param sli4 SLI context.
2683 * @param buf Destination buffer for the command.
2684 * @param size Buffer size, in bytes.
2685 * @param desired_read_length Number of bytes of data to read from the object.
2686 * @param offset Offset, in bytes, from the start of the object.
2687 * @param object_name Name of the object to read.
 * @param dma DMA structure into which the object data will be copied.
2689 *
2690 * @return Returns the number of bytes written.
2691 */
2692 int32_t
sli_cmd_common_read_object(sli4_t *sli4, void *buf, size_t size,
		uint32_t desired_read_length,
		uint32_t offset,
		char *object_name,
		ocs_dma_t *dma)
2698 {
2699 sli4_req_common_read_object_t *rd_obj = NULL;
2700 uint32_t sli_config_off = 0;
2701 sli4_bde_t *host_buffer;
2702
2703 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2704 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2705 sizeof (sli4_req_common_read_object_t) + sizeof (sli4_bde_t),
2706 NULL);
2707 }
2708
2709 rd_obj = (sli4_req_common_read_object_t *)((uint8_t *)buf + sli_config_off);
2710
2711 rd_obj->hdr.opcode = SLI4_OPC_COMMON_READ_OBJECT;
2712 rd_obj->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2713 rd_obj->hdr.request_length = sizeof(*rd_obj) - 4*sizeof(uint32_t) + sizeof(sli4_bde_t);
2714 rd_obj->hdr.timeout = 0;
2715 rd_obj->hdr.version = 0;
2716
2717 rd_obj->desired_read_length = desired_read_length;
2718 rd_obj->read_offset = offset;
2719 ocs_strncpy(rd_obj->object_name, object_name, sizeof(rd_obj->object_name));
2720 rd_obj->host_buffer_descriptor_count = 1;
2721
2722 host_buffer = (sli4_bde_t *)rd_obj->host_buffer_descriptor;
2723
	/* Set up the BDE to receive desired_read_length bytes from the device */
2725 host_buffer->bde_type = SLI4_BDE_TYPE_BDE_64;
2726 host_buffer->buffer_length = desired_read_length;
2727 if (dma != NULL) {
2728 host_buffer->u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
2729 host_buffer->u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
2730 } else {
2731 host_buffer->u.data.buffer_address_low = 0;
2732 host_buffer->u.data.buffer_address_high = 0;
2733 }
2734
2735 return(sli_config_off + sizeof(sli4_req_common_read_object_t) + sizeof (sli4_bde_t));
2736 }
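
/*
 * Example (sketch): reading the first dma->size bytes of a named flash
 * object into a caller-allocated DMA buffer. "obj_name" and "dma" are
 * illustrative placeholders; the mailbox response reports how many bytes
 * were actually read.
 *
 *	if (sli_cmd_common_read_object(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *				       dma->size, 0, obj_name, dma) &&
 *	    (sli_bmbx_command(sli4) == 0)) {
 *		// object data is now in dma->virt
 *	}
 */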
2737
2738 /**
2739 * @ingroup sli
2740 * @brief Write a DMTF_EXEC_CLP_CMD command.
2741 *
2742 * @param sli4 SLI context.
2743 * @param buf Destination buffer for the command.
2744 * @param size Buffer size, in bytes.
2745 * @param cmd DMA structure that describes the buffer for the command.
2746 * @param resp DMA structure that describes the buffer for the response.
2747 *
2748 * @return Returns the number of bytes written.
2749 */
2750 int32_t
sli_cmd_dmtf_exec_clp_cmd(sli4_t *sli4, void *buf, size_t size,
		ocs_dma_t *cmd,
		ocs_dma_t *resp)
2754 {
2755 sli4_req_dmtf_exec_clp_cmd_t *clp_cmd = NULL;
2756 uint32_t sli_config_off = 0;
2757
2758 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2759 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2760 sizeof (sli4_req_dmtf_exec_clp_cmd_t),
2761 NULL);
2762 }
2763
2764 clp_cmd = (sli4_req_dmtf_exec_clp_cmd_t*)((uint8_t *)buf + sli_config_off);
2765
2766 clp_cmd->hdr.opcode = SLI4_OPC_DMTF_EXEC_CLP_CMD;
2767 clp_cmd->hdr.subsystem = SLI4_SUBSYSTEM_DMTF;
2768 clp_cmd->hdr.request_length = sizeof(sli4_req_dmtf_exec_clp_cmd_t) -
2769 sizeof(sli4_req_hdr_t);
2770 clp_cmd->hdr.timeout = 0;
2771 clp_cmd->hdr.version = 0;
2772 clp_cmd->cmd_buf_length = cmd->size;
2773 clp_cmd->cmd_buf_addr_low = ocs_addr32_lo(cmd->phys);
2774 clp_cmd->cmd_buf_addr_high = ocs_addr32_hi(cmd->phys);
2775 clp_cmd->resp_buf_length = resp->size;
2776 clp_cmd->resp_buf_addr_low = ocs_addr32_lo(resp->phys);
2777 clp_cmd->resp_buf_addr_high = ocs_addr32_hi(resp->phys);
2778
2779 return(sli_config_off + sizeof(sli4_req_dmtf_exec_clp_cmd_t));
2780 }
2781
2782 /**
2783 * @ingroup sli
2784 * @brief Write a COMMON_SET_DUMP_LOCATION command.
2785 *
2786 * @param sli4 SLI context.
2787 * @param buf Destination buffer for the command.
2788 * @param size Buffer size, in bytes.
 * @param query Zero to set the dump location, non-zero to query the dump size.
 * @param is_buffer_list Set to one if the buffer is a set of buffer descriptors,
 * or set to zero if the buffer is a contiguous dump area.
 * @param buffer DMA structure to which the dump will be copied.
 * @param fdb FDB flag, passed through to the command.
2793 *
2794 * @return Returns the number of bytes written.
2795 */
2796 int32_t
sli_cmd_common_set_dump_location(sli4_t *sli4, void *buf, size_t size,
		uint8_t query, uint8_t is_buffer_list,
		ocs_dma_t *buffer, uint8_t fdb)
2800 {
2801 sli4_req_common_set_dump_location_t *set_dump_loc = NULL;
2802 uint32_t sli_config_off = 0;
2803
2804 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2805 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2806 sizeof (sli4_req_common_set_dump_location_t),
2807 NULL);
2808 }
2809
2810 set_dump_loc = (sli4_req_common_set_dump_location_t *)((uint8_t *)buf + sli_config_off);
2811
2812 set_dump_loc->hdr.opcode = SLI4_OPC_COMMON_SET_DUMP_LOCATION;
2813 set_dump_loc->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2814 set_dump_loc->hdr.request_length = sizeof(sli4_req_common_set_dump_location_t) - sizeof(sli4_req_hdr_t);
2815 set_dump_loc->hdr.timeout = 0;
2816 set_dump_loc->hdr.version = 0;
2817
2818 set_dump_loc->blp = is_buffer_list;
2819 set_dump_loc->qry = query;
2820 set_dump_loc->fdb = fdb;
2821
2822 if (buffer) {
2823 set_dump_loc->buf_addr_low = ocs_addr32_lo(buffer->phys);
2824 set_dump_loc->buf_addr_high = ocs_addr32_hi(buffer->phys);
2825 set_dump_loc->buffer_length = buffer->len;
2826 } else {
2827 set_dump_loc->buf_addr_low = 0;
2828 set_dump_loc->buf_addr_high = 0;
2829 set_dump_loc->buffer_length = 0;
2830 }
2831
2832 return(sli_config_off + sizeof(sli4_req_common_set_dump_location_t));
2833 }
2834
2835 /**
2836 * @ingroup sli
2837 * @brief Write a COMMON_SET_FEATURES command.
2838 *
2839 * @param sli4 SLI context.
2840 * @param buf Destination buffer for the command.
2841 * @param size Buffer size, in bytes.
2842 * @param feature Feature to set.
2843 * @param param_len Length of the parameter (must be a multiple of 4 bytes).
2844 * @param parameter Pointer to the parameter value.
2845 *
2846 * @return Returns the number of bytes written.
2847 */
2848 int32_t
sli_cmd_common_set_features(sli4_t *sli4, void *buf, size_t size,
		uint32_t feature,
		uint32_t param_len,
		void *parameter)
2853 {
2854 sli4_req_common_set_features_t *cmd = NULL;
2855 uint32_t sli_config_off = 0;
2856
2857 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2858 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2859 sizeof (sli4_req_common_set_features_t),
2860 NULL);
2861 }
2862
2863 cmd = (sli4_req_common_set_features_t *)((uint8_t *)buf + sli_config_off);
2864
2865 cmd->hdr.opcode = SLI4_OPC_COMMON_SET_FEATURES;
2866 cmd->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2867 cmd->hdr.request_length = sizeof(sli4_req_common_set_features_t) - sizeof(sli4_req_hdr_t);
2868 cmd->hdr.timeout = 0;
2869 cmd->hdr.version = 0;
2870
2871 cmd->feature = feature;
2872 cmd->param_len = param_len;
2873 ocs_memcpy(cmd->params, parameter, param_len);
2874
2875 return(sli_config_off + sizeof(sli4_req_common_set_features_t));
2876 }
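
/*
 * Example (sketch): enabling a single-word feature. "FEATURE_CODE" is a
 * placeholder for whichever SET_FEATURES feature code the caller needs;
 * the parameter length must be a multiple of 4 bytes.
 *
 *	uint32_t param = 1;
 *
 *	sli_cmd_common_set_features(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *				    FEATURE_CODE, sizeof(param), &param);
 *	if (sli_bmbx_command(sli4)) {
 *		ocs_log_err(sli4->os, "SET_FEATURES write failed\n");
 *	}
 */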
2877
2878 /**
2879 * @ingroup sli
 * @brief Write a COMMON_GET_PROFILE_CONFIG command.
2881 *
2882 * @param sli4 SLI context.
2883 * @param buf Destination buffer for the command.
2884 * @param size Buffer size in bytes.
2885 * @param dma DMA capable memory used to retrieve profile.
2886 *
2887 * @return Returns the number of bytes written.
2888 */
2889 int32_t
sli_cmd_common_get_profile_config(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
2891 {
2892 sli4_req_common_get_profile_config_t *req = NULL;
2893 uint32_t sli_config_off = 0;
2894 uint32_t payload_size;
2895
2896 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2897 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2898 sizeof (sli4_req_common_get_profile_config_t),
2899 dma);
2900 }
2901
2902 if (dma != NULL) {
2903 req = dma->virt;
2904 ocs_memset(req, 0, dma->size);
2905 payload_size = dma->size;
2906 } else {
2907 req = (sli4_req_common_get_profile_config_t *)((uint8_t *)buf + sli_config_off);
2908 payload_size = sizeof(sli4_req_common_get_profile_config_t);
2909 }
2910
2911 req->hdr.opcode = SLI4_OPC_COMMON_GET_PROFILE_CONFIG;
2912 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2913 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
2914 req->hdr.version = 1;
2915
2916 return(sli_config_off + sizeof(sli4_req_common_get_profile_config_t));
2917 }
2918
2919 /**
2920 * @ingroup sli
 * @brief Write a COMMON_SET_PROFILE_CONFIG command.
2922 *
2923 * @param sli4 SLI context.
2924 * @param buf Destination buffer for the command.
2925 * @param size Buffer size, in bytes.
2926 * @param dma DMA capable memory containing profile.
2927 * @param profile_id Profile ID to configure.
2928 * @param descriptor_count Number of descriptors in DMA buffer.
2929 * @param isap Implicit Set Active Profile value to use.
2930 *
2931 * @return Returns the number of bytes written.
2932 */
2933 int32_t
sli_cmd_common_set_profile_config(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma,
		uint8_t profile_id, uint32_t descriptor_count, uint8_t isap)
2936 {
2937 sli4_req_common_set_profile_config_t *req = NULL;
2938 uint32_t cmd_off = 0;
2939 uint32_t payload_size;
2940
2941 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2942 cmd_off = sli_cmd_sli_config(sli4, buf, size,
2943 sizeof (sli4_req_common_set_profile_config_t),
2944 dma);
2945 }
2946
2947 if (dma != NULL) {
2948 req = dma->virt;
2949 ocs_memset(req, 0, dma->size);
2950 payload_size = dma->size;
2951 } else {
2952 req = (sli4_req_common_set_profile_config_t *)((uint8_t *)buf + cmd_off);
2953 payload_size = sizeof(sli4_req_common_set_profile_config_t);
2954 }
2955
2956 req->hdr.opcode = SLI4_OPC_COMMON_SET_PROFILE_CONFIG;
2957 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2958 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
2959 req->hdr.version = 1;
2960 req->profile_id = profile_id;
2961 req->desc_count = descriptor_count;
2962 req->isap = isap;
2963
2964 return(cmd_off + sizeof(sli4_req_common_set_profile_config_t));
2965 }
2966
2967 /**
2968 * @ingroup sli
 * @brief Write a COMMON_GET_PROFILE_LIST command.
2970 *
2971 * @param sli4 SLI context.
2972 * @param buf Destination buffer for the command.
2973 * @param size Buffer size in bytes.
2974 * @param start_profile_index First profile index to return.
2975 * @param dma Buffer into which the list will be written.
2976 *
2977 * @return Returns the number of bytes written.
2978 */
2979 int32_t
sli_cmd_common_get_profile_list(sli4_t *sli4, void *buf, size_t size,
		uint32_t start_profile_index, ocs_dma_t *dma)
2982 {
2983 sli4_req_common_get_profile_list_t *req = NULL;
2984 uint32_t cmd_off = 0;
2985 uint32_t payload_size;
2986
2987 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2988 cmd_off = sli_cmd_sli_config(sli4, buf, size,
2989 sizeof (sli4_req_common_get_profile_list_t),
2990 dma);
2991 }
2992
2993 if (dma != NULL) {
2994 req = dma->virt;
2995 ocs_memset(req, 0, dma->size);
2996 payload_size = dma->size;
2997 } else {
2998 req = (sli4_req_common_get_profile_list_t *)((uint8_t *)buf + cmd_off);
2999 payload_size = sizeof(sli4_req_common_get_profile_list_t);
3000 }
3001
3002 req->hdr.opcode = SLI4_OPC_COMMON_GET_PROFILE_LIST;
3003 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
3004 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
3005 req->hdr.version = 0;
3006
3007 req->start_profile_index = start_profile_index;
3008
3009 return(cmd_off + sizeof(sli4_req_common_get_profile_list_t));
3010 }
3011
3012 /**
3013 * @ingroup sli
 * @brief Write a COMMON_GET_ACTIVE_PROFILE command.
3015 *
3016 * @param sli4 SLI context.
3017 * @param buf Destination buffer for the command.
3018 * @param size Buffer size in bytes.
3019 *
3020 * @return Returns the number of bytes written.
3021 */
3022 int32_t
sli_cmd_common_get_active_profile(sli4_t *sli4, void *buf, size_t size)
3024 {
3025 sli4_req_common_get_active_profile_t *req = NULL;
3026 uint32_t cmd_off = 0;
3027 uint32_t payload_size;
3028
3029 /* Payload length must accommodate both request and response */
3030 payload_size = max(sizeof(sli4_req_common_get_active_profile_t),
3031 sizeof(sli4_res_common_get_active_profile_t));
3032
3033 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
3034 cmd_off = sli_cmd_sli_config(sli4, buf, size,
3035 payload_size,
3036 NULL);
3037 }
3038
3039 req = (sli4_req_common_get_active_profile_t *)
3040 ((uint8_t*)buf + cmd_off);
3041
3042 req->hdr.opcode = SLI4_OPC_COMMON_GET_ACTIVE_PROFILE;
3043 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
3044 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
3045 req->hdr.version = 0;
3046
3047 return(cmd_off + sizeof(sli4_req_common_get_active_profile_t));
3048 }
3049
3050 /**
3051 * @ingroup sli
 * @brief Write a COMMON_SET_ACTIVE_PROFILE command.
3053 *
3054 * @param sli4 SLI context.
3055 * @param buf Destination buffer for the command.
3056 * @param size Buffer size in bytes.
3057 * @param fd If non-zero, set profile to factory default.
3058 * @param active_profile_id ID of new active profile.
3059 *
3060 * @return Returns the number of bytes written.
3061 */
3062 int32_t
sli_cmd_common_set_active_profile(sli4_t *sli4, void *buf, size_t size,
		uint32_t fd, uint32_t active_profile_id)
3065 {
3066 sli4_req_common_set_active_profile_t *req = NULL;
3067 uint32_t cmd_off = 0;
3068 uint32_t payload_size;
3069
3070 /* Payload length must accommodate both request and response */
3071 payload_size = max(sizeof(sli4_req_common_set_active_profile_t),
3072 sizeof(sli4_res_common_set_active_profile_t));
3073
3074 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
3075 cmd_off = sli_cmd_sli_config(sli4, buf, size,
3076 payload_size,
3077 NULL);
3078 }
3079
3080 req = (sli4_req_common_set_active_profile_t *)
3081 ((uint8_t*)buf + cmd_off);
3082
3083 req->hdr.opcode = SLI4_OPC_COMMON_SET_ACTIVE_PROFILE;
3084 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
3085 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
3086 req->hdr.version = 0;
3087 req->fd = fd;
3088 req->active_profile_id = active_profile_id;
3089
3090 return(cmd_off + sizeof(sli4_req_common_set_active_profile_t));
3091 }
3092
3093 /**
3094 * @ingroup sli
3095 * @brief Write a COMMON_GET_RECONFIG_LINK_INFO command.
3096 *
3097 * @param sli4 SLI context.
3098 * @param buf Destination buffer for the command.
3099 * @param size Buffer size in bytes.
3100 * @param dma Buffer to store the supported link configuration modes from the physical device.
3101 *
3102 * @return Returns the number of bytes written.
3103 */
3104 int32_t
sli_cmd_common_get_reconfig_link_info(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
3106 {
3107 sli4_req_common_get_reconfig_link_info_t *req = NULL;
3108 uint32_t cmd_off = 0;
3109 uint32_t payload_size;
3110
3111 /* Payload length must accommodate both request and response */
3112 payload_size = max(sizeof(sli4_req_common_get_reconfig_link_info_t),
3113 sizeof(sli4_res_common_get_reconfig_link_info_t));
3114
3115 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
3116 cmd_off = sli_cmd_sli_config(sli4, buf, size,
3117 payload_size,
3118 dma);
3119 }
3120
3121 if (dma != NULL) {
3122 req = dma->virt;
3123 ocs_memset(req, 0, dma->size);
3124 payload_size = dma->size;
3125 } else {
3126 req = (sli4_req_common_get_reconfig_link_info_t *)((uint8_t *)buf + cmd_off);
3127 payload_size = sizeof(sli4_req_common_get_reconfig_link_info_t);
3128 }
3129
3130 req->hdr.opcode = SLI4_OPC_COMMON_GET_RECONFIG_LINK_INFO;
3131 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
3132 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
3133 req->hdr.version = 0;
3134
3135 return(cmd_off + sizeof(sli4_req_common_get_reconfig_link_info_t));
3136 }
3137
3138 /**
3139 * @ingroup sli
3140 * @brief Write a COMMON_SET_RECONFIG_LINK_ID command.
3141 *
3142 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param fd If non-zero, set the link configuration to the factory default.
 * @param active_link_config_id ID of the link configuration to make active.
3147 * @param dma Buffer to assign the link configuration mode that is to become active from the physical device.
3148 *
3149 * @return Returns the number of bytes written.
3150 */
3151 int32_t
sli_cmd_common_set_reconfig_link_id(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma,
		uint32_t fd, uint32_t active_link_config_id)
3154 {
3155 sli4_req_common_set_reconfig_link_id_t *req = NULL;
3156 uint32_t cmd_off = 0;
3157 uint32_t payload_size;
3158
3159 /* Payload length must accommodate both request and response */
3160 payload_size = max(sizeof(sli4_req_common_set_reconfig_link_id_t),
3161 sizeof(sli4_res_common_set_reconfig_link_id_t));
3162
3163 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
3164 cmd_off = sli_cmd_sli_config(sli4, buf, size,
3165 payload_size,
3166 NULL);
3167 }
3168
3169 if (dma != NULL) {
3170 req = dma->virt;
3171 ocs_memset(req, 0, dma->size);
3172 payload_size = dma->size;
3173 } else {
3174 req = (sli4_req_common_set_reconfig_link_id_t *)((uint8_t *)buf + cmd_off);
3175 payload_size = sizeof(sli4_req_common_set_reconfig_link_id_t);
3176 }
3177
3178 req->hdr.opcode = SLI4_OPC_COMMON_SET_RECONFIG_LINK_ID;
3179 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
3180 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
3181 req->hdr.version = 0;
3182 req->fd = fd;
3183 req->next_link_config_id = active_link_config_id;
3184
3185 return(cmd_off + sizeof(sli4_req_common_set_reconfig_link_id_t));
3186 }
3187
3188 /**
3189 * @ingroup sli
3190 * @brief Check the mailbox/queue completion entry.
3191 *
3192 * @param buf Pointer to the MCQE.
3193 *
3194 * @return Returns 0 on success, or a non-zero value on failure.
3195 */
3196 int32_t
sli_cqe_mq(void *buf)
3198 {
3199 sli4_mcqe_t *mcqe = buf;
3200
3201 /*
3202 * Firmware can split mbx completions into two MCQEs: first with only
3203 * the "consumed" bit set and a second with the "complete" bit set.
3204 * Thus, ignore MCQE unless "complete" is set.
3205 */
3206 if (!mcqe->cmp) {
3207 return -2;
3208 }
3209
3210 if (mcqe->completion_status) {
3211 ocs_log_debug(NULL, "bad status (cmpl=%#x ext=%#x con=%d cmp=%d ae=%d val=%d)\n",
3212 mcqe->completion_status,
3213 mcqe->extended_status,
3214 mcqe->con,
3215 mcqe->cmp,
3216 mcqe->ae,
3217 mcqe->val);
3218 }
3219
3220 return mcqe->completion_status;
3221 }
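
/*
 * Example (sketch): how a caller might handle the split-completion
 * behavior described above. A return of -2 means only the "consumed" half
 * of the completion has arrived and the entry should simply be skipped;
 * any other non-zero value is the mailbox completion status. "mqe" is the
 * caller's pointer to the popped completion entry.
 *
 *	status = sli_cqe_mq(mqe);
 *	if (status == -2) {
 *		// wait for the MCQE with the "complete" bit set
 *	} else if (status != 0) {
 *		ocs_log_err(sli4->os, "MQ completion status %#x\n", status);
 *	}
 */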
3222
3223 /**
3224 * @ingroup sli
3225 * @brief Check the asynchronous event completion entry.
3226 *
3227 * @param sli4 SLI context.
3228 * @param buf Pointer to the ACQE.
3229 *
3230 * @return Returns 0 on success, or a non-zero value on failure.
3231 */
3232 int32_t
sli_cqe_async(sli4_t *sli4, void *buf)
3234 {
3235 sli4_acqe_t *acqe = buf;
3236 int32_t rc = -1;
3237
3238 if (!sli4 || !buf) {
3239 ocs_log_err(NULL, "bad parameter sli4=%p buf=%p\n", sli4, buf);
3240 return -1;
3241 }
3242
3243 switch (acqe->event_code) {
3244 case SLI4_ACQE_EVENT_CODE_LINK_STATE:
3245 rc = sli_fc_process_link_state(sli4, buf);
3246 break;
3247 case SLI4_ACQE_EVENT_CODE_FCOE_FIP:
3248 rc = sli_fc_process_fcoe(sli4, buf);
3249 break;
3250 case SLI4_ACQE_EVENT_CODE_GRP_5:
3251 /*TODO*/ocs_log_debug(sli4->os, "ACQE GRP5\n");
3252 break;
3253 case SLI4_ACQE_EVENT_CODE_SLI_PORT_EVENT:
3254 ocs_log_debug(sli4->os,"ACQE SLI Port, type=0x%x, data1,2=0x%08x,0x%08x\n",
3255 acqe->event_type, acqe->event_data[0], acqe->event_data[1]);
3256 #if defined(OCS_INCLUDE_DEBUG)
3257 ocs_dump32(OCS_DEBUG_ALWAYS, sli4->os, "acq", acqe, sizeof(*acqe));
3258 #endif
3259 break;
3260 case SLI4_ACQE_EVENT_CODE_FC_LINK_EVENT:
3261 rc = sli_fc_process_link_attention(sli4, buf);
3262 break;
3263 default:
3264 /*TODO*/ocs_log_test(sli4->os, "ACQE unknown=%#x\n", acqe->event_code);
3265 }
3266
3267 return rc;
3268 }
3269
3270 /**
3271 * @brief Check the SLI_CONFIG response.
3272 *
3273 * @par Description
3274 * Function checks the SLI_CONFIG response and the payload status.
3275 *
3276 * @param buf Pointer to SLI_CONFIG response.
3277 *
3278 * @return Returns 0 on success, or a non-zero value on failure.
3279 */
3280 static int32_t
sli_res_sli_config(void *buf)
3282 {
3283 sli4_cmd_sli_config_t *sli_config = buf;
3284
3285 if (!buf || (SLI4_MBOX_COMMAND_SLI_CONFIG != sli_config->hdr.command)) {
3286 ocs_log_err(NULL, "bad parameter buf=%p cmd=%#x\n", buf,
3287 buf ? sli_config->hdr.command : -1);
3288 return -1;
3289 }
3290
3291 if (sli_config->hdr.status) {
3292 return sli_config->hdr.status;
3293 }
3294
3295 if (sli_config->emb) {
3296 return sli_config->payload.embed[4];
3297 } else {
3298 ocs_log_test(NULL, "external buffers not supported\n");
3299 return -1;
3300 }
3301 }
3302
3303 /**
3304 * @brief Issue a COMMON_FUNCTION_RESET command.
3305 *
3306 * @param sli4 SLI context.
3307 *
3308 * @return Returns 0 on success, or a non-zero value on failure.
3309 */
3310 static int32_t
sli_common_function_reset(sli4_t *sli4)
3312 {
3313
3314 if (sli_cmd_common_function_reset(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
3315 if (sli_bmbx_command(sli4)) {
3316 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COM_FUNC_RESET)\n");
3317 return -1;
3318 }
3319 if (sli_res_sli_config(sli4->bmbx.virt)) {
3320 ocs_log_err(sli4->os, "bad status COM_FUNC_RESET\n");
3321 return -1;
3322 }
3323 } else {
3324 ocs_log_err(sli4->os, "bad COM_FUNC_RESET write\n");
3325 return -1;
3326 }
3327
3328 return 0;
3329 }
3330
3331 /**
3332 * @brief Check to see if the FW is ready.
3333 *
3334 * @par Description
3335 * Based on the <i>SLI-4 Architecture Specification, Revision 4.x0-13 (2012)</i>.
3336 *
3337 * @param sli4 SLI context.
3338 * @param timeout_ms Time, in milliseconds, to wait for the port to be ready
3339 * before failing.
3340 *
3341 * @return Returns TRUE for ready, or FALSE otherwise.
3342 */
3343 static int32_t
3344 sli_wait_for_fw_ready(sli4_t *sli4, uint32_t timeout_ms)
3345 {
3346 uint32_t iter = timeout_ms / (SLI4_INIT_PORT_DELAY_US / 1000);
3347 uint32_t ready = FALSE;
3348
3349 do {
3350 iter--;
3351 ocs_udelay(SLI4_INIT_PORT_DELAY_US);
3352 if (sli_fw_ready(sli4) == 1) {
3353 ready = TRUE;
3354 }
3355 } while (!ready && (iter > 0));
3356
3357 return ready;
3358 }
3359
3360 /**
3361 * @brief Initialize the firmware.
3362 *
3363 * @par Description
3364 * Based on the <i>SLI-4 Architecture Specification, Revision 4.x0-13 (2012)</i>.
3365 *
3366 * @param sli4 SLI context.
3367 *
3368 * @return Returns 0 on success, or a non-zero value on failure.
3369 */
3370 static int32_t
3371 sli_fw_init(sli4_t *sli4)
3372 {
3373 uint32_t ready;
3374 uint32_t endian;
3375
3376 /*
3377 * Is firmware ready for operation?
3378 */
3379 ready = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC);
3380 if (!ready) {
3381 ocs_log_crit(sli4->os, "FW status is NOT ready\n");
3382 return -1;
3383 }
3384
3385 /*
3386 * Reset port to a known state
3387 */
3388 switch (sli4->if_type) {
3389 case SLI4_IF_TYPE_BE3_SKH_PF:
3390 case SLI4_IF_TYPE_BE3_SKH_VF:
3391 /* No SLIPORT_CONTROL register so use command sequence instead */
3392 if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) {
3393 ocs_log_crit(sli4->os, "bootstrap mailbox not ready\n");
3394 return -1;
3395 }
3396
3397 if (sli_cmd_fw_initialize(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
3398 if (sli_bmbx_command(sli4)) {
3399 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (FW_INIT)\n");
3400 return -1;
3401 }
3402 } else {
3403 ocs_log_crit(sli4->os, "bad FW_INIT write\n");
3404 return -1;
3405 }
3406
3407 if (sli_common_function_reset(sli4)) {
3408 ocs_log_err(sli4->os, "bad COM_FUNC_RESET write\n");
3409 return -1;
3410 }
3411 break;
3412 case SLI4_IF_TYPE_LANCER_FC_ETH:
3413 case SLI4_IF_TYPE_LANCER_G7:
3414 #if BYTE_ORDER == LITTLE_ENDIAN
3415 endian = SLI4_SLIPORT_CONTROL_LITTLE_ENDIAN;
3416 #else
3417 endian = SLI4_SLIPORT_CONTROL_BIG_ENDIAN;
3418 #endif
3419
3420 if (sli_sliport_control(sli4, endian))
3421 return -1;
3422 break;
3423 default:
3424 ocs_log_test(sli4->os, "if_type %d not supported\n", sli4->if_type);
3425 return -1;
3426 }
3427
3428 return 0;
3429 }
3430
3431 /**
3432 * @brief Terminate the firmware.
3433 *
3434 * @param sli4 SLI context.
3435 *
3436 * @return Returns 0 on success, or a non-zero value on failure.
3437 */
3438 static int32_t
3439 sli_fw_term(sli4_t *sli4)
3440 {
3441 uint32_t endian;
3442
3443 if (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF ||
3444 sli4->if_type == SLI4_IF_TYPE_BE3_SKH_VF) {
3445 /* No SLIPORT_CONTROL register so use command sequence instead */
3446 if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) {
3447 ocs_log_crit(sli4->os, "bootstrap mailbox not ready\n");
3448 return -1;
3449 }
3450
3451 if (sli_common_function_reset(sli4)) {
3452 ocs_log_err(sli4->os, "bad COM_FUNC_RESET write\n");
3453 return -1;
3454 }
3455
3456 if (sli_cmd_fw_deinitialize(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
3457 if (sli_bmbx_command(sli4)) {
3458 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (FW_DEINIT)\n");
3459 return -1;
3460 }
3461 } else {
3462 ocs_log_test(sli4->os, "bad FW_DEINIT write\n");
3463 return -1;
3464 }
3465 } else {
3466 #if BYTE_ORDER == LITTLE_ENDIAN
3467 endian = SLI4_SLIPORT_CONTROL_LITTLE_ENDIAN;
3468 #else
3469 endian = SLI4_SLIPORT_CONTROL_BIG_ENDIAN;
3470 #endif
3471 /* type 2 etc. use SLIPORT_CONTROL to initialize port */
3472 sli_sliport_control(sli4, endian);
3473 }
3474 return 0;
3475 }
3476
3477 /**
3478 * @brief Write the doorbell register associated with the queue object.
3479 *
3480 * @param sli4 SLI context.
3481 * @param q Queue object.
3482 *
3483 * @return Returns 0 on success, or a non-zero value on failure.
3484 */
3485 static int32_t
3486 sli_queue_doorbell(sli4_t *sli4, sli4_queue_t *q)
3487 {
3488 uint32_t val = 0;
3489
3490 switch (q->type) {
3491 case SLI_QTYPE_EQ:
3492 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7)
3493 val = sli_iftype6_eq_doorbell(q->n_posted, q->id, FALSE);
3494 else
3495 val = sli_eq_doorbell(q->n_posted, q->id, FALSE);
3496 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
3497 break;
3498 case SLI_QTYPE_CQ:
3499 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7)
3500 val = sli_iftype6_cq_doorbell(q->n_posted, q->id, FALSE);
3501 else
3502 val = sli_cq_doorbell(q->n_posted, q->id, FALSE);
3503 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
3504 break;
3505 case SLI_QTYPE_MQ:
3506 val = SLI4_MQ_DOORBELL(q->n_posted, q->id);
3507 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
3508 break;
3509 case SLI_QTYPE_RQ:
3510 {
3511 uint32_t n_posted = q->n_posted;
3512 /*
3513 * FC/FCoE has different rules for Receive Queues. The host
3514 * should only update the doorbell of the RQ-pair containing
3515 * the headers since the header / payload RQs are treated
3516 * as a matched unit.
3517 */
3518 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
3519 /*
3520 * In RQ-pair, an RQ either contains the FC header
3521 * (i.e. is_hdr == TRUE) or the payload.
3522 *
3523 * Don't ring doorbell for payload RQ
3524 */
3525 if (!q->u.flag.is_hdr) {
3526 break;
3527 }
3528 /*
3529 			 * Some RQs cannot be incremented one entry at a time. Instead,
3530 * the driver collects a number of entries and updates the
3531 * RQ in batches.
3532 */
3533 if (q->u.flag.rq_batch) {
3534 if (((q->index + q->n_posted) % SLI4_QUEUE_RQ_BATCH)) {
3535 break;
3536 }
3537 n_posted = SLI4_QUEUE_RQ_BATCH;
3538 }
3539 }
3540
3541 val = SLI4_RQ_DOORBELL(n_posted, q->id);
3542 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
3543 break;
3544 }
3545 case SLI_QTYPE_WQ:
3546 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7) {
3547 val = SLI4_WQ_DOORBELL(q->n_posted, 0, q->id);
3548 } else {
3549 /* For iftype = 2 and 3, q->index value is ignored */
3550 val = SLI4_WQ_DOORBELL(q->n_posted, q->index, q->id);
3551 }
3552
3553 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
3554 break;
3555 default:
3556 ocs_log_test(sli4->os, "bad queue type %d\n", q->type);
3557 return -1;
3558 }
3559
3560 return 0;
3561 }
3562
3563 static int32_t
3564 sli_request_features(sli4_t *sli4, sli4_features_t *features, uint8_t query)
3565 {
3566
3567 if (sli_cmd_request_features(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
3568 *features, query)) {
3569 sli4_cmd_request_features_t *req_features = sli4->bmbx.virt;
3570
3571 if (sli_bmbx_command(sli4)) {
3572 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (REQUEST_FEATURES)\n");
3573 return -1;
3574 }
3575 if (req_features->hdr.status) {
3576 ocs_log_err(sli4->os, "REQUEST_FEATURES bad status %#x\n",
3577 req_features->hdr.status);
3578 return -1;
3579 }
3580 features->dword = req_features->response.dword;
3581 } else {
3582 ocs_log_err(sli4->os, "bad REQUEST_FEATURES write\n");
3583 return -1;
3584 }
3585
3586 return 0;
3587 }
3588
3589 /**
3590 * @brief Calculate max queue entries.
3591 *
3592 * @param sli4 SLI context.
3593 *
3594 * @return None.
3595 */
3596 void
3597 sli_calc_max_qentries(sli4_t *sli4)
3598 {
3599 sli4_qtype_e q;
3600 uint32_t alloc_size, qentries, qentry_size;
3601
3602 for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) {
3603 sli4->config.max_qentries[q] = sli_convert_mask_to_count(sli4->config.count_method[q],
3604 sli4->config.count_mask[q]);
3605 }
3606
3607 	/* Single, contiguous DMA allocations will be made for each queue
3608 * of size (max_qentries * queue entry size); since these can be large,
3609 * check against the OS max DMA allocation size
3610 */
3611 for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) {
3612 qentries = sli4->config.max_qentries[q];
3613 qentry_size = sli_get_queue_entry_size(sli4, q);
3614 alloc_size = qentries * qentry_size;
3615 if (alloc_size > ocs_max_dma_alloc(sli4->os, SLI_PAGE_SIZE)) {
3616 while (alloc_size > ocs_max_dma_alloc(sli4->os, SLI_PAGE_SIZE)) {
3617 				/* cut the qentries in half until alloc_size <= max DMA alloc size */
3618 qentries >>= 1;
3619 alloc_size = qentries * qentry_size;
3620 }
3621 ocs_log_debug(sli4->os, "[%s]: max_qentries from %d to %d (max dma %d)\n",
3622 SLI_QNAME[q], sli4->config.max_qentries[q],
3623 qentries, ocs_max_dma_alloc(sli4->os, SLI_PAGE_SIZE));
3624 sli4->config.max_qentries[q] = qentries;
3625 }
3626 }
3627 }
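
/*
 * Worked example of the halving loop above (illustrative numbers only):
 * with a reported maximum of 4096 RQ entries, a 64-byte RQE, and an OS DMA
 * allocation limit of 128 KiB, one halving step is needed:
 *
 *	4096 entries * 64 bytes = 262144 bytes  > 131072 -> halve
 *	2048 entries * 64 bytes = 131072 bytes <= 131072 -> max_qentries = 2048
 */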
3628
3629 /**
3630 * @brief Issue a FW_CONFIG mailbox command and store the results.
3631 *
3632 * @param sli4 SLI context.
3633 *
3634 * @return Returns 0 on success, or a non-zero value on failure.
3635 */
3636 static int32_t
3637 sli_query_fw_config(sli4_t *sli4)
3638 {
3639 /*
3640 * Read the device configuration
3641 *
3642 * Note: Only ulp0 fields contain values
3643 */
3644 if (sli_cmd_common_query_fw_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
3645 sli4_res_common_query_fw_config_t *fw_config =
3646 (sli4_res_common_query_fw_config_t *)
3647 (((uint8_t *)sli4->bmbx.virt) + offsetof(sli4_cmd_sli_config_t, payload.embed));
3648
3649 if (sli_bmbx_command(sli4)) {
3650 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (QUERY_FW_CONFIG)\n");
3651 return -1;
3652 }
3653 if (fw_config->hdr.status) {
3654 ocs_log_err(sli4->os, "COMMON_QUERY_FW_CONFIG bad status %#x\n",
3655 fw_config->hdr.status);
3656 return -1;
3657 }
3658
3659 sli4->physical_port = fw_config->physical_port;
3660 sli4->config.dual_ulp_capable = ((fw_config->function_mode & SLI4_FUNCTION_MODE_DUA_MODE) == 0 ? 0 : 1);
3661 sli4->config.is_ulp_fc[0] = ((fw_config->ulp0_mode &
3662 (SLI4_ULP_MODE_FCOE_INI |
3663 SLI4_ULP_MODE_FCOE_TGT)) == 0 ? 0 : 1);
3664 sli4->config.is_ulp_fc[1] = ((fw_config->ulp1_mode &
3665 (SLI4_ULP_MODE_FCOE_INI |
3666 SLI4_ULP_MODE_FCOE_TGT)) == 0 ? 0 : 1);
3667
3668 if (sli4->config.dual_ulp_capable) {
3669 /*
3670 * Lancer will not support this, so we use the values
3671 * from the READ_CONFIG.
3672 */
3673 if (sli4->config.is_ulp_fc[0] &&
3674 sli4->config.is_ulp_fc[1]) {
3675 sli4->config.max_qcount[SLI_QTYPE_WQ] = fw_config->ulp0_toe_wq_total + fw_config->ulp1_toe_wq_total;
3676 sli4->config.max_qcount[SLI_QTYPE_RQ] = fw_config->ulp0_toe_defrq_total + fw_config->ulp1_toe_defrq_total;
3677 } else if (sli4->config.is_ulp_fc[0]) {
3678 sli4->config.max_qcount[SLI_QTYPE_WQ] = fw_config->ulp0_toe_wq_total;
3679 sli4->config.max_qcount[SLI_QTYPE_RQ] = fw_config->ulp0_toe_defrq_total;
3680 } else {
3681 sli4->config.max_qcount[SLI_QTYPE_WQ] = fw_config->ulp1_toe_wq_total;
3682 sli4->config.max_qcount[SLI_QTYPE_RQ] = fw_config->ulp1_toe_defrq_total;
3683 }
3684 }
3685 } else {
3686 ocs_log_err(sli4->os, "bad QUERY_FW_CONFIG write\n");
3687 return -1;
3688 }
3689 return 0;
3690 }
3691
3692 static int32_t
3693 sli_get_config(sli4_t *sli4)
3694 {
3695 ocs_dma_t get_cntl_addl_data;
3696
3697 /*
3698 * Read the device configuration
3699 */
3700 if (sli_cmd_read_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
3701 sli4_res_read_config_t *read_config = sli4->bmbx.virt;
3702 uint32_t i;
3703 uint32_t total;
3704
3705 if (sli_bmbx_command(sli4)) {
3706 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (READ_CONFIG)\n");
3707 return -1;
3708 }
3709 if (read_config->hdr.status) {
3710 ocs_log_err(sli4->os, "READ_CONFIG bad status %#x\n",
3711 read_config->hdr.status);
3712 return -1;
3713 }
3714
3715 sli4->config.has_extents = read_config->ext;
3716 if (FALSE == sli4->config.has_extents) {
3717 uint32_t i = 0;
3718 uint32_t *base = sli4->config.extent[0].base;
3719
3720 if (!base) {
3721 if (NULL == (base = ocs_malloc(sli4->os, SLI_RSRC_MAX * sizeof(uint32_t),
3722 OCS_M_ZERO | OCS_M_NOWAIT))) {
3723 ocs_log_err(sli4->os, "memory allocation failed for sli4_resource_t\n");
3724 return -1;
3725 }
3726 }
3727
3728 for (i = 0; i < SLI_RSRC_MAX; i++) {
3729 sli4->config.extent[i].number = 1;
3730 sli4->config.extent[i].n_alloc = 0;
3731 sli4->config.extent[i].base = &base[i];
3732 }
3733
3734 sli4->config.extent[SLI_RSRC_FCOE_VFI].base[0] = read_config->vfi_base;
3735 sli4->config.extent[SLI_RSRC_FCOE_VFI].size = read_config->vfi_count;
3736
3737 sli4->config.extent[SLI_RSRC_FCOE_VPI].base[0] = read_config->vpi_base;
3738 sli4->config.extent[SLI_RSRC_FCOE_VPI].size = read_config->vpi_count;
3739
3740 sli4->config.extent[SLI_RSRC_FCOE_RPI].base[0] = read_config->rpi_base;
3741 sli4->config.extent[SLI_RSRC_FCOE_RPI].size = read_config->rpi_count;
3742
3743 sli4->config.extent[SLI_RSRC_FCOE_XRI].base[0] = read_config->xri_base;
3744 sli4->config.extent[SLI_RSRC_FCOE_XRI].size = OCS_MIN(255,read_config->xri_count);
3745
3746 sli4->config.extent[SLI_RSRC_FCOE_FCFI].base[0] = 0;
3747 sli4->config.extent[SLI_RSRC_FCOE_FCFI].size = read_config->fcfi_count;
3748 } else {
3749 /* TODO extents*/
3750 ;
3751 }
3752
3753 for (i = 0; i < SLI_RSRC_MAX; i++) {
3754 total = sli4->config.extent[i].number * sli4->config.extent[i].size;
3755 sli4->config.extent[i].use_map = ocs_bitmap_alloc(total);
3756 if (NULL == sli4->config.extent[i].use_map) {
3757 ocs_log_err(sli4->os, "bitmap memory allocation failed "
3758 "resource %d\n", i);
3759 return -1;
3760 }
3761 sli4->config.extent[i].map_size = total;
3762 }
3763
3764 sli4->config.topology = read_config->topology;
3765 sli4->config.ptv = read_config->ptv;
3766 if (sli4->config.ptv){
3767 sli4->config.tf = read_config->tf;
3768 sli4->config.pt = read_config->pt;
3769 }
3770 		ocs_log_info(sli4->os, "Persistent Topology: topology: %#x, PTV: %d, TF: %d, PT: %d\n",
3771 			     sli4->config.topology, sli4->config.ptv, sli4->config.tf, sli4->config.pt);
3772
3773 switch (sli4->config.topology) {
3774 case SLI4_READ_CFG_TOPO_FCOE:
3775 ocs_log_debug(sli4->os, "FCoE\n");
3776 break;
3777 case SLI4_READ_CFG_TOPO_FC:
3778 ocs_log_debug(sli4->os, "FC (unknown)\n");
3779 break;
3780 case SLI4_READ_CFG_TOPO_FC_DA:
3781 ocs_log_debug(sli4->os, "FC (direct attach)\n");
3782 break;
3783 case SLI4_READ_CFG_TOPO_FC_AL:
3784 ocs_log_debug(sli4->os, "FC (arbitrated loop)\n");
3785 break;
3786 default:
3787 ocs_log_test(sli4->os, "bad topology %#x\n", sli4->config.topology);
3788 }
3789
3790 sli4->config.e_d_tov = read_config->e_d_tov;
3791 sli4->config.r_a_tov = read_config->r_a_tov;
3792
3793 sli4->config.link_module_type = read_config->lmt;
3794
3795 sli4->config.max_qcount[SLI_QTYPE_EQ] = read_config->eq_count;
3796 sli4->config.max_qcount[SLI_QTYPE_CQ] = read_config->cq_count;
3797 sli4->config.max_qcount[SLI_QTYPE_WQ] = read_config->wq_count;
3798 sli4->config.max_qcount[SLI_QTYPE_RQ] = read_config->rq_count;
3799
3800 /*
3801 * READ_CONFIG doesn't give the max number of MQ. Applications
3802 * will typically want 1, but we may need another at some future
3803 * date. Dummy up a "max" MQ count here.
3804 */
3805 sli4->config.max_qcount[SLI_QTYPE_MQ] = SLI_USER_MQ_COUNT;
3806 } else {
3807 ocs_log_err(sli4->os, "bad READ_CONFIG write\n");
3808 return -1;
3809 }
3810
3811 if (sli_cmd_common_get_sli4_parameters(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
3812 sli4_res_common_get_sli4_parameters_t *parms = (sli4_res_common_get_sli4_parameters_t *)
3813 (((uint8_t *)sli4->bmbx.virt) + offsetof(sli4_cmd_sli_config_t, payload.embed));
3814
3815 if (sli_bmbx_command(sli4)) {
3816 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_SLI4_PARAMETERS)\n");
3817 return -1;
3818 } else if (parms->hdr.status) {
3819 ocs_log_err(sli4->os, "COMMON_GET_SLI4_PARAMETERS bad status %#x att'l %#x\n",
3820 parms->hdr.status, parms->hdr.additional_status);
3821 return -1;
3822 }
3823
3824 sli4->config.auto_reg = parms->areg;
3825 sli4->config.auto_xfer_rdy = parms->agxf;
3826 sli4->config.hdr_template_req = parms->hdrr;
3827 sli4->config.t10_dif_inline_capable = parms->timm;
3828 sli4->config.t10_dif_separate_capable = parms->tsmm;
3829
3830 sli4->config.mq_create_version = parms->mqv;
3831 sli4->config.cq_create_version = parms->cqv;
3832 sli4->config.rq_min_buf_size = parms->min_rq_buffer_size;
3833 sli4->config.rq_max_buf_size = parms->max_rq_buffer_size;
3834
3835 sli4->config.qpage_count[SLI_QTYPE_EQ] = parms->eq_page_cnt;
3836 sli4->config.qpage_count[SLI_QTYPE_CQ] = parms->cq_page_cnt;
3837 sli4->config.qpage_count[SLI_QTYPE_MQ] = parms->mq_page_cnt;
3838 sli4->config.qpage_count[SLI_QTYPE_WQ] = parms->wq_page_cnt;
3839 sli4->config.qpage_count[SLI_QTYPE_RQ] = parms->rq_page_cnt;
3840
3841 /* save count methods and masks for each queue type */
3842 sli4->config.count_mask[SLI_QTYPE_EQ] = parms->eqe_count_mask;
3843 sli4->config.count_method[SLI_QTYPE_EQ] = parms->eqe_count_method;
3844 sli4->config.count_mask[SLI_QTYPE_CQ] = parms->cqe_count_mask;
3845 sli4->config.count_method[SLI_QTYPE_CQ] = parms->cqe_count_method;
3846 sli4->config.count_mask[SLI_QTYPE_MQ] = parms->mqe_count_mask;
3847 sli4->config.count_method[SLI_QTYPE_MQ] = parms->mqe_count_method;
3848 sli4->config.count_mask[SLI_QTYPE_WQ] = parms->wqe_count_mask;
3849 sli4->config.count_method[SLI_QTYPE_WQ] = parms->wqe_count_method;
3850 sli4->config.count_mask[SLI_QTYPE_RQ] = parms->rqe_count_mask;
3851 sli4->config.count_method[SLI_QTYPE_RQ] = parms->rqe_count_method;
3852
3853 /* now calculate max queue entries */
3854 sli_calc_max_qentries(sli4);
3855
3856 sli4->config.max_sgl_pages = parms->sgl_page_cnt; /* max # of pages */
3857 sli4->config.sgl_page_sizes = parms->sgl_page_sizes; /* bit map of available sizes */
3858 /* ignore HLM here. Use value from REQUEST_FEATURES */
3859
3860 sli4->config.sge_supported_length = parms->sge_supported_length;
3861 if (sli4->config.sge_supported_length > OCS_MAX_SGE_SIZE)
3862 sli4->config.sge_supported_length = OCS_MAX_SGE_SIZE;
3863
3864 sli4->config.sgl_pre_registration_required = parms->sglr;
3865 /* default to using pre-registered SGL's */
3866 sli4->config.sgl_pre_registered = TRUE;
3867
3868 sli4->config.perf_hint = parms->phon;
3869 sli4->config.perf_wq_id_association = parms->phwq;
3870
3871 sli4->config.rq_batch = parms->rq_db_window;
3872
3873 /* save the fields for skyhawk SGL chaining */
3874 sli4->config.sgl_chaining_params.chaining_capable =
3875 (parms->sglc == 1);
3876 sli4->config.sgl_chaining_params.frag_num_field_offset =
3877 parms->frag_num_field_offset;
3878 sli4->config.sgl_chaining_params.frag_num_field_mask =
3879 (1ull << parms->frag_num_field_size) - 1;
3880 sli4->config.sgl_chaining_params.sgl_index_field_offset =
3881 parms->sgl_index_field_offset;
3882 sli4->config.sgl_chaining_params.sgl_index_field_mask =
3883 (1ull << parms->sgl_index_field_size) - 1;
3884 sli4->config.sgl_chaining_params.chain_sge_initial_value_lo =
3885 parms->chain_sge_initial_value_lo;
3886 sli4->config.sgl_chaining_params.chain_sge_initial_value_hi =
3887 parms->chain_sge_initial_value_hi;
3888
3889 /* Use the highest available WQE size. */
3890 if (parms->wqe_sizes & SLI4_128BYTE_WQE_SUPPORT) {
3891 sli4->config.wqe_size = SLI4_WQE_EXT_BYTES;
3892 } else {
3893 sli4->config.wqe_size = SLI4_WQE_BYTES;
3894 }
3895 }
3896
3897 if (sli_query_fw_config(sli4)) {
3898 ocs_log_err(sli4->os, "Error sending QUERY_FW_CONFIG\n");
3899 return -1;
3900 }
3901
3902 sli4->config.port_number = 0;
3903
3904 /*
3905 * Issue COMMON_GET_CNTL_ATTRIBUTES to get port_number. Temporarily
3906 * uses VPD DMA buffer as the response won't fit in the embedded
3907 * buffer.
3908 */
3909 if (sli_cmd_common_get_cntl_attributes(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &sli4->vpd.data)) {
3910 sli4_res_common_get_cntl_attributes_t *attr = sli4->vpd.data.virt;
3911
3912 if (sli_bmbx_command(sli4)) {
3913 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_CNTL_ATTRIBUTES)\n");
3914 return -1;
3915 } else if (attr->hdr.status) {
3916 ocs_log_err(sli4->os, "COMMON_GET_CNTL_ATTRIBUTES bad status %#x att'l %#x\n",
3917 attr->hdr.status, attr->hdr.additional_status);
3918 return -1;
3919 }
3920
3921 sli4->config.port_number = attr->port_number;
3922
3923 ocs_memcpy(sli4->config.bios_version_string, attr->bios_version_string,
3924 sizeof(sli4->config.bios_version_string));
3925 } else {
3926 ocs_log_err(sli4->os, "bad COMMON_GET_CNTL_ATTRIBUTES write\n");
3927 return -1;
3928 }
3929
3930 if (ocs_dma_alloc(sli4->os, &get_cntl_addl_data, sizeof(sli4_res_common_get_cntl_addl_attributes_t),
3931 OCS_MIN_DMA_ALIGNMENT)) {
3932 ocs_log_err(sli4->os, "Failed to allocate memory for GET_CNTL_ADDL_ATTR data\n");
3933 } else {
3934 if (sli_cmd_common_get_cntl_addl_attributes(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
3935 &get_cntl_addl_data)) {
3936 sli4_res_common_get_cntl_addl_attributes_t *attr = get_cntl_addl_data.virt;
3937
3938 if (sli_bmbx_command(sli4)) {
3939 ocs_log_crit(sli4->os,
3940 "bootstrap mailbox write fail (COMMON_GET_CNTL_ADDL_ATTRIBUTES)\n");
3941 ocs_dma_free(sli4->os, &get_cntl_addl_data);
3942 return -1;
3943 }
3944 if (attr->hdr.status) {
3945 ocs_log_err(sli4->os, "COMMON_GET_CNTL_ADDL_ATTRIBUTES bad status %#x\n",
3946 attr->hdr.status);
3947 ocs_dma_free(sli4->os, &get_cntl_addl_data);
3948 return -1;
3949 }
3950
3951 ocs_memcpy(sli4->config.ipl_name, attr->ipl_file_name, sizeof(sli4->config.ipl_name));
3952
3953 ocs_log_debug(sli4->os, "IPL:%s \n", (char*)sli4->config.ipl_name);
3954 } else {
3955 ocs_log_err(sli4->os, "bad COMMON_GET_CNTL_ADDL_ATTRIBUTES write\n");
3956 ocs_dma_free(sli4->os, &get_cntl_addl_data);
3957 return -1;
3958 }
3959
3960 ocs_dma_free(sli4->os, &get_cntl_addl_data);
3961 }
3962
3963 if (sli_cmd_common_get_port_name(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
3964 sli4_res_common_get_port_name_t *port_name = (sli4_res_common_get_port_name_t *)(((uint8_t *)sli4->bmbx.virt) +
3965 offsetof(sli4_cmd_sli_config_t, payload.embed));
3966
3967 if (sli_bmbx_command(sli4)) {
3968 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_PORT_NAME)\n");
3969 return -1;
3970 }
3971
3972 sli4->config.port_name[0] = port_name->port_name[sli4->config.port_number];
3973 }
3974 sli4->config.port_name[1] = '\0';
3975
3976 if (sli_cmd_read_rev(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &sli4->vpd.data)) {
3977 sli4_cmd_read_rev_t *read_rev = sli4->bmbx.virt;
3978
3979 if (sli_bmbx_command(sli4)) {
3980 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (READ_REV)\n");
3981 return -1;
3982 }
3983 if (read_rev->hdr.status) {
3984 ocs_log_err(sli4->os, "READ_REV bad status %#x\n",
3985 read_rev->hdr.status);
3986 return -1;
3987 }
3988
3989 sli4->config.fw_rev[0] = read_rev->first_fw_id;
3990 ocs_memcpy(sli4->config.fw_name[0],read_rev->first_fw_name, sizeof(sli4->config.fw_name[0]));
3991
3992 sli4->config.fw_rev[1] = read_rev->second_fw_id;
3993 ocs_memcpy(sli4->config.fw_name[1],read_rev->second_fw_name, sizeof(sli4->config.fw_name[1]));
3994
3995 sli4->config.hw_rev[0] = read_rev->first_hw_revision;
3996 sli4->config.hw_rev[1] = read_rev->second_hw_revision;
3997 sli4->config.hw_rev[2] = read_rev->third_hw_revision;
3998
3999 ocs_log_debug(sli4->os, "FW1:%s (%08x) / FW2:%s (%08x)\n",
4000 read_rev->first_fw_name, read_rev->first_fw_id,
4001 read_rev->second_fw_name, read_rev->second_fw_id);
4002
4003 ocs_log_debug(sli4->os, "HW1: %08x / HW2: %08x\n", read_rev->first_hw_revision,
4004 read_rev->second_hw_revision);
4005
4006 /* Check that all VPD data was returned */
4007 if (read_rev->returned_vpd_length != read_rev->actual_vpd_length) {
4008 ocs_log_test(sli4->os, "VPD length: available=%d returned=%d actual=%d\n",
4009 read_rev->available_length,
4010 read_rev->returned_vpd_length,
4011 read_rev->actual_vpd_length);
4012 }
4013 sli4->vpd.length = read_rev->returned_vpd_length;
4014 } else {
4015 ocs_log_err(sli4->os, "bad READ_REV write\n");
4016 return -1;
4017 }
4018
4019 if (sli_cmd_read_nvparms(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
4020 sli4_cmd_read_nvparms_t *read_nvparms = sli4->bmbx.virt;
4021
4022 if (sli_bmbx_command(sli4)) {
4023 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (READ_NVPARMS)\n");
4024 return -1;
4025 }
4026 if (read_nvparms->hdr.status) {
4027 ocs_log_err(sli4->os, "READ_NVPARMS bad status %#x\n",
4028 read_nvparms->hdr.status);
4029 return -1;
4030 }
4031
4032 ocs_memcpy(sli4->config.wwpn, read_nvparms->wwpn, sizeof(sli4->config.wwpn));
4033 ocs_memcpy(sli4->config.wwnn, read_nvparms->wwnn, sizeof(sli4->config.wwnn));
4034
4035 ocs_log_debug(sli4->os, "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
4036 sli4->config.wwpn[0],
4037 sli4->config.wwpn[1],
4038 sli4->config.wwpn[2],
4039 sli4->config.wwpn[3],
4040 sli4->config.wwpn[4],
4041 sli4->config.wwpn[5],
4042 sli4->config.wwpn[6],
4043 sli4->config.wwpn[7]);
4044 ocs_log_debug(sli4->os, "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
4045 sli4->config.wwnn[0],
4046 sli4->config.wwnn[1],
4047 sli4->config.wwnn[2],
4048 sli4->config.wwnn[3],
4049 sli4->config.wwnn[4],
4050 sli4->config.wwnn[5],
4051 sli4->config.wwnn[6],
4052 sli4->config.wwnn[7]);
4053 } else {
4054 ocs_log_err(sli4->os, "bad READ_NVPARMS write\n");
4055 return -1;
4056 }
4057
4058 return 0;
4059 }
4060
4061 /****************************************************************************
4062 * Public functions
4063 */
4064
4065 /**
4066 * @ingroup sli
4067 * @brief Set up the SLI context.
4068 *
4069 * @param sli4 SLI context.
4070 * @param os Device abstraction.
4071 * @param port_type Protocol type of port (for example, FC and NIC).
4072 *
4073 * @return Returns 0 on success, or a non-zero value on failure.
4074 */
4075 int32_t
4076 sli_setup(sli4_t *sli4, ocs_os_handle_t os, sli4_port_type_e port_type)
4077 {
4078 uint32_t sli_intf = UINT32_MAX;
4079 uint32_t pci_class_rev = 0;
4080 uint32_t rev_id = 0;
4081 uint32_t family = 0;
4082 uint32_t i;
4083 sli4_asic_entry_t *asic;
4084
4085 ocs_memset(sli4, 0, sizeof(sli4_t));
4086
4087 sli4->os = os;
4088 sli4->port_type = port_type;
4089
4090 /*
4091 * Read the SLI_INTF register to discover the register layout
4092 * and other capability information
4093 */
4094 sli_intf = ocs_config_read32(os, SLI4_INTF_REG);
4095
4096 if (sli_intf_valid_check(sli_intf)) {
4097 ocs_log_err(os, "SLI_INTF is not valid\n");
4098 return -1;
4099 }
4100
4101 	/* driver only supports SLI-4 */
4102 sli4->sli_rev = sli_intf_sli_revision(sli_intf);
4103 if (4 != sli4->sli_rev) {
4104 ocs_log_err(os, "Unsupported SLI revision (intf=%#x)\n",
4105 sli_intf);
4106 return -1;
4107 }
4108
4109 sli4->sli_family = sli_intf_sli_family(sli_intf);
4110
4111 sli4->if_type = sli_intf_if_type(sli_intf);
4112
4113 if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli4->if_type) ||
4114 (SLI4_IF_TYPE_LANCER_G7 == sli4->if_type)) {
4115 ocs_log_debug(os, "status=%#x error1=%#x error2=%#x\n",
4116 sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS),
4117 sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR1),
4118 sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR2));
4119 }
4120
4121 /*
4122 * set the ASIC type and revision
4123 */
4124 pci_class_rev = ocs_config_read32(os, SLI4_PCI_CLASS_REVISION);
4125 rev_id = sli_pci_rev_id(pci_class_rev);
4126 family = sli4->sli_family;
4127 if (family == SLI4_FAMILY_CHECK_ASIC_TYPE) {
4128 uint32_t asic_id = ocs_config_read32(os, SLI4_ASIC_ID_REG);
4129 family = sli_asic_gen(asic_id);
4130 }
4131
4132 for (i = 0, asic = sli4_asic_table; i < ARRAY_SIZE(sli4_asic_table); i++, asic++) {
4133 if ((rev_id == asic->rev_id) && (family == asic->family)) {
4134 sli4->asic_type = asic->type;
4135 sli4->asic_rev = asic->rev;
4136 break;
4137 }
4138 }
4139 /* Fail if no matching asic type/rev was found */
4140 if( (sli4->asic_type == 0) || (sli4->asic_rev == 0)) {
4141 ocs_log_err(os, "no matching asic family/rev found: %02x/%02x\n", family, rev_id);
4142 return -1;
4143 }
4144
4145 /*
4146 * The bootstrap mailbox is equivalent to a MQ with a single 256 byte
4147 * entry, a CQ with a single 16 byte entry, and no event queue.
4148 * Alignment must be 16 bytes as the low order address bits in the
4149 * address register are also control / status.
4150 */
4151 if (ocs_dma_alloc(sli4->os, &sli4->bmbx, SLI4_BMBX_SIZE +
4152 sizeof(sli4_mcqe_t), 16)) {
4153 ocs_log_err(os, "bootstrap mailbox allocation failed\n");
4154 return -1;
4155 }
4156
4157 if (sli4->bmbx.phys & SLI4_BMBX_MASK_LO) {
4158 ocs_log_err(os, "bad alignment for bootstrap mailbox\n");
4159 return -1;
4160 }
4161
4162 ocs_log_debug(os, "bmbx v=%p p=0x%x %08x s=%zd\n", sli4->bmbx.virt,
4163 ocs_addr32_hi(sli4->bmbx.phys),
4164 ocs_addr32_lo(sli4->bmbx.phys),
4165 sli4->bmbx.size);
4166
4167 /* TODO 4096 is arbitrary. What should this value actually be? */
4168 if (ocs_dma_alloc(sli4->os, &sli4->vpd.data, 4096/*TODO*/, 4096)) {
4169 /* Note that failure isn't fatal in this specific case */
4170 sli4->vpd.data.size = 0;
4171 ocs_log_test(os, "VPD buffer allocation failed\n");
4172 }
4173
4174 if (sli_fw_init(sli4)) {
4175 ocs_log_err(sli4->os, "FW initialization failed\n");
4176 return -1;
4177 }
4178
4179 /*
4180 * Set one of fcpi(initiator), fcpt(target), fcpc(combined) to true
4181 * in addition to any other desired features
4182 */
4183 sli4->config.features.flag.iaab = TRUE;
4184 sli4->config.features.flag.npiv = TRUE;
4185 sli4->config.features.flag.dif = TRUE;
4186 sli4->config.features.flag.vf = TRUE;
4187 sli4->config.features.flag.fcpc = TRUE;
4188 sli4->config.features.flag.iaar = TRUE;
4189 sli4->config.features.flag.hlm = TRUE;
4190 sli4->config.features.flag.perfh = TRUE;
4191 sli4->config.features.flag.rxseq = TRUE;
4192 sli4->config.features.flag.rxri = TRUE;
4193 sli4->config.features.flag.mrqp = TRUE;
4194
4195 /* use performance hints if available */
4196 if (sli4->config.perf_hint) {
4197 sli4->config.features.flag.perfh = TRUE;
4198 }
4199
4200 if (sli_request_features(sli4, &sli4->config.features, TRUE)) {
4201 return -1;
4202 }
4203
4204 if (sli_get_config(sli4)) {
4205 return -1;
4206 }
4207
4208 return 0;
4209 }
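
/*
 * Bring-up sketch (illustrative only; the hw layer normally drives this
 * sequence). Assumes "os" is a valid ocs_os_handle_t for an FC function.
 *
 *	sli4_t sli;
 *
 *	if (sli_setup(&sli, os, SLI4_PORT_TYPE_FC)) {
 *		return -1;			// SLI_INTF probe or FW init failed
 *	}
 *	if (sli_init(&sli)) {			// commits REQUEST_FEATURES (query=FALSE)
 *		sli_teardown(&sli);
 *		return -1;
 *	}
 */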
4210
4211 bool
4212 sli_persist_topology_enabled(sli4_t *sli4)
4213 {
4214 return (sli4->config.ptv);
4215 }
4216
4217 int32_t
4218 sli_init(sli4_t *sli4)
4219 {
4220
4221 if (sli4->config.has_extents) {
4222 /* TODO COMMON_ALLOC_RESOURCE_EXTENTS */;
4223 ocs_log_test(sli4->os, "XXX need to implement extent allocation\n");
4224 return -1;
4225 }
4226
4227 sli4->config.features.flag.hlm = sli4->config.high_login_mode;
4228 sli4->config.features.flag.rxseq = FALSE;
4229 sli4->config.features.flag.rxri = FALSE;
4230
4231 if (sli_request_features(sli4, &sli4->config.features, FALSE)) {
4232 return -1;
4233 }
4234
4235 return 0;
4236 }
4237
4238 int32_t
4239 sli_reset(sli4_t *sli4)
4240 {
4241 uint32_t i;
4242
4243 if (sli_fw_init(sli4)) {
4244 ocs_log_crit(sli4->os, "FW initialization failed\n");
4245 return -1;
4246 }
4247
4248 if (sli4->config.extent[0].base) {
4249 ocs_free(sli4->os, sli4->config.extent[0].base, SLI_RSRC_MAX * sizeof(uint32_t));
4250 sli4->config.extent[0].base = NULL;
4251 }
4252
4253 for (i = 0; i < SLI_RSRC_MAX; i++) {
4254 if (sli4->config.extent[i].use_map) {
4255 ocs_bitmap_free(sli4->config.extent[i].use_map);
4256 sli4->config.extent[i].use_map = NULL;
4257 }
4258 sli4->config.extent[i].base = NULL;
4259 }
4260
4261 if (sli_get_config(sli4)) {
4262 return -1;
4263 }
4264
4265 return 0;
4266 }
4267
4268 /**
4269 * @ingroup sli
4270 * @brief Issue a Firmware Reset.
4271 *
4272 * @par Description
4273 * Issues a Firmware Reset to the chip. This reset affects the entire chip,
4274 * so all PCI functions on the same PCI bus and device are affected.
4275 * @n @n This type of reset can be used to activate newly downloaded firmware.
4276 * @n @n The driver should be considered to be in an unknown state after this
4277 * reset and should be reloaded.
4278 *
4279 * @param sli4 SLI context.
4280 *
4281 * @return Returns 0 on success, or -1 otherwise.
4282 */
4283
4284 int32_t
4285 sli_fw_reset(sli4_t *sli4)
4286 {
4287 uint32_t val;
4288 uint32_t ready;
4289
4290 /*
4291 * Firmware must be ready before issuing the reset.
4292 */
4293 ready = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC);
4294 if (!ready) {
4295 ocs_log_crit(sli4->os, "FW status is NOT ready\n");
4296 return -1;
4297 }
4298 switch(sli4->if_type) {
4299 case SLI4_IF_TYPE_BE3_SKH_PF:
4300 /* BE3 / Skyhawk use PCICFG_SOFT_RESET_CSR */
4301 val = ocs_config_read32(sli4->os, SLI4_PCI_SOFT_RESET_CSR);
4302 val |= SLI4_PCI_SOFT_RESET_MASK;
4303 ocs_config_write32(sli4->os, SLI4_PCI_SOFT_RESET_CSR, val);
4304 break;
4305 case SLI4_IF_TYPE_LANCER_FC_ETH:
4306 /* Lancer uses PHYDEV_CONTROL */
4307
4308 val = SLI4_PHYDEV_CONTROL_FRST;
4309 sli_reg_write(sli4, SLI4_REG_PHYSDEV_CONTROL, val);
4310 break;
4311 default:
4312 ocs_log_test(sli4->os, "Unexpected iftype %d\n", sli4->if_type);
4313 return -1;
4314 break;
4315 }
4316
4317 /* wait for the FW to become ready after the reset */
4318 ready = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC);
4319 if (!ready) {
4320 ocs_log_crit(sli4->os, "Failed to become ready after firmware reset\n");
4321 return -1;
4322 }
4323 return 0;
4324 }
4325
4326 /**
4327 * @ingroup sli
4328 * @brief Tear down a SLI context.
4329 *
4330 * @param sli4 SLI context.
4331 *
4332 * @return Returns 0 on success, or non-zero otherwise.
4333 */
4334 int32_t
4335 sli_teardown(sli4_t *sli4)
4336 {
4337 uint32_t i;
4338
4339 if (sli4->config.extent[0].base) {
4340 ocs_free(sli4->os, sli4->config.extent[0].base, SLI_RSRC_MAX * sizeof(uint32_t));
4341 sli4->config.extent[0].base = NULL;
4342 }
4343
4344 for (i = 0; i < SLI_RSRC_MAX; i++) {
4345 if (sli4->config.has_extents) {
4346 /* TODO COMMON_DEALLOC_RESOURCE_EXTENTS */;
4347 }
4348
4349 sli4->config.extent[i].base = NULL;
4350
4351 ocs_bitmap_free(sli4->config.extent[i].use_map);
4352 sli4->config.extent[i].use_map = NULL;
4353 }
4354
4355 if (sli_fw_term(sli4)) {
4356 ocs_log_err(sli4->os, "FW deinitialization failed\n");
4357 }
4358
4359 ocs_dma_free(sli4->os, &sli4->vpd.data);
4360 ocs_dma_free(sli4->os, &sli4->bmbx);
4361
4362 return 0;
4363 }
4364
4365 /**
4366 * @ingroup sli
4367 * @brief Register a callback for the given event.
4368 *
4369 * @param sli4 SLI context.
4370 * @param which Event of interest.
4371 * @param func Function to call when the event occurs.
4372 * @param arg Argument passed to the callback function.
4373 *
4374 * @return Returns 0 on success, or non-zero otherwise.
4375 */
4376 int32_t
4377 sli_callback(sli4_t *sli4, sli4_callback_e which, void *func, void *arg)
4378 {
4379
4380 if (!sli4 || !func || (which >= SLI4_CB_MAX)) {
4381 ocs_log_err(NULL, "bad parameter sli4=%p which=%#x func=%p\n",
4382 sli4, which, func);
4383 return -1;
4384 }
4385
4386 switch (which) {
4387 case SLI4_CB_LINK:
4388 sli4->link = func;
4389 sli4->link_arg = arg;
4390 break;
4391 case SLI4_CB_FIP:
4392 sli4->fip = func;
4393 sli4->fip_arg = arg;
4394 break;
4395 default:
4396 ocs_log_test(sli4->os, "unknown callback %#x\n", which);
4397 return -1;
4398 }
4399
4400 return 0;
4401 }
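
/*
 * Registration sketch (illustrative only). The exact callback prototypes are
 * declared in sli4.h; the signature below is an assumption for the example,
 * and "hw" is a hypothetical per-device context.
 *
 *	static int32_t
 *	my_link_cb(void *arg, void *event)
 *	{
 *		// interpret the link event posted by sli_cqe_async()
 *		return 0;
 *	}
 *
 *	...
 *	sli_callback(&sli, SLI4_CB_LINK, my_link_cb, hw);
 */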
4402
4403 /**
4404 * @ingroup sli
4405 * @brief Initialize a queue object.
4406 *
4407 * @par Description
4408 * This initializes the sli4_queue_t object members, including the underlying
4409 * DMA memory.
4410 *
4411 * @param sli4 SLI context.
4412 * @param q Pointer to queue object.
4413 * @param qtype Type of queue to create.
4414 * @param size Size of each entry.
4415 * @param n_entries Number of entries to allocate.
4416 * @param align Starting memory address alignment.
4417 *
4418 * @note Checks if using the existing DMA memory (if any) is possible. If not,
4419 * it frees the existing memory and re-allocates.
4420 *
4421 * @return Returns 0 on success, or non-zero otherwise.
4422 */
4423 int32_t
4424 __sli_queue_init(sli4_t *sli4, sli4_queue_t *q, uint32_t qtype,
4425 size_t size, uint32_t n_entries, uint32_t align)
4426 {
4427
4428 if ((q->dma.virt == NULL) || (size != q->size) || (n_entries != q->length)) {
4429 if (q->dma.size) {
4430 ocs_dma_free(sli4->os, &q->dma);
4431 }
4432
4433 ocs_memset(q, 0, sizeof(sli4_queue_t));
4434
4435 if (ocs_dma_alloc(sli4->os, &q->dma, size * n_entries, align)) {
4436 ocs_log_err(sli4->os, "%s allocation failed\n", SLI_QNAME[qtype]);
4437 return -1;
4438 }
4439
4440 ocs_memset(q->dma.virt, 0, size * n_entries);
4441
4442 ocs_lock_init(sli4->os, &q->lock, "%s lock[%d:%p]",
4443 SLI_QNAME[qtype], ocs_instance(sli4->os), &q->lock);
4444
4445 q->type = qtype;
4446 q->size = size;
4447 q->length = n_entries;
4448
4449 		/* Limit to half the queue size per interrupt */
4450 q->proc_limit = n_entries / 2;
4451
4452 if ( (q->type == SLI_QTYPE_EQ) || (q->type == SLI_QTYPE_CQ) ) {
4453 /* For prism, phase will be flipped after a sweep through eq and cq */
4454 q->phase = 1;
4455 }
4456
4457 switch(q->type) {
4458 case SLI_QTYPE_EQ:
4459 q->posted_limit = q->length / 2;
4460 break;
4461 default:
4462 if ((sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF) ||
4463 (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_VF)) {
4464 /* For Skyhawk, ring the doorbell more often */
4465 q->posted_limit = 8;
4466 } else {
4467 q->posted_limit = 64;
4468 }
4469 break;
4470 }
4471 }
4472
4473 return 0;
4474 }
4475
4476 /**
4477 * @ingroup sli
4478 * @brief Issue the command to create a queue.
4479 *
4480 * @param sli4 SLI context.
4481 * @param q Pointer to queue object.
4482 *
4483 * @return Returns 0 on success, or non-zero otherwise.
4484 */
4485 int32_t
4486 __sli_create_queue(sli4_t *sli4, sli4_queue_t *q)
4487 {
4488 sli4_res_common_create_queue_t *res_q = NULL;
4489
4490 if (sli_bmbx_command(sli4)){
4491 ocs_log_crit(sli4->os, "bootstrap mailbox write fail %s\n",
4492 SLI_QNAME[q->type]);
4493 ocs_dma_free(sli4->os, &q->dma);
4494 return -1;
4495 }
4496 if (sli_res_sli_config(sli4->bmbx.virt)) {
4497 ocs_log_err(sli4->os, "bad status create %s\n", SLI_QNAME[q->type]);
4498 ocs_dma_free(sli4->os, &q->dma);
4499 return -1;
4500 }
4501 res_q = (void *)((uint8_t *)sli4->bmbx.virt +
4502 offsetof(sli4_cmd_sli_config_t, payload));
4503
4504 if (res_q->hdr.status) {
4505 ocs_log_err(sli4->os, "bad create %s status=%#x addl=%#x\n",
4506 SLI_QNAME[q->type],
4507 res_q->hdr.status, res_q->hdr.additional_status);
4508 ocs_dma_free(sli4->os, &q->dma);
4509 return -1;
4510 } else {
4511 q->id = res_q->q_id;
4512 q->doorbell_offset = res_q->db_offset;
4513 q->doorbell_rset = res_q->db_rs;
4514
4515 switch (q->type) {
4516 case SLI_QTYPE_EQ:
4517 /* No doorbell information in response for EQs */
4518 q->doorbell_offset = regmap[SLI4_REG_EQ_DOORBELL][sli4->if_type].off;
4519 q->doorbell_rset = regmap[SLI4_REG_EQ_DOORBELL][sli4->if_type].rset;
4520 break;
4521 case SLI_QTYPE_CQ:
4522 /* No doorbell information in response for CQs */
4523 q->doorbell_offset = regmap[SLI4_REG_CQ_DOORBELL][sli4->if_type].off;
4524 q->doorbell_rset = regmap[SLI4_REG_CQ_DOORBELL][sli4->if_type].rset;
4525 break;
4526 case SLI_QTYPE_MQ:
4527 /* No doorbell information in response for MQs */
4528 q->doorbell_offset = regmap[SLI4_REG_MQ_DOORBELL][sli4->if_type].off;
4529 q->doorbell_rset = regmap[SLI4_REG_MQ_DOORBELL][sli4->if_type].rset;
4530 break;
4531 case SLI_QTYPE_RQ:
4532 /* set the doorbell for non-skyhawks */
4533 if (!sli4->config.dual_ulp_capable) {
4534 q->doorbell_offset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].off;
4535 q->doorbell_rset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].rset;
4536 }
4537 break;
4538 case SLI_QTYPE_WQ:
4539 /* set the doorbell for non-skyhawks */
4540 if (!sli4->config.dual_ulp_capable) {
4541 q->doorbell_offset = regmap[SLI4_REG_IO_WQ_DOORBELL][sli4->if_type].off;
4542 q->doorbell_rset = regmap[SLI4_REG_IO_WQ_DOORBELL][sli4->if_type].rset;
4543 }
4544 break;
4545 default:
4546 break;
4547 }
4548 }
4549
4550 return 0;
4551 }
4552
4553 /**
4554 * @ingroup sli
4555 * @brief Get queue entry size.
4556 *
4557 * Get queue entry size given queue type.
4558 *
4559 * @param sli4 SLI context
4560 * @param qtype Type for which the entry size is returned.
4561 *
4562 * @return Returns > 0 on success (queue entry size), or a negative value on failure.
4563 */
4564 int32_t
4565 sli_get_queue_entry_size(sli4_t *sli4, uint32_t qtype)
4566 {
4567 uint32_t size = 0;
4568
4569 if (!sli4) {
4570 ocs_log_err(NULL, "bad parameter sli4=%p\n", sli4);
4571 return -1;
4572 }
4573
4574 switch (qtype) {
4575 case SLI_QTYPE_EQ:
4576 size = sizeof(uint32_t);
4577 break;
4578 case SLI_QTYPE_CQ:
4579 size = 16;
4580 break;
4581 case SLI_QTYPE_MQ:
4582 size = 256;
4583 break;
4584 case SLI_QTYPE_WQ:
4585 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
4586 size = sli4->config.wqe_size;
4587 } else {
4588 /* TODO */
4589 ocs_log_test(sli4->os, "unsupported queue entry size\n");
4590 return -1;
4591 }
4592 break;
4593 case SLI_QTYPE_RQ:
4594 size = SLI4_FCOE_RQE_SIZE;
4595 break;
4596 default:
4597 ocs_log_test(sli4->os, "unknown queue type %d\n", qtype);
4598 return -1;
4599 }
4600 return size;
4601 }
4602
4603 /**
4604 * @ingroup sli
4605 * @brief Modify the delay timer for all the EQs
4606 *
4607 * @param sli4 SLI context.
4608 * @param eq Array of EQs.
4609 * @param num_eq Count of EQs.
4610 * @param shift Phase shift for staggering interrupts.
4611 * @param delay_mult Delay multiplier for limiting interrupt frequency.
4612 *
4613 * @return Returns 0 on success, or -1 otherwise.
4614 */
4615 int32_t
4616 sli_eq_modify_delay(sli4_t *sli4, sli4_queue_t *eq, uint32_t num_eq, uint32_t shift, uint32_t delay_mult)
4617 {
4618
4619 sli_cmd_common_modify_eq_delay(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, eq, num_eq, shift, delay_mult);
4620
4621 if (sli_bmbx_command(sli4)) {
4622 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (MODIFY EQ DELAY)\n");
4623 return -1;
4624 }
4625 if (sli_res_sli_config(sli4->bmbx.virt)) {
4626 ocs_log_err(sli4->os, "bad status MODIFY EQ DELAY\n");
4627 return -1;
4628 }
4629
4630 return 0;
4631 }
4632
4633 /**
4634 * @ingroup sli
4635 * @brief Allocate a queue.
4636 *
4637 * @par Description
4638 * Allocates DMA memory and configures the requested queue type.
4639 *
4640 * @param sli4 SLI context.
4641 * @param qtype Type of queue to create.
4642 * @param q Pointer to the queue object.
4643 * @param n_entries Number of entries to allocate.
4644 * @param assoc Associated queue (that is, the EQ for a CQ, the CQ for a MQ, and so on).
4645 * @param ulp The ULP to bind, which is only used for WQ and RQs
4646 *
4647 * @return Returns 0 on success, or -1 otherwise.
4648 */
4649 int32_t
4650 sli_queue_alloc(sli4_t *sli4, uint32_t qtype, sli4_queue_t *q, uint32_t n_entries,
4651 sli4_queue_t *assoc, uint16_t ulp)
4652 {
4653 int32_t size;
4654 uint32_t align = 0;
4655 sli4_create_q_fn_t create = NULL;
4656
4657 if (!sli4 || !q) {
4658 ocs_log_err(NULL, "bad parameter sli4=%p q=%p\n", sli4, q);
4659 return -1;
4660 }
4661
4662 /* get queue size */
4663 size = sli_get_queue_entry_size(sli4, qtype);
4664 if (size < 0)
4665 return -1;
4666 align = SLI_PAGE_SIZE;
4667
4668 switch (qtype) {
4669 case SLI_QTYPE_EQ:
4670 create = sli_cmd_common_create_eq;
4671 break;
4672 case SLI_QTYPE_CQ:
4673 create = sli_cmd_common_create_cq;
4674 break;
4675 case SLI_QTYPE_MQ:
4676 /* Validate the number of entries */
4677 switch (n_entries) {
4678 case 16:
4679 case 32:
4680 case 64:
4681 case 128:
4682 break;
4683 default:
4684 ocs_log_test(sli4->os, "illegal n_entries value %d for MQ\n", n_entries);
4685 return -1;
4686 }
4687 assoc->u.flag.is_mq = TRUE;
4688 create = sli_cmd_common_create_mq_ext;
4689 break;
4690 case SLI_QTYPE_WQ:
4691 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
4692 if (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF) {
4693 create = sli_cmd_fcoe_wq_create;
4694 } else {
4695 create = sli_cmd_fcoe_wq_create_v1;
4696 }
4697 } else {
4698 /* TODO */
4699 ocs_log_test(sli4->os, "unsupported WQ create\n");
4700 return -1;
4701 }
4702 break;
4703 default:
4704 ocs_log_test(sli4->os, "unknown queue type %d\n", qtype);
4705 return -1;
4706 }
4707
4708 if (__sli_queue_init(sli4, q, qtype, size, n_entries, align)) {
4709 ocs_log_err(sli4->os, "%s allocation failed\n", SLI_QNAME[qtype]);
4710 return -1;
4711 }
4712
4713 if (create(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &q->dma, assoc ? assoc->id : 0, ulp)) {
4714 if (__sli_create_queue(sli4, q)) {
4715 ocs_log_err(sli4->os, "create %s failed\n", SLI_QNAME[qtype]);
4716 return -1;
4717 }
4718 q->ulp = ulp;
4719 } else {
4720 ocs_log_err(sli4->os, "cannot create %s\n", SLI_QNAME[qtype]);
4721 return -1;
4722 }
4723
4724 return 0;
4725 }
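
/*
 * Allocation-order sketch (illustrative only): queues are created bottom-up
 * so each child can reference its parent ID -- an EQ first, then a CQ bound
 * to that EQ, then an MQ (or WQ/RQ) bound to the CQ. Entry counts below are
 * arbitrary; MQ depth must be 16, 32, 64, or 128.
 *
 *	sli4_queue_t eq, cq, mq;
 *
 *	if (sli_queue_alloc(&sli, SLI_QTYPE_EQ, &eq, 1024, NULL, 0) ||
 *	    sli_queue_alloc(&sli, SLI_QTYPE_CQ, &cq, 1024, &eq, 0) ||
 *	    sli_queue_alloc(&sli, SLI_QTYPE_MQ, &mq, 128, &cq, 0)) {
 *		// unwind with sli_queue_free(&sli, ..., TRUE, TRUE)
 *	}
 */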
4726
4727 /**
4728 * @ingroup sli
4729 * @brief Allocate a set of completion queues (CQ set).
4730 *
4731 * @param sli4 SLI context.
4732 * @param qs Array of pointers to the CQ objects.
4733 * @param num_cqs Number of CQs to create.
4734 * @param n_entries Number of entries to allocate per CQ.
4735 * @param eqs Array of associated event queues.
4736 *
4737 * @return Returns 0 on success, or -1 otherwise.
4738 */
4739 int32_t
4740 sli_cq_alloc_set(sli4_t *sli4, sli4_queue_t *qs[], uint32_t num_cqs,
4741 uint32_t n_entries, sli4_queue_t *eqs[])
4742 {
4743 uint32_t i, offset = 0, page_bytes = 0, payload_size, cmd_size = 0;
4744 uint32_t p = 0, page_size = 0, n_cqe = 0, num_pages_cq;
4745 uintptr_t addr;
4746 ocs_dma_t dma;
4747 sli4_req_common_create_cq_set_v0_t *req = NULL;
4748 sli4_res_common_create_queue_set_t *res = NULL;
4749
4750 if (!sli4) {
4751 ocs_log_err(NULL, "bad parameter sli4=%p\n", sli4);
4752 return -1;
4753 }
4754
4755 memset(&dma, 0, sizeof(dma));
4756
4757 /* Align the queue DMA memory */
4758 for (i = 0; i < num_cqs; i++) {
4759 if (__sli_queue_init(sli4, qs[i], SLI_QTYPE_CQ, SLI4_CQE_BYTES,
4760 n_entries, SLI_PAGE_SIZE)) {
4761 ocs_log_err(sli4->os, "Queue init failed.\n");
4762 goto error;
4763 }
4764 }
4765
4766 n_cqe = qs[0]->dma.size / SLI4_CQE_BYTES;
4767 switch (n_cqe) {
4768 case 256:
4769 case 512:
4770 case 1024:
4771 case 2048:
4772 page_size = 1;
4773 break;
4774 case 4096:
4775 page_size = 2;
4776 break;
4777 default:
4778 return -1;
4779 }
4780
4781 page_bytes = page_size * SLI_PAGE_SIZE;
4782 num_pages_cq = sli_page_count(qs[0]->dma.size, page_bytes);
4783 cmd_size = sizeof(sli4_req_common_create_cq_set_v0_t) + (8 * num_pages_cq * num_cqs);
4784 payload_size = max((size_t)cmd_size, sizeof(sli4_res_common_create_queue_set_t));
4785
4786 if (ocs_dma_alloc(sli4->os, &dma, payload_size, SLI_PAGE_SIZE)) {
4787 ocs_log_err(sli4->os, "DMA allocation failed\n");
4788 goto error;
4789 }
4790 ocs_memset(dma.virt, 0, payload_size);
4791
4792 if (sli_cmd_sli_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
4793 payload_size, &dma) == -1) {
4794 goto error;
4795 }
4796
4797 /* Fill the request structure */
4798
4799 req = (sli4_req_common_create_cq_set_v0_t *)((uint8_t *)dma.virt);
4800 req->hdr.opcode = SLI4_OPC_COMMON_CREATE_CQ_SET;
4801 req->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
4802 req->hdr.version = 0;
4803 req->hdr.request_length = cmd_size - sizeof(sli4_req_hdr_t);
4804 req->page_size = page_size;
4805
4806 req->num_pages = num_pages_cq;
4807 switch (req->num_pages) {
4808 case 1:
4809 req->cqecnt = SLI4_CQ_CNT_256;
4810 break;
4811 case 2:
4812 req->cqecnt = SLI4_CQ_CNT_512;
4813 break;
4814 case 4:
4815 req->cqecnt = SLI4_CQ_CNT_1024;
4816 break;
4817 case 8:
4818 req->cqecnt = SLI4_CQ_CNT_LARGE;
4819 req->cqe_count = n_cqe;
4820 break;
4821 default:
4822 ocs_log_test(sli4->os, "num_pages %d not valid\n", req->num_pages);
4823 goto error;
4824 }
4825
4826 req->evt = TRUE;
4827 req->valid = TRUE;
4828 req->arm = FALSE;
4829 req->num_cq_req = num_cqs;
4830
4831 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7)
4832 req->autovalid = TRUE;
4833
4834 /* Fill page addresses of all the CQs. */
4835 for (i = 0; i < num_cqs; i++) {
4836 req->eq_id[i] = eqs[i]->id;
4837 for (p = 0, addr = qs[i]->dma.phys; p < req->num_pages; p++, addr += page_bytes) {
4838 req->page_physical_address[offset].low = ocs_addr32_lo(addr);
4839 req->page_physical_address[offset].high = ocs_addr32_hi(addr);
4840 offset++;
4841 }
4842 }
4843
4844 if (sli_bmbx_command(sli4)) {
4845 ocs_log_crit(sli4->os, "bootstrap mailbox write fail CQSet\n");
4846 goto error;
4847 }
4848
4849 res = (void *)((uint8_t *)dma.virt);
4850 if (res->hdr.status) {
4851 ocs_log_err(sli4->os, "bad create CQSet status=%#x addl=%#x\n",
4852 res->hdr.status, res->hdr.additional_status);
4853 goto error;
4854 } else {
4855 /* Check if we got all requested CQs. */
4856 if (res->num_q_allocated != num_cqs) {
4857 ocs_log_crit(sli4->os, "Requested count CQs doesnt match.\n");
4858 goto error;
4859 }
4860
4861 /* Fill the resp cq ids. */
4862 for (i = 0; i < num_cqs; i++) {
4863 qs[i]->id = res->q_id + i;
4864 qs[i]->doorbell_offset = regmap[SLI4_REG_CQ_DOORBELL][sli4->if_type].off;
4865 qs[i]->doorbell_rset = regmap[SLI4_REG_CQ_DOORBELL][sli4->if_type].rset;
4866 }
4867 }
4868
4869 ocs_dma_free(sli4->os, &dma);
4870
4871 return 0;
4872
4873 error:
4874 for (i = 0; i < num_cqs; i++) {
4875 if (qs[i]->dma.size) {
4876 ocs_dma_free(sli4->os, &qs[i]->dma);
4877 }
4878 }
4879
4880 if (dma.size) {
4881 ocs_dma_free(sli4->os, &dma);
4882 }
4883
4884 return -1;
4885 }
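
/*
 * CQ-set sketch (illustrative only): several CQs are created in a single
 * mailbox round trip, each bound to its own, previously created EQ. The
 * count of four is arbitrary.
 *
 *	sli4_queue_t eq[4], cq[4];
 *	sli4_queue_t *eqs[4] = { &eq[0], &eq[1], &eq[2], &eq[3] };
 *	sli4_queue_t *cqs[4] = { &cq[0], &cq[1], &cq[2], &cq[3] };
 *
 *	if (sli_cq_alloc_set(&sli, cqs, 4, 1024, eqs)) {
 *		// fall back to creating the CQs one at a time
 *	}
 */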
4886
4887 /**
4888 * @ingroup sli
4889 * @brief Free a queue.
4890 *
4891 * @par Description
4892 * Frees DMA memory and de-registers the requested queue.
4893 *
4894 * @param sli4 SLI context.
4895 * @param q Pointer to the queue object.
4896 * @param destroy_queues Non-zero if the mailbox commands should be sent to destroy the queues.
4897 * @param free_memory Non-zero if the DMA memory associated with the queue should be freed.
4898 *
4899 * @return Returns 0 on success, or -1 otherwise.
4900 */
4901 int32_t
4902 sli_queue_free(sli4_t *sli4, sli4_queue_t *q, uint32_t destroy_queues, uint32_t free_memory)
4903 {
4904 sli4_destroy_q_fn_t destroy = NULL;
4905 int32_t rc = -1;
4906
4907 if (!sli4 || !q) {
4908 ocs_log_err(NULL, "bad parameter sli4=%p q=%p\n", sli4, q);
4909 return -1;
4910 }
4911
4912 if (destroy_queues) {
4913 switch (q->type) {
4914 case SLI_QTYPE_EQ:
4915 destroy = sli_cmd_common_destroy_eq;
4916 break;
4917 case SLI_QTYPE_CQ:
4918 destroy = sli_cmd_common_destroy_cq;
4919 break;
4920 case SLI_QTYPE_MQ:
4921 destroy = sli_cmd_common_destroy_mq;
4922 break;
4923 case SLI_QTYPE_WQ:
4924 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
4925 destroy = sli_cmd_fcoe_wq_destroy;
4926 } else {
4927 /* TODO */
4928 ocs_log_test(sli4->os, "unsupported WQ destroy\n");
4929 return -1;
4930 }
4931 break;
4932 case SLI_QTYPE_RQ:
4933 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
4934 destroy = sli_cmd_fcoe_rq_destroy;
4935 } else {
4936 /* TODO */
4937 ocs_log_test(sli4->os, "unsupported RQ destroy\n");
4938 return -1;
4939 }
4940 break;
4941 default:
4942 ocs_log_test(sli4->os, "bad queue type %d\n",
4943 q->type);
4944 return -1;
4945 }
4946
4947 /*
4948 		 * Destroying queues is not supported on BE3 (version 0 interface type).
4949 		 * Rely on COMMON_FUNCTION_RESET to free host-allocated queue resources
4950 * inside the SLI Port.
4951 */
4952 if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) {
4953 destroy = NULL;
4954 }
4955
4956 /* Destroy the queue if the operation is defined */
4957 if (destroy && destroy(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, q->id)) {
4958 sli4_res_hdr_t *res = NULL;
4959
4960 if (sli_bmbx_command(sli4)){
4961 ocs_log_crit(sli4->os, "bootstrap mailbox write fail destroy %s\n",
4962 SLI_QNAME[q->type]);
4963 } else if (sli_res_sli_config(sli4->bmbx.virt)) {
4964 ocs_log_err(sli4->os, "bad status destroy %s\n", SLI_QNAME[q->type]);
4965 } else {
4966 res = (void *)((uint8_t *)sli4->bmbx.virt +
4967 offsetof(sli4_cmd_sli_config_t, payload));
4968
4969 if (res->status) {
4970 ocs_log_err(sli4->os, "bad destroy %s status=%#x addl=%#x\n",
4971 SLI_QNAME[q->type],
4972 res->status, res->additional_status);
4973 } else {
4974 rc = 0;
4975 }
4976 }
4977 }
4978 }
4979
4980 if (free_memory) {
4981 ocs_lock_free(&q->lock);
4982
4983 if (ocs_dma_free(sli4->os, &q->dma)) {
4984 ocs_log_err(sli4->os, "%s queue ID %d free failed\n",
4985 SLI_QNAME[q->type], q->id);
4986 rc = -1;
4987 }
4988 }
4989
4990 return rc;
4991 }
4992
4993 int32_t
4994 sli_queue_reset(sli4_t *sli4, sli4_queue_t *q)
4995 {
4996
4997 ocs_lock(&q->lock);
4998
4999 q->index = 0;
5000 q->n_posted = 0;
5001
5002 if (SLI_QTYPE_MQ == q->type) {
5003 q->u.r_idx = 0;
5004 }
5005
5006 if (q->dma.virt != NULL) {
5007 ocs_memset(q->dma.virt, 0, (q->size * (uint64_t)q->length));
5008 }
5009
5010 ocs_unlock(&q->lock);
5011
5012 return 0;
5013 }
5014
5015 /**
5016 * @ingroup sli
5017 * @brief Check if the given queue is empty.
5018 *
5019 * @par Description
5020 * If the valid bit of the current entry is unset, the queue is empty.
5021 *
5022 * @param sli4 SLI context.
5023 * @param q Pointer to the queue object.
5024 *
5025 * @return Returns TRUE if empty, or FALSE otherwise.
5026 */
5027 int32_t
5028 sli_queue_is_empty(sli4_t *sli4, sli4_queue_t *q)
5029 {
5030 int32_t rc = TRUE;
5031 uint8_t *qe = q->dma.virt;
5032
5033 ocs_lock(&q->lock);
5034
5035 ocs_dma_sync(&q->dma, OCS_DMASYNC_POSTREAD);
5036
5037 qe += q->index * q->size;
5038
5039 rc = !sli_queue_entry_is_valid(q, qe, FALSE);
5040
5041 ocs_unlock(&q->lock);
5042
5043 return rc;
5044 }
5045
5046 /**
5047 * @ingroup sli
5048 * @brief Arm an EQ.
5049 *
5050 * @param sli4 SLI context.
5051 * @param q Pointer to queue object.
5052 * @param arm If TRUE, arm the EQ.
5053 *
5054 * @return Returns 0 on success, or non-zero otherwise.
5055 */
5056 int32_t
5057 sli_queue_eq_arm(sli4_t *sli4, sli4_queue_t *q, uint8_t arm)
5058 {
5059 uint32_t val = 0;
5060
5061 ocs_lock(&q->lock);
5062 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7)
5063 val = sli_iftype6_eq_doorbell(q->n_posted, q->id, arm);
5064 else
5065 val = sli_eq_doorbell(q->n_posted, q->id, arm);
5066
5067 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
5068 q->n_posted = 0;
5069 ocs_unlock(&q->lock);
5070
5071 return 0;
5072 }
5073
5074 /**
5075 * @ingroup sli
5076 * @brief Arm a queue.
5077 *
5078 * @param sli4 SLI context.
5079 * @param q Pointer to queue object.
5080 * @param arm If TRUE, arm the queue.
5081 *
5082 * @return Returns 0 on success, or non-zero otherwise.
5083 */
5084 int32_t
5085 sli_queue_arm(sli4_t *sli4, sli4_queue_t *q, uint8_t arm)
5086 {
5087 uint32_t val = 0;
5088
5089 ocs_lock(&q->lock);
5090
5091 switch (q->type) {
5092 case SLI_QTYPE_EQ:
5093 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7)
5094 val = sli_iftype6_eq_doorbell(q->n_posted, q->id, arm);
5095 else
5096 val = sli_eq_doorbell(q->n_posted, q->id, arm);
5097 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
5098 q->n_posted = 0;
5099 break;
5100 case SLI_QTYPE_CQ:
5101 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7)
5102 val = sli_iftype6_cq_doorbell(q->n_posted, q->id, arm);
5103 else
5104 val = sli_cq_doorbell(q->n_posted, q->id, arm);
5105 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
5106 q->n_posted = 0;
5107 break;
5108 default:
5109 ocs_log_test(sli4->os, "should only be used for EQ/CQ, not %s\n",
5110 SLI_QNAME[q->type]);
5111 }
5112
5113 ocs_unlock(&q->lock);
5114
5115 return 0;
5116 }
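
/*
 * Re-arm sketch (illustrative only): after an interrupt, the host consumes
 * entries and then reports the processed count while re-arming the queue.
 * The eq_next()/dispatch_cqe() helpers are hypothetical stand-ins for the
 * EQE/CQE read path.
 *
 *	while (!sli_queue_is_empty(&sli, &eq)) {
 *		eq_next(&sli, &eq, eqe);	// pops one EQE and bumps eq.n_posted
 *		dispatch_cqe(...);
 *	}
 *	sli_queue_eq_arm(&sli, &eq, TRUE);	// writes n_posted to the doorbell and re-arms
 */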
5117
5118 /**
5119 * @ingroup sli
5120 * @brief Write an entry to the queue object.
5121 *
5122 * Note: Assumes the q->lock will be locked and released by the caller.
5123 *
5124 * @param sli4 SLI context.
5125 * @param q Pointer to the queue object.
5126 * @param entry Pointer to the entry contents.
5127 *
5128 * @return Returns queue index on success, or negative error value otherwise.
5129 */
5130 int32_t
5131 _sli_queue_write(sli4_t *sli4, sli4_queue_t *q, uint8_t *entry)
5132 {
5133 int32_t rc = 0;
5134 uint8_t *qe = q->dma.virt;
5135 uint32_t qindex;
5136
5137 qindex = q->index;
5138 qe += q->index * q->size;
5139
5140 if (entry) {
5141 if ((SLI_QTYPE_WQ == q->type) && sli4->config.perf_wq_id_association) {
5142 sli_set_wq_id_association(entry, q->id);
5143 }
5144 #if defined(OCS_INCLUDE_DEBUG)
5145 switch (q->type) {
5146 case SLI_QTYPE_WQ: {
5147 ocs_dump32(OCS_DEBUG_ENABLE_WQ_DUMP, sli4->os, "wqe", entry, q->size);
5148 break;
5149 }
5150 case SLI_QTYPE_MQ:
5151 /* Note: we don't really need to dump the whole
5152 * 256 bytes, just do 64 */
5153 ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, "mqe outbound", entry, 64);
5154 break;
5155
5156 default:
5157 break;
5158 }
5159 #endif
5160 ocs_memcpy(qe, entry, q->size);
5161 q->n_posted = 1;
5162 }
5163
5164 ocs_dma_sync(&q->dma, OCS_DMASYNC_PREWRITE);
5165
5166 rc = sli_queue_doorbell(sli4, q);
5167
5168 q->index = (q->index + q->n_posted) & (q->length - 1);
5169 q->n_posted = 0;
5170
5171 if (rc < 0) {
5172 /* failure */
5173 return rc;
5174 } else if (rc > 0) {
5175 		/* doorbell write failed; convert to a negative return value */
5176 return -rc;
5177 } else {
5178 return qindex;
5179 }
5180 }
5181
5182 /**
5183 * @ingroup sli
5184 * @brief Write an entry to the queue object.
5185 *
5186 * Note: Assumes the q->lock will be locked and released by the caller.
5187 *
5188 * @param sli4 SLI context.
5189 * @param q Pointer to the queue object.
5190 * @param entry Pointer to the entry contents.
5191 *
5192 * @return Returns queue index on success, or negative error value otherwise.
5193 */
5194 int32_t
5195 sli_queue_write(sli4_t *sli4, sli4_queue_t *q, uint8_t *entry)
5196 {
5197 int32_t rc;
5198
5199 ocs_lock(&q->lock);
5200 rc = _sli_queue_write(sli4, q, entry);
5201 ocs_unlock(&q->lock);
5202
5203 return rc;
5204 }
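
/*
 * Usage sketch (illustrative only): post one previously built entry to a work
 * queue. "wqe" is a caller buffer of wq->size bytes filled by one of the
 * sli_*_wqe() builders later in this file; a negative return means the
 * doorbell write failed and the entry must be treated as not submitted.
 *
 *	int32_t index;
 *
 *	index = sli_queue_write(sli4, wq, wqe);
 *	if (index < 0) {
 *		;	// retry or fail the request
 *	}
 */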
5205
5206 /**
5207 * @brief Check if the current queue entry is valid.
5208 *
5209 * @param q Pointer to the queue object.
5210 * @param qe Pointer to the queue entry.
5211 * @param clear Boolean to clear valid bit.
5212 *
5213 * @return Returns TRUE if the entry is valid, or FALSE otherwise.
5214 */
5215 static uint8_t
5216 sli_queue_entry_is_valid(sli4_queue_t *q, uint8_t *qe, uint8_t clear)
5217 {
5218 uint8_t valid = FALSE;
5219 uint8_t valid_bit_set = 0;
5220
5221 switch (q->type) {
5222 case SLI_QTYPE_EQ:
5223 valid = (((sli4_eqe_t *)qe)->vld == q->phase) ? 1 : 0;
5224 if (valid && clear) {
5225 ((sli4_eqe_t *)qe)->vld = 0;
5226 }
5227 break;
5228 case SLI_QTYPE_CQ:
5229 /*
5230 * For both MCQE and WCQE/RCQE, the valid bit
5231 * is bit 31 of dword 3 (0 based)
5232 */
5233 valid_bit_set = (qe[15] & 0x80) != 0;
5234 if (valid_bit_set == q->phase)
5235 valid = 1;
5236
5237 		if (valid && clear) {
5238 qe[15] &= ~0x80;
5239 }
5240 break;
5241 case SLI_QTYPE_MQ:
5242 valid = q->index != q->u.r_idx;
5243 break;
5244 case SLI_QTYPE_RQ:
5245 valid = TRUE;
5246 clear = FALSE;
5247 break;
5248 default:
5249 ocs_log_test(NULL, "doesn't handle type=%#x\n", q->type);
5250 }
5251
5252 if (clear) {
5253
5254 ocs_dma_sync(&q->dma, OCS_DMASYNC_PREWRITE);
5255 }
5256
5257 return valid;
5258 }
5259
5260 /**
5261 * @ingroup sli
5262 * @brief Read an entry from the queue object.
5263 *
5264 * @param sli4 SLI context.
5265 * @param q Pointer to the queue object.
5266 * @param entry Destination pointer for the queue entry contents.
5267 *
5268 * @return Returns 0 on success, or non-zero otherwise.
5269 */
5270 int32_t
5271 sli_queue_read(sli4_t *sli4, sli4_queue_t *q, uint8_t *entry)
5272 {
5273 int32_t rc = 0;
5274 uint8_t *qe = q->dma.virt;
5275 uint32_t *qindex = NULL;
5276
5277 uint8_t clear = (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(sli4)) ? FALSE : TRUE;
5278 if (SLI_QTYPE_MQ == q->type) {
5279 qindex = &q->u.r_idx;
5280 } else {
5281 qindex = &q->index;
5282 }
5283
5284 ocs_lock(&q->lock);
5285
5286 ocs_dma_sync(&q->dma, OCS_DMASYNC_POSTREAD);
5287
5288 qe += *qindex * q->size;
5289
5290 if (!sli_queue_entry_is_valid(q, qe, clear)) {
5291 ocs_unlock(&q->lock);
5292 return -1;
5293 }
5294
5295 if (entry) {
5296 ocs_memcpy(entry, qe, q->size);
5297 #if defined(OCS_INCLUDE_DEBUG)
5298 switch(q->type) {
5299 case SLI_QTYPE_CQ:
5300 ocs_dump32(OCS_DEBUG_ENABLE_CQ_DUMP, sli4->os, "cq", entry, q->size);
5301 break;
5302 case SLI_QTYPE_MQ:
5303 ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, "mq Compl", entry, 64);
5304 break;
5305 case SLI_QTYPE_EQ:
5306 ocs_dump32(OCS_DEBUG_ENABLE_EQ_DUMP, sli4->os, "eq Compl", entry, q->size);
5307 break;
5308 default:
5309 break;
5310 }
5311 #endif
5312 }
5313
5314 switch (q->type) {
5315 case SLI_QTYPE_EQ:
5316 case SLI_QTYPE_CQ:
5317 case SLI_QTYPE_MQ:
5318 *qindex = (*qindex + 1) & (q->length - 1);
5319 if (SLI_QTYPE_MQ != q->type) {
5320 q->n_posted++;
5321 /*
5322 * For prism, the phase value will be used to check the validity of eq/cq entries.
5323 * The value toggles after a complete sweep through the queue.
5324 */
5325 if ((SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(sli4)) && (*qindex == 0)) {
5326 q->phase ^= (uint16_t) 0x1;
5327 }
5328 }
5329 break;
5330 default:
5331 /* reads don't update the index */
5332 break;
5333 }
5334
5335 ocs_unlock(&q->lock);
5336
5337 return rc;
5338 }
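
/*
 * Usage sketch (illustrative only): consume a single entry if one is pending.
 * A return of 0 means a valid EQE/CQE/MQE was copied into "entry"; -1 means
 * the queue is currently empty. The buffer must hold at least q->size bytes
 * (MQEs are the largest at 256 bytes).
 *
 *	uint8_t entry[256];
 *
 *	if (sli_queue_read(sli4, q, entry) == 0) {
 *		;	// act on the entry according to q->type
 *	}
 */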
5339
5340 int32_t
5341 sli_queue_index(sli4_t *sli4, sli4_queue_t *q)
5342 {
5343
5344 if (q) {
5345 return q->index;
5346 } else {
5347 return -1;
5348 }
5349 }
5350
5351 int32_t
5352 sli_queue_poke(sli4_t *sli4, sli4_queue_t *q, uint32_t index, uint8_t *entry)
5353 {
5354 int32_t rc;
5355
5356 ocs_lock(&q->lock);
5357 rc = _sli_queue_poke(sli4, q, index, entry);
5358 ocs_unlock(&q->lock);
5359
5360 return rc;
5361 }
5362
5363 int32_t
5364 _sli_queue_poke(sli4_t *sli4, sli4_queue_t *q, uint32_t index, uint8_t *entry)
5365 {
5366 int32_t rc = 0;
5367 uint8_t *qe = q->dma.virt;
5368
5369 if (index >= q->length) {
5370 return -1;
5371 }
5372
5373 qe += index * q->size;
5374
5375 if (entry) {
5376 ocs_memcpy(qe, entry, q->size);
5377 }
5378
5379 ocs_dma_sync(&q->dma, OCS_DMASYNC_PREWRITE);
5380
5381 return rc;
5382 }
5383
5384 /**
5385 * @ingroup sli
5386 * @brief Allocate SLI Port resources.
5387 *
5388 * @par Description
5389 * Allocate port-related resources, such as VFI, RPI, XRI, and so on.
5390 * Resources are modeled using extents, regardless of whether the underlying
5391 * device implements resource extents. If the device does not implement
5392 * extents, the SLI layer models this as a single (albeit large) extent.
5393 *
5394 * @param sli4 SLI context.
5395 * @param rtype Resource type (for example, RPI or XRI)
5396 * @param rid Allocated resource ID.
5397 * @param index Index into the bitmap.
5398 *
5399 * @return Returns 0 on success, or a non-zero value on failure.
5400 */
5401 int32_t
5402 sli_resource_alloc(sli4_t *sli4, sli4_resource_e rtype, uint32_t *rid, uint32_t *index)
5403 {
5404 int32_t rc = 0;
5405 uint32_t size;
5406 uint32_t extent_idx;
5407 uint32_t item_idx;
5408 int status;
5409
5410 *rid = UINT32_MAX;
5411 *index = UINT32_MAX;
5412
5413 switch (rtype) {
5414 case SLI_RSRC_FCOE_VFI:
5415 case SLI_RSRC_FCOE_VPI:
5416 case SLI_RSRC_FCOE_RPI:
5417 case SLI_RSRC_FCOE_XRI:
5418 status = ocs_bitmap_find(sli4->config.extent[rtype].use_map,
5419 sli4->config.extent[rtype].map_size);
5420 if (status < 0) {
5421 ocs_log_err(sli4->os, "out of resource %d (alloc=%d)\n",
5422 rtype, sli4->config.extent[rtype].n_alloc);
5423 rc = -1;
5424 break;
5425 } else {
5426 *index = status;
5427 }
5428
5429 size = sli4->config.extent[rtype].size;
5430
5431 extent_idx = *index / size;
5432 item_idx = *index % size;
5433
5434 *rid = sli4->config.extent[rtype].base[extent_idx] + item_idx;
5435
5436 sli4->config.extent[rtype].n_alloc++;
5437 break;
5438 default:
5439 rc = -1;
5440 }
5441
5442 return rc;
5443 }
5444
5445 /**
5446 * @ingroup sli
5447 * @brief Free the SLI Port resources.
5448 *
5449 * @par Description
5450 * Free port-related resources, such as VFI, RPI, XRI, and so. See discussion of
5451 * "extent" usage in sli_resource_alloc.
5452 *
5453 * @param sli4 SLI context.
5454 * @param rtype Resource type (for example, RPI or XRI).
5455 * @param rid Allocated resource ID.
5456 *
5457 * @return Returns 0 on success, or a non-zero value on failure.
5458 */
5459 int32_t
5460 sli_resource_free(sli4_t *sli4, sli4_resource_e rtype, uint32_t rid)
5461 {
5462 int32_t rc = -1;
5463 uint32_t x;
5464 uint32_t size, *base;
5465
5466 switch (rtype) {
5467 case SLI_RSRC_FCOE_VFI:
5468 case SLI_RSRC_FCOE_VPI:
5469 case SLI_RSRC_FCOE_RPI:
5470 case SLI_RSRC_FCOE_XRI:
5471 /*
5472 * Figure out which extent contains the resource ID. I.e. find
5473 * the extent such that
5474 * extent->base <= resource ID < extent->base + extent->size
5475 */
5476 base = sli4->config.extent[rtype].base;
5477 size = sli4->config.extent[rtype].size;
5478
5479 /*
5480 * In the case of FW reset, this may be cleared but the force_free path will
5481 * still attempt to free the resource. Prevent a NULL pointer access.
5482 */
5483 if (base != NULL) {
5484 for (x = 0; x < sli4->config.extent[rtype].number; x++) {
5485 if ((rid >= base[x]) && (rid < (base[x] + size))) {
5486 rid -= base[x];
5487 ocs_bitmap_clear(sli4->config.extent[rtype].use_map,
5488 (x * size) + rid);
5489 rc = 0;
5490 break;
5491 }
5492 }
5493 }
5494 break;
5495 default:
5496 ;
5497 }
5498
5499 return rc;
5500 }
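
/*
 * Usage sketch (illustrative only): allocate an XRI for a new exchange and
 * release it once the exchange completes. "rid" is the resource ID programmed
 * into WQEs; "index" is the position in the driver-side use bitmap.
 *
 *	uint32_t xri, index;
 *
 *	if (sli_resource_alloc(sli4, SLI_RSRC_FCOE_XRI, &xri, &index) == 0) {
 *		;	// use "xri" when building WQEs, e.g. sli_fcp_iread64_wqe()
 *		sli_resource_free(sli4, SLI_RSRC_FCOE_XRI, xri);
 *	}
 */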
5501
5502 int32_t
5503 sli_resource_reset(sli4_t *sli4, sli4_resource_e rtype)
5504 {
5505 int32_t rc = -1;
5506 uint32_t i;
5507
5508 switch (rtype) {
5509 case SLI_RSRC_FCOE_VFI:
5510 case SLI_RSRC_FCOE_VPI:
5511 case SLI_RSRC_FCOE_RPI:
5512 case SLI_RSRC_FCOE_XRI:
5513 for (i = 0; i < sli4->config.extent[rtype].map_size; i++) {
5514 ocs_bitmap_clear(sli4->config.extent[rtype].use_map, i);
5515 }
5516 rc = 0;
5517 break;
5518 default:
5519 ;
5520 }
5521
5522 return rc;
5523 }
5524
5525 /**
5526 * @ingroup sli
5527 * @brief Parse an EQ entry to retrieve the CQ_ID for this event.
5528 *
5529 * @param sli4 SLI context.
5530 * @param buf Pointer to the EQ entry.
5531 * @param cq_id CQ_ID for this entry (only valid on success).
5532 *
5533 * @return
5534 * - 0 if success.
5535 * - < 0 if error.
5536 * - > 0 if firmware detects EQ overflow.
5537 */
5538 int32_t
5539 sli_eq_parse(sli4_t *sli4, uint8_t *buf, uint16_t *cq_id)
5540 {
5541 sli4_eqe_t *eqe = (void *)buf;
5542 int32_t rc = 0;
5543
5544 if (!sli4 || !buf || !cq_id) {
5545 ocs_log_err(NULL, "bad parameters sli4=%p buf=%p cq_id=%p\n",
5546 sli4, buf, cq_id);
5547 return -1;
5548 }
5549
5550 switch (eqe->major_code) {
5551 case SLI4_MAJOR_CODE_STANDARD:
5552 *cq_id = eqe->resource_id;
5553 break;
5554 case SLI4_MAJOR_CODE_SENTINEL:
5555 ocs_log_debug(sli4->os, "sentinel EQE\n");
5556 rc = 1;
5557 break;
5558 default:
5559 ocs_log_test(sli4->os, "Unsupported EQE: major %x minor %x\n",
5560 eqe->major_code, eqe->minor_code);
5561 rc = -1;
5562 }
5563
5564 return rc;
5565 }
5566
5567 /**
5568 * @ingroup sli
5569 * @brief Parse a CQ entry to retrieve the event type and the associated queue.
5570 *
5571 * @param sli4 SLI context.
5572 * @param cq CQ to process.
5573 * @param cqe Pointer to the CQ entry.
5574 * @param etype CQ event type.
5575 * @param q_id Queue ID associated with this completion message
5576 * (that is, MQ_ID, RQ_ID, and so on).
5577 *
5578 * @return
5579 * - 0 if call completed correctly and CQE status is SUCCESS.
5580 * - -1 if call failed (no CQE status).
5581 * - Other value if call completed correctly and return value is a CQE status value.
5582 */
5583 int32_t
5584 sli_cq_parse(sli4_t *sli4, sli4_queue_t *cq, uint8_t *cqe, sli4_qentry_e *etype,
5585 uint16_t *q_id)
5586 {
5587 int32_t rc = 0;
5588
5589 if (!sli4 || !cq || !cqe || !etype) {
5590 ocs_log_err(NULL, "bad parameters sli4=%p cq=%p cqe=%p etype=%p q_id=%p\n",
5591 sli4, cq, cqe, etype, q_id);
5592 return -1;
5593 }
5594
5595 if (cq->u.flag.is_mq) {
5596 sli4_mcqe_t *mcqe = (void *)cqe;
5597
5598 if (mcqe->ae) {
5599 *etype = SLI_QENTRY_ASYNC;
5600 } else {
5601 *etype = SLI_QENTRY_MQ;
5602 rc = sli_cqe_mq(mcqe);
5603 }
5604 *q_id = -1;
5605 } else if (SLI4_PORT_TYPE_FC == sli4->port_type) {
5606 rc = sli_fc_cqe_parse(sli4, cq, cqe, etype, q_id);
5607 } else {
5608 ocs_log_test(sli4->os, "implement CQE parsing type = %#x\n",
5609 sli4->port_type);
5610 rc = -1;
5611 }
5612
5613 return rc;
5614 }
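
/*
 * Usage sketch (illustrative only): the typical event-processing flow parses
 * each EQE for its CQ_ID and then parses that CQ's entries for the event type
 * and originating queue. "lookup_cq" is a hypothetical caller-provided helper
 * that maps a CQ_ID to its sli4_queue_t; "eqe" and "cqe" are caller buffers
 * as in the sketches above.
 *
 *	uint16_t cq_id, q_id;
 *	sli4_qentry_e etype;
 *	sli4_queue_t *cq;
 *	int32_t status;
 *
 *	if (sli_eq_parse(sli4, eqe, &cq_id) == 0) {
 *		cq = lookup_cq(cq_id);
 *		while (sli_queue_read(sli4, cq, cqe) == 0) {
 *			status = sli_cq_parse(sli4, cq, cqe, &etype, &q_id);
 *			;	// status > 0 is a CQE status value, < 0 a parse error
 *		}
 *	}
 */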
5615
5616 /**
5617 * @ingroup sli
5618 * @brief Cause chip to enter an unrecoverable error state.
5619 *
5620 * @par Description
5621 * Cause chip to enter an unrecoverable error state. This is
5622 * used when detecting unexpected FW behavior so FW can be
5623  * halted from the driver as soon as an error is detected.
5624 *
5625 * @param sli4 SLI context.
5626 * @param dump Generate dump as part of reset.
5627 *
5628 * @return Returns 0 if call completed correctly, or -1 if call failed (unsupported chip).
5629 */
5630 int32_t sli_raise_ue(sli4_t *sli4, uint8_t dump)
5631 {
5632 #define FDD 2
5633 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(sli4)) {
5634 switch(sli_get_asic_type(sli4)) {
5635 case SLI4_ASIC_TYPE_BE3: {
5636 sli_reg_write(sli4, SLI4_REG_SW_UE_CSR1, 0xffffffff);
5637 sli_reg_write(sli4, SLI4_REG_SW_UE_CSR2, 0);
5638 break;
5639 }
5640 case SLI4_ASIC_TYPE_SKYHAWK: {
5641 uint32_t value;
5642 value = ocs_config_read32(sli4->os, SLI4_SW_UE_REG);
5643 ocs_config_write32(sli4->os, SLI4_SW_UE_REG, (value | (1U << 24)));
5644 break;
5645 }
5646 default:
5647 ocs_log_test(sli4->os, "invalid asic type %d\n", sli_get_asic_type(sli4));
5648 return -1;
5649 }
5650 } else if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(sli4)) ||
5651 (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(sli4))) {
5652 if (FDD == dump) {
5653 sli_reg_write(sli4, SLI4_REG_SLIPORT_CONTROL, SLI4_SLIPORT_CONTROL_FDD | SLI4_SLIPORT_CONTROL_IP);
5654 } else {
5655 uint32_t value = SLI4_PHYDEV_CONTROL_FRST;
5656 if (dump == 1) {
5657 value |= SLI4_PHYDEV_CONTROL_DD;
5658 }
5659 sli_reg_write(sli4, SLI4_REG_PHYSDEV_CONTROL, value);
5660 }
5661 } else {
5662 ocs_log_test(sli4->os, "invalid iftype=%d\n", sli_get_if_type(sli4));
5663 return -1;
5664 }
5665 return 0;
5666 }
5667
5668 /**
5669 * @ingroup sli
5670 * @brief Read the SLIPORT_STATUS register to check if a dump is present.
5671 *
5672 * @param sli4 SLI context.
5673 *
5674  * @return Returns 1 if a dump is ready, 0 if a dump is not ready, or 2 if an FDP dump is present.
5675 */
5676 int32_t sli_dump_is_ready(sli4_t *sli4)
5677 {
5678 int32_t rc = 0;
5679 uint32_t port_val;
5680 uint32_t bmbx_val;
5681 uint32_t uerr_lo;
5682 uint32_t uerr_hi;
5683 uint32_t uerr_mask_lo;
5684 uint32_t uerr_mask_hi;
5685
5686 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(sli4)) {
5687 /* for iftype=0, dump ready when UE is encountered */
5688 uerr_lo = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_LO);
5689 uerr_hi = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_HI);
5690 uerr_mask_lo = sli_reg_read(sli4, SLI4_REG_UERR_MASK_LO);
5691 uerr_mask_hi = sli_reg_read(sli4, SLI4_REG_UERR_MASK_HI);
5692 if ((uerr_lo & ~uerr_mask_lo) || (uerr_hi & ~uerr_mask_hi)) {
5693 rc = 1;
5694 }
5695
5696 } else if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(sli4)) ||
5697 (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(sli4))) {
5698 /*
5699 * Ensure that the port is ready AND the mailbox is
5700 * ready before signaling that the dump is ready to go.
5701 */
5702 port_val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
5703 bmbx_val = sli_reg_read(sli4, SLI4_REG_BMBX);
5704
5705 if ((bmbx_val & SLI4_BMBX_RDY) &&
5706 SLI4_PORT_STATUS_READY(port_val)) {
5707 			if (SLI4_PORT_STATUS_DUMP_PRESENT(port_val)) {
5708 				rc = 1;
5709 			} else if (SLI4_PORT_STATUS_FDP_PRESENT(port_val)) {
5710 rc = 2;
5711 }
5712 }
5713 } else {
5714 ocs_log_test(sli4->os, "invalid iftype=%d\n", sli_get_if_type(sli4));
5715 return -1;
5716 }
5717 return rc;
5718 }
5719
5720 /**
5721 * @ingroup sli
5722 * @brief Read the SLIPORT_STATUS register to check if a dump is present.
5723 *
5724 * @param sli4 SLI context.
5725 *
5726 * @return
5727 * - 0 if call completed correctly and no dump is present.
5728 * - 1 if call completed and dump is present.
5729 * - -1 if call failed (unsupported chip).
5730 */
5731 int32_t sli_dump_is_present(sli4_t *sli4)
5732 {
5733 uint32_t val;
5734 uint32_t ready;
5735
5736 if ((SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(sli4)) &&
5737 (SLI4_IF_TYPE_LANCER_G7 != sli_get_if_type(sli4))) {
5738 		ocs_log_test(sli4->os, "Function only supported for I/F types 2 and 6\n");
5739 return -1;
5740 }
5741
5742 /* If the chip is not ready, then there cannot be a dump */
5743 ready = sli_wait_for_fw_ready(sli4, SLI4_INIT_PORT_DELAY_US);
5744 if (!ready) {
5745 return 0;
5746 }
5747
5748 val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
5749 if (UINT32_MAX == val) {
5750 ocs_log_err(sli4->os, "error reading SLIPORT_STATUS\n");
5751 return -1;
5752 } else {
5753 return ((val & SLI4_PORT_STATUS_DIP) ? 1 : 0);
5754 }
5755 }
5756
5757 /**
5758 * @ingroup sli
5759 * @brief Read the SLIPORT_STATUS register to check if the reset required is set.
5760 *
5761 * @param sli4 SLI context.
5762 *
5763 * @return
5764 * - 0 if call completed correctly and reset is not required.
5765 * - 1 if call completed and reset is required.
5766 * - -1 if call failed.
5767 */
5768 int32_t sli_reset_required(sli4_t *sli4)
5769 {
5770 uint32_t val;
5771
5772 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(sli4)) {
5773 ocs_log_test(sli4->os, "reset required N/A for iftype 0\n");
5774 return 0;
5775 }
5776
5777 val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
5778 if (UINT32_MAX == val) {
5779 ocs_log_err(sli4->os, "error reading SLIPORT_STATUS\n");
5780 return -1;
5781 } else {
5782 return ((val & SLI4_PORT_STATUS_RN) ? 1 : 0);
5783 }
5784 }
5785
5786 /**
5787 * @ingroup sli
5788 * @brief Read the SLIPORT_SEMAPHORE and SLIPORT_STATUS registers to check if
5789 * the port status indicates that a FW error has occurred.
5790 *
5791 * @param sli4 SLI context.
5792 *
5793 * @return
5794 * - 0 if call completed correctly and no FW error occurred.
5795 * - > 0 which indicates that a FW error has occurred.
5796 * - -1 if call failed.
5797 */
5798 int32_t sli_fw_error_status(sli4_t *sli4)
5799 {
5800 uint32_t sliport_semaphore;
5801 int32_t rc = 0;
5802
5803 sliport_semaphore = sli_reg_read(sli4, SLI4_REG_SLIPORT_SEMAPHORE);
5804 if (UINT32_MAX == sliport_semaphore) {
5805 ocs_log_err(sli4->os, "error reading SLIPORT_SEMAPHORE register\n");
5806 return 1;
5807 }
5808 rc = (SLI4_PORT_SEMAPHORE_IN_ERR(sliport_semaphore) ? 1 : 0);
5809
5810 if (rc == 0) {
5811 if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type ||
5812 (SLI4_IF_TYPE_BE3_SKH_VF == sli4->if_type)) {
5813 uint32_t uerr_mask_lo, uerr_mask_hi;
5814 uint32_t uerr_status_lo, uerr_status_hi;
5815
5816 uerr_mask_lo = sli_reg_read(sli4, SLI4_REG_UERR_MASK_LO);
5817 uerr_mask_hi = sli_reg_read(sli4, SLI4_REG_UERR_MASK_HI);
5818 uerr_status_lo = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_LO);
5819 uerr_status_hi = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_HI);
5820 if ((uerr_mask_lo & uerr_status_lo) != 0 ||
5821 (uerr_mask_hi & uerr_status_hi) != 0) {
5822 rc = 1;
5823 }
5824 } else if (SLI4_IF_TYPE_LANCER_FC_ETH == sli4->if_type ||
5825 SLI4_IF_TYPE_LANCER_G7 == sli4->if_type) {
5826 uint32_t sliport_status;
5827
5828 sliport_status = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
5829 rc = (SLI4_PORT_STATUS_ERROR(sliport_status) ? 1 : 0);
5830 }
5831 }
5832 return rc;
5833 }
5834
5835 /**
5836 * @ingroup sli
5837 * @brief Determine if the chip FW is in a ready state
5838 *
5839 * @param sli4 SLI context.
5840 *
5841 * @return
5842 * - 0 if call completed correctly and FW is not ready.
5843 * - 1 if call completed correctly and FW is ready.
5844 * - -1 if call failed.
5845 */
5846 int32_t
5847 sli_fw_ready(sli4_t *sli4)
5848 {
5849 uint32_t val;
5850 int32_t rc = -1;
5851
5852 /*
5853 * Is firmware ready for operation? Check needed depends on IF_TYPE
5854 */
5855 if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type ||
5856 SLI4_IF_TYPE_BE3_SKH_VF == sli4->if_type) {
5857 val = sli_reg_read(sli4, SLI4_REG_SLIPORT_SEMAPHORE);
5858 rc = ((SLI4_PORT_SEMAPHORE_STATUS_POST_READY ==
5859 SLI4_PORT_SEMAPHORE_PORT(val)) &&
5860 (!SLI4_PORT_SEMAPHORE_IN_ERR(val)) ? 1 : 0);
5861 } else if (SLI4_IF_TYPE_LANCER_FC_ETH == sli4->if_type ||
5862 SLI4_IF_TYPE_LANCER_G7 == sli4->if_type) {
5863 val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
5864 rc = (SLI4_PORT_STATUS_READY(val) ? 1 : 0);
5865 }
5866 return rc;
5867 }
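
/*
 * Usage sketch (illustrative only): bounded poll for firmware readiness.
 * ocs_udelay() is assumed to be the OS delay primitive used elsewhere in this
 * driver; the iteration count is arbitrary.
 *
 *	int32_t i;
 *
 *	for (i = 0; i < 1000; i++) {
 *		if (sli_fw_ready(sli4) == 1) {
 *			break;	// POST completed and no error is reported
 *		}
 *		ocs_udelay(SLI4_INIT_PORT_DELAY_US);
 *	}
 */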
5868
5869 /**
5870 * @ingroup sli
5871 * @brief Determine if the link can be configured
5872 *
5873 * @param sli4 SLI context.
5874 *
5875 * @return
5876 * - 0 if link is not configurable.
5877 * - 1 if link is configurable.
5878 */
5879 int32_t sli_link_is_configurable(sli4_t *sli)
5880 {
5881 int32_t rc = 0;
5882 /*
5883 * Link config works on: Skyhawk and Lancer
5884 * Link config does not work on: LancerG6
5885 */
5886
5887 switch (sli_get_asic_type(sli)) {
5888 case SLI4_ASIC_TYPE_SKYHAWK:
5889 case SLI4_ASIC_TYPE_LANCER:
5890 case SLI4_ASIC_TYPE_CORSAIR:
5891 rc = 1;
5892 break;
5893 case SLI4_ASIC_TYPE_LANCERG6:
5894 case SLI4_ASIC_TYPE_LANCERG7:
5895 case SLI4_ASIC_TYPE_BE3:
5896 default:
5897 rc = 0;
5898 break;
5899 }
5900
5901 return rc;
5902
5903 }
5904
5905 /* vim: set noexpandtab textwidth=120: */
5906
5907 /**
5908 * @ingroup sli_fc
5909 * @brief Write an FCOE_WQ_CREATE command.
5910 *
5911 * @param sli4 SLI context.
5912 * @param buf Destination buffer for the command.
5913 * @param size Buffer size, in bytes.
5914 * @param qmem DMA memory for the queue.
5915 * @param cq_id Associated CQ_ID.
5916 * @param ulp The ULP to bind
5917 *
5918 * @note This creates a Version 0 message.
5919 *
5920 * @return Returns the number of bytes written.
5921 */
5922 int32_t
5923 sli_cmd_fcoe_wq_create(sli4_t *sli4, void *buf, size_t size,
5924 ocs_dma_t *qmem, uint16_t cq_id, uint16_t ulp)
5925 {
5926 sli4_req_fcoe_wq_create_t *wq = NULL;
5927 uint32_t sli_config_off = 0;
5928 uint32_t p;
5929 uintptr_t addr;
5930
5931 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
5932 uint32_t payload_size;
5933
5934 /* Payload length must accommodate both request and response */
5935 payload_size = max(sizeof(sli4_req_fcoe_wq_create_t),
5936 sizeof(sli4_res_common_create_queue_t));
5937
5938 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
5939 NULL);
5940 }
5941 wq = (sli4_req_fcoe_wq_create_t *)((uint8_t *)buf + sli_config_off);
5942
5943 wq->hdr.opcode = SLI4_OPC_FCOE_WQ_CREATE;
5944 wq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
5945 wq->hdr.request_length = sizeof(sli4_req_fcoe_wq_create_t) -
5946 sizeof(sli4_req_hdr_t);
5947 /* valid values for number of pages: 1-4 (sec 4.5.1) */
5948 wq->num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE);
5949 if (!wq->num_pages || (wq->num_pages > SLI4_FCOE_WQ_CREATE_V0_MAX_PAGES)) {
5950 return 0;
5951 }
5952
5953 wq->cq_id = cq_id;
5954
5955 if (sli4->config.dual_ulp_capable) {
5956 wq->dua = 1;
5957 wq->bqu = 1;
5958 wq->ulp = ulp;
5959 }
5960
5961 for (p = 0, addr = qmem->phys;
5962 p < wq->num_pages;
5963 p++, addr += SLI_PAGE_SIZE) {
5964 wq->page_physical_address[p].low = ocs_addr32_lo(addr);
5965 wq->page_physical_address[p].high = ocs_addr32_hi(addr);
5966 }
5967
5968 return(sli_config_off + sizeof(sli4_req_fcoe_wq_create_t));
5969 }
5970
5971 /**
5972 * @ingroup sli_fc
5973 * @brief Write an FCOE_WQ_CREATE_V1 command.
5974 *
5975 * @param sli4 SLI context.
5976 * @param buf Destination buffer for the command.
5977 * @param size Buffer size, in bytes.
5978 * @param qmem DMA memory for the queue.
5979 * @param cq_id Associated CQ_ID.
5980 * @param ignored This parameter carries the ULP for WQ (ignored for V1)
5982 *
5983 * @return Returns the number of bytes written.
5984 */
5985 int32_t
5986 sli_cmd_fcoe_wq_create_v1(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *qmem,
5987 uint16_t cq_id, uint16_t ignored)
5988 {
5989 sli4_req_fcoe_wq_create_v1_t *wq = NULL;
5990 uint32_t sli_config_off = 0;
5991 uint32_t p;
5992 uintptr_t addr;
5993 uint32_t page_size = 0;
5994 uint32_t page_bytes = 0;
5995 uint32_t n_wqe = 0;
5996
5997 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
5998 uint32_t payload_size;
5999
6000 /* Payload length must accommodate both request and response */
6001 payload_size = max(sizeof(sli4_req_fcoe_wq_create_v1_t),
6002 sizeof(sli4_res_common_create_queue_t));
6003
6004 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
6005 NULL);
6006 }
6007 wq = (sli4_req_fcoe_wq_create_v1_t *)((uint8_t *)buf + sli_config_off);
6008
6009 wq->hdr.opcode = SLI4_OPC_FCOE_WQ_CREATE;
6010 wq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
6011 wq->hdr.request_length = sizeof(sli4_req_fcoe_wq_create_v1_t) -
6012 sizeof(sli4_req_hdr_t);
6013 wq->hdr.version = 1;
6014
6015 n_wqe = qmem->size / sli4->config.wqe_size;
6016
6017 /* This heuristic to determine the page size is simplistic
6018 * but could be made more sophisticated
6019 */
6020 switch (qmem->size) {
6021 case 4096:
6022 case 8192:
6023 case 16384:
6024 case 32768:
6025 page_size = 1;
6026 break;
6027 case 65536:
6028 page_size = 2;
6029 break;
6030 case 131072:
6031 page_size = 4;
6032 break;
6033 case 262144:
6034 page_size = 8;
6035 break;
6036 case 524288:
6037 page_size = 10;
6038 break;
6039 default:
6040 return 0;
6041 }
6042 page_bytes = page_size * SLI_PAGE_SIZE;
6043
6044 /* valid values for number of pages: 1-8 */
6045 wq->num_pages = sli_page_count(qmem->size, page_bytes);
6046 if (!wq->num_pages || (wq->num_pages > SLI4_FCOE_WQ_CREATE_V1_MAX_PAGES)) {
6047 return 0;
6048 }
6049
6050 wq->cq_id = cq_id;
6051
6052 wq->page_size = page_size;
6053
6054 if (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) {
6055 wq->wqe_size = SLI4_WQE_EXT_SIZE;
6056 } else {
6057 wq->wqe_size = SLI4_WQE_SIZE;
6058 }
6059
6060 wq->wqe_count = n_wqe;
6061
6062 for (p = 0, addr = qmem->phys;
6063 p < wq->num_pages;
6064 p++, addr += page_bytes) {
6065 wq->page_physical_address[p].low = ocs_addr32_lo(addr);
6066 wq->page_physical_address[p].high = ocs_addr32_hi(addr);
6067 }
6068
6069 return(sli_config_off + sizeof(sli4_req_fcoe_wq_create_v1_t));
6070 }
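
/*
 * Usage sketch (illustrative only): build the WQ_CREATE command into the
 * bootstrap mailbox buffer and submit it. sli_bmbx_command() is the
 * bootstrap-mailbox submit routine of this API; "qmem" must already hold the
 * DMA pages backing the new queue, and the WQ_ID is returned in the common
 * create-queue response embedded in the mailbox buffer.
 *
 *	if (sli_cmd_fcoe_wq_create_v1(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *				      qmem, cq_id, 0)) {
 *		if (sli_bmbx_command(sli4) == 0) {
 *			;	// extract the WQ_ID from the response payload
 *		}
 *	}
 */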
6071
6072 /**
6073 * @ingroup sli_fc
6074 * @brief Write an FCOE_WQ_DESTROY command.
6075 *
6076 * @param sli4 SLI context.
6077 * @param buf Destination buffer for the command.
6078 * @param size Buffer size, in bytes.
6079 * @param wq_id WQ_ID.
6080 *
6081 * @return Returns the number of bytes written.
6082 */
6083 int32_t
6084 sli_cmd_fcoe_wq_destroy(sli4_t *sli4, void *buf, size_t size, uint16_t wq_id)
6085 {
6086 sli4_req_fcoe_wq_destroy_t *wq = NULL;
6087 uint32_t sli_config_off = 0;
6088
6089 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
6090 uint32_t payload_size;
6091
6092 /* Payload length must accommodate both request and response */
6093 payload_size = max(sizeof(sli4_req_fcoe_wq_destroy_t),
6094 sizeof(sli4_res_hdr_t));
6095
6096 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
6097 NULL);
6098 }
6099 wq = (sli4_req_fcoe_wq_destroy_t *)((uint8_t *)buf + sli_config_off);
6100
6101 wq->hdr.opcode = SLI4_OPC_FCOE_WQ_DESTROY;
6102 wq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
6103 wq->hdr.request_length = sizeof(sli4_req_fcoe_wq_destroy_t) -
6104 sizeof(sli4_req_hdr_t);
6105
6106 wq->wq_id = wq_id;
6107
6108 return(sli_config_off + sizeof(sli4_req_fcoe_wq_destroy_t));
6109 }
6110
6111 /**
6112 * @ingroup sli_fc
6113 * @brief Write an FCOE_POST_SGL_PAGES command.
6114 *
6115 * @param sli4 SLI context.
6116 * @param buf Destination buffer for the command.
6117 * @param size Buffer size, in bytes.
6118  * @param xri Starting XRI.
6119  * @param xri_count Number of XRIs to post.
6120 * @param page0 First SGL memory page.
6121 * @param page1 Second SGL memory page (optional).
6122  * @param dma DMA buffer for a non-embedded mailbox command (optional).
6123  *
6124  * If a non-embedded mailbox command is used, the DMA buffer must be at least (32 + xri_count*16) bytes in length.
6125 *
6126 * @return Returns the number of bytes written.
6127 */
6128 int32_t
6129 sli_cmd_fcoe_post_sgl_pages(sli4_t *sli4, void *buf, size_t size,
6130 uint16_t xri, uint32_t xri_count, ocs_dma_t *page0[], ocs_dma_t *page1[], ocs_dma_t *dma)
6131 {
6132 sli4_req_fcoe_post_sgl_pages_t *post = NULL;
6133 uint32_t sli_config_off = 0;
6134 uint32_t i;
6135
6136 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
6137 uint32_t payload_size;
6138
6139 /* Payload length must accommodate both request and response */
6140 payload_size = max(sizeof(sli4_req_fcoe_post_sgl_pages_t),
6141 sizeof(sli4_res_hdr_t));
6142
6143 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
6144 dma);
6145 }
6146 if (dma) {
6147 post = dma->virt;
6148 ocs_memset(post, 0, dma->size);
6149 } else {
6150 post = (sli4_req_fcoe_post_sgl_pages_t *)((uint8_t *)buf + sli_config_off);
6151 }
6152
6153 post->hdr.opcode = SLI4_OPC_FCOE_POST_SGL_PAGES;
6154 post->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
6155 /* payload size calculation
6156 * 4 = xri_start + xri_count
6157 * xri_count = # of XRI's registered
6158 * sizeof(uint64_t) = physical address size
6159 * 2 = # of physical addresses per page set
6160 */
6161 post->hdr.request_length = 4 + (xri_count * (sizeof(uint64_t) * 2));
6162
6163 post->xri_start = xri;
6164 post->xri_count = xri_count;
6165
6166 for (i = 0; i < xri_count; i++) {
6167 post->page_set[i].page0_low = ocs_addr32_lo(page0[i]->phys);
6168 post->page_set[i].page0_high = ocs_addr32_hi(page0[i]->phys);
6169 }
6170
6171 if (page1) {
6172 for (i = 0; i < xri_count; i++) {
6173 post->page_set[i].page1_low = ocs_addr32_lo(page1[i]->phys);
6174 post->page_set[i].page1_high = ocs_addr32_hi(page1[i]->phys);
6175 }
6176 }
6177
6178 return dma ? sli_config_off : (sli_config_off + sizeof(sli4_req_fcoe_post_sgl_pages_t));
6179 }
6180
6181 /**
6182 * @ingroup sli_fc
6183 * @brief Write an FCOE_RQ_CREATE command.
6184 *
6185 * @param sli4 SLI context.
6186 * @param buf Destination buffer for the command.
6187 * @param size Buffer size, in bytes.
6188 * @param qmem DMA memory for the queue.
6189 * @param cq_id Associated CQ_ID.
6190 * @param ulp This parameter carries the ULP for the RQ
6191 * @param buffer_size Buffer size pointed to by each RQE.
6192 *
6193 * @note This creates a Version 0 message.
6194 *
6195 * @return Returns the number of bytes written.
6196 */
6197 int32_t
6198 sli_cmd_fcoe_rq_create(sli4_t *sli4, void *buf, size_t size,
6199 ocs_dma_t *qmem, uint16_t cq_id, uint16_t ulp, uint16_t buffer_size)
6200 {
6201 sli4_req_fcoe_rq_create_t *rq = NULL;
6202 uint32_t sli_config_off = 0;
6203 uint32_t p;
6204 uintptr_t addr;
6205
6206 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
6207 uint32_t payload_size;
6208
6209 /* Payload length must accommodate both request and response */
6210 payload_size = max(sizeof(sli4_req_fcoe_rq_create_t),
6211 sizeof(sli4_res_common_create_queue_t));
6212
6213 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
6214 NULL);
6215 }
6216 rq = (sli4_req_fcoe_rq_create_t *)((uint8_t *)buf + sli_config_off);
6217
6218 rq->hdr.opcode = SLI4_OPC_FCOE_RQ_CREATE;
6219 rq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
6220 rq->hdr.request_length = sizeof(sli4_req_fcoe_rq_create_t) -
6221 sizeof(sli4_req_hdr_t);
6222 /* valid values for number of pages: 1-8 (sec 4.5.6) */
6223 rq->num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE);
6224 if (!rq->num_pages || (rq->num_pages > SLI4_FCOE_RQ_CREATE_V0_MAX_PAGES)) {
6225 ocs_log_test(sli4->os, "num_pages %d not valid\n", rq->num_pages);
6226 return 0;
6227 }
6228
6229 /*
6230 * RQE count is the log base 2 of the total number of entries
6231 */
6232 rq->rqe_count = ocs_lg2(qmem->size / SLI4_FCOE_RQE_SIZE);
6233
6234 if ((buffer_size < SLI4_FCOE_RQ_CREATE_V0_MIN_BUF_SIZE) ||
6235 (buffer_size > SLI4_FCOE_RQ_CREATE_V0_MAX_BUF_SIZE)) {
6236 ocs_log_err(sli4->os, "buffer_size %d out of range (%d-%d)\n",
6237 buffer_size,
6238 SLI4_FCOE_RQ_CREATE_V0_MIN_BUF_SIZE,
6239 SLI4_FCOE_RQ_CREATE_V0_MAX_BUF_SIZE);
6240 return -1;
6241 }
6242 rq->buffer_size = buffer_size;
6243
6244 rq->cq_id = cq_id;
6245
6246 if (sli4->config.dual_ulp_capable) {
6247 rq->dua = 1;
6248 rq->bqu = 1;
6249 rq->ulp = ulp;
6250 }
6251
6252 for (p = 0, addr = qmem->phys;
6253 p < rq->num_pages;
6254 p++, addr += SLI_PAGE_SIZE) {
6255 rq->page_physical_address[p].low = ocs_addr32_lo(addr);
6256 rq->page_physical_address[p].high = ocs_addr32_hi(addr);
6257 }
6258
6259 return(sli_config_off + sizeof(sli4_req_fcoe_rq_create_t));
6260 }
6261
6262 /**
6263 * @ingroup sli_fc
6264 * @brief Write an FCOE_RQ_CREATE_V1 command.
6265 *
6266 * @param sli4 SLI context.
6267 * @param buf Destination buffer for the command.
6268 * @param size Buffer size, in bytes.
6269 * @param qmem DMA memory for the queue.
6270 * @param cq_id Associated CQ_ID.
6271 * @param ulp This parameter carries the ULP for RQ (ignored for V1)
6272 * @param buffer_size Buffer size pointed to by each RQE.
6273 *
6274  * @note This creates a Version 1 message.
6275 *
6276 * @return Returns the number of bytes written.
6277 */
6278 int32_t
6279 sli_cmd_fcoe_rq_create_v1(sli4_t *sli4, void *buf, size_t size,
6280 ocs_dma_t *qmem, uint16_t cq_id, uint16_t ulp,
6281 uint16_t buffer_size)
6282 {
6283 sli4_req_fcoe_rq_create_v1_t *rq = NULL;
6284 uint32_t sli_config_off = 0;
6285 uint32_t p;
6286 uintptr_t addr;
6287
6288 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
6289 uint32_t payload_size;
6290
6291 /* Payload length must accommodate both request and response */
6292 payload_size = max(sizeof(sli4_req_fcoe_rq_create_v1_t),
6293 sizeof(sli4_res_common_create_queue_t));
6294
6295 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
6296 NULL);
6297 }
6298 rq = (sli4_req_fcoe_rq_create_v1_t *)((uint8_t *)buf + sli_config_off);
6299
6300 rq->hdr.opcode = SLI4_OPC_FCOE_RQ_CREATE;
6301 rq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
6302 rq->hdr.request_length = sizeof(sli4_req_fcoe_rq_create_v1_t) -
6303 sizeof(sli4_req_hdr_t);
6304 rq->hdr.version = 1;
6305
6306 /* Disable "no buffer warnings" to avoid Lancer bug */
6307 rq->dnb = TRUE;
6308
6309 /* valid values for number of pages: 1-8 (sec 4.5.6) */
6310 rq->num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE);
6311 if (!rq->num_pages || (rq->num_pages > SLI4_FCOE_RQ_CREATE_V1_MAX_PAGES)) {
6312 ocs_log_test(sli4->os, "num_pages %d not valid, max %d\n",
6313 rq->num_pages, SLI4_FCOE_RQ_CREATE_V1_MAX_PAGES);
6314 return 0;
6315 }
6316
6317 /*
6318 * RQE count is the total number of entries (note not lg2(# entries))
6319 */
6320 rq->rqe_count = qmem->size / SLI4_FCOE_RQE_SIZE;
6321
6322 rq->rqe_size = SLI4_FCOE_RQE_SIZE_8;
6323
6324 rq->page_size = SLI4_FCOE_RQ_PAGE_SIZE_4096;
6325
6326 if ((buffer_size < sli4->config.rq_min_buf_size) ||
6327 (buffer_size > sli4->config.rq_max_buf_size)) {
6328 ocs_log_err(sli4->os, "buffer_size %d out of range (%d-%d)\n",
6329 buffer_size,
6330 sli4->config.rq_min_buf_size,
6331 sli4->config.rq_max_buf_size);
6332 return -1;
6333 }
6334 rq->buffer_size = buffer_size;
6335
6336 rq->cq_id = cq_id;
6337
6338 for (p = 0, addr = qmem->phys;
6339 p < rq->num_pages;
6340 p++, addr += SLI_PAGE_SIZE) {
6341 rq->page_physical_address[p].low = ocs_addr32_lo(addr);
6342 rq->page_physical_address[p].high = ocs_addr32_hi(addr);
6343 }
6344
6345 return(sli_config_off + sizeof(sli4_req_fcoe_rq_create_v1_t));
6346 }
6347
6348 /**
6349 * @ingroup sli_fc
6350 * @brief Write an FCOE_RQ_DESTROY command.
6351 *
6352 * @param sli4 SLI context.
6353 * @param buf Destination buffer for the command.
6354 * @param size Buffer size, in bytes.
6355 * @param rq_id RQ_ID.
6356 *
6357 * @return Returns the number of bytes written.
6358 */
6359 int32_t
6360 sli_cmd_fcoe_rq_destroy(sli4_t *sli4, void *buf, size_t size, uint16_t rq_id)
6361 {
6362 sli4_req_fcoe_rq_destroy_t *rq = NULL;
6363 uint32_t sli_config_off = 0;
6364
6365 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
6366 uint32_t payload_size;
6367
6368 /* Payload length must accommodate both request and response */
6369 payload_size = max(sizeof(sli4_req_fcoe_rq_destroy_t),
6370 sizeof(sli4_res_hdr_t));
6371
6372 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
6373 NULL);
6374 }
6375 rq = (sli4_req_fcoe_rq_destroy_t *)((uint8_t *)buf + sli_config_off);
6376
6377 rq->hdr.opcode = SLI4_OPC_FCOE_RQ_DESTROY;
6378 rq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
6379 rq->hdr.request_length = sizeof(sli4_req_fcoe_rq_destroy_t) -
6380 sizeof(sli4_req_hdr_t);
6381
6382 rq->rq_id = rq_id;
6383
6384 return(sli_config_off + sizeof(sli4_req_fcoe_rq_destroy_t));
6385 }
6386
6387 /**
6388 * @ingroup sli_fc
6389 * @brief Write an FCOE_READ_FCF_TABLE command.
6390 *
6391 * @note
6392 * The response of this command exceeds the size of an embedded
6393 * command and requires an external buffer with DMA capability to hold the results.
6394 * The caller should allocate the ocs_dma_t structure / memory.
6395 *
6396 * @param sli4 SLI context.
6397 * @param buf Destination buffer for the command.
6398 * @param size Buffer size, in bytes.
6399 * @param dma Pointer to DMA memory structure. This is allocated by the caller.
6400 * @param index FCF table index to retrieve.
6401 *
6402 * @return Returns the number of bytes written.
6403 */
6404 int32_t
6405 sli_cmd_fcoe_read_fcf_table(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma, uint16_t index)
6406 {
6407 sli4_req_fcoe_read_fcf_table_t *read_fcf = NULL;
6408
6409 if (SLI4_PORT_TYPE_FC != sli4->port_type) {
6410 ocs_log_test(sli4->os, "FCOE_READ_FCF_TABLE only supported on FC\n");
6411 return -1;
6412 }
6413
6414 read_fcf = dma->virt;
6415
6416 ocs_memset(read_fcf, 0, sizeof(sli4_req_fcoe_read_fcf_table_t));
6417
6418 read_fcf->hdr.opcode = SLI4_OPC_FCOE_READ_FCF_TABLE;
6419 read_fcf->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
6420 read_fcf->hdr.request_length = dma->size -
6421 sizeof(sli4_req_fcoe_read_fcf_table_t);
6422 read_fcf->fcf_index = index;
6423
6424 return sli_cmd_sli_config(sli4, buf, size, 0, dma);
6425 }
6426
6427 /**
6428 * @ingroup sli_fc
6429 * @brief Write an FCOE_POST_HDR_TEMPLATES command.
6430 *
6431 * @param sli4 SLI context.
6432 * @param buf Destination buffer for the command.
6433 * @param size Buffer size, in bytes.
6434 * @param dma Pointer to DMA memory structure. This is allocated by the caller.
6435 * @param rpi Starting RPI index for the header templates.
6436 * @param payload_dma Pointer to DMA memory used to hold larger descriptor counts.
6437 *
6438 * @return Returns the number of bytes written.
6439 */
6440 int32_t
6441 sli_cmd_fcoe_post_hdr_templates(sli4_t *sli4, void *buf, size_t size,
6442 ocs_dma_t *dma, uint16_t rpi, ocs_dma_t *payload_dma)
6443 {
6444 sli4_req_fcoe_post_hdr_templates_t *template = NULL;
6445 uint32_t sli_config_off = 0;
6446 uintptr_t phys = 0;
6447 uint32_t i = 0;
6448 uint32_t page_count;
6449 uint32_t payload_size;
6450
6451 page_count = sli_page_count(dma->size, SLI_PAGE_SIZE);
6452
6453 payload_size = sizeof(sli4_req_fcoe_post_hdr_templates_t) +
6454 page_count * sizeof(sli4_physical_page_descriptor_t);
6455
6456 if (page_count > 16) {
6457 		/* We can't fit more than 16 descriptors into an embedded mailbox
6458 		   command, so it has to be non-embedded */
6459 if (ocs_dma_alloc(sli4->os, payload_dma, payload_size, 4)) {
6460 ocs_log_err(sli4->os, "mailbox payload memory allocation fail\n");
6461 return 0;
6462 }
6463 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, payload_dma);
6464 template = (sli4_req_fcoe_post_hdr_templates_t *)payload_dma->virt;
6465 } else {
6466 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL);
6467 template = (sli4_req_fcoe_post_hdr_templates_t *)((uint8_t *)buf + sli_config_off);
6468 }
6469
6470 if (UINT16_MAX == rpi) {
6471 rpi = sli4->config.extent[SLI_RSRC_FCOE_RPI].base[0];
6472 }
6473
6474 template->hdr.opcode = SLI4_OPC_FCOE_POST_HDR_TEMPLATES;
6475 template->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
6476 template->hdr.request_length = sizeof(sli4_req_fcoe_post_hdr_templates_t) -
6477 sizeof(sli4_req_hdr_t);
6478
6479 template->rpi_offset = rpi;
6480 template->page_count = page_count;
6481 phys = dma->phys;
6482 for (i = 0; i < template->page_count; i++) {
6483 template->page_descriptor[i].low = ocs_addr32_lo(phys);
6484 template->page_descriptor[i].high = ocs_addr32_hi(phys);
6485
6486 phys += SLI_PAGE_SIZE;
6487 }
6488
6489 return(sli_config_off + payload_size);
6490 }
6491
6492 int32_t
6493 sli_cmd_fcoe_rediscover_fcf(sli4_t *sli4, void *buf, size_t size, uint16_t index)
6494 {
6495 sli4_req_fcoe_rediscover_fcf_t *redisc = NULL;
6496 uint32_t sli_config_off = 0;
6497
6498 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
6499 sizeof(sli4_req_fcoe_rediscover_fcf_t),
6500 NULL);
6501
6502 redisc = (sli4_req_fcoe_rediscover_fcf_t *)((uint8_t *)buf + sli_config_off);
6503
6504 redisc->hdr.opcode = SLI4_OPC_FCOE_REDISCOVER_FCF;
6505 redisc->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
6506 redisc->hdr.request_length = sizeof(sli4_req_fcoe_rediscover_fcf_t) -
6507 sizeof(sli4_req_hdr_t);
6508
6509 if (index == UINT16_MAX) {
6510 redisc->fcf_count = 0;
6511 } else {
6512 redisc->fcf_count = 1;
6513 redisc->fcf_index[0] = index;
6514 }
6515
6516 return(sli_config_off + sizeof(sli4_req_fcoe_rediscover_fcf_t));
6517 }
6518
6519 /**
6520 * @ingroup sli_fc
6521 * @brief Write an ABORT_WQE work queue entry.
6522 *
6523 * @param sli4 SLI context.
6524 * @param buf Destination buffer for the WQE.
6525 * @param size Buffer size, in bytes.
6526 * @param type Abort type, such as XRI, abort tag, and request tag.
6527 * @param send_abts Boolean to cause the hardware to automatically generate an ABTS.
6528 * @param ids ID of IOs to abort.
6529 * @param mask Mask applied to the ID values to abort.
6530 * @param tag Tag value associated with this abort.
6531 * @param cq_id The id of the completion queue where the WQE response is sent.
6534 *
6535 * @return Returns 0 on success, or a non-zero value on failure.
6536 */
6537 int32_t
6538 sli_abort_wqe(sli4_t *sli4, void *buf, size_t size, sli4_abort_type_e type, uint32_t send_abts,
6539 uint32_t ids, uint32_t mask, uint16_t tag, uint16_t cq_id)
6540 {
6541 sli4_abort_wqe_t *abort = buf;
6542
6543 ocs_memset(buf, 0, size);
6544
6545 switch (type) {
6546 case SLI_ABORT_XRI:
6547 abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG;
6548 if (mask) {
6549 ocs_log_warn(sli4->os, "warning non-zero mask %#x when aborting XRI %#x\n", mask, ids);
6550 mask = 0;
6551 }
6552 break;
6553 case SLI_ABORT_ABORT_ID:
6554 abort->criteria = SLI4_ABORT_CRITERIA_ABORT_TAG;
6555 break;
6556 case SLI_ABORT_REQUEST_ID:
6557 abort->criteria = SLI4_ABORT_CRITERIA_REQUEST_TAG;
6558 break;
6559 default:
6560 ocs_log_test(sli4->os, "unsupported type %#x\n", type);
6561 return -1;
6562 }
6563
6564 abort->ia = send_abts ? 0 : 1;
6565
6566 /* Suppress ABTS retries */
6567 abort->ir = 1;
6568
6569 abort->t_mask = mask;
6570 abort->t_tag = ids;
6571 abort->command = SLI4_WQE_ABORT;
6572 abort->request_tag = tag;
6573 abort->qosd = TRUE;
6574 abort->cq_id = cq_id;
6575 abort->cmd_type = SLI4_CMD_ABORT_WQE;
6576
6577 return 0;
6578 }
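
/*
 * Usage sketch (illustrative only): abort an outstanding exchange by XRI
 * without emitting an ABTS on the wire, then post the entry to a work queue.
 * "wqe" is assumed to match the WQ entry size (64 bytes here).
 *
 *	uint8_t wqe[64];
 *
 *	if (sli_abort_wqe(sli4, wqe, sizeof(wqe), SLI_ABORT_XRI, FALSE,
 *			  xri, 0, tag, cq_id) == 0) {
 *		sli_queue_write(sli4, wq, wqe);
 *	}
 */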
6579
6580 /**
6581 * @ingroup sli_fc
6582 * @brief Write an ELS_REQUEST64_WQE work queue entry.
6583 *
6584 * @param sli4 SLI context.
6585 * @param buf Destination buffer for the WQE.
6586 * @param size Buffer size, in bytes.
6587 * @param sgl DMA memory for the ELS request.
6588 * @param req_type ELS request type.
6589 * @param req_len Length of ELS request in bytes.
6590 * @param max_rsp_len Max length of ELS response in bytes.
6591 * @param timeout Time, in seconds, before an IO times out. Zero means 2 * R_A_TOV.
6592 * @param xri XRI for this exchange.
6593 * @param tag IO tag value.
6594 * @param cq_id The id of the completion queue where the WQE response is sent.
6595 * @param rnode Destination of ELS request (that is, the remote node).
6596 *
6597 * @return Returns 0 on success, or a non-zero value on failure.
6598 */
6599 int32_t
6600 sli_els_request64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint8_t req_type,
6601 uint32_t req_len, uint32_t max_rsp_len, uint8_t timeout,
6602 uint16_t xri, uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode)
6603 {
6604 sli4_els_request64_wqe_t *els = buf;
6605 sli4_sge_t *sge = sgl->virt;
6606 uint8_t is_fabric = FALSE;
6607
6608 ocs_memset(buf, 0, size);
6609
6610 if (sli4->config.sgl_pre_registered) {
6611 els->xbl = FALSE;
6612
6613 els->dbde = TRUE;
6614 els->els_request_payload.bde_type = SLI4_BDE_TYPE_BDE_64;
6615
6616 els->els_request_payload.buffer_length = req_len;
6617 els->els_request_payload.u.data.buffer_address_low = sge[0].buffer_address_low;
6618 els->els_request_payload.u.data.buffer_address_high = sge[0].buffer_address_high;
6619 } else {
6620 els->xbl = TRUE;
6621
6622 els->els_request_payload.bde_type = SLI4_BDE_TYPE_BLP;
6623
6624 els->els_request_payload.buffer_length = 2 * sizeof(sli4_sge_t);
6625 els->els_request_payload.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
6626 els->els_request_payload.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
6627 }
6628
6629 els->els_request_payload_length = req_len;
6630 els->max_response_payload_length = max_rsp_len;
6631
6632 els->xri_tag = xri;
6633 els->timer = timeout;
6634 els->class = SLI4_ELS_REQUEST64_CLASS_3;
6635
6636 els->command = SLI4_WQE_ELS_REQUEST64;
6637
6638 els->request_tag = tag;
6639
6640 if (rnode->node_group) {
6641 els->hlm = TRUE;
6642 els->remote_id = rnode->fc_id & 0x00ffffff;
6643 }
6644
6645 els->iod = SLI4_ELS_REQUEST64_DIR_READ;
6646
6647 els->qosd = TRUE;
6648
6649 /* figure out the ELS_ID value from the request buffer */
6650
6651 switch (req_type) {
6652 case FC_ELS_CMD_LOGO:
6653 els->els_id = SLI4_ELS_REQUEST64_LOGO;
6654 if (rnode->attached) {
6655 els->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
6656 els->context_tag = rnode->indicator;
6657 } else {
6658 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
6659 els->context_tag = rnode->sport->indicator;
6660 }
6661 if (FC_ADDR_FABRIC == rnode->fc_id) {
6662 is_fabric = TRUE;
6663 }
6664 break;
6665 case FC_ELS_CMD_FDISC:
6666 if (FC_ADDR_FABRIC == rnode->fc_id) {
6667 is_fabric = TRUE;
6668 }
6669 if (0 == rnode->sport->fc_id) {
6670 els->els_id = SLI4_ELS_REQUEST64_FDISC;
6671 is_fabric = TRUE;
6672 } else {
6673 els->els_id = SLI4_ELS_REQUEST64_OTHER;
6674 }
6675 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
6676 els->context_tag = rnode->sport->indicator;
6677 els->sp = TRUE;
6678 break;
6679 case FC_ELS_CMD_FLOGI:
6680 els->els_id = SLI4_ELS_REQUEST64_FLOGIN;
6681 is_fabric = TRUE;
6682 if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) {
6683 if (!rnode->sport->domain) {
6684 ocs_log_test(sli4->os, "invalid domain handle\n");
6685 return -1;
6686 }
6687 /*
6688 * IF_TYPE 0 skips INIT_VFI/INIT_VPI and therefore must use the
6689 * FCFI here
6690 */
6691 els->ct = SLI4_ELS_REQUEST64_CONTEXT_FCFI;
6692 els->context_tag = rnode->sport->domain->fcf_indicator;
6693 els->sp = TRUE;
6694 } else {
6695 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
6696 els->context_tag = rnode->sport->indicator;
6697
6698 /*
6699 * Set SP here ... we haven't done a REG_VPI yet
6700 * TODO: need to maybe not set this when we have
6701 * completed VFI/VPI registrations ...
6702 *
6703 * Use the FC_ID of the SPORT if it has been allocated, otherwise
6704 * use an S_ID of zero.
6705 */
6706 els->sp = TRUE;
6707 if (rnode->sport->fc_id != UINT32_MAX) {
6708 els->sid = rnode->sport->fc_id;
6709 }
6710 }
6711 break;
6712 case FC_ELS_CMD_PLOGI:
6713 els->els_id = SLI4_ELS_REQUEST64_PLOGI;
6714 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
6715 els->context_tag = rnode->sport->indicator;
6716 break;
6717 case FC_ELS_CMD_SCR:
6718 els->els_id = SLI4_ELS_REQUEST64_OTHER;
6719 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
6720 els->context_tag = rnode->sport->indicator;
6721 break;
6722 default:
6723 els->els_id = SLI4_ELS_REQUEST64_OTHER;
6724 if (rnode->attached) {
6725 els->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
6726 els->context_tag = rnode->indicator;
6727 } else {
6728 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
6729 els->context_tag = rnode->sport->indicator;
6730 }
6731 break;
6732 }
6733
6734 if (is_fabric) {
6735 els->cmd_type = SLI4_ELS_REQUEST64_CMD_FABRIC;
6736 } else {
6737 els->cmd_type = SLI4_ELS_REQUEST64_CMD_NON_FABRIC;
6738 }
6739
6740 els->cq_id = cq_id;
6741
6742 if (SLI4_ELS_REQUEST64_CONTEXT_RPI != els->ct) {
6743 els->remote_id = rnode->fc_id;
6744 }
6745 if (SLI4_ELS_REQUEST64_CONTEXT_VPI == els->ct) {
6746 els->temporary_rpi = rnode->indicator;
6747 }
6748
6749 return 0;
6750 }
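
/*
 * Usage sketch (illustrative only): build a PLOGI request WQE. "sgl" holds
 * the ELS request payload SGE followed by the response SGE; passing a timeout
 * of zero selects the default of 2 * R_A_TOV, as noted above.
 *
 *	if (sli_els_request64_wqe(sli4, wqe, wq->size, sgl, FC_ELS_CMD_PLOGI,
 *				  req_len, rsp_len, 0, xri, tag, cq_id,
 *				  rnode) == 0) {
 *		sli_queue_write(sli4, wq, wqe);
 *	}
 */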
6751
6752 /**
6753 * @ingroup sli_fc
6754 * @brief Write an FCP_ICMND64_WQE work queue entry.
6755 *
6756 * @param sli4 SLI context.
6757 * @param buf Destination buffer for the WQE.
6758 * @param size Buffer size, in bytes.
6759 * @param sgl DMA memory for the scatter gather list.
6760 * @param xri XRI for this exchange.
6761 * @param tag IO tag value.
6762 * @param cq_id The id of the completion queue where the WQE response is sent.
6763 * @param rpi remote node indicator (RPI)
6764  * @param rnode Destination of the request (that is, the remote node).
6765 * @param timeout Time, in seconds, before an IO times out. Zero means no timeout.
6766 *
6767 * @return Returns 0 on success, or a non-zero value on failure.
6768 */
6769 int32_t
6770 sli_fcp_icmnd64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl,
6771 uint16_t xri, uint16_t tag, uint16_t cq_id,
6772 uint32_t rpi, ocs_remote_node_t *rnode, uint8_t timeout)
6773 {
6774 sli4_fcp_icmnd64_wqe_t *icmnd = buf;
6775 sli4_sge_t *sge = NULL;
6776
6777 ocs_memset(buf, 0, size);
6778
6779 if (!sgl || !sgl->virt) {
6780 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
6781 sgl, sgl ? sgl->virt : NULL);
6782 return -1;
6783 }
6784 sge = sgl->virt;
6785
6786 if (sli4->config.sgl_pre_registered) {
6787 icmnd->xbl = FALSE;
6788
6789 icmnd->dbde = TRUE;
6790 icmnd->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
6791
6792 icmnd->bde.buffer_length = sge[0].buffer_length;
6793 icmnd->bde.u.data.buffer_address_low = sge[0].buffer_address_low;
6794 icmnd->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
6795 } else {
6796 icmnd->xbl = TRUE;
6797
6798 icmnd->bde.bde_type = SLI4_BDE_TYPE_BLP;
6799
6800 icmnd->bde.buffer_length = sgl->size;
6801 icmnd->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
6802 icmnd->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
6803 }
6804
6805 icmnd->payload_offset_length = sge[0].buffer_length + sge[1].buffer_length;
6806 icmnd->xri_tag = xri;
6807 icmnd->context_tag = rpi;
6808 icmnd->timer = timeout;
6809
6810 icmnd->pu = 2; /* WQE word 4 contains read transfer length */
6811 icmnd->class = SLI4_ELS_REQUEST64_CLASS_3;
6812 icmnd->command = SLI4_WQE_FCP_ICMND64;
6813 icmnd->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
6814
6815 icmnd->abort_tag = xri;
6816
6817 icmnd->request_tag = tag;
6818 icmnd->len_loc = 3;
6819 if (rnode->node_group) {
6820 icmnd->hlm = TRUE;
6821 icmnd->remote_n_port_id = rnode->fc_id & 0x00ffffff;
6822 }
6823 if (((ocs_node_t *)rnode->node)->fcp2device) {
6824 icmnd->erp = TRUE;
6825 }
6826 icmnd->cmd_type = SLI4_CMD_FCP_ICMND64_WQE;
6827 icmnd->cq_id = cq_id;
6828
6829 return 0;
6830 }
6831
6832 /**
6833 * @ingroup sli_fc
6834 * @brief Write an FCP_IREAD64_WQE work queue entry.
6835 *
6836 * @param sli4 SLI context.
6837 * @param buf Destination buffer for the WQE.
6838 * @param size Buffer size, in bytes.
6839 * @param sgl DMA memory for the scatter gather list.
6840 * @param first_data_sge Index of first data sge (used if perf hints are enabled)
6841 * @param xfer_len Data transfer length.
6842 * @param xri XRI for this exchange.
6843 * @param tag IO tag value.
6844 * @param cq_id The id of the completion queue where the WQE response is sent.
6845 * @param rpi remote node indicator (RPI)
6846  * @param rnode Destination of the request (i.e. the remote node).
6847 * @param dif T10 DIF operation, or 0 to disable.
6848 * @param bs T10 DIF block size, or 0 if DIF is disabled.
6849 * @param timeout Time, in seconds, before an IO times out. Zero means no timeout.
6850 *
6851 * @return Returns 0 on success, or a non-zero value on failure.
6852 */
6853 int32_t
6854 sli_fcp_iread64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge,
6855 uint32_t xfer_len, uint16_t xri, uint16_t tag, uint16_t cq_id,
6856 uint32_t rpi, ocs_remote_node_t *rnode,
6857 uint8_t dif, uint8_t bs, uint8_t timeout)
6858 {
6859 sli4_fcp_iread64_wqe_t *iread = buf;
6860 sli4_sge_t *sge = NULL;
6861
6862 ocs_memset(buf, 0, size);
6863
6864 if (!sgl || !sgl->virt) {
6865 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
6866 sgl, sgl ? sgl->virt : NULL);
6867 return -1;
6868 }
6869 sge = sgl->virt;
6870
6871 if (sli4->config.sgl_pre_registered) {
6872 iread->xbl = FALSE;
6873
6874 iread->dbde = TRUE;
6875 iread->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
6876
6877 iread->bde.buffer_length = sge[0].buffer_length;
6878 iread->bde.u.data.buffer_address_low = sge[0].buffer_address_low;
6879 iread->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
6880 } else {
6881 iread->xbl = TRUE;
6882
6883 iread->bde.bde_type = SLI4_BDE_TYPE_BLP;
6884
6885 iread->bde.buffer_length = sgl->size;
6886 iread->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
6887 iread->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
6888
6889 /* fill out fcp_cmnd buffer len and change resp buffer to be of type
6890 * "skip" (note: response will still be written to sge[1] if necessary) */
6891 iread->fcp_cmd_buffer_length = sge[0].buffer_length;
6892 sge[1].sge_type = SLI4_SGE_TYPE_SKIP;
6893 }
6894
6895 iread->payload_offset_length = sge[0].buffer_length + sge[1].buffer_length;
6896 iread->total_transfer_length = xfer_len;
6897
6898 iread->xri_tag = xri;
6899 iread->context_tag = rpi;
6900
6901 iread->timer = timeout;
6902
6903 iread->pu = 2; /* WQE word 4 contains read transfer length */
6904 iread->class = SLI4_ELS_REQUEST64_CLASS_3;
6905 iread->command = SLI4_WQE_FCP_IREAD64;
6906 iread->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
6907 iread->dif = dif;
6908 iread->bs = bs;
6909
6910 iread->abort_tag = xri;
6911
6912 iread->request_tag = tag;
6913 iread->len_loc = 3;
6914 if (rnode->node_group) {
6915 iread->hlm = TRUE;
6916 iread->remote_n_port_id = rnode->fc_id & 0x00ffffff;
6917 }
6918 if (((ocs_node_t *)rnode->node)->fcp2device) {
6919 iread->erp = TRUE;
6920 }
6921 iread->iod = 1;
6922 iread->cmd_type = SLI4_CMD_FCP_IREAD64_WQE;
6923 iread->cq_id = cq_id;
6924
6925 if (sli4->config.perf_hint) {
6926 iread->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64;
6927 iread->first_data_bde.buffer_length = sge[first_data_sge].buffer_length;
6928 iread->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low;
6929 iread->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high;
6930 }
6931
6932 return 0;
6933 }
6934
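/*
 * Illustrative sketch (not compiled): one possible way a caller could fill an
 * FCP_IREAD64 WQE with sli_fcp_iread64_wqe(). The WQE buffer, SGL (SGE[0] =
 * FCP_CMND, SGE[1] = FCP_RSP, SGE[2..] = data), XRI, tag, CQ ID, RPI and
 * remote node are assumed to have been set up by the caller; all names and
 * values below are hypothetical.
 */
#if 0
static int32_t
example_build_fcp_iread(sli4_t *sli4, uint8_t *wqe, size_t wqe_size,
			ocs_dma_t *sgl, uint16_t xri, uint16_t tag,
			uint16_t cq_id, uint32_t rpi, ocs_remote_node_t *rnode)
{
	/* 8 KiB read, no T10 DIF, no timeout */
	return sli_fcp_iread64_wqe(sli4, wqe, wqe_size, sgl,
				   2,		/* first data SGE (used only if perf hints are enabled) */
				   8192,	/* xfer_len */
				   xri, tag, cq_id, rpi, rnode,
				   0,		/* dif: disabled */
				   0,		/* bs: n/a */
				   0);		/* timeout: none */
}
#endif
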
6935 /**
6936 * @ingroup sli_fc
6937 * @brief Write an FCP_IWRITE64_WQE work queue entry.
6938 *
6939 * @param sli4 SLI context.
6940 * @param buf Destination buffer for the WQE.
6941 * @param size Buffer size, in bytes.
6942 * @param sgl DMA memory for the scatter gather list.
6943 * @param first_data_sge Index of first data sge (used if perf hints are enabled)
6944 * @param xfer_len Data transfer length.
6945 * @param first_burst The number of first burst bytes
6946 * @param xri XRI for this exchange.
6947 * @param tag IO tag value.
6948 * @param cq_id The id of the completion queue where the WQE response is sent.
6949 * @param rpi remote node indicator (RPI)
6950 * @param rnode Destination request (i.e. remote node)
6951 * @param dif T10 DIF operation, or 0 to disable
6952 * @param bs T10 DIF block size, or 0 if DIF is disabled
6953 * @param timeout Time, in seconds, before an IO times out. Zero means no timeout.
6954 *
6955 * @return Returns 0 on success, or a non-zero value on failure.
6956 */
6957 int32_t
6958 sli_fcp_iwrite64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge,
6959 uint32_t xfer_len, uint32_t first_burst, uint16_t xri, uint16_t tag, uint16_t cq_id,
6960 uint32_t rpi, ocs_remote_node_t *rnode,
6961 uint8_t dif, uint8_t bs, uint8_t timeout)
6962 {
6963 sli4_fcp_iwrite64_wqe_t *iwrite = buf;
6964 sli4_sge_t *sge = NULL;
6965
6966 ocs_memset(buf, 0, size);
6967
6968 if (!sgl || !sgl->virt) {
6969 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
6970 sgl, sgl ? sgl->virt : NULL);
6971 return -1;
6972 }
6973 sge = sgl->virt;
6974
6975 if (sli4->config.sgl_pre_registered) {
6976 iwrite->xbl = FALSE;
6977
6978 iwrite->dbde = TRUE;
6979 iwrite->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
6980
6981 iwrite->bde.buffer_length = sge[0].buffer_length;
6982 iwrite->bde.u.data.buffer_address_low = sge[0].buffer_address_low;
6983 iwrite->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
6984 } else {
6985 iwrite->xbl = TRUE;
6986
6987 iwrite->bde.bde_type = SLI4_BDE_TYPE_BLP;
6988
6989 iwrite->bde.buffer_length = sgl->size;
6990 iwrite->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
6991 iwrite->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
6992
6993 /* fill out fcp_cmnd buffer len and change resp buffer to be of type
6994 * "skip" (note: response will still be written to sge[1] if necessary) */
6995 iwrite->fcp_cmd_buffer_length = sge[0].buffer_length;
6996 sge[1].sge_type = SLI4_SGE_TYPE_SKIP;
6997 }
6998
6999 iwrite->payload_offset_length = sge[0].buffer_length + sge[1].buffer_length;
7000 iwrite->total_transfer_length = xfer_len;
7001 iwrite->initial_transfer_length = MIN(xfer_len, first_burst);
7002
7003 iwrite->xri_tag = xri;
7004 iwrite->context_tag = rpi;
7005
7006 iwrite->timer = timeout;
7007
7008 iwrite->pu = 2; /* WQE word 4 contains read transfer length */
7009 iwrite->class = SLI4_ELS_REQUEST64_CLASS_3;
7010 iwrite->command = SLI4_WQE_FCP_IWRITE64;
7011 iwrite->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
7012 iwrite->dif = dif;
7013 iwrite->bs = bs;
7014
7015 iwrite->abort_tag = xri;
7016
7017 iwrite->request_tag = tag;
7018 iwrite->len_loc = 3;
7019 if (rnode->node_group) {
7020 iwrite->hlm = TRUE;
7021 iwrite->remote_n_port_id = rnode->fc_id & 0x00ffffff;
7022 }
7023 if (((ocs_node_t *)rnode->node)->fcp2device) {
7024 iwrite->erp = TRUE;
7025 }
7026 iwrite->cmd_type = SLI4_CMD_FCP_IWRITE64_WQE;
7027 iwrite->cq_id = cq_id;
7028
7029 if (sli4->config.perf_hint) {
7030 iwrite->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7031 iwrite->first_data_bde.buffer_length = sge[first_data_sge].buffer_length;
7032 iwrite->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low;
7033 iwrite->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high;
7034 }
7035
7036 return 0;
7037 }
7038
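/*
 * Illustrative sketch (not compiled): filling an FCP_IWRITE64 WQE when the
 * target granted a 2 KiB first burst; sli_fcp_iwrite64_wqe() caps the
 * initial_transfer_length at MIN(xfer_len, first_burst). Names and sizes are
 * hypothetical, and the SGL layout (SGE[0] = FCP_CMND, SGE[1] = FCP_RSP) is
 * assumed.
 */
#if 0
static int32_t
example_build_fcp_iwrite(sli4_t *sli4, uint8_t *wqe, size_t wqe_size,
			 ocs_dma_t *sgl, uint16_t xri, uint16_t tag,
			 uint16_t cq_id, uint32_t rpi, ocs_remote_node_t *rnode)
{
	uint32_t xfer_len = 65536;	/* total write length */
	uint32_t first_burst = 2048;	/* first burst granted by the target, if any */

	return sli_fcp_iwrite64_wqe(sli4, wqe, wqe_size, sgl,
				    2,			/* first data SGE */
				    xfer_len, first_burst,
				    xri, tag, cq_id, rpi, rnode,
				    0, 0,		/* no T10 DIF */
				    30);		/* 30 second timeout */
}
#endif
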
7039 /**
7040 * @ingroup sli_fc
7041 * @brief Write an FCP_TRECEIVE64_WQE work queue entry.
7042 *
7043 * @param sli4 SLI context.
7044 * @param buf Destination buffer for the WQE.
7045 * @param size Buffer size, in bytes.
7046 * @param sgl DMA memory for the Scatter-Gather List.
7047 * @param first_data_sge Index of first data sge (used if perf hints are enabled)
7048 * @param relative_off Relative offset of the IO (if any).
7049 * @param xfer_len Data transfer length.
7050 * @param xri XRI for this exchange.
7051 * @param tag IO tag value.
7052 * @param xid OX_ID for the exchange.
7053 * @param cq_id The id of the completion queue where the WQE response is sent.
7054 * @param rpi remote node indicator (RPI)
7055 * @param rnode Destination request (i.e. remote node).
7056 * @param flags Optional attributes, including:
7057 * - ACTIVE - IO is already active.
7058 * - AUTO RSP - Automatically generate a good FCP_RSP.
7059 * @param dif T10 DIF operation, or 0 to disable.
7060 * @param bs T10 DIF block size, or 0 if DIF is disabled.
7061 * @param csctl value of csctl field.
7062 * @param app_id value for VM application header.
7063 *
7064 * @return Returns 0 on success, or a non-zero value on failure.
7065 */
7066 int32_t
7067 sli_fcp_treceive64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge,
7068 uint32_t relative_off, uint32_t xfer_len, uint16_t xri, uint16_t tag, uint16_t cq_id,
7069 uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode, uint32_t flags, uint8_t dif, uint8_t bs,
7070 uint8_t csctl, uint32_t app_id)
7071 {
7072 sli4_fcp_treceive64_wqe_t *trecv = buf;
7073 sli4_fcp_128byte_wqe_t *trecv_128 = buf;
7074 sli4_sge_t *sge = NULL;
7075
7076 ocs_memset(buf, 0, size);
7077
7078 if (!sgl || !sgl->virt) {
7079 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
7080 sgl, sgl ? sgl->virt : NULL);
7081 return -1;
7082 }
7083 sge = sgl->virt;
7084
7085 if (sli4->config.sgl_pre_registered) {
7086 trecv->xbl = FALSE;
7087
7088 trecv->dbde = TRUE;
7089 trecv->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7090
7091 trecv->bde.buffer_length = sge[0].buffer_length;
7092 trecv->bde.u.data.buffer_address_low = sge[0].buffer_address_low;
7093 trecv->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
7094
7095 trecv->payload_offset_length = sge[0].buffer_length;
7096 } else {
7097 trecv->xbl = TRUE;
7098
7099 /* if data is a single physical address, use a BDE */
7100 if (!dif && (xfer_len <= sge[2].buffer_length)) {
7101 trecv->dbde = TRUE;
7102 trecv->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7103
7104 trecv->bde.buffer_length = sge[2].buffer_length;
7105 trecv->bde.u.data.buffer_address_low = sge[2].buffer_address_low;
7106 trecv->bde.u.data.buffer_address_high = sge[2].buffer_address_high;
7107 } else {
7108 trecv->bde.bde_type = SLI4_BDE_TYPE_BLP;
7109 trecv->bde.buffer_length = sgl->size;
7110 trecv->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
7111 trecv->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
7112 }
7113 }
7114
7115 trecv->relative_offset = relative_off;
7116
7117 if (flags & SLI4_IO_CONTINUATION) {
7118 trecv->xc = TRUE;
7119 }
7120 trecv->xri_tag = xri;
7121
7122 trecv->context_tag = rpi;
7123
7124 trecv->pu = TRUE; /* WQE uses relative offset */
7125
7126 if (flags & SLI4_IO_AUTO_GOOD_RESPONSE) {
7127 trecv->ar = TRUE;
7128 }
7129
7130 trecv->command = SLI4_WQE_FCP_TRECEIVE64;
7131 trecv->class = SLI4_ELS_REQUEST64_CLASS_3;
7132 trecv->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
7133 trecv->dif = dif;
7134 trecv->bs = bs;
7135
7136 trecv->remote_xid = xid;
7137
7138 trecv->request_tag = tag;
7139
7140 trecv->iod = 1;
7141
7142 trecv->len_loc = 0x2;
7143
7144 if (rnode->node_group) {
7145 trecv->hlm = TRUE;
7146 trecv->dword5.dword = rnode->fc_id & 0x00ffffff;
7147 }
7148
7149 trecv->cmd_type = SLI4_CMD_FCP_TRECEIVE64_WQE;
7150
7151 trecv->cq_id = cq_id;
7152
7153 trecv->fcp_data_receive_length = xfer_len;
7154
7155 if (sli4->config.perf_hint) {
7156 trecv->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7157 trecv->first_data_bde.buffer_length = sge[first_data_sge].buffer_length;
7158 trecv->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low;
7159 trecv->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high;
7160 }
7161
7162 /* The upper 7 bits of csctl are the priority */
7163 if (csctl & SLI4_MASK_CCP) {
7164 trecv->ccpe = 1;
7165 trecv->ccp = (csctl & SLI4_MASK_CCP);
7166 }
7167
7168 if (app_id && (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) && !trecv->eat) {
7169 trecv->app_id_valid = 1;
7170 trecv->wqes = 1;
7171 trecv_128->dw[31] = app_id;
7172 }
7173 return 0;
7174 }
7175
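/*
 * Illustrative sketch (not compiled): a target-mode caller requesting that the
 * port DMA the write data and then auto-generate a good FCP_RSP by setting
 * SLI4_IO_AUTO_GOOD_RESPONSE. All names are hypothetical; the XRI, OX_ID and
 * RPI would normally come from the received FCP_CMND exchange.
 */
#if 0
static int32_t
example_build_treceive(sli4_t *sli4, uint8_t *wqe, size_t wqe_size,
		       ocs_dma_t *sgl, uint32_t xfer_len, uint16_t xri,
		       uint16_t tag, uint16_t cq_id, uint16_t ox_id,
		       uint32_t rpi, ocs_remote_node_t *rnode)
{
	uint32_t flags = SLI4_IO_AUTO_GOOD_RESPONSE;

	return sli_fcp_treceive64_wqe(sli4, wqe, wqe_size, sgl,
				      2,		/* first data SGE */
				      0,		/* relative offset */
				      xfer_len, xri, tag, cq_id, ox_id,
				      rpi, rnode, flags,
				      0, 0,		/* no T10 DIF */
				      0,		/* csctl */
				      0);		/* no VM app header */
}
#endif
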
7176 /**
7177 * @ingroup sli_fc
7178 * @brief Write an FCP_CONT_TRECEIVE64_WQE work queue entry.
7179 *
7180 * @param sli4 SLI context.
7181 * @param buf Destination buffer for the WQE.
7182 * @param size Buffer size, in bytes.
7183 * @param sgl DMA memory for the Scatter-Gather List.
7184 * @param first_data_sge Index of first data sge (used if perf hints are enabled)
7185 * @param relative_off Relative offset of the IO (if any).
7186 * @param xfer_len Data transfer length.
7187 * @param xri XRI for this exchange.
7188 * @param sec_xri Secondary XRI for this exchange. (BZ 161832 workaround)
7189 * @param tag IO tag value.
7190 * @param xid OX_ID for the exchange.
7191 * @param cq_id The id of the completion queue where the WQE response is sent.
7192 * @param rpi remote node indicator (RPI)
7193 * @param rnode Destination request (i.e. remote node).
7194 * @param flags Optional attributes, including:
7195 * - ACTIVE - IO is already active.
7196 * - AUTO RSP - Automatically generate a good FCP_RSP.
7197 * @param dif T10 DIF operation, or 0 to disable.
7198 * @param bs T10 DIF block size, or 0 if DIF is disabled.
7199 * @param csctl value of csctl field.
7200 * @param app_id value for VM application header.
7201 *
7202 * @return Returns 0 on success, or a non-zero value on failure.
7203 */
7204 int32_t
7205 sli_fcp_cont_treceive64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge,
7206 uint32_t relative_off, uint32_t xfer_len, uint16_t xri, uint16_t sec_xri, uint16_t tag,
7207 uint16_t cq_id, uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode, uint32_t flags,
7208 uint8_t dif, uint8_t bs, uint8_t csctl, uint32_t app_id)
7209 {
7210 int32_t rc;
7211
7212 rc = sli_fcp_treceive64_wqe(sli4, buf, size, sgl, first_data_sge, relative_off, xfer_len, xri, tag,
7213 cq_id, xid, rpi, rnode, flags, dif, bs, csctl, app_id);
7214 if (rc == 0) {
7215 sli4_fcp_treceive64_wqe_t *trecv = buf;
7216
7217 trecv->command = SLI4_WQE_FCP_CONT_TRECEIVE64;
7218 trecv->dword5.sec_xri_tag = sec_xri;
7219 }
7220 return rc;
7221 }
7222
7223 /**
7224 * @ingroup sli_fc
7225 * @brief Write an FCP_TRSP64_WQE work queue entry.
7226 *
7227 * @param sli4 SLI context.
7228 * @param buf Destination buffer for the WQE.
7229 * @param size Buffer size, in bytes.
7230 * @param sgl DMA memory for the Scatter-Gather List.
7231 * @param rsp_len Response data length.
7232 * @param xri XRI for this exchange.
7233 * @param tag IO tag value.
7234 * @param cq_id The id of the completion queue where the WQE response is sent.
7235 * @param xid OX_ID for the exchange.
7236 * @param rpi remote node indicator (RPI)
7237 * @param rnode Destination request (i.e. remote node).
7238 * @param flags Optional attributes, including:
7239 * - ACTIVE - IO is already active
7240 * - AUTO RSP - Automatically generate a good FCP_RSP.
7241 * @param csctl value of csctl field.
7242 * @param port_owned 0/1 to indicate if the XRI is port owned (used to set XBL=0)
7243 * @param app_id value for VM application header.
7244 *
7245 * @return Returns 0 on success, or a non-zero value on failure.
7246 */
7247 int32_t
7248 sli_fcp_trsp64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t rsp_len,
7249 uint16_t xri, uint16_t tag, uint16_t cq_id, uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode,
7250 uint32_t flags, uint8_t csctl, uint8_t port_owned, uint32_t app_id)
7251 {
7252 sli4_fcp_trsp64_wqe_t *trsp = buf;
7253 sli4_fcp_128byte_wqe_t *trsp_128 = buf;
7254
7255 ocs_memset(buf, 0, size);
7256
7257 if (flags & SLI4_IO_AUTO_GOOD_RESPONSE) {
7258 trsp->ag = TRUE;
7259 /*
7260 * The SLI-4 documentation states that the BDE is ignored when
7261 * using auto-good response, but, at least for IF_TYPE 0 devices,
7262 * this does not appear to be true.
7263 */
7264 if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) {
7265 trsp->bde.buffer_length = 12; /* byte size of RSP */
7266 }
7267 } else {
7268 sli4_sge_t *sge = sgl->virt;
7269
7270 if (sli4->config.sgl_pre_registered || port_owned) {
7271 trsp->dbde = TRUE;
7272 } else {
7273 trsp->xbl = TRUE;
7274 }
7275
7276 trsp->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7277 trsp->bde.buffer_length = sge[0].buffer_length;
7278 trsp->bde.u.data.buffer_address_low = sge[0].buffer_address_low;
7279 trsp->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
7280
7281 trsp->fcp_response_length = rsp_len;
7282 }
7283
7284 if (flags & SLI4_IO_CONTINUATION) {
7285 trsp->xc = TRUE;
7286 }
7287
7288 if (rnode->node_group) {
7289 trsp->hlm = TRUE;
7290 trsp->dword5 = rnode->fc_id & 0x00ffffff;
7291 }
7292
7293 trsp->xri_tag = xri;
7294 trsp->rpi = rpi;
7295
7296 trsp->command = SLI4_WQE_FCP_TRSP64;
7297 trsp->class = SLI4_ELS_REQUEST64_CLASS_3;
7298
7299 trsp->remote_xid = xid;
7300 trsp->request_tag = tag;
7301 trsp->dnrx = ((flags & SLI4_IO_DNRX) == 0 ? 0 : 1);
7302 trsp->len_loc = 0x1;
7303 trsp->cq_id = cq_id;
7304 trsp->cmd_type = SLI4_CMD_FCP_TRSP64_WQE;
7305
7306 /* The upper 7 bits of csctl are the priority */
7307 if (csctl & SLI4_MASK_CCP) {
7308 trsp->ccpe = 1;
7309 trsp->ccp = (csctl & SLI4_MASK_CCP);
7310 }
7311
7312 if (app_id && (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) && !trsp->eat) {
7313 trsp->app_id_valid = 1;
7314 trsp->wqes = 1;
7315 trsp_128->dw[31] = app_id;
7316 }
7317 return 0;
7318 }
7319
7320 /**
7321 * @ingroup sli_fc
7322 * @brief Write an FCP_TSEND64_WQE work queue entry.
7323 *
7324 * @param sli4 SLI context.
7325 * @param buf Destination buffer for the WQE.
7326 * @param size Buffer size, in bytes.
7327 * @param sgl DMA memory for the scatter gather list.
7328 * @param first_data_sge Index of first data sge (used if perf hints are enabled)
7329 * @param relative_off Relative offset of the IO (if any).
7330 * @param xfer_len Data transfer length.
7331 * @param xri XRI for this exchange.
7332 * @param tag IO tag value.
7333 * @param cq_id The id of the completion queue where the WQE response is sent.
7334 * @param xid OX_ID for the exchange.
7335 * @param rpi remote node indicator (RPI)
7336 * @param rnode Destination request (i.e. remote node).
7337 * @param flags Optional attributes, including:
7338 * - ACTIVE - IO is already active.
7339 * - AUTO RSP - Automatically generate a good FCP_RSP.
7340 * @param dif T10 DIF operation, or 0 to disable.
7341 * @param bs T10 DIF block size, or 0 if DIF is disabled.
7342 * @param csctl value of csctl field.
7343 * @param app_id value for VM application header.
7344 *
7345 * @return Returns 0 on success, or a non-zero value on failure.
7346 */
7347 int32_t
7348 sli_fcp_tsend64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge,
7349 uint32_t relative_off, uint32_t xfer_len,
7350 uint16_t xri, uint16_t tag, uint16_t cq_id, uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode,
7351 uint32_t flags, uint8_t dif, uint8_t bs, uint8_t csctl, uint32_t app_id)
7352 {
7353 sli4_fcp_tsend64_wqe_t *tsend = buf;
7354 sli4_fcp_128byte_wqe_t *tsend_128 = buf;
7355 sli4_sge_t *sge = NULL;
7356
7357 ocs_memset(buf, 0, size);
7358
7359 if (!sgl || !sgl->virt) {
7360 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
7361 sgl, sgl ? sgl->virt : NULL);
7362 return -1;
7363 }
7364 sge = sgl->virt;
7365
7366 if (sli4->config.sgl_pre_registered) {
7367 tsend->xbl = FALSE;
7368
7369 tsend->dbde = TRUE;
7370 tsend->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7371
7372 /* TSEND64_WQE specifies that the first two SGEs are skipped
7373 * (i.e. the 3rd SGE is valid) */
7374 tsend->bde.buffer_length = sge[2].buffer_length;
7375 tsend->bde.u.data.buffer_address_low = sge[2].buffer_address_low;
7376 tsend->bde.u.data.buffer_address_high = sge[2].buffer_address_high;
7377 } else {
7378 tsend->xbl = TRUE;
7379
7380 /* if data is a single physical address, use a BDE */
7381 if (!dif && (xfer_len <= sge[2].buffer_length)) {
7382 tsend->dbde = TRUE;
7383 tsend->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7384 /* TSEND64_WQE specifies that the first two SGEs are skipped
7385 * (i.e. the 3rd SGE is valid) */
7386 tsend->bde.buffer_length = sge[2].buffer_length;
7387 tsend->bde.u.data.buffer_address_low = sge[2].buffer_address_low;
7388 tsend->bde.u.data.buffer_address_high = sge[2].buffer_address_high;
7389 } else {
7390 tsend->bde.bde_type = SLI4_BDE_TYPE_BLP;
7391 tsend->bde.buffer_length = sgl->size;
7392 tsend->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
7393 tsend->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
7394 }
7395 }
7396
7397 tsend->relative_offset = relative_off;
7398
7399 if (flags & SLI4_IO_CONTINUATION) {
7400 tsend->xc = TRUE;
7401 }
7402 tsend->xri_tag = xri;
7403
7404 tsend->rpi = rpi;
7405
7406 tsend->pu = TRUE; /* WQE uses relative offset */
7407
7408 if (flags & SLI4_IO_AUTO_GOOD_RESPONSE) {
7409 tsend->ar = TRUE;
7410 }
7411
7412 tsend->command = SLI4_WQE_FCP_TSEND64;
7413 tsend->class = SLI4_ELS_REQUEST64_CLASS_3;
7414 tsend->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
7415 tsend->dif = dif;
7416 tsend->bs = bs;
7417
7418 tsend->remote_xid = xid;
7419
7420 tsend->request_tag = tag;
7421
7422 tsend->len_loc = 0x2;
7423
7424 if (rnode->node_group) {
7425 tsend->hlm = TRUE;
7426 tsend->dword5 = rnode->fc_id & 0x00ffffff;
7427 }
7428
7429 tsend->cq_id = cq_id;
7430
7431 tsend->cmd_type = SLI4_CMD_FCP_TSEND64_WQE;
7432
7433 tsend->fcp_data_transmit_length = xfer_len;
7434
7435 if (sli4->config.perf_hint) {
7436 tsend->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7437 tsend->first_data_bde.buffer_length = sge[first_data_sge].buffer_length;
7438 tsend->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low;
7439 tsend->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high;
7440 }
7441
7442 /* The upper 7 bits of csctl are the priority */
7443 if (csctl & SLI4_MASK_CCP) {
7444 tsend->ccpe = 1;
7445 tsend->ccp = (csctl & SLI4_MASK_CCP);
7446 }
7447
7448 if (app_id && (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) && !tsend->eat) {
7449 tsend->app_id_valid = 1;
7450 tsend->wqes = 1;
7451 tsend_128->dw[31] = app_id;
7452 }
7453 return 0;
7454 }
7455
7456 /**
7457 * @ingroup sli_fc
7458 * @brief Write a GEN_REQUEST64 work queue entry.
7459 *
7460 * @note This WQE is only used to send FC-CT commands.
7461 *
7462 * @param sli4 SLI context.
7463 * @param buf Destination buffer for the WQE.
7464 * @param size Buffer size, in bytes.
7465 * @param sgl DMA memory for the request.
7466 * @param req_len Length of request.
7467 * @param max_rsp_len Max length of response.
7468 * @param timeout Time, in seconds, before an IO times out. Zero means infinite.
7469 * @param xri XRI for this exchange.
7470 * @param tag IO tag value.
7471 * @param cq_id The id of the completion queue where the WQE response is sent.
7472 * @param rnode Destination of request (that is, the remote node).
7473 * @param r_ctl R_CTL value for sequence.
7474 * @param type TYPE value for sequence.
7475 * @param df_ctl DF_CTL value for sequence.
7476 *
7477 * @return Returns 0 on success, or a non-zero value on failure.
7478 */
7479 int32_t
7480 sli_gen_request64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl,
7481 uint32_t req_len, uint32_t max_rsp_len, uint8_t timeout,
7482 uint16_t xri, uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode,
7483 uint8_t r_ctl, uint8_t type, uint8_t df_ctl)
7484 {
7485 sli4_gen_request64_wqe_t *gen = buf;
7486 sli4_sge_t *sge = NULL;
7487
7488 ocs_memset(buf, 0, size);
7489
7490 if (!sgl || !sgl->virt) {
7491 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
7492 sgl, sgl ? sgl->virt : NULL);
7493 return -1;
7494 }
7495 sge = sgl->virt;
7496
7497 if (sli4->config.sgl_pre_registered) {
7498 gen->xbl = FALSE;
7499
7500 gen->dbde = TRUE;
7501 gen->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7502
7503 gen->bde.buffer_length = req_len;
7504 gen->bde.u.data.buffer_address_low = sge[0].buffer_address_low;
7505 gen->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
7506 } else {
7507 gen->xbl = TRUE;
7508
7509 gen->bde.bde_type = SLI4_BDE_TYPE_BLP;
7510
7511 gen->bde.buffer_length = 2 * sizeof(sli4_sge_t);
7512 gen->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
7513 gen->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
7514 }
7515
7516 gen->request_payload_length = req_len;
7517 gen->max_response_payload_length = max_rsp_len;
7518
7519 gen->df_ctl = df_ctl;
7520 gen->type = type;
7521 gen->r_ctl = r_ctl;
7522
7523 gen->xri_tag = xri;
7524
7525 gen->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
7526 gen->context_tag = rnode->indicator;
7527
7528 gen->class = SLI4_ELS_REQUEST64_CLASS_3;
7529
7530 gen->command = SLI4_WQE_GEN_REQUEST64;
7531
7532 gen->timer = timeout;
7533
7534 gen->request_tag = tag;
7535
7536 gen->iod = SLI4_ELS_REQUEST64_DIR_READ;
7537
7538 gen->qosd = TRUE;
7539
7540 if (rnode->node_group) {
7541 gen->hlm = TRUE;
7542 gen->remote_n_port_id = rnode->fc_id & 0x00ffffff;
7543 }
7544
7545 gen->cmd_type = SLI4_CMD_GEN_REQUEST64_WQE;
7546
7547 gen->cq_id = cq_id;
7548
7549 return 0;
7550 }
7551
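/*
 * Illustrative sketch (not compiled): issuing an FC-CT request (for example a
 * name server query) with GEN_REQUEST64. The SGL is assumed to hold the CT
 * request in SGE[0] and the response buffer in SGE[1]; R_CTL 0x02 (unsolicited
 * control) and TYPE 0x20 (FC-CT) are the usual values for CT requests, but the
 * names and sizes here are hypothetical.
 */
#if 0
static int32_t
example_send_ct_request(sli4_t *sli4, uint8_t *wqe, size_t wqe_size,
			ocs_dma_t *sgl, uint32_t req_len, uint32_t max_rsp_len,
			uint16_t xri, uint16_t tag, uint16_t cq_id,
			ocs_remote_node_t *ns_rnode)
{
	return sli_gen_request64_wqe(sli4, wqe, wqe_size, sgl,
				     req_len, max_rsp_len,
				     30,		/* 30 second timeout */
				     xri, tag, cq_id, ns_rnode,
				     0x02,		/* R_CTL: unsolicited control */
				     0x20,		/* TYPE: FC-CT */
				     0);		/* DF_CTL */
}
#endif
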
7552 /**
7553 * @ingroup sli_fc
7554 * @brief Write a SEND_FRAME work queue entry
7555 *
7556 * @param sli4 SLI context.
7557 * @param buf Destination buffer for the WQE.
7558 * @param size Buffer size, in bytes.
7559 * @param sof Start of frame value
7560 * @param eof End of frame value
7561 * @param hdr Pointer to FC header data
7562 * @param payload DMA memory for the payload.
7563 * @param req_len Length of payload.
7564 * @param timeout Time, in seconds, before an IO times out. Zero means infinite.
7565 * @param xri XRI for this exchange.
7566 * @param req_tag IO tag value.
7567 *
7568 * @return Returns 0 on success, or a non-zero value on failure.
7569 */
7570 int32_t
7571 sli_send_frame_wqe(sli4_t *sli4, void *buf, size_t size, uint8_t sof, uint8_t eof, uint32_t *hdr,
7572 ocs_dma_t *payload, uint32_t req_len, uint8_t timeout,
7573 uint16_t xri, uint16_t req_tag)
7574 {
7575 sli4_send_frame_wqe_t *sf = buf;
7576
7577 ocs_memset(buf, 0, size);
7578
7579 sf->dbde = TRUE;
7580 sf->bde.buffer_length = req_len;
7581 sf->bde.u.data.buffer_address_low = ocs_addr32_lo(payload->phys);
7582 sf->bde.u.data.buffer_address_high = ocs_addr32_hi(payload->phys);
7583
7584 /* Copy FC header */
7585 sf->fc_header_0_1[0] = hdr[0];
7586 sf->fc_header_0_1[1] = hdr[1];
7587 sf->fc_header_2_5[0] = hdr[2];
7588 sf->fc_header_2_5[1] = hdr[3];
7589 sf->fc_header_2_5[2] = hdr[4];
7590 sf->fc_header_2_5[3] = hdr[5];
7591
7592 sf->frame_length = req_len;
7593
7594 sf->xri_tag = xri;
7595 sf->pu = 0;
7596 sf->context_tag = 0;
7597
7598 sf->ct = 0;
7599 sf->command = SLI4_WQE_SEND_FRAME;
7600 sf->class = SLI4_ELS_REQUEST64_CLASS_3;
7601 sf->timer = timeout;
7602
7603 sf->request_tag = req_tag;
7604 sf->eof = eof;
7605 sf->sof = sof;
7606
7607 sf->qosd = 0;
7608 sf->lenloc = 1;
7609 sf->xc = 0;
7610
7611 sf->xbl = 1;
7612
7613 sf->cmd_type = SLI4_CMD_SEND_FRAME_WQE;
7614 sf->cq_id = 0xffff;
7615
7616 return 0;
7617 }
7618
7619 /**
7620 * @ingroup sli_fc
7621 * @brief Write a XMIT_SEQUENCE64 work queue entry.
7622 *
7623 * This WQE is used to send FC-CT response frames.
7624 *
7625 * @note This API implements a restricted use of this WQE; a TODO would be to
7626 * pass in the sequence initiative and full SGLs.
7627 *
7628 * @param sli4 SLI context.
7629 * @param buf Destination buffer for the WQE.
7630 * @param size Buffer size, in bytes.
7631 * @param payload DMA memory for the request.
7632 * @param payload_len Length of request.
7633 * @param timeout Time, in seconds, before an IO times out. Zero means infinite.
7634 * @param ox_id originator exchange ID
7635 * @param xri XRI for this exchange.
7636 * @param tag IO tag value.
7637 * @param rnode Destination of request (that is, the remote node).
7638 * @param r_ctl R_CTL value for sequence.
7639 * @param type TYPE value for sequence.
7640 * @param df_ctl DF_CTL value for sequence.
7641 *
7642 * @return Returns 0 on success, or a non-zero value on failure.
7643 */
7644 int32_t
7645 sli_xmit_sequence64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *payload,
7646 uint32_t payload_len, uint8_t timeout, uint16_t ox_id,
7647 uint16_t xri, uint16_t tag, ocs_remote_node_t *rnode,
7648 uint8_t r_ctl, uint8_t type, uint8_t df_ctl)
7649 {
7650 sli4_xmit_sequence64_wqe_t *xmit = buf;
7651
7652 ocs_memset(buf, 0, size);
7653
7654 if ((payload == NULL) || (payload->virt == NULL)) {
7655 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
7656 payload, payload ? payload->virt : NULL);
7657 return -1;
7658 }
7659
7660 if (sli4->config.sgl_pre_registered) {
7661 xmit->dbde = TRUE;
7662 } else {
7663 xmit->xbl = TRUE;
7664 }
7665
7666 xmit->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7667 xmit->bde.buffer_length = payload_len;
7668 xmit->bde.u.data.buffer_address_low = ocs_addr32_lo(payload->phys);
7669 xmit->bde.u.data.buffer_address_high = ocs_addr32_hi(payload->phys);
7670 xmit->sequence_payload_len = payload_len;
7671
7672 xmit->remote_n_port_id = rnode->fc_id & 0x00ffffff;
7673
7674 xmit->relative_offset = 0;
7675
7676 xmit->si = 0; /* sequence initiative - this matches what is seen from
7677 * FC switches in response to FCGS commands */
7678 xmit->ft = 0; /* force transmit */
7679 xmit->xo = 0; /* exchange responder */
7680 xmit->ls = 1; /* last in sequence */
7681 xmit->df_ctl = df_ctl;
7682 xmit->type = type;
7683 xmit->r_ctl = r_ctl;
7684
7685 xmit->xri_tag = xri;
7686 xmit->context_tag = rnode->indicator;
7687
7688 xmit->dif = 0;
7689 xmit->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
7690 xmit->bs = 0;
7691
7692 xmit->command = SLI4_WQE_XMIT_SEQUENCE64;
7693 xmit->class = SLI4_ELS_REQUEST64_CLASS_3;
7694 xmit->pu = 0;
7695 xmit->timer = timeout;
7696
7697 xmit->abort_tag = 0;
7698 xmit->request_tag = tag;
7699 xmit->remote_xid = ox_id;
7700
7701 xmit->iod = SLI4_ELS_REQUEST64_DIR_READ;
7702
7703 if (rnode->node_group) {
7704 xmit->hlm = TRUE;
7705 xmit->remote_n_port_id = rnode->fc_id & 0x00ffffff;
7706 }
7707
7708 xmit->cmd_type = SLI4_CMD_XMIT_SEQUENCE64_WQE;
7709
7710 xmit->len_loc = 2;
7711
7712 xmit->cq_id = 0xFFFF;
7713
7714 return 0;
7715 }
7716
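/*
 * Illustrative sketch (not compiled): returning an FC-CT response with
 * XMIT_SEQUENCE64, reusing the OX_ID of the received CT request. R_CTL 0x03
 * (solicited control) and TYPE 0x20 (FC-CT) are typical for CT responses;
 * names and values are hypothetical.
 */
#if 0
static int32_t
example_send_ct_response(sli4_t *sli4, uint8_t *wqe, size_t wqe_size,
			 ocs_dma_t *payload, uint32_t payload_len,
			 uint16_t ox_id, uint16_t xri, uint16_t tag,
			 ocs_remote_node_t *rnode)
{
	return sli_xmit_sequence64_wqe(sli4, wqe, wqe_size, payload, payload_len,
				       30,	/* 30 second timeout */
				       ox_id, xri, tag, rnode,
				       0x03,	/* R_CTL: solicited control */
				       0x20,	/* TYPE: FC-CT */
				       0);	/* DF_CTL */
}
#endif
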
7717 /**
7718 * @ingroup sli_fc
7719 * @brief Write a REQUEUE_XRI_WQE work queue entry.
7720 *
7721 * @param sli4 SLI context.
7722 * @param buf Destination buffer for the WQE.
7723 * @param size Buffer size, in bytes.
7724 * @param xri XRI for this exchange.
7725 * @param tag IO tag value.
7726 * @param cq_id The id of the completion queue where the WQE response is sent.
7727 *
7728 * @return Returns 0 on success, or a non-zero value on failure.
7729 */
7730 int32_t
7731 sli_requeue_xri_wqe(sli4_t *sli4, void *buf, size_t size, uint16_t xri, uint16_t tag, uint16_t cq_id)
7732 {
7733 sli4_requeue_xri_wqe_t *requeue = buf;
7734
7735 ocs_memset(buf, 0, size);
7736
7737 requeue->command = SLI4_WQE_REQUEUE_XRI;
7738 requeue->xri_tag = xri;
7739 requeue->request_tag = tag;
7740 requeue->xc = 1;
7741 requeue->qosd = 1;
7742 requeue->cq_id = cq_id;
7743 requeue->cmd_type = SLI4_CMD_REQUEUE_XRI_WQE;
7744 return 0;
7745 }
7746
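/**
 * @ingroup sli_fc
 * @brief Write an XMIT_BCAST64 work queue entry.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the WQE.
 * @param size Buffer size, in bytes.
 * @param payload DMA memory for the sequence payload.
 * @param payload_len Length of the payload, in bytes.
 * @param timeout Time, in seconds, before an IO times out. Zero means infinite.
 * @param xri XRI for this exchange.
 * @param tag IO tag value.
 * @param cq_id The id of the completion queue where the WQE response is sent.
 * @param rnode Destination of the broadcast (requires a temporary RPI, i.e. an unattached remote node).
 * @param r_ctl R_CTL value for the sequence.
 * @param type TYPE value for the sequence.
 * @param df_ctl DF_CTL value for the sequence.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */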
7747 int32_t
7748 sli_xmit_bcast64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *payload,
7749 uint32_t payload_len, uint8_t timeout, uint16_t xri, uint16_t tag,
7750 uint16_t cq_id, ocs_remote_node_t *rnode,
7751 uint8_t r_ctl, uint8_t type, uint8_t df_ctl)
7752 {
7753 sli4_xmit_bcast64_wqe_t *bcast = buf;
7754
7755 /* Command requires a temporary RPI (i.e. unused remote node) */
7756 if (rnode->attached) {
7757 ocs_log_test(sli4->os, "remote node %d in use\n", rnode->indicator);
7758 return -1;
7759 }
7760
7761 ocs_memset(buf, 0, size);
7762
7763 bcast->dbde = TRUE;
7764 bcast->sequence_payload.bde_type = SLI4_BDE_TYPE_BDE_64;
7765 bcast->sequence_payload.buffer_length = payload_len;
7766 bcast->sequence_payload.u.data.buffer_address_low = ocs_addr32_lo(payload->phys);
7767 bcast->sequence_payload.u.data.buffer_address_high = ocs_addr32_hi(payload->phys);
7768
7769 bcast->sequence_payload_length = payload_len;
7770
7771 bcast->df_ctl = df_ctl;
7772 bcast->type = type;
7773 bcast->r_ctl = r_ctl;
7774
7775 bcast->xri_tag = xri;
7776
7777 bcast->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
7778 bcast->context_tag = rnode->sport->indicator;
7779
7780 bcast->class = SLI4_ELS_REQUEST64_CLASS_3;
7781
7782 bcast->command = SLI4_WQE_XMIT_BCAST64;
7783
7784 bcast->timer = timeout;
7785
7786 bcast->request_tag = tag;
7787
7788 bcast->temporary_rpi = rnode->indicator;
7789
7790 bcast->len_loc = 0x1;
7791
7792 bcast->iod = SLI4_ELS_REQUEST64_DIR_WRITE;
7793
7794 bcast->cmd_type = SLI4_CMD_XMIT_BCAST64_WQE;
7795
7796 bcast->cq_id = cq_id;
7797
7798 return 0;
7799 }
7800
7801 /**
7802 * @ingroup sli_fc
7803 * @brief Write an XMIT_BLS_RSP64_WQE work queue entry.
7804 *
7805 * @param sli4 SLI context.
7806 * @param buf Destination buffer for the WQE.
7807 * @param size Buffer size, in bytes.
7808 * @param payload Contents of the BLS payload to be sent.
7809 * @param xri XRI for this exchange.
7810 * @param tag IO tag value.
7811 * @param cq_id The id of the completion queue where the WQE response is sent.
7812 * @param rnode Destination of request (that is, the remote node).
7813 * @param s_id Source ID to use in the response. If UINT32_MAX, use SLI Port's ID.
7814 *
7815 * @return Returns 0 on success, or a non-zero value on failure.
7816 */
7817 int32_t
7818 sli_xmit_bls_rsp64_wqe(sli4_t *sli4, void *buf, size_t size, sli_bls_payload_t *payload,
7819 uint16_t xri, uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode, uint32_t s_id)
7820 {
7821 sli4_xmit_bls_rsp_wqe_t *bls = buf;
7822
7823 /*
7824 * Callers can either specify RPI or S_ID, but not both
7825 */
7826 if (rnode->attached && (s_id != UINT32_MAX)) {
7827 ocs_log_test(sli4->os, "S_ID specified for attached remote node %d\n",
7828 rnode->indicator);
7829 return -1;
7830 }
7831
7832 ocs_memset(buf, 0, size);
7833
7834 if (SLI_BLS_ACC == payload->type) {
7835 bls->payload_word0 = (payload->u.acc.seq_id_last << 16) |
7836 (payload->u.acc.seq_id_validity << 24);
7837 bls->high_seq_cnt = payload->u.acc.high_seq_cnt;
7838 bls->low_seq_cnt = payload->u.acc.low_seq_cnt;
7839 } else if (SLI_BLS_RJT == payload->type) {
7840 bls->payload_word0 = *((uint32_t *)&payload->u.rjt);
7841 bls->ar = TRUE;
7842 } else {
7843 ocs_log_test(sli4->os, "bad BLS type %#x\n",
7844 payload->type);
7845 return -1;
7846 }
7847
7848 bls->ox_id = payload->ox_id;
7849 bls->rx_id = payload->rx_id;
7850
7851 if (rnode->attached) {
7852 bls->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
7853 bls->context_tag = rnode->indicator;
7854 } else {
7855 bls->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
7856 bls->context_tag = rnode->sport->indicator;
7857
7858 if (UINT32_MAX != s_id) {
7859 bls->local_n_port_id = s_id & 0x00ffffff;
7860 } else {
7861 bls->local_n_port_id = rnode->sport->fc_id & 0x00ffffff;
7862 }
7863 bls->remote_id = rnode->fc_id & 0x00ffffff;
7864
7865 bls->temporary_rpi = rnode->indicator;
7866 }
7867
7868 bls->xri_tag = xri;
7869
7870 bls->class = SLI4_ELS_REQUEST64_CLASS_3;
7871
7872 bls->command = SLI4_WQE_XMIT_BLS_RSP;
7873
7874 bls->request_tag = tag;
7875
7876 bls->qosd = TRUE;
7877
7878 if (rnode->node_group) {
7879 bls->hlm = TRUE;
7880 bls->remote_id = rnode->fc_id & 0x00ffffff;
7881 }
7882
7883 bls->cq_id = cq_id;
7884
7885 bls->cmd_type = SLI4_CMD_XMIT_BLS_RSP64_WQE;
7886
7887 return 0;
7888 }
7889
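/*
 * Illustrative sketch (not compiled): accepting an ABTS by sending a BA_ACC
 * with XMIT_BLS_RSP64. Passing UINT32_MAX for s_id tells the routine to use
 * the SLI port's own ID. The payload field values shown (no valid sequence ID,
 * 0xffff sequence counts) are conventional BA_ACC contents, but all names here
 * are hypothetical.
 */
#if 0
static int32_t
example_send_ba_acc(sli4_t *sli4, uint8_t *wqe, size_t wqe_size,
		    uint16_t ox_id, uint16_t rx_id, uint16_t xri,
		    uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode)
{
	sli_bls_payload_t bls;

	ocs_memset(&bls, 0, sizeof(bls));
	bls.type = SLI_BLS_ACC;
	bls.ox_id = ox_id;
	bls.rx_id = rx_id;
	bls.u.acc.seq_id_validity = 0;		/* sequence ID not valid */
	bls.u.acc.seq_id_last = 0;
	bls.u.acc.high_seq_cnt = UINT16_MAX;
	bls.u.acc.low_seq_cnt = UINT16_MAX;

	return sli_xmit_bls_rsp64_wqe(sli4, wqe, wqe_size, &bls, xri, tag,
				      cq_id, rnode, UINT32_MAX);
}
#endif
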
7890 /**
7891 * @ingroup sli_fc
7892 * @brief Write a XMIT_ELS_RSP64_WQE work queue entry.
7893 *
7894 * @param sli4 SLI context.
7895 * @param buf Destination buffer for the WQE.
7896 * @param size Buffer size, in bytes.
7897 * @param rsp DMA memory for the ELS response.
7898 * @param rsp_len Length of ELS response, in bytes.
7899 * @param xri XRI for this exchange.
7900 * @param tag IO tag value.
7901 * @param cq_id The id of the completion queue where the WQE response is sent.
7902 * @param ox_id OX_ID of the exchange containing the request.
7903 * @param rnode Destination of the ELS response (that is, the remote node).
7904 * @param flags Optional attributes, including:
7905 * - SLI4_IO_CONTINUATION - IO is already active.
7906 * @param s_id S_ID used for special responses.
7907 *
7908 * @return Returns 0 on success, or a non-zero value on failure.
7909 */
7910 int32_t
7911 sli_xmit_els_rsp64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *rsp,
7912 uint32_t rsp_len, uint16_t xri, uint16_t tag, uint16_t cq_id,
7913 uint16_t ox_id, ocs_remote_node_t *rnode, uint32_t flags, uint32_t s_id)
7914 {
7915 sli4_xmit_els_rsp64_wqe_t *els = buf;
7916
7917 ocs_memset(buf, 0, size);
7918
7919 if (sli4->config.sgl_pre_registered) {
7920 els->dbde = TRUE;
7921 } else {
7922 els->xbl = TRUE;
7923 }
7924
7925 els->els_response_payload.bde_type = SLI4_BDE_TYPE_BDE_64;
7926 els->els_response_payload.buffer_length = rsp_len;
7927 els->els_response_payload.u.data.buffer_address_low = ocs_addr32_lo(rsp->phys);
7928 els->els_response_payload.u.data.buffer_address_high = ocs_addr32_hi(rsp->phys);
7929
7930 els->els_response_payload_length = rsp_len;
7931
7932 els->xri_tag = xri;
7933
7934 els->class = SLI4_ELS_REQUEST64_CLASS_3;
7935
7936 els->command = SLI4_WQE_ELS_RSP64;
7937
7938 els->request_tag = tag;
7939
7940 els->ox_id = ox_id;
7941
7942 els->iod = SLI4_ELS_REQUEST64_DIR_WRITE;
7943
7944 els->qosd = TRUE;
7945
7946 if (flags & SLI4_IO_CONTINUATION) {
7947 els->xc = TRUE;
7948 }
7949
7950 if (rnode->attached) {
7951 els->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
7952 els->context_tag = rnode->indicator;
7953 } else {
7954 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
7955 els->context_tag = rnode->sport->indicator;
7956 els->remote_id = rnode->fc_id & 0x00ffffff;
7957 els->temporary_rpi = rnode->indicator;
7958 if (UINT32_MAX != s_id) {
7959 els->sp = TRUE;
7960 els->s_id = s_id & 0x00ffffff;
7961 }
7962 }
7963
7964 if (rnode->node_group) {
7965 els->hlm = TRUE;
7966 els->remote_id = rnode->fc_id & 0x00ffffff;
7967 }
7968
7969 els->cmd_type = SLI4_ELS_REQUEST64_CMD_GEN;
7970
7971 els->cq_id = cq_id;
7972
7973 return 0;
7974 }
7975
7976 /**
7977 * @ingroup sli_fc
7978 * @brief Process an asynchronous Link State event entry.
7979 *
7980 * @par Description
7981 * Parses Asynchronous Completion Queue Entry (ACQE),
7982 * creates an abstracted event, and calls registered callback functions.
7983 *
7984 * @param sli4 SLI context.
7985 * @param acqe Pointer to the ACQE.
7986 *
7987 * @return Returns 0 on success, or a non-zero value on failure.
7988 */
7989 int32_t
7990 sli_fc_process_link_state(sli4_t *sli4, void *acqe)
7991 {
7992 sli4_link_state_t *link_state = acqe;
7993 sli4_link_event_t event = { 0 };
7994 int32_t rc = 0;
7995
7996 if (!sli4->link) {
7997 /* bail if there is no callback */
7998 return 0;
7999 }
8000
8001 if (SLI4_LINK_TYPE_ETHERNET == link_state->link_type) {
8002 event.topology = SLI_LINK_TOPO_NPORT;
8003 event.medium = SLI_LINK_MEDIUM_ETHERNET;
8004 } else {
8005 /* TODO is this supported for anything other than FCoE? */
8006 ocs_log_test(sli4->os, "unsupported link type %#x\n",
8007 link_state->link_type);
8008 event.topology = SLI_LINK_TOPO_MAX;
8009 event.medium = SLI_LINK_MEDIUM_MAX;
8010 rc = -1;
8011 }
8012
8013 switch (link_state->port_link_status) {
8014 case SLI4_PORT_LINK_STATUS_PHYSICAL_DOWN:
8015 case SLI4_PORT_LINK_STATUS_LOGICAL_DOWN:
8016 event.status = SLI_LINK_STATUS_DOWN;
8017 break;
8018 case SLI4_PORT_LINK_STATUS_PHYSICAL_UP:
8019 case SLI4_PORT_LINK_STATUS_LOGICAL_UP:
8020 event.status = SLI_LINK_STATUS_UP;
8021 break;
8022 default:
8023 ocs_log_test(sli4->os, "unsupported link status %#x\n",
8024 link_state->port_link_status);
8025 event.status = SLI_LINK_STATUS_MAX;
8026 rc = -1;
8027 }
8028
8029 switch (link_state->port_speed) {
8030 case 0:
8031 event.speed = 0;
8032 break;
8033 case 1:
8034 event.speed = 10;
8035 break;
8036 case 2:
8037 event.speed = 100;
8038 break;
8039 case 3:
8040 event.speed = 1000;
8041 break;
8042 case 4:
8043 event.speed = 10000;
8044 break;
8045 case 5:
8046 event.speed = 20000;
8047 break;
8048 case 6:
8049 event.speed = 25000;
8050 break;
8051 case 7:
8052 event.speed = 40000;
8053 break;
8054 case 8:
8055 event.speed = 100000;
8056 break;
8057 default:
8058 ocs_log_test(sli4->os, "unsupported port_speed %#x\n",
8059 link_state->port_speed);
8060 rc = -1;
8061 }
8062
8063 sli4->link(sli4->link_arg, (void *)&event);
8064
8065 return rc;
8066 }
8067
8068 /**
8069 * @ingroup sli_fc
8070 * @brief Process an asynchronous Link Attention event entry.
8071 *
8072 * @par Description
8073 * Parses Asynchronous Completion Queue Entry (ACQE),
8074 * creates an abstracted event, and calls the registered callback functions.
8075 *
8076 * @param sli4 SLI context.
8077 * @param acqe Pointer to the ACQE.
8078 *
8079 * @todo XXX all events return LINK_UP.
8080 *
8081 * @return Returns 0 on success, or a non-zero value on failure.
8082 */
8083 int32_t
8084 sli_fc_process_link_attention(sli4_t *sli4, void *acqe)
8085 {
8086 sli4_link_attention_t *link_attn = acqe;
8087 sli4_link_event_t event = { 0 };
8088
8089 ocs_log_debug(sli4->os, "link_number=%d attn_type=%#x topology=%#x port_speed=%#x "
8090 "port_fault=%#x shared_link_status=%#x logical_link_speed=%#x "
8091 "event_tag=%#x\n", link_attn->link_number, link_attn->attn_type,
8092 link_attn->topology, link_attn->port_speed, link_attn->port_fault,
8093 link_attn->shared_link_status, link_attn->logical_link_speed,
8094 link_attn->event_tag);
8095
8096 if (!sli4->link) {
8097 return 0;
8098 }
8099
8100 event.medium = SLI_LINK_MEDIUM_FC;
8101
8102 switch (link_attn->attn_type) {
8103 case SLI4_LINK_ATTN_TYPE_LINK_UP:
8104 event.status = SLI_LINK_STATUS_UP;
8105 break;
8106 case SLI4_LINK_ATTN_TYPE_LINK_DOWN:
8107 event.status = SLI_LINK_STATUS_DOWN;
8108 break;
8109 case SLI4_LINK_ATTN_TYPE_NO_HARD_ALPA:
8110 ocs_log_debug(sli4->os, "attn_type: no hard alpa\n");
8111 event.status = SLI_LINK_STATUS_NO_ALPA;
8112 break;
8113 default:
8114 ocs_log_test(sli4->os, "attn_type: unknown\n");
8115 break;
8116 }
8117
8118 switch (link_attn->event_type) {
8119 case SLI4_FC_EVENT_LINK_ATTENTION:
8120 break;
8121 case SLI4_FC_EVENT_SHARED_LINK_ATTENTION:
8122 ocs_log_debug(sli4->os, "event_type: FC shared link event\n");
8123 break;
8124 default:
8125 ocs_log_test(sli4->os, "event_type: unknown\n");
8126 break;
8127 }
8128
8129 switch (link_attn->topology) {
8130 case SLI4_LINK_ATTN_P2P:
8131 event.topology = SLI_LINK_TOPO_NPORT;
8132 break;
8133 case SLI4_LINK_ATTN_FC_AL:
8134 event.topology = SLI_LINK_TOPO_LOOP;
8135 break;
8136 case SLI4_LINK_ATTN_INTERNAL_LOOPBACK:
8137 ocs_log_debug(sli4->os, "topology Internal loopback\n");
8138 event.topology = SLI_LINK_TOPO_LOOPBACK_INTERNAL;
8139 break;
8140 case SLI4_LINK_ATTN_SERDES_LOOPBACK:
8141 ocs_log_debug(sli4->os, "topology serdes loopback\n");
8142 event.topology = SLI_LINK_TOPO_LOOPBACK_EXTERNAL;
8143 break;
8144 default:
8145 ocs_log_test(sli4->os, "topology: unknown\n");
8146 break;
8147 }
8148
8149 event.speed = link_attn->port_speed * 1000;
8150
8151 sli4->link(sli4->link_arg, (void *)&event);
8152
8153 return 0;
8154 }
8155
8156 /**
8157 * @ingroup sli_fc
8158 * @brief Parse an FC/FCoE work queue CQ entry.
8159 *
8160 * @param sli4 SLI context.
8161 * @param cq CQ to process.
8162 * @param cqe Pointer to the CQ entry.
8163 * @param etype CQ event type.
8164 * @param r_id Resource ID associated with this completion message (such as the IO tag).
8165 *
8166 * @return Returns 0 on success, or a non-zero value on failure.
8167 */
8168 int32_t
8169 sli_fc_cqe_parse(sli4_t *sli4, sli4_queue_t *cq, uint8_t *cqe, sli4_qentry_e *etype,
8170 uint16_t *r_id)
8171 {
8172 uint8_t code = cqe[SLI4_CQE_CODE_OFFSET];
8173 int32_t rc = -1;
8174
8175 switch (code) {
8176 case SLI4_CQE_CODE_WORK_REQUEST_COMPLETION:
8177 {
8178 sli4_fc_wcqe_t *wcqe = (void *)cqe;
8179
8180 *etype = SLI_QENTRY_WQ;
8181 *r_id = wcqe->request_tag;
8182 rc = wcqe->status;
8183
8184 /* Flag errors except for FCP_RSP_FAILURE */
8185 if (rc && (rc != SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE)) {
8186 ocs_log_test(sli4->os, "WCQE: status=%#x hw_status=%#x tag=%#x w1=%#x w2=%#x xb=%d\n",
8187 wcqe->status, wcqe->hw_status,
8188 wcqe->request_tag, wcqe->wqe_specific_1,
8189 wcqe->wqe_specific_2, wcqe->xb);
8190 ocs_log_test(sli4->os, " %08X %08X %08X %08X\n", ((uint32_t*) cqe)[0], ((uint32_t*) cqe)[1],
8191 ((uint32_t*) cqe)[2], ((uint32_t*) cqe)[3]);
8192 }
8193
8194 /* TODO: need to pass additional status back out of here as well
8195 * as status (could overload rc as status/addlstatus are only 8 bits each)
8196 */
8197
8198 break;
8199 }
8200 case SLI4_CQE_CODE_RQ_ASYNC:
8201 {
8202 sli4_fc_async_rcqe_t *rcqe = (void *)cqe;
8203
8204 *etype = SLI_QENTRY_RQ;
8205 *r_id = rcqe->rq_id;
8206 rc = rcqe->status;
8207 break;
8208 }
8209 case SLI4_CQE_CODE_RQ_ASYNC_V1:
8210 {
8211 sli4_fc_async_rcqe_v1_t *rcqe = (void *)cqe;
8212
8213 *etype = SLI_QENTRY_RQ;
8214 *r_id = rcqe->rq_id;
8215 rc = rcqe->status;
8216 break;
8217 }
8218 case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD:
8219 {
8220 sli4_fc_optimized_write_cmd_cqe_t *optcqe = (void *)cqe;
8221
8222 *etype = SLI_QENTRY_OPT_WRITE_CMD;
8223 *r_id = optcqe->rq_id;
8224 rc = optcqe->status;
8225 break;
8226 }
8227 case SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA:
8228 {
8229 sli4_fc_optimized_write_data_cqe_t *dcqe = (void *)cqe;
8230
8231 *etype = SLI_QENTRY_OPT_WRITE_DATA;
8232 *r_id = dcqe->xri;
8233 rc = dcqe->status;
8234
8235 /* Flag errors */
8236 if (rc != SLI4_FC_WCQE_STATUS_SUCCESS) {
8237 ocs_log_test(sli4->os, "Optimized DATA CQE: status=%#x hw_status=%#x xri=%#x dpl=%#x w3=%#x xb=%d\n",
8238 dcqe->status, dcqe->hw_status,
8239 dcqe->xri, dcqe->total_data_placed,
8240 ((uint32_t*) cqe)[3], dcqe->xb);
8241 }
8242 break;
8243 }
8244 case SLI4_CQE_CODE_RQ_COALESCING:
8245 {
8246 sli4_fc_coalescing_rcqe_t *rcqe = (void *)cqe;
8247
8248 *etype = SLI_QENTRY_RQ;
8249 *r_id = rcqe->rq_id;
8250 rc = rcqe->status;
8251 break;
8252 }
8253 case SLI4_CQE_CODE_XRI_ABORTED:
8254 {
8255 sli4_fc_xri_aborted_cqe_t *xa = (void *)cqe;
8256
8257 *etype = SLI_QENTRY_XABT;
8258 *r_id = xa->xri;
8259 rc = 0;
8260 break;
8261 }
8262 case SLI4_CQE_CODE_RELEASE_WQE: {
8263 sli4_fc_wqec_t *wqec = (void*) cqe;
8264
8265 *etype = SLI_QENTRY_WQ_RELEASE;
8266 *r_id = wqec->wq_id;
8267 rc = 0;
8268 break;
8269 }
8270 default:
8271 ocs_log_test(sli4->os, "CQE completion code %d not handled\n", code);
8272 *etype = SLI_QENTRY_MAX;
8273 *r_id = UINT16_MAX;
8274 }
8275
8276 return rc;
8277 }
8278
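/*
 * Illustrative sketch (not compiled): how a CQ handler might dispatch on the
 * parsed entry type. The cqe buffer is assumed to have already been dequeued
 * from the CQ by the caller; the handler name is hypothetical.
 */
#if 0
static void
example_handle_cqe(sli4_t *sli4, sli4_queue_t *cq, uint8_t *cqe)
{
	sli4_qentry_e etype = SLI_QENTRY_MAX;
	uint16_t rid = UINT16_MAX;
	int32_t status;

	status = sli_fc_cqe_parse(sli4, cq, cqe, &etype, &rid);

	switch (etype) {
	case SLI_QENTRY_WQ:		/* rid is the WQE request tag */
	case SLI_QENTRY_RQ:		/* rid is the RQ ID */
	case SLI_QENTRY_XABT:		/* rid is the aborted XRI */
	case SLI_QENTRY_WQ_RELEASE:	/* rid is the WQ ID */
	default:
		ocs_log_debug(sli4->os, "etype=%d rid=%#x status=%d\n",
			      etype, rid, status);
		break;
	}
}
#endif
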
8279 /**
8280 * @ingroup sli_fc
8281 * @brief Return the ELS/CT response length.
8282 *
8283 * @param sli4 SLI context.
8284 * @param cqe Pointer to the CQ entry.
8285 *
8286 * @return Returns the length, in bytes.
8287 */
8288 uint32_t
8289 sli_fc_response_length(sli4_t *sli4, uint8_t *cqe)
8290 {
8291 sli4_fc_wcqe_t *wcqe = (void *)cqe;
8292
8293 return wcqe->wqe_specific_1;
8294 }
8295
8296 /**
8297 * @ingroup sli_fc
8298 * @brief Return the FCP IO length.
8299 *
8300 * @param sli4 SLI context.
8301 * @param cqe Pointer to the CQ entry.
8302 *
8303 * @return Returns the length, in bytes.
8304 */
8305 uint32_t
8306 sli_fc_io_length(sli4_t *sli4, uint8_t *cqe)
8307 {
8308 sli4_fc_wcqe_t *wcqe = (void *)cqe;
8309
8310 return wcqe->wqe_specific_1;
8311 }
8312
8313 /**
8314 * @ingroup sli_fc
8315 * @brief Retrieve the D_ID from the completion.
8316 *
8317 * @param sli4 SLI context.
8318 * @param cqe Pointer to the CQ entry.
8319 * @param d_id Pointer where the D_ID is written.
8320 *
8321 * @return Returns 0 on success, or a non-zero value on failure.
8322 */
8323 int32_t
8324 sli_fc_els_did(sli4_t *sli4, uint8_t *cqe, uint32_t *d_id)
8325 {
8326 sli4_fc_wcqe_t *wcqe = (void *)cqe;
8327
8328 *d_id = 0;
8329
8330 if (wcqe->status) {
8331 return -1;
8332 } else {
8333 *d_id = wcqe->wqe_specific_2 & 0x00ffffff;
8334 return 0;
8335 }
8336 }
8337
8338 uint32_t
8339 sli_fc_ext_status(sli4_t *sli4, uint8_t *cqe)
8340 {
8341 sli4_fc_wcqe_t *wcqe = (void *)cqe;
8342 uint32_t mask;
8343
8344 switch (wcqe->status) {
8345 case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
8346 mask = UINT32_MAX;
8347 break;
8348 case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
8349 case SLI4_FC_WCQE_STATUS_CMD_REJECT:
8350 mask = 0xff;
8351 break;
8352 case SLI4_FC_WCQE_STATUS_NPORT_RJT:
8353 case SLI4_FC_WCQE_STATUS_FABRIC_RJT:
8354 case SLI4_FC_WCQE_STATUS_NPORT_BSY:
8355 case SLI4_FC_WCQE_STATUS_FABRIC_BSY:
8356 case SLI4_FC_WCQE_STATUS_LS_RJT:
8357 mask = UINT32_MAX;
8358 break;
8359 case SLI4_FC_WCQE_STATUS_DI_ERROR:
8360 mask = UINT32_MAX;
8361 break;
8362 default:
8363 mask = 0;
8364 }
8365
8366 return wcqe->wqe_specific_2 & mask;
8367 }
8368
8369 /**
8370 * @ingroup sli_fc
8371 * @brief Retrieve the RQ index from the completion.
8372 *
8373 * @param sli4 SLI context.
8374 * @param cqe Pointer to the CQ entry.
8375 * @param rq_id Pointer where the rq_id is written.
8376 * @param index Pointer where the index is written.
8377 *
8378 * @return Returns 0 on success, or a non-zero value on failure.
8379 */
8380 int32_t
8381 sli_fc_rqe_rqid_and_index(sli4_t *sli4, uint8_t *cqe, uint16_t *rq_id, uint32_t *index)
8382 {
8383 sli4_fc_async_rcqe_t *rcqe = (void *)cqe;
8384 sli4_fc_async_rcqe_v1_t *rcqe_v1 = (void *)cqe;
8385 int32_t rc = -1;
8386 uint8_t code = 0;
8387
8388 *rq_id = 0;
8389 *index = UINT32_MAX;
8390
8391 code = cqe[SLI4_CQE_CODE_OFFSET];
8392
8393 if (code == SLI4_CQE_CODE_RQ_ASYNC) {
8394 *rq_id = rcqe->rq_id;
8395 if (SLI4_FC_ASYNC_RQ_SUCCESS == rcqe->status) {
8396 *index = rcqe->rq_element_index;
8397 rc = 0;
8398 } else {
8399 *index = rcqe->rq_element_index;
8400 rc = rcqe->status;
8401 ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
8402 rcqe->status, sli_fc_get_status_string(rcqe->status), rcqe->rq_id,
8403 rcqe->rq_element_index, rcqe->payload_data_placement_length, rcqe->sof_byte,
8404 rcqe->eof_byte, rcqe->header_data_placement_length);
8405 }
8406 } else if (code == SLI4_CQE_CODE_RQ_ASYNC_V1) {
8407 *rq_id = rcqe_v1->rq_id;
8408 if (SLI4_FC_ASYNC_RQ_SUCCESS == rcqe_v1->status) {
8409 *index = rcqe_v1->rq_element_index;
8410 rc = 0;
8411 } else {
8412 *index = rcqe_v1->rq_element_index;
8413 rc = rcqe_v1->status;
8414 ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
8415 rcqe_v1->status, sli_fc_get_status_string(rcqe_v1->status),
8416 rcqe_v1->rq_id, rcqe_v1->rq_element_index,
8417 rcqe_v1->payload_data_placement_length, rcqe_v1->sof_byte,
8418 rcqe_v1->eof_byte, rcqe_v1->header_data_placement_length);
8419 }
8420 } else if (code == SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD) {
8421 sli4_fc_optimized_write_cmd_cqe_t *optcqe = (void *)cqe;
8422
8423 *rq_id = optcqe->rq_id;
8424 if (SLI4_FC_ASYNC_RQ_SUCCESS == optcqe->status) {
8425 *index = optcqe->rq_element_index;
8426 rc = 0;
8427 } else {
8428 *index = optcqe->rq_element_index;
8429 rc = optcqe->status;
8430 ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x pdpl=%x hdpl=%x oox=%d agxr=%d xri=0x%x rpi=0x%x\n",
8431 optcqe->status, sli_fc_get_status_string(optcqe->status), optcqe->rq_id,
8432 optcqe->rq_element_index, optcqe->payload_data_placement_length,
8433 optcqe->header_data_placement_length, optcqe->oox, optcqe->agxr, optcqe->xri,
8434 optcqe->rpi);
8435 }
8436 } else if (code == SLI4_CQE_CODE_RQ_COALESCING) {
8437 sli4_fc_coalescing_rcqe_t *rcqe = (void *)cqe;
8438
8439 *rq_id = rcqe->rq_id;
8440 if (SLI4_FC_COALESCE_RQ_SUCCESS == rcqe->status) {
8441 *index = rcqe->rq_element_index;
8442 rc = 0;
8443 } else {
8444 *index = UINT32_MAX;
8445 rc = rcqe->status;
8446
8447 ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x rq_id=%#x sdpl=%x\n",
8448 rcqe->status, sli_fc_get_status_string(rcqe->status), rcqe->rq_id,
8449 rcqe->rq_element_index, rcqe->rq_id, rcqe->sequence_reporting_placement_length);
8450 }
8451 } else {
8452 *index = UINT32_MAX;
8453
8454 rc = rcqe->status;
8455
8456 ocs_log_debug(sli4->os, "status=%02x rq_id=%d, index=%x pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
8457 rcqe->status, rcqe->rq_id, rcqe->rq_element_index, rcqe->payload_data_placement_length,
8458 rcqe->sof_byte, rcqe->eof_byte, rcqe->header_data_placement_length);
8459 }
8460
8461 return rc;
8462 }
8463
8464 /**
8465 * @ingroup sli_fc
8466 * @brief Process an asynchronous FCoE event entry.
8467 *
8468 * @par Description
8469 * Parses Asynchronous Completion Queue Entry (ACQE),
8470 * creates an abstracted event, and calls the registered callback functions.
8471 *
8472 * @param sli4 SLI context.
8473 * @param acqe Pointer to the ACQE.
8474 *
8475 * @return Returns 0 on success, or a non-zero value on failure.
8476 */
8477 int32_t
8478 sli_fc_process_fcoe(sli4_t *sli4, void *acqe)
8479 {
8480 sli4_fcoe_fip_t *fcoe = acqe;
8481 sli4_fip_event_t event = { 0 };
8482 uint32_t mask = UINT32_MAX;
8483
8484 ocs_log_debug(sli4->os, "ACQE FCoE FIP type=%02x count=%d tag=%#x\n",
8485 fcoe->event_type,
8486 fcoe->fcf_count,
8487 fcoe->event_tag);
8488
8489 if (!sli4->fip) {
8490 return 0;
8491 }
8492
8493 event.type = fcoe->event_type;
8494 event.index = UINT32_MAX;
8495
8496 switch (fcoe->event_type) {
8497 case SLI4_FCOE_FIP_FCF_DISCOVERED:
8498 ocs_log_debug(sli4->os, "FCF Discovered index=%d\n", fcoe->event_information);
8499 break;
8500 case SLI4_FCOE_FIP_FCF_TABLE_FULL:
8501 ocs_log_debug(sli4->os, "FCF Table Full\n");
8502 mask = 0;
8503 break;
8504 case SLI4_FCOE_FIP_FCF_DEAD:
8505 ocs_log_debug(sli4->os, "FCF Dead/Gone index=%d\n", fcoe->event_information);
8506 break;
8507 case SLI4_FCOE_FIP_FCF_CLEAR_VLINK:
8508 mask = UINT16_MAX;
8509 ocs_log_debug(sli4->os, "Clear VLINK Received VPI=%#x\n", fcoe->event_information & mask);
8510 break;
8511 case SLI4_FCOE_FIP_FCF_MODIFIED:
8512 ocs_log_debug(sli4->os, "FCF Modified\n");
8513 break;
8514 default:
8515 ocs_log_test(sli4->os, "bad FCoE type %#x\n", fcoe->event_type);
8516 mask = 0;
8517 }
8518
8519 if (mask != 0) {
8520 event.index = fcoe->event_information & mask;
8521 }
8522
8523 sli4->fip(sli4->fip_arg, &event);
8524
8525 return 0;
8526 }
8527
8528 /**
8529 * @ingroup sli_fc
8530 * @brief Allocate a receive queue.
8531 *
8532 * @par Description
8533 * Allocates DMA memory and configures the requested queue type.
8534 *
8535 * @param sli4 SLI context.
8536 * @param q Pointer to the queue object for the header.
8537 * @param n_entries Number of entries to allocate.
8538 * @param buffer_size buffer size for the queue.
8539 * @param cq Associated CQ.
8540 * @param ulp The ULP to bind
8541 * @param is_hdr Used to validate the rq_id and set the type of queue
8542 *
8543 * @return Returns 0 on success, or -1 on failure.
8544 */
8545 int32_t
8546 sli_fc_rq_alloc(sli4_t *sli4, sli4_queue_t *q,
8547 uint32_t n_entries, uint32_t buffer_size,
8548 sli4_queue_t *cq, uint16_t ulp, uint8_t is_hdr)
8549 {
8550 int32_t (*rq_create)(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, uint16_t, uint16_t);
8551
8552 if ((sli4 == NULL) || (q == NULL)) {
8553 void *os = sli4 != NULL ? sli4->os : NULL;
8554
8555 ocs_log_err(os, "bad parameter sli4=%p q=%p\n", sli4, q);
8556 return -1;
8557 }
8558
8559 if (__sli_queue_init(sli4, q, SLI_QTYPE_RQ, SLI4_FCOE_RQE_SIZE,
8560 n_entries, SLI_PAGE_SIZE)) {
8561 return -1;
8562 }
8563
8564 if (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF) {
8565 rq_create = sli_cmd_fcoe_rq_create;
8566 } else {
8567 rq_create = sli_cmd_fcoe_rq_create_v1;
8568 }
8569
8570 if (rq_create(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &q->dma,
8571 cq->id, ulp, buffer_size)) {
8572 if (__sli_create_queue(sli4, q)) {
8573 ocs_dma_free(sli4->os, &q->dma);
8574 return -1;
8575 }
8576 if (is_hdr && q->id & 1) {
8577 ocs_log_test(sli4->os, "bad header RQ_ID %d\n", q->id);
8578 ocs_dma_free(sli4->os, &q->dma);
8579 return -1;
8580 } else if (!is_hdr && (q->id & 1) == 0) {
8581 ocs_log_test(sli4->os, "bad data RQ_ID %d\n", q->id);
8582 ocs_dma_free(sli4->os, &q->dma);
8583 return -1;
8584 }
8585 } else {
8586 return -1;
8587 }
8588 q->u.flag.is_hdr = is_hdr;
8589 if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) {
8590 q->u.flag.rq_batch = TRUE;
8591 }
8592 return 0;
8593 }
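
/*
 * Example (illustrative sketch, assuming hypothetical names and sizes): how a
 * caller might allocate one header/data RQ pair bound to a single CQ. The
 * entry count and buffer sizes (512 entries, 128-byte header buffers,
 * 2048-byte payload buffers) are placeholders, and teardown of the header RQ
 * on the second failure path is elided. Compiled out.
 */
#if 0
static int32_t
example_rq_pair_alloc(sli4_t *sli4, sli4_queue_t *hdr_rq, sli4_queue_t *data_rq,
		sli4_queue_t *cq, uint16_t ulp)
{
	/* Header RQ: buffers sized for received FC frame headers. */
	if (sli_fc_rq_alloc(sli4, hdr_rq, 512, 128, cq, ulp, TRUE)) {
		return -1;
	}

	/* Data RQ: larger buffers for the frame payloads. */
	if (sli_fc_rq_alloc(sli4, data_rq, 512, 2048, cq, ulp, FALSE)) {
		/* A real caller would also tear down hdr_rq here. */
		return -1;
	}

	return 0;
}
#endif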

/**
 * @ingroup sli_fc
 * @brief Allocate a receive queue set.
 *
 * @param sli4 SLI context.
 * @param num_rq_pairs Number of header/data RQ pairs to create.
 * @param qs Pointers to the queue objects for both header and data.
 *  The length of this array should be 2 * num_rq_pairs.
 * @param base_cq_id Base CQ ID; assumes CQs base_cq_id through
 *  (base_cq_id + num_rq_pairs - 1) have already been allocated.
 * @param n_entries Number of entries in each RQ.
 * @param header_buffer_size Buffer size for the header RQs.
 * @param payload_buffer_size Buffer size for the payload (data) RQs.
 * @param ulp The ULP to bind.
 *
 * @return Returns 0 on success, or -1 on failure.
 */
int32_t
sli_fc_rq_set_alloc(sli4_t *sli4, uint32_t num_rq_pairs,
		sli4_queue_t *qs[], uint32_t base_cq_id,
		uint32_t n_entries, uint32_t header_buffer_size,
		uint32_t payload_buffer_size, uint16_t ulp)
{
	uint32_t i, p, offset = 0;
	uint32_t payload_size, total_page_count = 0;
	uintptr_t addr;
	ocs_dma_t dma;
	sli4_res_common_create_queue_set_t *rsp = NULL;
	sli4_req_fcoe_rq_create_v2_t *req = NULL;

	ocs_memset(&dma, 0, sizeof(dma));

	for (i = 0; i < (num_rq_pairs * 2); i++) {
		if (__sli_queue_init(sli4, qs[i], SLI_QTYPE_RQ, SLI4_FCOE_RQE_SIZE,
					n_entries, SLI_PAGE_SIZE)) {
			goto error;
		}
	}

	total_page_count = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE) * num_rq_pairs * 2;

	/* Payload length must accommodate both request and response */
	payload_size = max((sizeof(sli4_req_fcoe_rq_create_v1_t) + (8 * total_page_count)),
			sizeof(sli4_res_common_create_queue_set_t));

	if (ocs_dma_alloc(sli4->os, &dma, payload_size, SLI_PAGE_SIZE)) {
		ocs_log_err(sli4->os, "DMA allocation failed\n");
		goto error;
	}
	ocs_memset(dma.virt, 0, payload_size);

	if (sli_cmd_sli_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
			payload_size, &dma) == -1) {
		goto error;
	}
	req = (sli4_req_fcoe_rq_create_v2_t *)((uint8_t *)dma.virt);

	/* Fill Header fields */
	req->hdr.opcode = SLI4_OPC_FCOE_RQ_CREATE;
	req->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
	req->hdr.version = 2;
	req->hdr.request_length = sizeof(sli4_req_fcoe_rq_create_v2_t) - sizeof(sli4_req_hdr_t)
					+ (8 * total_page_count);

	/* Fill Payload fields */
	req->dnb = TRUE;
	req->num_pages = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE);
	req->rqe_count = qs[0]->dma.size / SLI4_FCOE_RQE_SIZE;
	req->rqe_size = SLI4_FCOE_RQE_SIZE_8;
	req->page_size = SLI4_FCOE_RQ_PAGE_SIZE_4096;
	req->rq_count = num_rq_pairs * 2;
	req->base_cq_id = base_cq_id;
	req->hdr_buffer_size = header_buffer_size;
	req->payload_buffer_size = payload_buffer_size;

	for (i = 0; i < (num_rq_pairs * 2); i++) {
		for (p = 0, addr = qs[i]->dma.phys; p < req->num_pages; p++, addr += SLI_PAGE_SIZE) {
			req->page_physical_address[offset].low = ocs_addr32_lo(addr);
			req->page_physical_address[offset].high = ocs_addr32_hi(addr);
			offset++;
		}
	}

	if (sli_bmbx_command(sli4)) {
		ocs_log_crit(sli4->os, "bootstrap mailbox write failed RQSet\n");
		goto error;
	}

	rsp = (void *)((uint8_t *)dma.virt);
	if (rsp->hdr.status) {
		ocs_log_err(sli4->os, "bad create RQSet status=%#x addl=%#x\n",
			rsp->hdr.status, rsp->hdr.additional_status);
		goto error;
	} else {
		for (i = 0; i < (num_rq_pairs * 2); i++) {
			qs[i]->id = i + rsp->q_id;
			if ((qs[i]->id & 1) == 0) {
				qs[i]->u.flag.is_hdr = TRUE;
			} else {
				qs[i]->u.flag.is_hdr = FALSE;
			}
			qs[i]->doorbell_offset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].off;
			qs[i]->doorbell_rset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].rset;
		}
	}

	ocs_dma_free(sli4->os, &dma);

	return 0;

error:
	for (i = 0; i < (num_rq_pairs * 2); i++) {
		if (qs[i]->dma.size) {
			ocs_dma_free(sli4->os, &qs[i]->dma);
		}
	}

	if (dma.size) {
		ocs_dma_free(sli4->os, &dma);
	}

	return -1;
}
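
/*
 * Example (illustrative sketch, assuming hypothetical names and sizes): how a
 * caller might build the qs[] array and invoke sli_fc_rq_set_alloc() for two
 * header/data pairs. The queue storage, base_cq_id, and the entry/buffer
 * sizes are placeholders; the CQs with IDs base_cq_id and base_cq_id + 1 are
 * assumed to exist already. Compiled out.
 */
#if 0
static int32_t
example_rq_set_alloc(sli4_t *sli4, sli4_queue_t rqs[4], uint32_t base_cq_id,
		uint16_t ulp)
{
	sli4_queue_t *rq_ptrs[4];
	uint32_t i;

	for (i = 0; i < 4; i++) {
		rq_ptrs[i] = &rqs[i];
	}

	/*
	 * Two header/data pairs; sli_fc_rq_set_alloc() assigns the queue IDs
	 * and sets q->u.flag.is_hdr from the ID parity.
	 */
	return sli_fc_rq_set_alloc(sli4, 2, rq_ptrs, base_cq_id,
			512, 128, 2048, ulp);
}
#endif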

/**
 * @ingroup sli_fc
 * @brief Get the RPI resource requirements.
 *
 * @param sli4 SLI context.
 * @param n_rpi Number of RPIs desired.
 *
 * @return Returns the number of bytes needed. This value may be zero.
 */
uint32_t
sli_fc_get_rpi_requirements(sli4_t *sli4, uint32_t n_rpi)
{
	uint32_t bytes = 0;

	/* Check if header templates needed */
	if (sli4->config.hdr_template_req) {
		/* round up to a page */
		bytes = SLI_ROUND_PAGE(n_rpi * SLI4_FCOE_HDR_TEMPLATE_SIZE);
	}

	return bytes;
}
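
/*
 * Example (illustrative sketch): sizing and allocating the RPI header
 * template region before registering RPIs. The function and variable names
 * and the page alignment are assumptions; a return of zero simply means this
 * port does not use header templates. Compiled out.
 */
#if 0
static int32_t
example_alloc_rpi_hdr_templates(sli4_t *sli4, ocs_dma_t *dma, uint32_t n_rpi)
{
	uint32_t bytes = sli_fc_get_rpi_requirements(sli4, n_rpi);

	if (bytes == 0) {
		/* No header templates required on this SLI port. */
		return 0;
	}

	if (ocs_dma_alloc(sli4->os, dma, bytes, SLI_PAGE_SIZE)) {
		ocs_log_err(sli4->os, "RPI header template DMA alloc failed\n");
		return -1;
	}
	ocs_memset(dma->virt, 0, bytes);

	return 0;
}
#endif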

/**
 * @ingroup sli_fc
 * @brief Return a text string corresponding to a CQE status value.
 *
 * @param status Status value.
 *
 * @return Returns the corresponding string, or "unknown" if the status is not recognized.
 */
const char *
sli_fc_get_status_string(uint32_t status)
{
	static struct {
		uint32_t code;
		const char *label;
	} lookup[] = {
		{SLI4_FC_WCQE_STATUS_SUCCESS, "SUCCESS"},
		{SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE, "FCP_RSP_FAILURE"},
		{SLI4_FC_WCQE_STATUS_REMOTE_STOP, "REMOTE_STOP"},
		{SLI4_FC_WCQE_STATUS_LOCAL_REJECT, "LOCAL_REJECT"},
		{SLI4_FC_WCQE_STATUS_NPORT_RJT, "NPORT_RJT"},
		{SLI4_FC_WCQE_STATUS_FABRIC_RJT, "FABRIC_RJT"},
		{SLI4_FC_WCQE_STATUS_NPORT_BSY, "NPORT_BSY"},
		{SLI4_FC_WCQE_STATUS_FABRIC_BSY, "FABRIC_BSY"},
		{SLI4_FC_WCQE_STATUS_LS_RJT, "LS_RJT"},
		{SLI4_FC_WCQE_STATUS_CMD_REJECT, "CMD_REJECT"},
		{SLI4_FC_WCQE_STATUS_FCP_TGT_LENCHECK, "FCP_TGT_LENCHECK"},
		{SLI4_FC_WCQE_STATUS_RQ_BUF_LEN_EXCEEDED, "BUF_LEN_EXCEEDED"},
		{SLI4_FC_WCQE_STATUS_RQ_INSUFF_BUF_NEEDED, "RQ_INSUFF_BUF_NEEDED"},
		{SLI4_FC_WCQE_STATUS_RQ_INSUFF_FRM_DISC, "RQ_INSUFF_FRM_DESC"},
		{SLI4_FC_WCQE_STATUS_RQ_DMA_FAILURE, "RQ_DMA_FAILURE"},
		{SLI4_FC_WCQE_STATUS_FCP_RSP_TRUNCATE, "FCP_RSP_TRUNCATE"},
		{SLI4_FC_WCQE_STATUS_DI_ERROR, "DI_ERROR"},
		{SLI4_FC_WCQE_STATUS_BA_RJT, "BA_RJT"},
		{SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_NEEDED, "RQ_INSUFF_XRI_NEEDED"},
		{SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_DISC, "INSUFF_XRI_DISC"},
		{SLI4_FC_WCQE_STATUS_RX_ERROR_DETECT, "RX_ERROR_DETECT"},
		{SLI4_FC_WCQE_STATUS_RX_ABORT_REQUEST, "RX_ABORT_REQUEST"},
	};
	uint32_t i;

	for (i = 0; i < ARRAY_SIZE(lookup); i++) {
		if (status == lookup[i].code) {
			return lookup[i].label;
		}
	}
	return "unknown";
}
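
/*
 * Example (illustrative sketch, with assumed names): translating a raw WCQE
 * status into a readable label when logging a completion. Compiled out.
 */
#if 0
static void
example_log_wcqe_status(sli4_t *sli4, uint32_t status, uint32_t ext_status)
{
	ocs_log_debug(sli4->os, "WCQE status=%s (%#x) ext_status=%#x\n",
			sli_fc_get_status_string(status), status, ext_status);
}
#endif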