xref: /linux/drivers/net/ethernet/qlogic/qed/qed_init_ops.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* QLogic qed NIC Driver
3  * Copyright (c) 2015-2017  QLogic Corporation
4  * Copyright (c) 2019-2020 Marvell International Ltd.
5  */
6 
7 #include <linux/types.h>
8 #include <linux/io.h>
9 #include <linux/delay.h>
10 #include <linux/errno.h>
11 #include <linux/kernel.h>
12 #include <linux/slab.h>
13 #include <linux/string.h>
14 #include "qed.h"
15 #include "qed_hsi.h"
16 #include "qed_hw.h"
17 #include "qed_init_ops.h"
18 #include "qed_iro_hsi.h"
19 #include "qed_reg_addr.h"
20 #include "qed_sriov.h"
21 
22 #define QED_INIT_MAX_POLL_COUNT 100
23 #define QED_INIT_POLL_PERIOD_US 500
24 
25 static u32 pxp_global_win[] = {
26 	0,
27 	0,
28 	0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
29 	0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
30 	0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
31 	0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
32 	0x1d02, /* win 6: addr=0x1d02000, size=4096 bytes */
33 	0x1d80, /* win 7: addr=0x1d80000, size=4096 bytes */
34 	0x1d81, /* win 8: addr=0x1d81000, size=4096 bytes */
35 	0x1d82, /* win 9: addr=0x1d82000, size=4096 bytes */
36 	0x1e00, /* win 10: addr=0x1e00000, size=4096 bytes */
37 	0x1e01, /* win 11: addr=0x1e01000, size=4096 bytes */
38 	0x1e80, /* win 12: addr=0x1e80000, size=4096 bytes */
39 	0x1f00, /* win 13: addr=0x1f00000, size=4096 bytes */
40 	0x1c08, /* win 14: addr=0x1c08000, size=4096 bytes */
41 	0,
42 	0,
43 	0,
44 	0,
45 };
46 
/* IRO Array - firmware internal-RAM offsets table, laid out as rows of
 * three dwords. Consumed through cdev->iro_arr (see qed_init_iro_array()).
 * NOTE(review): the values and the per-column packing are produced by the
 * FW build tools - confirm field meanings against qed_iro_hsi.h before
 * relying on them.
 */
static const u32 iro_arr[] = {
	0x00000000, 0x00000000, 0x00080000,
	0x00004478, 0x00000008, 0x00080000,
	0x00003288, 0x00000088, 0x00880000,
	0x000058a8, 0x00000020, 0x00200000,
	0x00003188, 0x00000008, 0x00080000,
	0x00000b00, 0x00000008, 0x00040000,
	0x00000a80, 0x00000008, 0x00040000,
	0x00000000, 0x00000008, 0x00020000,
	0x00000080, 0x00000008, 0x00040000,
	0x00000084, 0x00000008, 0x00020000,
	0x00005798, 0x00000004, 0x00040000,
	0x00004e50, 0x00000000, 0x00780000,
	0x00003e40, 0x00000000, 0x00780000,
	0x00004500, 0x00000000, 0x00780000,
	0x00003210, 0x00000000, 0x00780000,
	0x00003b50, 0x00000000, 0x00780000,
	0x00007f58, 0x00000000, 0x00780000,
	0x00005fd8, 0x00000000, 0x00080000,
	0x00007100, 0x00000000, 0x00080000,
	0x0000af20, 0x00000000, 0x00080000,
	0x00004398, 0x00000000, 0x00080000,
	0x0000a5a0, 0x00000000, 0x00080000,
	0x0000bde8, 0x00000000, 0x00080000,
	0x00000020, 0x00000004, 0x00040000,
	0x00005688, 0x00000010, 0x00100000,
	0x0000c210, 0x00000030, 0x00300000,
	0x0000b108, 0x00000038, 0x00380000,
	0x00003d20, 0x00000080, 0x00400000,
	0x0000bf60, 0x00000000, 0x00040000,
	0x00004560, 0x00040080, 0x00040000,
	0x000001f8, 0x00000004, 0x00040000,
	0x00003d60, 0x00000080, 0x00200000,
	0x00008960, 0x00000040, 0x00300000,
	0x0000e840, 0x00000060, 0x00600000,
	0x00004698, 0x00000080, 0x00380000,
	0x000107b8, 0x000000c0, 0x00c00000,
	0x000001f8, 0x00000002, 0x00020000,
	0x0000a260, 0x00000000, 0x01080000,
	0x0000a368, 0x00000008, 0x00080000,
	0x000001c0, 0x00000008, 0x00080000,
	0x000001f8, 0x00000008, 0x00080000,
	0x00000ac0, 0x00000008, 0x00080000,
	0x00002578, 0x00000008, 0x00080000,
	0x000024f8, 0x00000008, 0x00080000,
	0x00000280, 0x00000008, 0x00080000,
	0x00000680, 0x00080018, 0x00080000,
	0x00000b78, 0x00080018, 0x00020000,
	0x0000c600, 0x00000058, 0x003c0000,
	0x00012038, 0x00000020, 0x00100000,
	0x00011b00, 0x00000048, 0x00180000,
	0x00009650, 0x00000050, 0x00200000,
	0x00008b10, 0x00000040, 0x00280000,
	0x000116c0, 0x00000018, 0x00100000,
	0x0000c808, 0x00000048, 0x00380000,
	0x00011790, 0x00000020, 0x00200000,
	0x000046d0, 0x00000080, 0x00100000,
	0x00003618, 0x00000010, 0x00100000,
	0x0000a9e8, 0x00000008, 0x00010000,
	0x000097a0, 0x00000008, 0x00010000,
	0x00011a10, 0x00000008, 0x00010000,
	0x0000e9f8, 0x00000008, 0x00010000,
	0x00012648, 0x00000008, 0x00010000,
	0x000121c8, 0x00000008, 0x00010000,
	0x0000af08, 0x00000030, 0x00100000,
	0x0000d748, 0x00000028, 0x00280000,
	0x00009e68, 0x00000018, 0x00180000,
	0x00009fe8, 0x00000008, 0x00080000,
	0x00013ea8, 0x00000008, 0x00080000,
	0x00012f18, 0x00000018, 0x00180000,
	0x0000dfe8, 0x00500288, 0x00100000,
	0x000131a0, 0x00000138, 0x00280000,
};
121 
122 void qed_init_iro_array(struct qed_dev *cdev)
123 {
124 	cdev->iro_arr = iro_arr + E4_IRO_ARR_OFFSET;
125 }
126 
127 void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
128 {
129 	if (rt_offset >= RUNTIME_ARRAY_SIZE) {
130 		DP_ERR(p_hwfn,
131 		       "Avoid storing %u in rt_data at index %u!\n",
132 		       val, rt_offset);
133 		return;
134 	}
135 
136 	p_hwfn->rt_data.init_val[rt_offset] = val;
137 	p_hwfn->rt_data.b_valid[rt_offset] = true;
138 }
139 
140 void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
141 			   u32 rt_offset, u32 *p_val, size_t size)
142 {
143 	size_t i;
144 
145 	if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
146 		DP_ERR(p_hwfn,
147 		       "Avoid storing values in rt_data at indices %u-%u!\n",
148 		       rt_offset,
149 		       (u32)(rt_offset + size - 1));
150 		return;
151 	}
152 
153 	for (i = 0; i < size / sizeof(u32); i++) {
154 		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
155 		p_hwfn->rt_data.b_valid[rt_offset + i]	= true;
156 	}
157 }
158 
/* Program the staged runtime-array entries [rt_offset, rt_offset + size)
 * to the GRC address 'addr'. Only entries flagged valid are written; a
 * written entry's valid flag is cleared so it is not re-programmed.
 * Returns 0 on success or the qed_dmae_host2grc() error code.
 */
static int qed_init_rt(struct qed_hwfn	*p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 addr, u16 rt_offset, u16 size, bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, j, segment;
	int rc = 0;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
			p_valid[i] = false;
			continue;
		}

		/* Start of a new segment - extend it over every
		 * consecutive valid entry.
		 */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		/* DMA the whole contiguous segment in one transaction */
		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(p_init_val + i),
				       addr + (i << 2), segment, NULL);
		if (rc)
			return rc;

		/* invalidate after writing */
		for (j = i; j < (u32)(i + segment); j++)
			p_valid[j] = false;

		/* Jump over the entire segment, including invalid entry
		 * (the loop's i++ then lands on the entry after it).
		 */
		i += segment;
	}

	return rc;
}
205 
206 int qed_init_alloc(struct qed_hwfn *p_hwfn)
207 {
208 	struct qed_rt_data *rt_data = &p_hwfn->rt_data;
209 
210 	if (IS_VF(p_hwfn->cdev))
211 		return 0;
212 
213 	rt_data->b_valid = kzalloc_objs(bool, RUNTIME_ARRAY_SIZE);
214 	if (!rt_data->b_valid)
215 		return -ENOMEM;
216 
217 	rt_data->init_val = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(u32),
218 				    GFP_KERNEL);
219 	if (!rt_data->init_val) {
220 		kfree(rt_data->b_valid);
221 		rt_data->b_valid = NULL;
222 		return -ENOMEM;
223 	}
224 
225 	return 0;
226 }
227 
228 void qed_init_free(struct qed_hwfn *p_hwfn)
229 {
230 	kfree(p_hwfn->rt_data.init_val);
231 	p_hwfn->rt_data.init_val = NULL;
232 	kfree(p_hwfn->rt_data.b_valid);
233 	p_hwfn->rt_data.b_valid = NULL;
234 }
235 
236 static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
237 			       struct qed_ptt *p_ptt,
238 			       u32 addr,
239 			       u32 dmae_data_offset,
240 			       u32 size,
241 			       const u32 *buf,
242 			       bool b_must_dmae,
243 			       bool b_can_dmae)
244 {
245 	int rc = 0;
246 
247 	/* Perform DMAE only for lengthy enough sections or for wide-bus */
248 	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
249 		const u32 *data = buf + dmae_data_offset;
250 		u32 i;
251 
252 		for (i = 0; i < size; i++)
253 			qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
254 	} else {
255 		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
256 				       (uintptr_t)(buf + dmae_data_offset),
257 				       addr, size, NULL);
258 	}
259 
260 	return rc;
261 }
262 
263 static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
264 			      struct qed_ptt *p_ptt,
265 			      u32 addr, u32 fill_count)
266 {
267 	static u32 zero_buffer[DMAE_MAX_RW_SIZE];
268 	struct qed_dmae_params params = {};
269 
270 	memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
271 
272 	/* invoke the DMAE virtual/physical buffer API with
273 	 * 1. DMAE init channel
274 	 * 2. addr,
275 	 * 3. p_hwfb->temp_data,
276 	 * 4. fill_count
277 	 */
278 	SET_FIELD(params.flags, QED_DMAE_PARAMS_RW_REPL_SRC, 0x1);
279 	return qed_dmae_host2grc(p_hwfn, p_ptt,
280 				 (uintptr_t)(&zero_buffer[0]),
281 				 addr, fill_count, &params);
282 }
283 
284 static void qed_init_fill(struct qed_hwfn *p_hwfn,
285 			  struct qed_ptt *p_ptt,
286 			  u32 addr, u32 fill, u32 fill_count)
287 {
288 	u32 i;
289 
290 	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
291 		qed_wr(p_hwfn, p_ptt, addr, fill);
292 }
293 
/* Execute an init WRITE op whose payload lives in the fw-data value array.
 * The array header dword at 'dmae_array_offset' selects the layout:
 * zipped (decompress into p_hwfn->unzip_buf first), pattern (the same
 * block written 'repetitions' times at increasing addresses) or standard
 * (plain dword array). Returns 0 on success or a negative errno.
 * NOTE(review): an unrecognized header type falls through the switch and
 * returns 0 without writing anything - confirm this is the intended
 * forward-compatibility behavior.
 */
static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      struct init_write_op *cmd,
			      bool b_must_dmae, bool b_can_dmae)
{
	u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
	u32 data = le32_to_cpu(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;

	u32 offset, output_len, input_len, max_size;
	struct qed_dev *cdev = p_hwfn->cdev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	int rc = 0;
	u32 size;

	array_data = cdev->fw_data->arr_data;

	hdr = (union init_array_hdr *)(array_data + dmae_array_offset);
	data = le32_to_cpu(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
		/* Payload follows the header dword */
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data,
				      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		memset(p_hwfn->unzip_buf, 0, max_size);

		output_len = qed_unzip_data(p_hwfn, input_len,
					    (u8 *)&array_data[offset],
					    max_size, (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						 output_len,
						 p_hwfn->unzip_buf,
						 b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
			rc = -EINVAL;
		}
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

		/* Repeat the same 'size'-dword pattern, advancing the
		 * destination by size dwords each iteration.
		 */
		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
						 dmae_array_offset + 1,
						 size, array_data,
						 b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
					 dmae_array_offset + 1,
					 size, array_data,
					 b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}
364 
365 /* init_ops write command */
366 static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
367 			   struct qed_ptt *p_ptt,
368 			   struct init_write_op *p_cmd, bool b_can_dmae)
369 {
370 	u32 data = le32_to_cpu(p_cmd->data);
371 	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
372 	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
373 	union init_write_args *arg = &p_cmd->args;
374 	int rc = 0;
375 
376 	/* Sanitize */
377 	if (b_must_dmae && !b_can_dmae) {
378 		DP_NOTICE(p_hwfn,
379 			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
380 			  addr);
381 		return -EINVAL;
382 	}
383 
384 	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
385 	case INIT_SRC_INLINE:
386 		data = le32_to_cpu(p_cmd->args.inline_val);
387 		qed_wr(p_hwfn, p_ptt, addr, data);
388 		break;
389 	case INIT_SRC_ZEROS:
390 		data = le32_to_cpu(p_cmd->args.zeros_count);
391 		if (b_must_dmae || (b_can_dmae && (data >= 64)))
392 			rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, data);
393 		else
394 			qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
395 		break;
396 	case INIT_SRC_ARRAY:
397 		rc = qed_init_cmd_array(p_hwfn, p_ptt, p_cmd,
398 					b_must_dmae, b_can_dmae);
399 		break;
400 	case INIT_SRC_RUNTIME:
401 		qed_init_rt(p_hwfn, p_ptt, addr,
402 			    le16_to_cpu(arg->runtime.offset),
403 			    le16_to_cpu(arg->runtime.size),
404 			    b_must_dmae);
405 		break;
406 	}
407 
408 	return rc;
409 }
410 
411 static inline bool comp_eq(u32 val, u32 expected_val)
412 {
413 	return val == expected_val;
414 }
415 
416 static inline bool comp_and(u32 val, u32 expected_val)
417 {
418 	return (val & expected_val) == expected_val;
419 }
420 
421 static inline bool comp_or(u32 val, u32 expected_val)
422 {
423 	return (val | expected_val) > 0;
424 }
425 
426 /* init_ops read/poll commands */
427 static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
428 			    struct qed_ptt *p_ptt, struct init_read_op *cmd)
429 {
430 	bool (*comp_check)(u32 val, u32 expected_val);
431 	u32 delay = QED_INIT_POLL_PERIOD_US, val;
432 	u32 data, addr, poll;
433 	int i;
434 
435 	data = le32_to_cpu(cmd->op_data);
436 	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
437 	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
438 
439 	val = qed_rd(p_hwfn, p_ptt, addr);
440 
441 	if (poll == INIT_POLL_NONE)
442 		return;
443 
444 	switch (poll) {
445 	case INIT_POLL_EQ:
446 		comp_check = comp_eq;
447 		break;
448 	case INIT_POLL_OR:
449 		comp_check = comp_or;
450 		break;
451 	case INIT_POLL_AND:
452 		comp_check = comp_and;
453 		break;
454 	default:
455 		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
456 		       cmd->op_data);
457 		return;
458 	}
459 
460 	data = le32_to_cpu(cmd->expected_val);
461 	for (i = 0;
462 	     i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data);
463 	     i++) {
464 		udelay(delay);
465 		val = qed_rd(p_hwfn, p_ptt, addr);
466 	}
467 
468 	if (i == QED_INIT_MAX_POLL_COUNT) {
469 		DP_ERR(p_hwfn,
470 		       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
471 		       addr, le32_to_cpu(cmd->expected_val),
472 		       val, le32_to_cpu(cmd->op_data));
473 	}
474 }
475 
476 /* init_ops callbacks entry point */
477 static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
478 			   struct qed_ptt *p_ptt,
479 			   struct init_callback_op *p_cmd)
480 {
481 	int rc;
482 
483 	switch (p_cmd->callback_id) {
484 	case DMAE_READY_CB:
485 		rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
486 		break;
487 	default:
488 		DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n",
489 			  p_cmd->callback_id);
490 		return -EINVAL;
491 	}
492 
493 	return rc;
494 }
495 
496 static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
497 				  u16 *p_offset, int modes)
498 {
499 	struct qed_dev *cdev = p_hwfn->cdev;
500 	const u8 *modes_tree_buf;
501 	u8 arg1, arg2, tree_val;
502 
503 	modes_tree_buf = cdev->fw_data->modes_tree_buf;
504 	tree_val = modes_tree_buf[(*p_offset)++];
505 	switch (tree_val) {
506 	case INIT_MODE_OP_NOT:
507 		return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
508 	case INIT_MODE_OP_OR:
509 		arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
510 		arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
511 		return arg1 | arg2;
512 	case INIT_MODE_OP_AND:
513 		arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
514 		arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
515 		return arg1 & arg2;
516 	default:
517 		tree_val -= MAX_INIT_MODE_OPS;
518 		return (modes & BIT(tree_val)) ? 1 : 0;
519 	}
520 }
521 
522 static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
523 			     struct init_if_mode_op *p_cmd, int modes)
524 {
525 	u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
526 
527 	if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
528 		return 0;
529 	else
530 		return GET_FIELD(le32_to_cpu(p_cmd->op_data),
531 				 INIT_IF_MODE_OP_CMD_OFFSET);
532 }
533 
534 static u32 qed_init_cmd_phase(struct init_if_phase_op *p_cmd,
535 			      u32 phase, u32 phase_id)
536 {
537 	u32 data = le32_to_cpu(p_cmd->phase_data);
538 	u32 op_data = le32_to_cpu(p_cmd->op_data);
539 
540 	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
541 	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
542 	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
543 		return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
544 	else
545 		return 0;
546 }
547 
/* Run all init ops for the given (phase, phase_id, modes) triplet.
 * IF_MODE/IF_PHASE ops bump cmd_num to skip non-matching blocks; the
 * remaining ops are dispatched to the write/read/callback handlers.
 * DMAE is disallowed during PHASE_ENGINE until the DMAE_READY_CB
 * callback has run. Returns 0, -ENOMEM, or the first handler failure.
 */
int qed_init_run(struct qed_hwfn *p_hwfn,
		 struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
	bool b_dmae = (phase != PHASE_ENGINE);
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	int rc = 0;

	num_init_ops = cdev->fw_data->init_ops_size;
	init_ops = cdev->fw_data->init_ops;

	/* Scratch buffer for INIT_ARR_ZIPPED payloads; freed on exit */
	p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
	if (!p_hwfn->unzip_buf)
		return -ENOMEM;

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = le32_to_cpu(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					     b_dmae);
			break;
		case INIT_OP_READ:
			qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;
		case INIT_OP_IF_MODE:
			/* skip-count of 0 means "execute the block" */
			cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
						     modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += qed_init_cmd_phase(&cmd->if_phase,
						      phase, phase_id);
			break;
		case INIT_OP_DELAY:
			/* NOTE(review): the original comment claimed a
			 * sleep-able context, yet udelay() (busy-wait) is
			 * used - confirm delay values here stay short.
			 */
			udelay(le32_to_cpu(cmd->delay.delay));
			break;

		case INIT_OP_CALLBACK:
			rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			/* After the DMAE sanity callback succeeds, the
			 * engine phase may start using DMAE.
			 */
			if (phase == PHASE_ENGINE &&
			    cmd->callback.callback_id == DMAE_READY_CB)
				b_dmae = true;
			break;
		}

		if (rc)
			break;
	}

	kfree(p_hwfn->unzip_buf);
	p_hwfn->unzip_buf = NULL;
	return rc;
}
607 
608 void qed_gtt_init(struct qed_hwfn *p_hwfn)
609 {
610 	u32 gtt_base;
611 	u32 i;
612 
613 	/* Set the global windows */
614 	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
615 
616 	for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
617 		if (pxp_global_win[i])
618 			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
619 			       pxp_global_win[i]);
620 }
621 
622 int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
623 {
624 	struct qed_fw_data *fw = cdev->fw_data;
625 	struct bin_buffer_hdr *buf_hdr;
626 	u32 offset, len;
627 
628 	if (!data) {
629 		DP_NOTICE(cdev, "Invalid fw data\n");
630 		return -EINVAL;
631 	}
632 
633 	/* First Dword contains metadata and should be skipped */
634 	buf_hdr = (struct bin_buffer_hdr *)data;
635 
636 	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
637 	fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
638 
639 	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
640 	fw->init_ops = (union init_op *)(data + offset);
641 
642 	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
643 	fw->arr_data = (u32 *)(data + offset);
644 
645 	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
646 	fw->modes_tree_buf = (u8 *)(data + offset);
647 	len = buf_hdr[BIN_BUF_INIT_CMD].length;
648 	fw->init_ops_size = len / sizeof(struct init_raw_op);
649 
650 	offset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset;
651 	fw->fw_overlays = (u32 *)(data + offset);
652 	len = buf_hdr[BIN_BUF_INIT_OVERLAYS].length;
653 	fw->fw_overlays_len = len;
654 
655 	return 0;
656 }
657