xref: /linux/drivers/net/ethernet/mellanox/mlxsw/pci.c (revision 90d32e92011eaae8e70a9169b4e7acf4ca8f9d3a)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/export.h>
7 #include <linux/err.h>
8 #include <linux/device.h>
9 #include <linux/pci.h>
10 #include <linux/interrupt.h>
11 #include <linux/types.h>
12 #include <linux/skbuff.h>
13 #include <linux/if_vlan.h>
14 #include <linux/log2.h>
15 #include <linux/string.h>
16 
17 #include "pci_hw.h"
18 #include "pci.h"
19 #include "core.h"
20 #include "cmd.h"
21 #include "port.h"
22 #include "resources.h"
23 
/* Raw accessors for the device's mapped register space (hw_addr).
 * Registers are big-endian; 'reg' is token-pasted onto MLXSW_PCI_ to form
 * the register offset macro name.
 */
#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
28 
/* Hardware queue types managed by this driver. The enum value indexes
 * mlxsw_pci->queues[] and the doorbell offset tables below, so order
 * matters.
 */
enum mlxsw_pci_queue_type {
	MLXSW_PCI_QUEUE_TYPE_SDQ,	/* send descriptor queue */
	MLXSW_PCI_QUEUE_TYPE_RDQ,	/* receive descriptor queue */
	MLXSW_PCI_QUEUE_TYPE_CQ,	/* completion queue */
	MLXSW_PCI_QUEUE_TYPE_EQ,	/* event queue */
};

/* Must match the number of enumerators above. */
#define MLXSW_PCI_QUEUE_TYPE_COUNT	4
37 
/* Role of a completion queue: servicing an SDQ (Tx) or an RDQ (Rx).
 * Determined from the CQ number by mlxsw_pci_cq_type().
 */
enum mlxsw_pci_cq_type {
	MLXSW_PCI_CQ_SDQ,
	MLXSW_PCI_CQ_RDQ,
};
42 
/* Per-queue-type doorbell register offsets, indexed by
 * enum mlxsw_pci_queue_type.
 */
static const u16 mlxsw_pci_doorbell_type_offset[] = {
	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};
49 
/* Doorbell offsets used to re-arm interrupt generation. Only CQs and EQs
 * can be armed; SDQ/RDQ slots are placeholders so the table can be indexed
 * by enum mlxsw_pci_queue_type.
 */
static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
	0, /* unused */
	0, /* unused */
	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};
56 
/* A single DMA-coherent memory chunk: CPU pointer, device address and
 * size, as returned by dma_alloc_coherent().
 */
struct mlxsw_pci_mem_item {
	char *buf;		/* CPU-side virtual address */
	dma_addr_t mapaddr;	/* device-side DMA address */
	size_t size;
};
62 
/* Per-element bookkeeping for a queue slot. For SDQs/RDQs it also tracks
 * the skb currently attached to the WQE.
 */
struct mlxsw_pci_queue_elem_info {
	char *elem; /* pointer to actual dma mapped element mem chunk */
	union {
		struct {
			struct sk_buff *skb;
		} sdq;
		struct {
			struct sk_buff *skb;
		} rdq;
	} u;
};
74 
/* One hardware queue (SDQ/RDQ/CQ/EQ). The producer/consumer counters are
 * free-running u16s; element indices are derived by masking with
 * (count - 1), so 'count' is expected to be a power of two.
 */
struct mlxsw_pci_queue {
	spinlock_t lock; /* for queue accesses */
	struct mlxsw_pci_mem_item mem_item;	/* DMA ring memory */
	struct mlxsw_pci_queue_elem_info *elem_info;	/* one entry per slot */
	u16 producer_counter;	/* free-running; not masked */
	u16 consumer_counter;	/* free-running; not masked */
	u16 count; /* number of elements in queue */
	u8 num; /* queue number */
	u8 elem_size; /* size of one element */
	enum mlxsw_pci_queue_type type;
	struct mlxsw_pci *pci;	/* back-pointer to owning device */
	union {
		struct {
			enum mlxsw_pci_cqe_v v;	/* CQE format version in use */
			struct mlxsw_pci_queue *dq;	/* SDQ/RDQ served by this CQ */
			struct napi_struct napi;
		} cq;
		struct {
			struct tasklet_struct tasklet;
		} eq;
	} u;
};
97 
/* All queues of one type, as a contiguous array indexed by queue number. */
struct mlxsw_pci_queue_type_group {
	struct mlxsw_pci_queue *q;
	u8 count; /* number of queues in group */
};
102 
/* Per-device private state for the mlxsw PCI bus implementation. */
struct mlxsw_pci {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;	/* mapped BAR register space */
	u64 free_running_clock_offset;	/* register offsets within hw_addr */
	u64 utc_sec_offset;
	u64 utc_nsec_offset;
	bool lag_mode_support;
	bool cff_support;
	enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode;
	enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode;
	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
	u32 doorbell_offset;	/* base added to the per-type doorbell offsets */
	struct mlxsw_core *core;
	struct {
		struct mlxsw_pci_mem_item *items;
		unsigned int count;
	} fw_area;	/* memory pages registered with the firmware */
	struct {
		struct mlxsw_pci_mem_item out_mbox;
		struct mlxsw_pci_mem_item in_mbox;
		struct mutex lock; /* Lock access to command registers */
		struct {
			u8 status;
			u64 out_param;
		} comp;	/* last command completion result */
	} cmd;
	struct mlxsw_bus_info bus_info;
	const struct pci_device_id *id;
	enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
	u8 num_cqs; /* Number of CQs */
	u8 num_sdqs; /* Number of SDQs */
	bool skip_reset;
	/* Dummy netdevs so Tx and Rx CQ NAPI instances have a home. */
	struct net_device *napi_dev_tx;
	struct net_device *napi_dev_rx;
};
138 
139 static int mlxsw_pci_napi_devs_init(struct mlxsw_pci *mlxsw_pci)
140 {
141 	int err;
142 
143 	mlxsw_pci->napi_dev_tx = alloc_netdev_dummy(0);
144 	if (!mlxsw_pci->napi_dev_tx)
145 		return -ENOMEM;
146 	strscpy(mlxsw_pci->napi_dev_tx->name, "mlxsw_tx",
147 		sizeof(mlxsw_pci->napi_dev_tx->name));
148 
149 	mlxsw_pci->napi_dev_rx = alloc_netdev_dummy(0);
150 	if (!mlxsw_pci->napi_dev_rx) {
151 		err = -ENOMEM;
152 		goto err_alloc_rx;
153 	}
154 	strscpy(mlxsw_pci->napi_dev_rx->name, "mlxsw_rx",
155 		sizeof(mlxsw_pci->napi_dev_rx->name));
156 	dev_set_threaded(mlxsw_pci->napi_dev_rx, true);
157 
158 	return 0;
159 
160 err_alloc_rx:
161 	free_netdev(mlxsw_pci->napi_dev_tx);
162 	return err;
163 }
164 
/* Release the dummy NAPI net devices, in reverse order of allocation. */
static void mlxsw_pci_napi_devs_fini(struct mlxsw_pci *mlxsw_pci)
{
	free_netdev(mlxsw_pci->napi_dev_rx);
	free_netdev(mlxsw_pci->napi_dev_tx);
}
170 
171 static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
172 					size_t elem_size, int elem_index)
173 {
174 	return q->mem_item.buf + (elem_size * elem_index);
175 }
176 
177 static struct mlxsw_pci_queue_elem_info *
178 mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
179 {
180 	return &q->elem_info[elem_index];
181 }
182 
/* Return the slot the producer would fill next, or NULL if the queue is
 * full. Counters are free-running u16s, so the queue is full exactly when
 * they differ by 'count'; the slot index is the counter masked by
 * (count - 1) (count is a power of two).
 */
static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
	int index = q->producer_counter & (q->count - 1);

	if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
		return NULL;
	return mlxsw_pci_queue_elem_info_get(q, index);
}
192 
193 static struct mlxsw_pci_queue_elem_info *
194 mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
195 {
196 	int index = q->consumer_counter & (q->count - 1);
197 
198 	return mlxsw_pci_queue_elem_info_get(q, index);
199 }
200 
201 static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
202 {
203 	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
204 }
205 
/* True if the element at the consumer position is still owned by HW.
 * (q->consumer_counter & q->count) extracts the "lap" bit of the
 * free-running counter ('count' is a power of two), which flips every time
 * SW wraps the ring; SW owns the element only when the CQE/EQE owner bit
 * matches the current lap.
 */
static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
	return owner_bit != !!(q->consumer_counter & q->count);
}
210 
211 static struct mlxsw_pci_queue_type_group *
212 mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
213 			       enum mlxsw_pci_queue_type q_type)
214 {
215 	return &mlxsw_pci->queues[q_type];
216 }
217 
218 static struct mlxsw_pci_queue *
219 __mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
220 		      enum mlxsw_pci_queue_type q_type, u8 q_num)
221 {
222 	return &mlxsw_pci->queues[q_type].q[q_num];
223 }
224 
/* Send descriptor queue number 'q_num'. */
static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}
231 
/* Completion queue number 'q_num'. */
static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}
237 
/* The single event queue used by the driver. */
static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci)
{
	/* There is only one EQ at index 0. */
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, 0);
}
243 
/* Write 'val' to the queue's doorbell register; the register address is
 * composed from the device doorbell base, the per-type offset and the
 * queue number.
 */
static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q,
					   u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_type_offset[q->type],
				   q->num), val);
}
253 
/* Like __mlxsw_pci_queue_doorbell_set() but for the "arm" doorbell that
 * re-enables event generation. Only valid for CQ/EQ types (the arm offset
 * table has no entries for SDQ/RDQ).
 */
static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
					       struct mlxsw_pci_queue *q,
					       u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_arm_type_offset[q->type],
				   q->num), val);
}
263 
/* Publish the current producer counter to HW. The barrier orders all
 * prior descriptor/buffer writes before the doorbell write.
 */
static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}
270 
/* Publish the consumer position to HW. The value written is
 * consumer_counter + count — presumably so HW can tell a full ring from an
 * empty one given the free-running counters; confirm against the device
 * programmer's manual.
 */
static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
				       q->consumer_counter + q->count);
}
278 
/* Re-arm the queue (CQ/EQ) so HW generates an event for the next
 * completion past the current consumer position.
 */
static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}
286 
287 static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
288 					     int page_index)
289 {
290 	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
291 }
292 
/* Hand a send descriptor queue over to HW via the SW2HW_SDQ command.
 * Picks the traffic class and LP mode (EMAD SDQ gets special treatment),
 * binds the SDQ to the CQ with the same number, programs the ring's page
 * addresses and rings the initial producer doorbell.
 *
 * Returns 0 on success or the command's error code.
 */
static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue *cq;
	int tclass;
	u8 cq_num;
	int lp;
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;
	tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
						      MLXSW_PCI_SDQ_CTL_TC;
	lp = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE :
						  MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE;

	/* Set CQ of same number of this SDQ. */
	cq_num = q->num;
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, cq_num);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	/* Record the SDQ on its CQ so completions can find it. */
	cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num);
	cq->u.cq.dq = q;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return 0;
}
331 
/* Reclaim the SDQ from HW (HW2SW_SDQ command). */
static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}
337 
/* DMA-map one buffer fragment and record its address and length in WQE
 * scatter/gather entry 'index'.
 *
 * Returns 0 on success, -EIO if the mapping failed.
 */
static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = dma_map_single(&pdev->dev, frag_data, frag_len, direction);
	if (unlikely(dma_mapping_error(&pdev->dev, mapaddr))) {
		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}
354 
/* Undo mlxsw_pci_wqe_frag_map() for scatter/gather entry 'index', reading
 * the address and length back from the WQE. A zero byte count means the
 * entry was never mapped, so it is skipped.
 */
static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	if (!frag_len)
		return;
	dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}
366 
/* Allocate a max-MTU-sized receive skb, map its data buffer into the
 * element's WQE (fragment 0) and attach it to the element.
 *
 * Returns 0 on success or a negative errno; on failure the skb is freed
 * and the element is left untouched.
 */
static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info,
				   gfp_t gfp)
{
	size_t buf_len = MLXSW_PORT_MAX_MTU;
	char *wqe = elem_info->elem;
	struct sk_buff *skb;
	int err;

	skb = __netdev_alloc_skb_ip_align(NULL, buf_len, gfp);
	if (!skb)
		return -ENOMEM;

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (err)
		goto err_frag_map;

	elem_info->u.rdq.skb = skb;
	return 0;

err_frag_map:
	dev_kfree_skb_any(skb);
	return err;
}
392 
/* Unmap and free the receive skb attached to an RDQ element. Counterpart
 * of mlxsw_pci_rdq_skb_alloc().
 */
static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	struct sk_buff *skb;
	char *wqe;

	skb = elem_info->u.rdq.skb;
	wqe = elem_info->elem;

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}
405 
/* Hand a receive descriptor queue over to HW (SW2HW_RDQ), bind it to its
 * CQ (RDQ CQs are numbered after the SDQ CQs), then fill every slot with a
 * freshly allocated and mapped skb, ringing the producer doorbell as each
 * one becomes available to HW.
 *
 * Returns 0 on success; on failure, already-filled slots are rolled back
 * and the RDQ is returned to SW.
 */
static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	u8 sdq_count = mlxsw_pci->num_sdqs;
	struct mlxsw_pci_queue *cq;
	u8 cq_num;
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of same number of this RDQ with base
	 * above SDQ count as the lower ones are assigned to SDQs.
	 */
	cq_num = sdq_count + q->num;
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, cq_num);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num);
	cq->u.cq.dq = q;

	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
		BUG_ON(!elem_info);
		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_KERNEL);
		if (err)
			goto rollback;
		/* Everything is set up, ring doorbell to pass elem to HW */
		q->producer_counter++;
		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	}

	return 0;

rollback:
	/* Free the skbs allocated for slots [0, i). */
	for (i--; i >= 0; i--) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
	cq->u.cq.dq = NULL;
	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

	return err;
}
463 
/* Return the RDQ to SW ownership, then unmap and free every posted skb. */
static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;

	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
}
476 
/* Choose the CQE version for this CQ before sizing it: start from the
 * device maximum, but drop SDQ-serving CQs (numbers below num_sdqs) back
 * to CQE v1 when the core says SDQs cannot use v2.
 */
static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
				  struct mlxsw_pci_queue *q)
{
	q->u.cq.v = mlxsw_pci->max_cqe_ver;

	if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
	    q->num < mlxsw_pci->num_sdqs &&
	    !mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core))
		q->u.cq.v = MLXSW_PCI_CQE_V1;
}
487 
488 static unsigned int mlxsw_pci_read32_off(struct mlxsw_pci *mlxsw_pci,
489 					 ptrdiff_t off)
490 {
491 	return ioread32be(mlxsw_pci->hw_addr + off);
492 }
493 
/* Copy the CQE's UTC timestamp into the skb control block. Only CQE v2
 * carries timestamps, and only UTC / mirror-UTC timestamp types are
 * taken; otherwise the cb is left untouched.
 */
static void mlxsw_pci_skb_cb_ts_set(struct mlxsw_pci *mlxsw_pci,
				    struct sk_buff *skb,
				    enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
	u8 ts_type;

	if (cqe_v != MLXSW_PCI_CQE_V2)
		return;

	ts_type = mlxsw_pci_cqe2_time_stamp_type_get(cqe);

	if (ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC &&
	    ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_MIRROR_UTC)
		return;

	mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe);
	mlxsw_skb_cb(skb)->cqe_ts.nsec =
		mlxsw_pci_cqe2_time_stamp_nsec_get(cqe);
}
513 
/* Complete one transmitted WQE on an SDQ: unmap all its fragments, hand
 * the skb to the PTP code if it requested a HW TX timestamp (ownership
 * transfers there), otherwise free it, and advance the SDQ consumer.
 * The queue lock serializes against the transmit path.
 */
static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     enum mlxsw_pci_cqe_v cqe_v,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_tx_info tx_info;
	char *wqe;
	struct sk_buff *skb;
	int i;

	spin_lock(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	/* Copy tx_info before the cb may be overwritten below. */
	tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
	skb = elem_info->u.sdq.skb;
	wqe = elem_info->elem;
	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);

	if (unlikely(!tx_info.is_emad &&
		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
		mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
					   tx_info.local_port);
		skb = NULL; /* ownership passed to the PTP code */
	}

	if (skb)
		dev_kfree_skb_any(skb);
	elem_info->u.sdq.skb = NULL;

	/* The CQE's WQE counter should match our consumer position. */
	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
	spin_unlock(&q->lock);
}
551 
/* Fill the TX-port part of the RX metadata from a CQE v2: either the LAG
 * id/port-index or the system port of the port the packet was sent from.
 */
static void mlxsw_pci_cqe_rdq_md_tx_port_init(struct sk_buff *skb,
					      const char *cqe)
{
	struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);

	if (mlxsw_pci_cqe2_tx_lag_get(cqe)) {
		cb->rx_md_info.tx_port_is_lag = true;
		cb->rx_md_info.tx_lag_id = mlxsw_pci_cqe2_tx_lag_id_get(cqe);
		cb->rx_md_info.tx_lag_port_index =
			mlxsw_pci_cqe2_tx_lag_subport_get(cqe);
	} else {
		cb->rx_md_info.tx_port_is_lag = false;
		cb->rx_md_info.tx_sys_port =
			mlxsw_pci_cqe2_tx_system_port_get(cqe);
	}

	/* NOTE(review): in the LAG branch tx_sys_port is not assigned here;
	 * this check presumably relies on tx_sys_port and tx_lag_id sharing
	 * storage in rx_md_info — confirm against the struct definition.
	 */
	if (cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT &&
	    cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_INVALID)
		cb->rx_md_info.tx_port_valid = 1;
	else
		cb->rx_md_info.tx_port_valid = 0;
}
574 
/* Fill mirror metadata (congestion, latency, traffic class and TX port)
 * from a CQE v2 into the skb's RX metadata. Each field gets a *_valid
 * flag cleared when the CQE reports the field's "invalid" sentinel.
 */
static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe)
{
	struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);

	cb->rx_md_info.tx_congestion = mlxsw_pci_cqe2_mirror_cong_get(cqe);
	if (cb->rx_md_info.tx_congestion != MLXSW_PCI_CQE2_MIRROR_CONG_INVALID)
		cb->rx_md_info.tx_congestion_valid = 1;
	else
		cb->rx_md_info.tx_congestion_valid = 0;
	/* Scale the raw congestion value; see MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT. */
	cb->rx_md_info.tx_congestion <<= MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT;

	cb->rx_md_info.latency = mlxsw_pci_cqe2_mirror_latency_get(cqe);
	if (cb->rx_md_info.latency != MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID)
		cb->rx_md_info.latency_valid = 1;
	else
		cb->rx_md_info.latency_valid = 0;

	cb->rx_md_info.tx_tc = mlxsw_pci_cqe2_mirror_tclass_get(cqe);
	if (cb->rx_md_info.tx_tc != MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID)
		cb->rx_md_info.tx_tc_valid = 1;
	else
		cb->rx_md_info.tx_tc_valid = 0;

	mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
}
600 
/* Process one received packet on an RDQ: replace the element's skb with a
 * fresh one, unmap the old buffer, build mlxsw_rx_info from the CQE
 * (LAG/system port, trap id, per-trap metadata, timestamp), trim the FCS
 * and hand the skb to the core. The WQE is copied out first because the
 * element is refilled before the old mapping is released.
 */
static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_rx_info rx_info = {};
	char wqe[MLXSW_PCI_WQE_SIZE];
	struct sk_buff *skb;
	u16 byte_count;
	int err;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.rdq.skb;
	memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_ATOMIC);
	if (err) {
		/* Could not refill: drop this packet and repost the slot. */
		dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
		goto out;
	}

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
		rx_info.is_lag = true;
		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
		rx_info.lag_port_index =
			mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
	} else {
		rx_info.is_lag = false;
		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
	}

	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

	if (rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_INGRESS_ACL ||
	    rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_EGRESS_ACL) {
		u32 cookie_index = 0;

		if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2)
			cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe);
		mlxsw_skb_cb(skb)->rx_md_info.cookie_index = cookie_index;
	} else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 &&
		   rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 &&
		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
		rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe);
		mlxsw_pci_cqe_rdq_md_init(skb, cqe);
	} else if (rx_info.trap_id == MLXSW_TRAP_ID_PKT_SAMPLE &&
		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
		mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
	}

	mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);

	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
	if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
		byte_count -= ETH_FCS_LEN; /* HW left the FCS on the frame */
	skb_put(skb, byte_count);
	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

out:
	/* Slot goes back to HW; the caller rings the producer doorbell. */
	q->producer_counter++;
	return;
}
670 
/* Pop the next SW-owned CQE from a completion queue, or return NULL if
 * HW still owns the element at the consumer position. Advances the
 * consumer counter; the read barrier keeps the owner-bit check ordered
 * before reads of the CQE body.
 */
static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}
686 
/* Peek: true if the CQE at the consumer position is already SW-owned,
 * i.e. there is at least one more completion to process. Does not advance
 * the consumer counter.
 */
static bool mlxsw_pci_cq_cqe_to_handle(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem_info->elem);
	return !mlxsw_pci_elem_hw_owned(q, owner_bit);
}
696 
/* NAPI poll handler for an Rx (RDQ-serving) CQ. Drains up to 'budget'
 * CQEs, dispatching each to mlxsw_pci_cqe_rdq_handle(), then updates the
 * CQ consumer and RDQ producer doorbells. Completes NAPI and re-arms the
 * CQ only when all outstanding work fit in the budget.
 *
 * Returns the number of packets processed (never 'budget' unless more
 * work remains, per NAPI convention).
 */
static int mlxsw_pci_napi_poll_cq_rx(struct napi_struct *napi, int budget)
{
	struct mlxsw_pci_queue *q = container_of(napi, struct mlxsw_pci_queue,
						 u.cq.napi);
	struct mlxsw_pci_queue *rdq = q->u.cq.dq;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	int work_done = 0;
	char *cqe;

	/* If the budget is 0, Rx processing should be skipped. */
	if (unlikely(!budget))
		return 0;

	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);

		/* An Rx CQ must only see receive completions ... */
		if (unlikely(sendq)) {
			WARN_ON_ONCE(1);
			continue;
		}

		/* ... and only for the RDQ bound to it. */
		if (unlikely(dqn != rdq->num)) {
			WARN_ON_ONCE(1);
			continue;
		}

		mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
					 wqe_counter, q->u.cq.v, cqe);

		if (++work_done == budget)
			break;
	}

	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, rdq);

	if (work_done < budget)
		goto processing_completed;

	/* The driver still has outstanding work to do, budget was exhausted.
	 * Return exactly budget. In that case, the NAPI instance will be polled
	 * again.
	 */
	if (mlxsw_pci_cq_cqe_to_handle(q))
		goto out;

	/* The driver processed all the completions and handled exactly
	 * 'budget'. Return 'budget - 1' to distinguish from the case that
	 * driver still has completions to handle.
	 */
	if (work_done == budget)
		work_done--;

processing_completed:
	if (napi_complete_done(napi, work_done))
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
out:
	return work_done;
}
758 
/* NAPI poll handler for a Tx (SDQ-serving) CQ. Drains all available CQEs
 * regardless of budget (Tx completions are cheap); each CQE is copied out
 * and its consumer doorbell rung before handling, so the slot is returned
 * to HW promptly. Reported work is capped at budget - 1 so NAPI always
 * completes, then the CQ is re-armed.
 */
static int mlxsw_pci_napi_poll_cq_tx(struct napi_struct *napi, int budget)
{
	struct mlxsw_pci_queue *q = container_of(napi, struct mlxsw_pci_queue,
						 u.cq.napi);
	struct mlxsw_pci_queue *sdq = q->u.cq.dq;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	int work_done = 0;
	char *cqe;

	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];

		/* A Tx CQ must only see send completions ... */
		if (unlikely(!sendq)) {
			WARN_ON_ONCE(1);
			continue;
		}

		/* ... and only for the SDQ bound to it. */
		if (unlikely(dqn != sdq->num)) {
			WARN_ON_ONCE(1);
			continue;
		}

		/* Work on a copy so the CQE slot can be released first. */
		memcpy(ncqe, cqe, q->elem_size);
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);

		mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
					 wqe_counter, q->u.cq.v, ncqe);

		work_done++;
	}

	/* If the budget is 0 napi_complete_done() should never be called. */
	if (unlikely(!budget))
		goto processing_completed;

	work_done = min(work_done, budget - 1);
	if (unlikely(!napi_complete_done(napi, work_done)))
		goto out;

processing_completed:
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
out:
	return work_done;
}
806 
807 static enum mlxsw_pci_cq_type
808 mlxsw_pci_cq_type(const struct mlxsw_pci *mlxsw_pci,
809 		  const struct mlxsw_pci_queue *q)
810 {
811 	/* Each CQ is mapped to one DQ. The first 'num_sdqs' queues are used
812 	 * for SDQs and the rest are used for RDQs.
813 	 */
814 	if (q->num < mlxsw_pci->num_sdqs)
815 		return MLXSW_PCI_CQ_SDQ;
816 
817 	return MLXSW_PCI_CQ_RDQ;
818 }
819 
/* Register and enable the CQ's NAPI instance on the matching dummy
 * netdev (Tx or Rx) with the matching poll function.
 */
static void mlxsw_pci_cq_napi_setup(struct mlxsw_pci_queue *q,
				    enum mlxsw_pci_cq_type cq_type)
{
	struct mlxsw_pci *mlxsw_pci = q->pci;

	switch (cq_type) {
	case MLXSW_PCI_CQ_SDQ:
		netif_napi_add(mlxsw_pci->napi_dev_tx, &q->u.cq.napi,
			       mlxsw_pci_napi_poll_cq_tx);
		break;
	case MLXSW_PCI_CQ_RDQ:
		netif_napi_add(mlxsw_pci->napi_dev_rx, &q->u.cq.napi,
			       mlxsw_pci_napi_poll_cq_rx);
		break;
	}

	napi_enable(&q->u.cq.napi);
}
838 
/* Disable and unregister the CQ's NAPI instance. */
static void mlxsw_pci_cq_napi_teardown(struct mlxsw_pci_queue *q)
{
	napi_disable(&q->u.cq.napi);
	netif_napi_del(&q->u.cq.napi);
}
844 
/* Hand a completion queue over to HW (SW2HW_CQ): give every CQE to HW by
 * setting its owner bit, program the CQE version and ring pages, attach
 * the CQ to the completion EQ, then set up NAPI and arm the CQ.
 *
 * Returns 0 on success or the command's error code.
 */
static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	/* Initially all CQEs belong to HW. */
	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
	}

	if (q->u.cq.v == MLXSW_PCI_CQE_V1)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
	else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);

	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_cq_napi_setup(q, mlxsw_pci_cq_type(mlxsw_pci, q));
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}
882 
/* Tear down the CQ's NAPI instance and reclaim the CQ from HW. */
static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_pci_cq_napi_teardown(q);
	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}
889 
890 static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
891 {
892 	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
893 					     MLXSW_PCI_CQE01_COUNT;
894 }
895 
896 static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
897 {
898 	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
899 					       MLXSW_PCI_CQE01_SIZE;
900 }
901 
/* Pop the next SW-owned EQE from the event queue, or return NULL if HW
 * still owns the element at the consumer position. Mirrors
 * mlxsw_pci_cq_sw_cqe_get() for the EQ element format.
 */
static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_eqe_owner_get(elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}
917 
/* EQ tasklet: drain up to half a ring's worth of EQEs, collect the set of
 * CQs they report, acknowledge and re-arm the EQ, then schedule NAPI on
 * every CQ that had a completion event.
 */
static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t)
{
	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
	struct mlxsw_pci_queue *q = from_tasklet(q, t, u.eq.tasklet);
	struct mlxsw_pci *mlxsw_pci = q->pci;
	int credits = q->count >> 1; /* bound work per run to half the ring */
	u8 cqn, cq_count;
	int items = 0;
	char *eqe;

	memset(&active_cqns, 0, sizeof(active_cqns));

	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
		cqn = mlxsw_pci_eqe_cqn_get(eqe);
		set_bit(cqn, active_cqns);

		if (++items == credits)
			break;
	}

	/* Nothing consumed; no doorbells to ring, nothing to schedule. */
	if (!items)
		return;

	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);

	cq_count = mlxsw_pci->num_cqs;
	for_each_set_bit(cqn, active_cqns, cq_count) {
		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
		napi_schedule(&q->u.cq.napi);
	}
}
950 
/* Hand the event queue over to HW (SW2HW_EQ): give every EQE to HW via
 * its owner bit, program MSI-X delivery and the ring pages, set up the
 * servicing tasklet, and arm the EQ.
 *
 * Returns 0 on success or the command's error code.
 */
static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	/* We expect to initialize only one EQ, which gets num=0 as it is
	 * located at index zero. We use the EQ as EQ1, so set the number for
	 * future use.
	 */
	WARN_ON_ONCE(q->num);
	q->num = MLXSW_PCI_EQ_COMP_NUM;

	q->consumer_counter = 0;

	/* Initially all EQEs belong to HW. */
	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_eqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	tasklet_setup(&q->u.eq.tasklet, mlxsw_pci_eq_tasklet);
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}
988 
/* Reclaim the event queue from HW (HW2SW_EQ command). */
static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}
994 
/* Per-queue-type operations and sizing. Either the fixed elem_count /
 * elem_size values are used, or the *_f callbacks when the size depends on
 * runtime state (e.g. the CQE version).
 */
struct mlxsw_pci_queue_ops {
	const char *name;
	enum mlxsw_pci_queue_type type;
	/* Optional hook run before sizing/allocation. */
	void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
			 struct mlxsw_pci_queue *q);
	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
		    struct mlxsw_pci_queue *q);
	void (*fini)(struct mlxsw_pci *mlxsw_pci,
		     struct mlxsw_pci_queue *q);
	/* Dynamic sizing callbacks; take precedence over the fields below. */
	u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
	u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
	u16 elem_count;
	u8 elem_size;
};
1009 
/* Send descriptor queue operations; fixed WQE count and size. */
static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
	.init		= mlxsw_pci_sdq_init,
	.fini		= mlxsw_pci_sdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};
1017 
/* Receive descriptor queues: fixed WQE count and size. */
static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
	.init		= mlxsw_pci_rdq_init,
	.fini		= mlxsw_pci_rdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE
};
1025 
/* Completion queues: element count/size depend on the queue (via the
 * callbacks), and a pre_init hook sets per-queue parameters first.
 */
static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
	.pre_init	= mlxsw_pci_cq_pre_init,
	.init		= mlxsw_pci_cq_init,
	.fini		= mlxsw_pci_cq_fini,
	.elem_count_f	= mlxsw_pci_cq_elem_count,
	.elem_size_f	= mlxsw_pci_cq_elem_size
};
1034 
/* Event queues: fixed EQE count and size. */
static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
	.init		= mlxsw_pci_eq_init,
	.fini		= mlxsw_pci_eq_fini,
	.elem_count	= MLXSW_PCI_EQE_COUNT,
	.elem_size	= MLXSW_PCI_EQE_SIZE
};
1042 
1043 static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
1044 				const struct mlxsw_pci_queue_ops *q_ops,
1045 				struct mlxsw_pci_queue *q, u8 q_num)
1046 {
1047 	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
1048 	int i;
1049 	int err;
1050 
1051 	q->num = q_num;
1052 	if (q_ops->pre_init)
1053 		q_ops->pre_init(mlxsw_pci, q);
1054 
1055 	spin_lock_init(&q->lock);
1056 	q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
1057 					 q_ops->elem_count;
1058 	q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
1059 					    q_ops->elem_size;
1060 	q->type = q_ops->type;
1061 	q->pci = mlxsw_pci;
1062 
1063 	mem_item->size = MLXSW_PCI_AQ_SIZE;
1064 	mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
1065 					   mem_item->size, &mem_item->mapaddr,
1066 					   GFP_KERNEL);
1067 	if (!mem_item->buf)
1068 		return -ENOMEM;
1069 
1070 	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
1071 	if (!q->elem_info) {
1072 		err = -ENOMEM;
1073 		goto err_elem_info_alloc;
1074 	}
1075 
1076 	/* Initialize dma mapped elements info elem_info for
1077 	 * future easy access.
1078 	 */
1079 	for (i = 0; i < q->count; i++) {
1080 		struct mlxsw_pci_queue_elem_info *elem_info;
1081 
1082 		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
1083 		elem_info->elem =
1084 			__mlxsw_pci_queue_elem_get(q, q->elem_size, i);
1085 	}
1086 
1087 	mlxsw_cmd_mbox_zero(mbox);
1088 	err = q_ops->init(mlxsw_pci, mbox, q);
1089 	if (err)
1090 		goto err_q_ops_init;
1091 	return 0;
1092 
1093 err_q_ops_init:
1094 	kfree(q->elem_info);
1095 err_elem_info_alloc:
1096 	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
1097 			  mem_item->buf, mem_item->mapaddr);
1098 	return err;
1099 }
1100 
1101 static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
1102 				 const struct mlxsw_pci_queue_ops *q_ops,
1103 				 struct mlxsw_pci_queue *q)
1104 {
1105 	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
1106 
1107 	q_ops->fini(mlxsw_pci, q);
1108 	kfree(q->elem_info);
1109 	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
1110 			  mem_item->buf, mem_item->mapaddr);
1111 }
1112 
1113 static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
1114 				      const struct mlxsw_pci_queue_ops *q_ops,
1115 				      u8 num_qs)
1116 {
1117 	struct mlxsw_pci_queue_type_group *queue_group;
1118 	int i;
1119 	int err;
1120 
1121 	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
1122 	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
1123 	if (!queue_group->q)
1124 		return -ENOMEM;
1125 
1126 	for (i = 0; i < num_qs; i++) {
1127 		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
1128 					   &queue_group->q[i], i);
1129 		if (err)
1130 			goto err_queue_init;
1131 	}
1132 	queue_group->count = num_qs;
1133 
1134 	return 0;
1135 
1136 err_queue_init:
1137 	for (i--; i >= 0; i--)
1138 		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
1139 	kfree(queue_group->q);
1140 	return err;
1141 }
1142 
1143 static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
1144 				       const struct mlxsw_pci_queue_ops *q_ops)
1145 {
1146 	struct mlxsw_pci_queue_type_group *queue_group;
1147 	int i;
1148 
1149 	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
1150 	for (i = 0; i < queue_group->count; i++)
1151 		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
1152 	kfree(queue_group->q);
1153 }
1154 
/* Query the device's async-queue capabilities, validate them against the
 * driver's compile-time expectations and bring up all queue groups in
 * order: EQs, then CQs, SDQs and RDQs.
 */
static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	u8 num_sdqs;
	u8 sdq_log2sz;
	u8 num_rdqs;
	u8 rdq_log2sz;
	u8 num_cqs;
	u8 cq_log2sz;
	u8 cqv2_log2sz;
	u8 num_eqs;
	u8 eq_log2sz;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
	if (err)
		return err;

	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
	cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

	/* There must be at least as many CQs as SDQs plus RDQs combined
	 * (presumably each descriptor queue is paired with its own CQ -
	 * see mlxsw_pci_sdq_init()/mlxsw_pci_rdq_init() for confirmation).
	 */
	if (num_sdqs + num_rdqs > num_cqs ||
	    num_sdqs < MLXSW_PCI_SDQS_MIN ||
	    num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_MAX) {
		dev_err(&pdev->dev, "Unsupported number of queues\n");
		return -EINVAL;
	}

	/* The driver uses fixed queue depths; reject firmware whose
	 * reported maximum sizes do not match them exactly.
	 */
	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
	    (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
	     (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
		return -EINVAL;
	}

	mlxsw_pci->num_cqs = num_cqs;
	mlxsw_pci->num_sdqs = num_sdqs;

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
					 MLXSW_PCI_EQS_COUNT);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize event queues\n");
		return err;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
					 num_cqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
		goto err_cqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
					 num_sdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
		goto err_sdqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
					 num_rdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
		goto err_rdqs_init;
	}

	return 0;

err_rdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
	return err;
}
1242 
static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
	/* Tear down queue groups in reverse order of mlxsw_pci_aqs_init():
	 * RDQs, SDQs, CQs and finally EQs.
	 */
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}
1250 
1251 static void
1252 mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
1253 				     char *mbox, int index,
1254 				     const struct mlxsw_swid_config *swid)
1255 {
1256 	u8 mask = 0;
1257 
1258 	if (swid->used_type) {
1259 		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
1260 			mbox, index, swid->type);
1261 		mask |= 1;
1262 	}
1263 	if (swid->used_properties) {
1264 		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
1265 			mbox, index, swid->properties);
1266 		mask |= 2;
1267 	}
1268 	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
1269 }
1270 
1271 static int
1272 mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
1273 				const struct mlxsw_config_profile *profile,
1274 				struct mlxsw_res *res)
1275 {
1276 	u64 single_size, double_size, linear_size;
1277 	int err;
1278 
1279 	err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
1280 				       &single_size, &double_size,
1281 				       &linear_size);
1282 	if (err)
1283 		return err;
1284 
1285 	MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
1286 	MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
1287 	MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);
1288 
1289 	return 0;
1290 }
1291 
/* Build and issue the CONFIG_PROFILE command. Each profile field is only
 * written to the mailbox - together with its corresponding "set" bit -
 * when the profile marks it as used, so unused fields keep their
 * firmware defaults.
 */
static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
				    const struct mlxsw_config_profile *profile,
				    struct mlxsw_res *res)
{
	int i;
	int err;

	mlxsw_cmd_mbox_zero(mbox);

	if (profile->used_max_vepa_channels) {
		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
			mbox, profile->max_vepa_channels);
	}
	if (profile->used_max_lag) {
		mlxsw_cmd_mbox_config_profile_set_max_lag_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_lag_set(mbox,
							  profile->max_lag);
	}
	if (profile->used_max_mid) {
		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_mid_set(
			mbox, profile->max_mid);
	}
	if (profile->used_max_pgt) {
		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pgt_set(
			mbox, profile->max_pgt);
	}
	if (profile->used_max_system_port) {
		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_system_port_set(
			mbox, profile->max_system_port);
	}
	if (profile->used_max_vlan_groups) {
		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
			mbox, profile->max_vlan_groups);
	}
	if (profile->used_max_regions) {
		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_regions_set(
			mbox, profile->max_regions);
	}
	if (profile->used_flood_tables) {
		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
			mbox, profile->max_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
			mbox, profile->max_vid_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
			mbox, profile->max_fid_offset_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
			mbox, profile->fid_offset_flood_table_size);
		mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
			mbox, profile->max_fid_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
			mbox, profile->fid_flood_table_size);
	}
	/* A flood mode must always be configured: CFF when both the
	 * profile prefers it and the device supports it, otherwise the
	 * profile's explicit mode. Reaching the final else is a driver
	 * bug - a profile with neither preference is invalid.
	 */
	if (profile->flood_mode_prefer_cff && mlxsw_pci->cff_support) {
		enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode =
			MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CFF;

		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(mbox, flood_mode);
		mlxsw_pci->flood_mode = flood_mode;
	} else if (profile->used_flood_mode) {
		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(
			mbox, profile->flood_mode);
		mlxsw_pci->flood_mode = profile->flood_mode;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}
	if (profile->used_max_ib_mc) {
		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
			mbox, profile->max_ib_mc);
	}
	if (profile->used_max_pkey) {
		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pkey_set(
			mbox, profile->max_pkey);
	}
	if (profile->used_ar_sec) {
		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_ar_sec_set(
			mbox, profile->ar_sec);
	}
	if (profile->used_adaptive_routing_group_cap) {
		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
			mbox, profile->adaptive_routing_group_cap);
	}
	if (profile->used_ubridge) {
		mlxsw_cmd_mbox_config_profile_set_ubridge_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_ubridge_set(mbox,
							  profile->ubridge);
	}
	/* KVD sizes are computed by the core from the profile and also
	 * published into @res for later consumers.
	 */
	if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
		err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
		if (err)
			return err;

		mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
					MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
									   1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
					MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
								mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
					MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
	}

	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
						     &profile->swid_config[i]);

	/* Request CQE version 1 whenever the device supports more than
	 * version 0.
	 */
	if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
		mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
	}

	if (profile->used_cqe_time_stamp_type) {
		mlxsw_cmd_mbox_config_profile_set_cqe_time_stamp_type_set(mbox,
									  1);
		mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type_set(mbox,
					profile->cqe_time_stamp_type);
	}

	/* LAG mode: software-managed when preferred and supported,
	 * otherwise fall back to firmware-managed.
	 */
	if (profile->lag_mode_prefer_sw && mlxsw_pci->lag_mode_support) {
		enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode =
			MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW;

		mlxsw_cmd_mbox_config_profile_set_lag_mode_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_lag_mode_set(mbox, lag_mode);
		mlxsw_pci->lag_mode = lag_mode;
	} else {
		mlxsw_pci->lag_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_FW;
	}
	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}
1450 
1451 static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
1452 {
1453 	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
1454 	int err;
1455 
1456 	mlxsw_cmd_mbox_zero(mbox);
1457 	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
1458 	if (err)
1459 		return err;
1460 	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
1461 	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
1462 	return 0;
1463 }
1464 
1465 static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
1466 				  u16 num_pages)
1467 {
1468 	struct mlxsw_pci_mem_item *mem_item;
1469 	int nent = 0;
1470 	int i;
1471 	int err;
1472 
1473 	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
1474 					   GFP_KERNEL);
1475 	if (!mlxsw_pci->fw_area.items)
1476 		return -ENOMEM;
1477 	mlxsw_pci->fw_area.count = num_pages;
1478 
1479 	mlxsw_cmd_mbox_zero(mbox);
1480 	for (i = 0; i < num_pages; i++) {
1481 		mem_item = &mlxsw_pci->fw_area.items[i];
1482 
1483 		mem_item->size = MLXSW_PCI_PAGE_SIZE;
1484 		mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
1485 						   mem_item->size,
1486 						   &mem_item->mapaddr, GFP_KERNEL);
1487 		if (!mem_item->buf) {
1488 			err = -ENOMEM;
1489 			goto err_alloc;
1490 		}
1491 		mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
1492 		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
1493 		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
1494 			err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
1495 			if (err)
1496 				goto err_cmd_map_fa;
1497 			nent = 0;
1498 			mlxsw_cmd_mbox_zero(mbox);
1499 		}
1500 	}
1501 
1502 	if (nent) {
1503 		err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
1504 		if (err)
1505 			goto err_cmd_map_fa;
1506 	}
1507 
1508 	return 0;
1509 
1510 err_cmd_map_fa:
1511 err_alloc:
1512 	for (i--; i >= 0; i--) {
1513 		mem_item = &mlxsw_pci->fw_area.items[i];
1514 
1515 		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
1516 				  mem_item->buf, mem_item->mapaddr);
1517 	}
1518 	kfree(mlxsw_pci->fw_area.items);
1519 	return err;
1520 }
1521 
1522 static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
1523 {
1524 	struct mlxsw_pci_mem_item *mem_item;
1525 	int i;
1526 
1527 	mlxsw_cmd_unmap_fa(mlxsw_pci->core);
1528 
1529 	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
1530 		mem_item = &mlxsw_pci->fw_area.items[i];
1531 
1532 		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
1533 				  mem_item->buf, mem_item->mapaddr);
1534 	}
1535 	kfree(mlxsw_pci->fw_area.items);
1536 }
1537 
/* Hard IRQ handler for the single EQ vector; defers all event
 * processing to the EQ tasklet.
 */
static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
	struct mlxsw_pci *mlxsw_pci = dev_id;
	struct mlxsw_pci_queue *q;

	q = mlxsw_pci_eq_get(mlxsw_pci);
	tasklet_schedule(&q->u.eq.tasklet);
	return IRQ_HANDLED;
}
1547 
1548 static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
1549 				struct mlxsw_pci_mem_item *mbox)
1550 {
1551 	struct pci_dev *pdev = mlxsw_pci->pdev;
1552 	int err = 0;
1553 
1554 	mbox->size = MLXSW_CMD_MBOX_SIZE;
1555 	mbox->buf = dma_alloc_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE,
1556 				       &mbox->mapaddr, GFP_KERNEL);
1557 	if (!mbox->buf) {
1558 		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
1559 		err = -ENOMEM;
1560 	}
1561 
1562 	return err;
1563 }
1564 
1565 static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
1566 				struct mlxsw_pci_mem_item *mbox)
1567 {
1568 	struct pci_dev *pdev = mlxsw_pci->pdev;
1569 
1570 	dma_free_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
1571 			  mbox->mapaddr);
1572 }
1573 
1574 static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
1575 				    const struct pci_device_id *id,
1576 				    u32 *p_sys_status)
1577 {
1578 	unsigned long end;
1579 	u32 val;
1580 
1581 	/* We must wait for the HW to become responsive. */
1582 	msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
1583 
1584 	end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
1585 	do {
1586 		val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
1587 		if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
1588 			return 0;
1589 		cond_resched();
1590 	} while (time_before(jiffies, end));
1591 
1592 	*p_sys_status = val & MLXSW_PCI_FW_READY_MASK;
1593 
1594 	return -EBUSY;
1595 }
1596 
/* Perform the "reset at PCI disable" flow: arm the device through MRSR,
 * then trigger a PCI function reset with config space access locked and
 * saved so it can be restored afterwards.
 */
static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char mrsr_pl[MLXSW_REG_MRSR_LEN];
	int err;

	mlxsw_reg_mrsr_pack(mrsr_pl,
			    MLXSW_REG_MRSR_COMMAND_RESET_AT_PCI_DISABLE);
	err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
	if (err)
		return err;

	/* The device lock is expected to be held by the caller. */
	device_lock_assert(&pdev->dev);

	pci_cfg_access_lock(pdev);
	pci_save_state(pdev);

	err = __pci_reset_function_locked(pdev);
	if (err)
		pci_err(pdev, "PCI function reset failed with %d\n", err);

	/* Restore config space even when the reset itself failed. */
	pci_restore_state(pdev);
	pci_cfg_access_unlock(pdev);

	return err;
}
1623 
/* Issue a software reset through the MRSR register. */
static int mlxsw_pci_reset_sw(struct mlxsw_pci *mlxsw_pci)
{
	char mrsr_pl[MLXSW_REG_MRSR_LEN];

	mlxsw_reg_mrsr_pack(mrsr_pl, MLXSW_REG_MRSR_COMMAND_SOFTWARE_RESET);
	return mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
}
1631 
/* Reset the device and wait for it to become ready again. The PCI reset
 * flow is preferred when firmware advertises support for it through
 * MCAM; otherwise a software reset through MRSR is used.
 */
static int
mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char mcam_pl[MLXSW_REG_MCAM_LEN];
	bool pci_reset_supported = false;
	u32 sys_status;
	int err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	/* PCI core already issued a PCI reset, do not issue another reset. */
	if (mlxsw_pci->skip_reset)
		return 0;

	/* Query PCI reset support; a failed query is treated as the
	 * capability being absent, not as a fatal error.
	 */
	mlxsw_reg_mcam_pack(mcam_pl,
			    MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
	err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl);
	if (!err)
		mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
				      &pci_reset_supported);

	if (pci_reset_supported) {
		pci_dbg(pdev, "Starting PCI reset flow\n");
		err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci);
	} else {
		pci_dbg(pdev, "Starting software reset flow\n");
		err = mlxsw_pci_reset_sw(mlxsw_pci);
	}
	if (err)
		return err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	return 0;
}
1678 
1679 static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
1680 {
1681 	int err;
1682 
1683 	err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
1684 	if (err < 0)
1685 		dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
1686 	return err;
1687 }
1688 
/* Counterpart of mlxsw_pci_alloc_irq_vectors(). */
static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	pci_free_irq_vectors(mlxsw_pci->pdev);
}
1693 
1694 static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
1695 			  const struct mlxsw_config_profile *profile,
1696 			  struct mlxsw_res *res)
1697 {
1698 	struct mlxsw_pci *mlxsw_pci = bus_priv;
1699 	struct pci_dev *pdev = mlxsw_pci->pdev;
1700 	char *mbox;
1701 	u16 num_pages;
1702 	int err;
1703 
1704 	mlxsw_pci->core = mlxsw_core;
1705 
1706 	mbox = mlxsw_cmd_mbox_alloc();
1707 	if (!mbox)
1708 		return -ENOMEM;
1709 
1710 	err = mlxsw_pci_reset(mlxsw_pci, mlxsw_pci->id);
1711 	if (err)
1712 		goto err_reset;
1713 
1714 	err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
1715 	if (err < 0) {
1716 		dev_err(&pdev->dev, "MSI-X init failed\n");
1717 		goto err_alloc_irq;
1718 	}
1719 
1720 	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
1721 	if (err)
1722 		goto err_query_fw;
1723 
1724 	mlxsw_pci->bus_info.fw_rev.major =
1725 		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
1726 	mlxsw_pci->bus_info.fw_rev.minor =
1727 		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
1728 	mlxsw_pci->bus_info.fw_rev.subminor =
1729 		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);
1730 
1731 	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
1732 		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
1733 		err = -EINVAL;
1734 		goto err_iface_rev;
1735 	}
1736 	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
1737 		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
1738 		err = -EINVAL;
1739 		goto err_doorbell_page_bar;
1740 	}
1741 
1742 	mlxsw_pci->doorbell_offset =
1743 		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);
1744 
1745 	if (mlxsw_cmd_mbox_query_fw_fr_rn_clk_bar_get(mbox) != 0) {
1746 		dev_err(&pdev->dev, "Unsupported free running clock BAR queried from hw\n");
1747 		err = -EINVAL;
1748 		goto err_fr_rn_clk_bar;
1749 	}
1750 
1751 	mlxsw_pci->free_running_clock_offset =
1752 		mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);
1753 
1754 	if (mlxsw_cmd_mbox_query_fw_utc_sec_bar_get(mbox) != 0) {
1755 		dev_err(&pdev->dev, "Unsupported UTC sec BAR queried from hw\n");
1756 		err = -EINVAL;
1757 		goto err_utc_sec_bar;
1758 	}
1759 
1760 	mlxsw_pci->utc_sec_offset =
1761 		mlxsw_cmd_mbox_query_fw_utc_sec_offset_get(mbox);
1762 
1763 	if (mlxsw_cmd_mbox_query_fw_utc_nsec_bar_get(mbox) != 0) {
1764 		dev_err(&pdev->dev, "Unsupported UTC nsec BAR queried from hw\n");
1765 		err = -EINVAL;
1766 		goto err_utc_nsec_bar;
1767 	}
1768 
1769 	mlxsw_pci->utc_nsec_offset =
1770 		mlxsw_cmd_mbox_query_fw_utc_nsec_offset_get(mbox);
1771 
1772 	mlxsw_pci->lag_mode_support =
1773 		mlxsw_cmd_mbox_query_fw_lag_mode_support_get(mbox);
1774 	mlxsw_pci->cff_support =
1775 		mlxsw_cmd_mbox_query_fw_cff_support_get(mbox);
1776 
1777 	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
1778 	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
1779 	if (err)
1780 		goto err_fw_area_init;
1781 
1782 	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
1783 	if (err)
1784 		goto err_boardinfo;
1785 
1786 	err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
1787 	if (err)
1788 		goto err_query_resources;
1789 
1790 	if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
1791 	    MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
1792 		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
1793 	else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
1794 		 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
1795 		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
1796 	else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
1797 		  MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
1798 		 !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
1799 		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
1800 	} else {
1801 		dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
1802 		goto err_cqe_v_check;
1803 	}
1804 
1805 	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
1806 	if (err)
1807 		goto err_config_profile;
1808 
1809 	/* Some resources depend on details of config_profile, such as unified
1810 	 * bridge model. Query the resources again to get correct values.
1811 	 */
1812 	err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
1813 	if (err)
1814 		goto err_requery_resources;
1815 
1816 	err = mlxsw_pci_napi_devs_init(mlxsw_pci);
1817 	if (err)
1818 		goto err_napi_devs_init;
1819 
1820 	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
1821 	if (err)
1822 		goto err_aqs_init;
1823 
1824 	err = request_irq(pci_irq_vector(pdev, 0),
1825 			  mlxsw_pci_eq_irq_handler, 0,
1826 			  mlxsw_pci->bus_info.device_kind, mlxsw_pci);
1827 	if (err) {
1828 		dev_err(&pdev->dev, "IRQ request failed\n");
1829 		goto err_request_eq_irq;
1830 	}
1831 
1832 	goto mbox_put;
1833 
1834 err_request_eq_irq:
1835 	mlxsw_pci_aqs_fini(mlxsw_pci);
1836 err_aqs_init:
1837 	mlxsw_pci_napi_devs_fini(mlxsw_pci);
1838 err_napi_devs_init:
1839 err_requery_resources:
1840 err_config_profile:
1841 err_cqe_v_check:
1842 err_query_resources:
1843 err_boardinfo:
1844 	mlxsw_pci_fw_area_fini(mlxsw_pci);
1845 err_fw_area_init:
1846 err_utc_nsec_bar:
1847 err_utc_sec_bar:
1848 err_fr_rn_clk_bar:
1849 err_doorbell_page_bar:
1850 err_iface_rev:
1851 err_query_fw:
1852 	mlxsw_pci_free_irq_vectors(mlxsw_pci);
1853 err_alloc_irq:
1854 err_reset:
1855 mbox_put:
1856 	mlxsw_cmd_mbox_free(mbox);
1857 	return err;
1858 }
1859 
static void mlxsw_pci_fini(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	/* Tear down in reverse order of mlxsw_pci_init(): the IRQ must go
	 * before the queues it services.
	 */
	free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
	mlxsw_pci_aqs_fini(mlxsw_pci);
	mlxsw_pci_napi_devs_fini(mlxsw_pci);
	mlxsw_pci_fw_area_fini(mlxsw_pci);
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
}
1870 
1871 static struct mlxsw_pci_queue *
1872 mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
1873 		   const struct mlxsw_tx_info *tx_info)
1874 {
1875 	u8 ctl_sdq_count = mlxsw_pci->num_sdqs - 1;
1876 	u8 sdqn;
1877 
1878 	if (tx_info->is_emad) {
1879 		sdqn = MLXSW_PCI_SDQ_EMAD_INDEX;
1880 	} else {
1881 		BUILD_BUG_ON(MLXSW_PCI_SDQ_EMAD_INDEX != 0);
1882 		sdqn = 1 + (tx_info->local_port % ctl_sdq_count);
1883 	}
1884 
1885 	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
1886 }
1887 
1888 static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
1889 					const struct mlxsw_tx_info *tx_info)
1890 {
1891 	struct mlxsw_pci *mlxsw_pci = bus_priv;
1892 	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
1893 
1894 	return !mlxsw_pci_queue_elem_info_producer_get(q);
1895 }
1896 
/* Post one skb on the SDQ picked for this tx_info and ring the producer
 * doorbell. Returns -EAGAIN when the queue is full so the caller may
 * retry, or a DMA mapping error.
 */
static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	int i;
	int err;

	/* The WQE has a limited number of scatter/gather entries and one
	 * is needed for the linear part; linearize skbs with more frags.
	 */
	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
		err = skb_linearize(skb);
		if (err)
			return err;
	}

	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
	spin_lock_bh(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
	if (!elem_info) {
		/* queue is full */
		err = -EAGAIN;
		goto unlock;
	}
	mlxsw_skb_cb(skb)->tx_info = *tx_info;
	elem_info->u.sdq.skb = skb;

	wqe = elem_info->elem;
	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
	mlxsw_pci_wqe_lp_set(wqe, 0);
	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

	/* Entry 0 maps the linear part, entries 1..n the page frags. */
	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (err)
		goto unlock;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		if (err)
			goto unmap_frags;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	/* Set unused sq entries byte count to zero. */
	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);

	/* Everything is set up, ring producer doorbell to get HW going */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	goto unlock;

unmap_frags:
	/* The failed mapping was WQE entry i + 1; entries i down to 0
	 * (including the linear part at 0) were mapped and are unwound.
	 */
	for (; i >= 0; i--)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
unlock:
	spin_unlock_bh(&q->lock);
	return err;
}
1965 
/* Execute one firmware command through the Command Interface Register (CIR)
 * block in BAR0. The input/output mailbox DMA addresses, input modifier and
 * token are programmed into the CIR registers, the GO bit is set together
 * with the opcode, and completion is detected by polling CIR_CTRL until the
 * GO bit clears.
 *
 * Returns 0 on success; -EINTR if waiting for the command mutex was
 * interrupted; -ETIMEDOUT if the GO bit did not clear within
 * MLXSW_PCI_CIR_TIMEOUT_MSECS; -EIO if the device reported a non-OK
 * status. The raw device status is always reported back via p_status.
 */
static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
			      u32 in_mod, bool out_mbox_direct,
			      char *in_mbox, size_t in_mbox_size,
			      char *out_mbox, size_t out_mbox_size,
			      u8 *p_status)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
	unsigned long end;
	bool wait_done;
	int err;

	*p_status = MLXSW_CMD_STATUS_OK;

	/* Only one command may be outstanding at a time; the mutex serializes
	 * all users of the single in/out mailbox pair.
	 */
	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
	if (err)
		return err;

	/* Stage the caller's input into the pre-allocated DMA-able mailbox.
	 * Commands without an input mailbox get a zero IN_PARAM address.
	 */
	if (in_mbox) {
		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
		in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
	}
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));

	if (out_mbox)
		out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));

	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
	/* Token is always written as zero here; commands are fully
	 * serialized so no token-based matching is needed.
	 */
	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);

	wait_done = false;

	wmb(); /* all needs to be written before we write control register */
	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
			  MLXSW_PCI_CIR_CTRL_GO_BIT |
			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
			  opcode);

	/* Busy-poll for completion: the device clears the GO bit when done
	 * and places its status code in the high bits of CIR_CTRL.
	 */
	end = jiffies + timeout;
	do {
		u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);

		if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
			wait_done = true;
			*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
			break;
		}
		cond_resched();
	} while (time_before(jiffies, end));

	err = 0;
	if (wait_done) {
		if (*p_status)
			err = -EIO;
	} else {
		err = -ETIMEDOUT;
	}

	if (!err && out_mbox && out_mbox_direct) {
		/* Some commands don't use output param as address to mailbox
		 * but they store output directly into registers. In that case,
		 * copy registers into mbox buffer.
		 */
		__be32 tmp;

		tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
						   CIR_OUT_PARAM_HI));
		memcpy(out_mbox, &tmp, sizeof(tmp));
		tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
						   CIR_OUT_PARAM_LO));
		memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
	} else if (!err && out_mbox) {
		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
	}

	mutex_unlock(&mlxsw_pci->cmd.lock);

	return err;
}
2049 
2050 static u32 mlxsw_pci_read_frc_h(void *bus_priv)
2051 {
2052 	struct mlxsw_pci *mlxsw_pci = bus_priv;
2053 	u64 frc_offset_h;
2054 
2055 	frc_offset_h = mlxsw_pci->free_running_clock_offset;
2056 	return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_h);
2057 }
2058 
2059 static u32 mlxsw_pci_read_frc_l(void *bus_priv)
2060 {
2061 	struct mlxsw_pci *mlxsw_pci = bus_priv;
2062 	u64 frc_offset_l;
2063 
2064 	frc_offset_l = mlxsw_pci->free_running_clock_offset + 4;
2065 	return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_l);
2066 }
2067 
2068 static u32 mlxsw_pci_read_utc_sec(void *bus_priv)
2069 {
2070 	struct mlxsw_pci *mlxsw_pci = bus_priv;
2071 
2072 	return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_sec_offset);
2073 }
2074 
2075 static u32 mlxsw_pci_read_utc_nsec(void *bus_priv)
2076 {
2077 	struct mlxsw_pci *mlxsw_pci = bus_priv;
2078 
2079 	return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_nsec_offset);
2080 }
2081 
2082 static enum mlxsw_cmd_mbox_config_profile_lag_mode
2083 mlxsw_pci_lag_mode(void *bus_priv)
2084 {
2085 	struct mlxsw_pci *mlxsw_pci = bus_priv;
2086 
2087 	return mlxsw_pci->lag_mode;
2088 }
2089 
2090 static enum mlxsw_cmd_mbox_config_profile_flood_mode
2091 mlxsw_pci_flood_mode(void *bus_priv)
2092 {
2093 	struct mlxsw_pci *mlxsw_pci = bus_priv;
2094 
2095 	return mlxsw_pci->flood_mode;
2096 }
2097 
/* Bus operations handed to mlxsw core; core calls back through these to
 * drive the device over PCI (init/fini, TX, command execution, clock
 * reads and profile mode queries).
 */
static const struct mlxsw_bus mlxsw_pci_bus = {
	.kind			= "pci",
	.init			= mlxsw_pci_init,
	.fini			= mlxsw_pci_fini,
	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
	.skb_transmit		= mlxsw_pci_skb_transmit,
	.cmd_exec		= mlxsw_pci_cmd_exec,
	.read_frc_h		= mlxsw_pci_read_frc_h,
	.read_frc_l		= mlxsw_pci_read_frc_l,
	.read_utc_sec		= mlxsw_pci_read_utc_sec,
	.read_utc_nsec		= mlxsw_pci_read_utc_nsec,
	.lag_mode		= mlxsw_pci_lag_mode,
	.flood_mode		= mlxsw_pci_flood_mode,
	.features		= MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};
2113 
2114 static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
2115 {
2116 	int err;
2117 
2118 	mutex_init(&mlxsw_pci->cmd.lock);
2119 
2120 	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
2121 	if (err)
2122 		goto err_in_mbox_alloc;
2123 
2124 	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
2125 	if (err)
2126 		goto err_out_mbox_alloc;
2127 
2128 	return 0;
2129 
2130 err_out_mbox_alloc:
2131 	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
2132 err_in_mbox_alloc:
2133 	mutex_destroy(&mlxsw_pci->cmd.lock);
2134 	return err;
2135 }
2136 
/* Tear down the command interface in reverse order of mlxsw_pci_cmd_init(). */
static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	mutex_destroy(&mlxsw_pci->cmd.lock);
}
2143 
/* PCI probe: enable the device, claim its regions, configure DMA masking,
 * map BAR0, initialize the command interface and finally register the
 * device with mlxsw core, which drives the rest of the initialization
 * through the mlxsw_pci_bus callbacks. All acquired resources are
 * unwound in reverse order on failure.
 */
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const char *driver_name = dev_driver_string(&pdev->dev);
	struct mlxsw_pci *mlxsw_pci;
	int err;

	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
	if (!mlxsw_pci)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	/* Prefer 64-bit DMA; fall back to a 32-bit streaming mask if the
	 * platform cannot provide it.
	 */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "dma_set_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	/* BAR0 must be large enough to hold the full register file. */
	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!mlxsw_pci->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	mlxsw_pci->pdev = pdev;
	pci_set_drvdata(pdev, mlxsw_pci);

	err = mlxsw_pci_cmd_init(mlxsw_pci);
	if (err)
		goto err_pci_cmd_init;

	mlxsw_pci->bus_info.device_kind = driver_name;
	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
	mlxsw_pci->bus_info.dev = &pdev->dev;
	mlxsw_pci->bus_info.read_clock_capable = true;
	mlxsw_pci->id = id;

	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
					     &mlxsw_pci_bus, mlxsw_pci, false,
					     NULL, NULL);
	if (err) {
		dev_err(&pdev->dev, "cannot register bus device\n");
		goto err_bus_device_register;
	}

	return 0;

err_bus_device_register:
	mlxsw_pci_cmd_fini(mlxsw_pci);
err_pci_cmd_init:
	iounmap(mlxsw_pci->hw_addr);
err_ioremap:
	/* The DMA-mask and BAR-size checks acquire nothing of their own, so
	 * their labels fall through to releasing the PCI regions.
	 */
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(mlxsw_pci);
	return err;
}
2227 
2228 static void mlxsw_pci_remove(struct pci_dev *pdev)
2229 {
2230 	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
2231 
2232 	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
2233 	mlxsw_pci_cmd_fini(mlxsw_pci);
2234 	iounmap(mlxsw_pci->hw_addr);
2235 	pci_release_regions(mlxsw_pci->pdev);
2236 	pci_disable_device(mlxsw_pci->pdev);
2237 	kfree(mlxsw_pci);
2238 }
2239 
/* Called before a PCI function-level reset: detach the device from mlxsw
 * core so no traffic or commands are in flight across the reset.
 */
static void mlxsw_pci_reset_prepare(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
}
2246 
/* Called after a PCI function-level reset completed: re-register with
 * mlxsw core. skip_reset is set around the call so the re-init path does
 * not issue another device reset on top of the one that just happened.
 * NOTE(review): the registration return value is ignored here; on failure
 * the device stays unregistered — confirm this is acceptable for the
 * reset_done path, which cannot return an error.
 */
static void mlxsw_pci_reset_done(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_pci->skip_reset = true;
	mlxsw_core_bus_device_register(&mlxsw_pci->bus_info, &mlxsw_pci_bus,
				       mlxsw_pci, false, NULL, NULL);
	mlxsw_pci->skip_reset = false;
}
2256 
/* Hooks invoked by the PCI core around a function-level reset. */
static const struct pci_error_handlers mlxsw_pci_err_handler = {
	.reset_prepare = mlxsw_pci_reset_prepare,
	.reset_done = mlxsw_pci_reset_done,
};
2261 
2262 int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
2263 {
2264 	pci_driver->probe = mlxsw_pci_probe;
2265 	pci_driver->remove = mlxsw_pci_remove;
2266 	pci_driver->shutdown = mlxsw_pci_remove;
2267 	pci_driver->err_handler = &mlxsw_pci_err_handler;
2268 	return pci_register_driver(pci_driver);
2269 }
2270 EXPORT_SYMBOL(mlxsw_pci_driver_register);
2271 
/* Counterpart of mlxsw_pci_driver_register(): drop the driver from the
 * PCI core.
 */
void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
{
	pci_unregister_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_unregister);
2277 
/* Module init: intentionally empty — per-ASIC drivers register themselves
 * via mlxsw_pci_driver_register().
 */
static int __init mlxsw_pci_module_init(void)
{
	return 0;
}
2282 
/* Module exit: nothing to undo, see mlxsw_pci_module_init(). */
static void __exit mlxsw_pci_module_exit(void)
{
}
2286 
module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
2293