xref: /linux/drivers/net/ethernet/mellanox/mlxsw/pci.c (revision 90602c251cda8a1e526efb250f28c1ea3f87cd78)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/export.h>
7 #include <linux/err.h>
8 #include <linux/device.h>
9 #include <linux/pci.h>
10 #include <linux/interrupt.h>
11 #include <linux/types.h>
12 #include <linux/skbuff.h>
13 #include <linux/if_vlan.h>
14 #include <linux/log2.h>
15 #include <linux/string.h>
16 #include <net/page_pool/helpers.h>
17 
18 #include "pci_hw.h"
19 #include "pci.h"
20 #include "core.h"
21 #include "cmd.h"
22 #include "port.h"
23 #include "resources.h"
24 
25 #define mlxsw_pci_write32(mlxsw_pci, reg, val) \
26 	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
27 #define mlxsw_pci_read32(mlxsw_pci, reg) \
28 	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
29 
30 enum mlxsw_pci_queue_type {
31 	MLXSW_PCI_QUEUE_TYPE_SDQ,
32 	MLXSW_PCI_QUEUE_TYPE_RDQ,
33 	MLXSW_PCI_QUEUE_TYPE_CQ,
34 	MLXSW_PCI_QUEUE_TYPE_EQ,
35 };
36 
37 #define MLXSW_PCI_QUEUE_TYPE_COUNT	4
38 
39 enum mlxsw_pci_cq_type {
40 	MLXSW_PCI_CQ_SDQ,
41 	MLXSW_PCI_CQ_RDQ,
42 };
43 
44 static const u16 mlxsw_pci_doorbell_type_offset[] = {
45 	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
46 	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
47 	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
48 	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
49 };
50 
51 static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
52 	0, /* unused */
53 	0, /* unused */
54 	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
55 	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
56 };
57 
58 struct mlxsw_pci_mem_item {
59 	char *buf;
60 	dma_addr_t mapaddr;
61 	size_t size;
62 };
63 
64 struct mlxsw_pci_queue_elem_info {
65 	struct page *pages[MLXSW_PCI_WQE_SG_ENTRIES];
66 	char *elem; /* pointer to actual dma mapped element mem chunk */
67 	struct {
68 		struct sk_buff *skb;
69 	} sdq;
70 };
71 
72 struct mlxsw_pci_queue {
73 	spinlock_t lock; /* for queue accesses */
74 	struct mlxsw_pci_mem_item mem_item;
75 	struct mlxsw_pci_queue_elem_info *elem_info;
76 	u16 producer_counter;
77 	u16 consumer_counter;
78 	u16 count; /* number of elements in queue */
79 	u8 num; /* queue number */
80 	u8 elem_size; /* size of one element */
81 	enum mlxsw_pci_queue_type type;
82 	struct mlxsw_pci *pci;
83 	union {
84 		struct {
85 			enum mlxsw_pci_cqe_v v;
86 			struct mlxsw_pci_queue *dq;
87 			struct napi_struct napi;
88 			struct page_pool *page_pool;
89 		} cq;
90 		struct {
91 			struct tasklet_struct tasklet;
92 		} eq;
93 		struct {
94 			struct mlxsw_pci_queue *cq;
95 		} rdq;
96 	} u;
97 };
98 
99 struct mlxsw_pci_queue_type_group {
100 	struct mlxsw_pci_queue *q;
101 	u8 count; /* number of queues in group */
102 };
103 
104 struct mlxsw_pci {
105 	struct pci_dev *pdev;
106 	u8 __iomem *hw_addr;
107 	u64 free_running_clock_offset;
108 	u64 utc_sec_offset;
109 	u64 utc_nsec_offset;
110 	bool lag_mode_support;
111 	bool cff_support;
112 	enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode;
113 	enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode;
114 	u8 num_sg_entries; /* Number of scatter/gather entries for packets. */
115 	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
116 	u32 doorbell_offset;
117 	struct mlxsw_core *core;
118 	struct {
119 		struct mlxsw_pci_mem_item *items;
120 		unsigned int count;
121 	} fw_area;
122 	struct {
123 		struct mlxsw_pci_mem_item out_mbox;
124 		struct mlxsw_pci_mem_item in_mbox;
125 		struct mutex lock; /* Lock access to command registers */
126 		struct {
127 			u8 status;
128 			u64 out_param;
129 		} comp;
130 	} cmd;
131 	struct mlxsw_bus_info bus_info;
132 	const struct pci_device_id *id;
133 	enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
134 	u8 num_cqs; /* Number of CQs */
135 	u8 num_sdqs; /* Number of SDQs */
136 	bool skip_reset;
137 	struct net_device *napi_dev_tx;
138 	struct net_device *napi_dev_rx;
139 };
140 
141 static int mlxsw_pci_napi_devs_init(struct mlxsw_pci *mlxsw_pci)
142 {
143 	int err;
144 
145 	mlxsw_pci->napi_dev_tx = alloc_netdev_dummy(0);
146 	if (!mlxsw_pci->napi_dev_tx)
147 		return -ENOMEM;
148 	strscpy(mlxsw_pci->napi_dev_tx->name, "mlxsw_tx",
149 		sizeof(mlxsw_pci->napi_dev_tx->name));
150 
151 	mlxsw_pci->napi_dev_rx = alloc_netdev_dummy(0);
152 	if (!mlxsw_pci->napi_dev_rx) {
153 		err = -ENOMEM;
154 		goto err_alloc_rx;
155 	}
156 	strscpy(mlxsw_pci->napi_dev_rx->name, "mlxsw_rx",
157 		sizeof(mlxsw_pci->napi_dev_rx->name));
158 	dev_set_threaded(mlxsw_pci->napi_dev_rx, true);
159 
160 	return 0;
161 
162 err_alloc_rx:
163 	free_netdev(mlxsw_pci->napi_dev_tx);
164 	return err;
165 }
166 
167 static void mlxsw_pci_napi_devs_fini(struct mlxsw_pci *mlxsw_pci)
168 {
169 	free_netdev(mlxsw_pci->napi_dev_rx);
170 	free_netdev(mlxsw_pci->napi_dev_tx);
171 }
172 
173 static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
174 					size_t elem_size, int elem_index)
175 {
176 	return q->mem_item.buf + (elem_size * elem_index);
177 }
178 
179 static struct mlxsw_pci_queue_elem_info *
180 mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
181 {
182 	return &q->elem_info[elem_index];
183 }
184 
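/* The producer and consumer counters are free-running u16 values; the ring
 * index is the counter masked by (count - 1), since the element count is a
 * power of two. The ring is full when the counters differ by exactly
 * 'count', in which case no producer element is available.
 */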
185 static struct mlxsw_pci_queue_elem_info *
186 mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
187 {
188 	int index = q->producer_counter & (q->count - 1);
189 
190 	if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
191 		return NULL;
192 	return mlxsw_pci_queue_elem_info_get(q, index);
193 }
194 
195 static struct mlxsw_pci_queue_elem_info *
196 mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
197 {
198 	int index = q->consumer_counter & (q->count - 1);
199 
200 	return mlxsw_pci_queue_elem_info_get(q, index);
201 }
202 
203 static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
204 {
205 	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
206 }
207 
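/* An element is still owned by the hardware when its owner bit does not match
 * the expected value for the current pass over the ring. Because 'count' is a
 * power of two, (consumer_counter & count) flips every time the consumer
 * counter wraps around the ring.
 */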
208 static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
209 {
210 	return owner_bit != !!(q->consumer_counter & q->count);
211 }
212 
213 static struct mlxsw_pci_queue_type_group *
214 mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
215 			       enum mlxsw_pci_queue_type q_type)
216 {
217 	return &mlxsw_pci->queues[q_type];
218 }
219 
220 static struct mlxsw_pci_queue *
221 __mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
222 		      enum mlxsw_pci_queue_type q_type, u8 q_num)
223 {
224 	return &mlxsw_pci->queues[q_type].q[q_num];
225 }
226 
227 static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
228 						 u8 q_num)
229 {
230 	return __mlxsw_pci_queue_get(mlxsw_pci,
231 				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
232 }
233 
234 static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
235 						u8 q_num)
236 {
237 	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
238 }
239 
240 static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci)
241 {
242 	/* There is only one EQ at index 0. */
243 	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, 0);
244 }
245 
246 static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
247 					   struct mlxsw_pci_queue *q,
248 					   u16 val)
249 {
250 	mlxsw_pci_write32(mlxsw_pci,
251 			  DOORBELL(mlxsw_pci->doorbell_offset,
252 				   mlxsw_pci_doorbell_type_offset[q->type],
253 				   q->num), val);
254 }
255 
256 static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
257 					       struct mlxsw_pci_queue *q,
258 					       u16 val)
259 {
260 	mlxsw_pci_write32(mlxsw_pci,
261 			  DOORBELL(mlxsw_pci->doorbell_offset,
262 				   mlxsw_pci_doorbell_arm_type_offset[q->type],
263 				   q->num), val);
264 }
265 
266 static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
267 						   struct mlxsw_pci_queue *q)
268 {
269 	wmb(); /* ensure all writes are done before we ring a bell */
270 	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
271 }
272 
273 static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
274 						   struct mlxsw_pci_queue *q)
275 {
276 	wmb(); /* ensure all writes are done before we ring a bell */
277 	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
278 				       q->consumer_counter + q->count);
279 }
280 
281 static void
282 mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
283 					   struct mlxsw_pci_queue *q)
284 {
285 	wmb(); /* ensure all writes are done before we ring a bell */
286 	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
287 }
288 
289 static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
290 					     int page_index)
291 {
292 	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
293 }
294 
295 static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
296 			      struct mlxsw_pci_queue *q)
297 {
298 	struct mlxsw_pci_queue *cq;
299 	int tclass;
300 	u8 cq_num;
301 	int lp;
302 	int i;
303 	int err;
304 
305 	q->producer_counter = 0;
306 	q->consumer_counter = 0;
307 	tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
308 						      MLXSW_PCI_SDQ_CTL_TC;
309 	lp = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE :
310 						  MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE;
311 
312 	/* Set the CQ with the same number as this SDQ. */
313 	cq_num = q->num;
314 	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, cq_num);
315 	mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp);
316 	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
317 	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
318 	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
319 		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
320 
321 		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
322 	}
323 
324 	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
325 	if (err)
326 		return err;
327 
328 	cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num);
329 	cq->u.cq.dq = q;
330 	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
331 	return 0;
332 }
333 
334 static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
335 			       struct mlxsw_pci_queue *q)
336 {
337 	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
338 }
339 
340 #define MLXSW_PCI_SKB_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
341 
342 #define MLXSW_PCI_RX_BUF_SW_OVERHEAD		\
343 		(MLXSW_PCI_SKB_HEADROOM +	\
344 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
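/* Each RX scatter/gather entry maps a full page from the CQ page pool. The
 * first fragment of a packet reserves headroom and room for skb_shared_info
 * so that napi_build_skb() can later use the page directly as the skb head;
 * the overhead above accounts for that.
 */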
345 
346 static void
347 mlxsw_pci_wqe_rx_frag_set(struct mlxsw_pci *mlxsw_pci, struct page *page,
348 			  char *wqe, int index, size_t frag_len)
349 {
350 	dma_addr_t mapaddr;
351 
352 	mapaddr = page_pool_get_dma_addr(page);
353 
354 	if (index == 0) {
355 		mapaddr += MLXSW_PCI_SKB_HEADROOM;
356 		frag_len = frag_len - MLXSW_PCI_RX_BUF_SW_OVERHEAD;
357 	}
358 
359 	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
360 	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
361 }
362 
363 static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
364 				  int index, char *frag_data, size_t frag_len,
365 				  int direction)
366 {
367 	struct pci_dev *pdev = mlxsw_pci->pdev;
368 	dma_addr_t mapaddr;
369 
370 	mapaddr = dma_map_single(&pdev->dev, frag_data, frag_len, direction);
371 	if (unlikely(dma_mapping_error(&pdev->dev, mapaddr))) {
372 		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
373 		return -EIO;
374 	}
375 	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
376 	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
377 	return 0;
378 }
379 
380 static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
381 				     int index, int direction)
382 {
383 	struct pci_dev *pdev = mlxsw_pci->pdev;
384 	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
385 	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);
386 
387 	if (!frag_len)
388 		return;
389 	dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
390 }
391 
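/* Build an skb around the pages of a received packet: the first page becomes
 * the linear part (headroom and skb_shared_info are carved out of it via
 * napi_build_skb()), and any remaining bytes are attached as page fragments
 * taken from the following pages.
 */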
392 static struct sk_buff *mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
393 					       struct page *pages[],
394 					       u16 byte_count)
395 {
396 	struct mlxsw_pci_queue *cq = q->u.rdq.cq;
397 	unsigned int linear_data_size;
398 	struct page_pool *page_pool;
399 	struct sk_buff *skb;
400 	int page_index = 0;
401 	bool linear_only;
402 	void *data;
403 
404 	linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE;
405 	linear_data_size = linear_only ? byte_count :
406 					 PAGE_SIZE -
407 					 MLXSW_PCI_RX_BUF_SW_OVERHEAD;
408 
409 	page_pool = cq->u.cq.page_pool;
410 	page_pool_dma_sync_for_cpu(page_pool, pages[page_index],
411 				   MLXSW_PCI_SKB_HEADROOM, linear_data_size);
412 
413 	data = page_address(pages[page_index]);
414 	net_prefetch(data);
415 
416 	skb = napi_build_skb(data, PAGE_SIZE);
417 	if (unlikely(!skb))
418 		return ERR_PTR(-ENOMEM);
419 
420 	skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM);
421 	skb_put(skb, linear_data_size);
422 
423 	if (linear_only)
424 		return skb;
425 
426 	byte_count -= linear_data_size;
427 	page_index++;
428 
429 	while (byte_count > 0) {
430 		unsigned int frag_size;
431 		struct page *page;
432 
433 		page = pages[page_index];
434 		frag_size = min(byte_count, PAGE_SIZE);
435 		page_pool_dma_sync_for_cpu(page_pool, page, 0, frag_size);
436 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
437 				page, 0, frag_size, PAGE_SIZE);
438 		byte_count -= frag_size;
439 		page_index++;
440 	}
441 
442 	return skb;
443 }
444 
445 static int mlxsw_pci_rdq_page_alloc(struct mlxsw_pci_queue *q,
446 				    struct mlxsw_pci_queue_elem_info *elem_info,
447 				    int index)
448 {
449 	struct mlxsw_pci_queue *cq = q->u.rdq.cq;
450 	char *wqe = elem_info->elem;
451 	struct page *page;
452 
453 	page = page_pool_dev_alloc_pages(cq->u.cq.page_pool);
454 	if (unlikely(!page))
455 		return -ENOMEM;
456 
457 	mlxsw_pci_wqe_rx_frag_set(q->pci, page, wqe, index, PAGE_SIZE);
458 	elem_info->pages[index] = page;
459 	return 0;
460 }
461 
462 static void mlxsw_pci_rdq_page_free(struct mlxsw_pci_queue *q,
463 				    struct mlxsw_pci_queue_elem_info *elem_info,
464 				    int index)
465 {
466 	struct mlxsw_pci_queue *cq = q->u.rdq.cq;
467 
468 	page_pool_put_page(cq->u.cq.page_pool, elem_info->pages[index], -1,
469 			   false);
470 }
471 
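/* Number of pages (scatter/gather entries) needed to hold a packet of
 * 'byte_count' bytes, including the software overhead (headroom and
 * skb_shared_info) carried in the first page.
 */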
472 static u8 mlxsw_pci_num_sg_entries_get(u16 byte_count)
473 {
474 	return DIV_ROUND_UP(byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD,
475 			    PAGE_SIZE);
476 }
477 
478 static int
479 mlxsw_pci_elem_info_pages_ref_store(const struct mlxsw_pci_queue *q,
480 				    const struct mlxsw_pci_queue_elem_info *el,
481 				    u16 byte_count, struct page *pages[],
482 				    u8 *p_num_sg_entries)
483 {
484 	u8 num_sg_entries;
485 	int i;
486 
487 	num_sg_entries = mlxsw_pci_num_sg_entries_get(byte_count);
488 	if (WARN_ON_ONCE(num_sg_entries > q->pci->num_sg_entries))
489 		return -EINVAL;
490 
491 	for (i = 0; i < num_sg_entries; i++)
492 		pages[i] = el->pages[i];
493 
494 	*p_num_sg_entries = num_sg_entries;
495 	return 0;
496 }
497 
498 static int
499 mlxsw_pci_rdq_pages_alloc(struct mlxsw_pci_queue *q,
500 			  struct mlxsw_pci_queue_elem_info *elem_info,
501 			  u8 num_sg_entries)
502 {
503 	struct page *old_pages[MLXSW_PCI_WQE_SG_ENTRIES];
504 	struct mlxsw_pci_queue *cq = q->u.rdq.cq;
505 	int i, err;
506 
507 	for (i = 0; i < num_sg_entries; i++) {
508 		old_pages[i] = elem_info->pages[i];
509 		err = mlxsw_pci_rdq_page_alloc(q, elem_info, i);
510 		if (err) {
511 			dev_err_ratelimited(&q->pci->pdev->dev, "Failed to alloc page\n");
512 			goto err_page_alloc;
513 		}
514 	}
515 
516 	return 0;
517 
518 err_page_alloc:
519 	for (i--; i >= 0; i--)
520 		page_pool_recycle_direct(cq->u.cq.page_pool, old_pages[i]);
521 
522 	return err;
523 }
524 
525 static void
526 mlxsw_pci_rdq_pages_recycle(struct mlxsw_pci_queue *q, struct page *pages[],
527 			    u8 num_sg_entries)
528 {
529 	struct mlxsw_pci_queue *cq = q->u.rdq.cq;
530 	int i;
531 
532 	for (i = 0; i < num_sg_entries; i++)
533 		page_pool_recycle_direct(cq->u.cq.page_pool, pages[i]);
534 }
535 
536 static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
537 			      struct mlxsw_pci_queue *q)
538 {
539 	struct mlxsw_pci_queue_elem_info *elem_info;
540 	u8 sdq_count = mlxsw_pci->num_sdqs;
541 	struct mlxsw_pci_queue *cq;
542 	u8 cq_num;
543 	int i, j;
544 	int err;
545 
546 	q->producer_counter = 0;
547 	q->consumer_counter = 0;
548 
549 	/* Set the CQ with the same number as this RDQ, using a base
550 	 * above the SDQ count, as the lower CQs are assigned to SDQs.
551 	 */
552 	cq_num = sdq_count + q->num;
553 	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, cq_num);
554 	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
555 	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
556 		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
557 
558 		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
559 	}
560 
561 	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
562 	if (err)
563 		return err;
564 
565 	cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num);
566 	cq->u.cq.dq = q;
567 	q->u.rdq.cq = cq;
568 
569 	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
570 
571 	for (i = 0; i < q->count; i++) {
572 		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
573 		BUG_ON(!elem_info);
574 
575 		for (j = 0; j < mlxsw_pci->num_sg_entries; j++) {
576 			err = mlxsw_pci_rdq_page_alloc(q, elem_info, j);
577 			if (err)
578 				goto rollback;
579 		}
580 		/* Everything is set up, ring doorbell to pass elem to HW */
581 		q->producer_counter++;
582 		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
583 	}
584 
585 	return 0;
586 
587 rollback:
588 	for (i--; i >= 0; i--) {
589 		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
590 		for (j--; j >= 0; j--)
591 			mlxsw_pci_rdq_page_free(q, elem_info, j);
592 		j = mlxsw_pci->num_sg_entries;
593 	}
594 	q->u.rdq.cq = NULL;
595 	cq->u.cq.dq = NULL;
596 	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
597 
598 	return err;
599 }
600 
601 static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
602 			       struct mlxsw_pci_queue *q)
603 {
604 	struct mlxsw_pci_queue_elem_info *elem_info;
605 	int i, j;
606 
607 	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
608 	for (i = 0; i < q->count; i++) {
609 		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
610 		for (j = 0; j < mlxsw_pci->num_sg_entries; j++)
611 			mlxsw_pci_rdq_page_free(q, elem_info, j);
612 	}
613 }
614 
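/* Choose the CQE version before the CQ size is computed: CQs that serve SDQs
 * fall back to CQE version 1 when the device does not support CQE version 2
 * on SDQs.
 */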
615 static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
616 				  struct mlxsw_pci_queue *q)
617 {
618 	q->u.cq.v = mlxsw_pci->max_cqe_ver;
619 
620 	if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
621 	    q->num < mlxsw_pci->num_sdqs &&
622 	    !mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core))
623 		q->u.cq.v = MLXSW_PCI_CQE_V1;
624 }
625 
626 static unsigned int mlxsw_pci_read32_off(struct mlxsw_pci *mlxsw_pci,
627 					 ptrdiff_t off)
628 {
629 	return ioread32be(mlxsw_pci->hw_addr + off);
630 }
631 
632 static void mlxsw_pci_skb_cb_ts_set(struct mlxsw_pci *mlxsw_pci,
633 				    struct sk_buff *skb,
634 				    enum mlxsw_pci_cqe_v cqe_v, char *cqe)
635 {
636 	u8 ts_type;
637 
638 	if (cqe_v != MLXSW_PCI_CQE_V2)
639 		return;
640 
641 	ts_type = mlxsw_pci_cqe2_time_stamp_type_get(cqe);
642 
643 	if (ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC &&
644 	    ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_MIRROR_UTC)
645 		return;
646 
647 	mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe);
648 	mlxsw_skb_cb(skb)->cqe_ts.nsec =
649 		mlxsw_pci_cqe2_time_stamp_nsec_get(cqe);
650 }
651 
652 static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
653 				     struct mlxsw_pci_queue *q,
654 				     u16 consumer_counter_limit,
655 				     enum mlxsw_pci_cqe_v cqe_v,
656 				     char *cqe, int budget)
657 {
658 	struct pci_dev *pdev = mlxsw_pci->pdev;
659 	struct mlxsw_pci_queue_elem_info *elem_info;
660 	struct mlxsw_tx_info tx_info;
661 	char *wqe;
662 	struct sk_buff *skb;
663 	int i;
664 
665 	spin_lock(&q->lock);
666 	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
667 	tx_info = mlxsw_skb_cb(elem_info->sdq.skb)->tx_info;
668 	skb = elem_info->sdq.skb;
669 	wqe = elem_info->elem;
670 	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
671 		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
672 
673 	if (unlikely(!tx_info.is_emad &&
674 		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
675 		mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
676 		mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
677 					   tx_info.local_port);
678 		skb = NULL;
679 	}
680 
681 	if (skb)
682 		napi_consume_skb(skb, budget);
683 	elem_info->sdq.skb = NULL;
684 
685 	if (q->consumer_counter++ != consumer_counter_limit)
686 		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
687 	spin_unlock(&q->lock);
688 }
689 
690 static void mlxsw_pci_cqe_rdq_md_tx_port_init(struct sk_buff *skb,
691 					      const char *cqe)
692 {
693 	struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);
694 
695 	if (mlxsw_pci_cqe2_tx_lag_get(cqe)) {
696 		cb->rx_md_info.tx_port_is_lag = true;
697 		cb->rx_md_info.tx_lag_id = mlxsw_pci_cqe2_tx_lag_id_get(cqe);
698 		cb->rx_md_info.tx_lag_port_index =
699 			mlxsw_pci_cqe2_tx_lag_subport_get(cqe);
700 	} else {
701 		cb->rx_md_info.tx_port_is_lag = false;
702 		cb->rx_md_info.tx_sys_port =
703 			mlxsw_pci_cqe2_tx_system_port_get(cqe);
704 	}
705 
706 	if (cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT &&
707 	    cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_INVALID)
708 		cb->rx_md_info.tx_port_valid = 1;
709 	else
710 		cb->rx_md_info.tx_port_valid = 0;
711 }
712 
713 static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe)
714 {
715 	struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);
716 
717 	cb->rx_md_info.tx_congestion = mlxsw_pci_cqe2_mirror_cong_get(cqe);
718 	if (cb->rx_md_info.tx_congestion != MLXSW_PCI_CQE2_MIRROR_CONG_INVALID)
719 		cb->rx_md_info.tx_congestion_valid = 1;
720 	else
721 		cb->rx_md_info.tx_congestion_valid = 0;
722 	cb->rx_md_info.tx_congestion <<= MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT;
723 
724 	cb->rx_md_info.latency = mlxsw_pci_cqe2_mirror_latency_get(cqe);
725 	if (cb->rx_md_info.latency != MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID)
726 		cb->rx_md_info.latency_valid = 1;
727 	else
728 		cb->rx_md_info.latency_valid = 0;
729 
730 	cb->rx_md_info.tx_tc = mlxsw_pci_cqe2_mirror_tclass_get(cqe);
731 	if (cb->rx_md_info.tx_tc != MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID)
732 		cb->rx_md_info.tx_tc_valid = 1;
733 	else
734 		cb->rx_md_info.tx_tc_valid = 0;
735 
736 	mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
737 }
738 
739 static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
740 				     struct mlxsw_pci_queue *q,
741 				     u16 consumer_counter_limit,
742 				     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
743 {
744 	struct pci_dev *pdev = mlxsw_pci->pdev;
745 	struct page *pages[MLXSW_PCI_WQE_SG_ENTRIES];
746 	struct mlxsw_pci_queue_elem_info *elem_info;
747 	struct mlxsw_rx_info rx_info = {};
748 	struct sk_buff *skb;
749 	u8 num_sg_entries;
750 	u16 byte_count;
751 	int err;
752 
753 	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
754 
755 	if (q->consumer_counter++ != consumer_counter_limit)
756 		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
757 
758 	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
759 	if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
760 		byte_count -= ETH_FCS_LEN;
761 
762 	err = mlxsw_pci_elem_info_pages_ref_store(q, elem_info, byte_count,
763 						  pages, &num_sg_entries);
764 	if (err)
765 		goto out;
766 
767 	err = mlxsw_pci_rdq_pages_alloc(q, elem_info, num_sg_entries);
768 	if (err)
769 		goto out;
770 
771 	skb = mlxsw_pci_rdq_build_skb(q, pages, byte_count);
772 	if (IS_ERR(skb)) {
773 		dev_err_ratelimited(&pdev->dev, "Failed to build skb for RDQ\n");
774 		mlxsw_pci_rdq_pages_recycle(q, pages, num_sg_entries);
775 		goto out;
776 	}
777 
778 	skb_mark_for_recycle(skb);
779 
780 	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
781 		rx_info.is_lag = true;
782 		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
783 		rx_info.lag_port_index =
784 			mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
785 	} else {
786 		rx_info.is_lag = false;
787 		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
788 	}
789 
790 	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
791 
792 	if (rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_INGRESS_ACL ||
793 	    rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_EGRESS_ACL) {
794 		u32 cookie_index = 0;
795 
796 		if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2)
797 			cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe);
798 		mlxsw_skb_cb(skb)->rx_md_info.cookie_index = cookie_index;
799 	} else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 &&
800 		   rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 &&
801 		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
802 		rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe);
803 		mlxsw_pci_cqe_rdq_md_init(skb, cqe);
804 	} else if (rx_info.trap_id == MLXSW_TRAP_ID_PKT_SAMPLE &&
805 		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
806 		mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
807 	}
808 
809 	mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
810 
811 	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
812 
813 out:
814 	q->producer_counter++;
815 	return;
816 }
817 
818 static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
819 {
820 	struct mlxsw_pci_queue_elem_info *elem_info;
821 	char *elem;
822 	bool owner_bit;
823 
824 	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
825 	elem = elem_info->elem;
826 	owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
827 	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
828 		return NULL;
829 	q->consumer_counter++;
830 	rmb(); /* make sure we read owned bit before the rest of elem */
831 	return elem;
832 }
833 
834 static bool mlxsw_pci_cq_cqe_to_handle(struct mlxsw_pci_queue *q)
835 {
836 	struct mlxsw_pci_queue_elem_info *elem_info;
837 	bool owner_bit;
838 
839 	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
840 	owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem_info->elem);
841 	return !mlxsw_pci_elem_hw_owned(q, owner_bit);
842 }
843 
844 static int mlxsw_pci_napi_poll_cq_rx(struct napi_struct *napi, int budget)
845 {
846 	struct mlxsw_pci_queue *q = container_of(napi, struct mlxsw_pci_queue,
847 						 u.cq.napi);
848 	struct mlxsw_pci_queue *rdq = q->u.cq.dq;
849 	struct mlxsw_pci *mlxsw_pci = q->pci;
850 	int work_done = 0;
851 	char *cqe;
852 
853 	/* If the budget is 0, Rx processing should be skipped. */
854 	if (unlikely(!budget))
855 		return 0;
856 
857 	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
858 		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
859 		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
860 		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
861 
862 		if (unlikely(sendq)) {
863 			WARN_ON_ONCE(1);
864 			continue;
865 		}
866 
867 		if (unlikely(dqn != rdq->num)) {
868 			WARN_ON_ONCE(1);
869 			continue;
870 		}
871 
872 		mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
873 					 wqe_counter, q->u.cq.v, cqe);
874 
875 		if (++work_done == budget)
876 			break;
877 	}
878 
879 	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
880 	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, rdq);
881 
882 	if (work_done < budget)
883 		goto processing_completed;
884 
885 	/* The driver still has outstanding work to do and the budget was
886 	 * exhausted. Return exactly 'budget'; in that case, the NAPI instance
887 	 * will be polled again.
888 	 */
889 	if (mlxsw_pci_cq_cqe_to_handle(q))
890 		goto out;
891 
892 	/* The driver processed all the completions and handled exactly
893 	 * 'budget'. Return 'budget - 1' to distinguish from the case where the
894 	 * driver still has completions to handle.
895 	 */
896 	if (work_done == budget)
897 		work_done--;
898 
899 processing_completed:
900 	if (napi_complete_done(napi, work_done))
901 		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
902 out:
903 	return work_done;
904 }
905 
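/* Tx completion processing is not limited by the NAPI budget. Each CQE is
 * copied to a local buffer and the CQ doorbell is rung before the completion
 * is handled, so the hardware can reuse the CQE right away. The value
 * reported to napi_complete_done() is capped at budget - 1 so the NAPI
 * instance always completes.
 */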
906 static int mlxsw_pci_napi_poll_cq_tx(struct napi_struct *napi, int budget)
907 {
908 	struct mlxsw_pci_queue *q = container_of(napi, struct mlxsw_pci_queue,
909 						 u.cq.napi);
910 	struct mlxsw_pci_queue *sdq = q->u.cq.dq;
911 	struct mlxsw_pci *mlxsw_pci = q->pci;
912 	int work_done = 0;
913 	char *cqe;
914 
915 	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
916 		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
917 		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
918 		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
919 		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
920 
921 		if (unlikely(!sendq)) {
922 			WARN_ON_ONCE(1);
923 			continue;
924 		}
925 
926 		if (unlikely(dqn != sdq->num)) {
927 			WARN_ON_ONCE(1);
928 			continue;
929 		}
930 
931 		memcpy(ncqe, cqe, q->elem_size);
932 		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
933 
934 		mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
935 					 wqe_counter, q->u.cq.v, ncqe, budget);
936 
937 		work_done++;
938 	}
939 
940 	/* If the budget is 0, napi_complete_done() should never be called. */
941 	if (unlikely(!budget))
942 		goto processing_completed;
943 
944 	work_done = min(work_done, budget - 1);
945 	if (unlikely(!napi_complete_done(napi, work_done)))
946 		goto out;
947 
948 processing_completed:
949 	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
950 out:
951 	return work_done;
952 }
953 
954 static enum mlxsw_pci_cq_type
955 mlxsw_pci_cq_type(const struct mlxsw_pci *mlxsw_pci,
956 		  const struct mlxsw_pci_queue *q)
957 {
958 	/* Each CQ is mapped to one DQ. The first 'num_sdqs' queues are used
959 	 * for SDQs and the rest are used for RDQs.
960 	 */
961 	if (q->num < mlxsw_pci->num_sdqs)
962 		return MLXSW_PCI_CQ_SDQ;
963 
964 	return MLXSW_PCI_CQ_RDQ;
965 }
966 
967 static void mlxsw_pci_cq_napi_setup(struct mlxsw_pci_queue *q,
968 				    enum mlxsw_pci_cq_type cq_type)
969 {
970 	struct mlxsw_pci *mlxsw_pci = q->pci;
971 
972 	switch (cq_type) {
973 	case MLXSW_PCI_CQ_SDQ:
974 		netif_napi_add(mlxsw_pci->napi_dev_tx, &q->u.cq.napi,
975 			       mlxsw_pci_napi_poll_cq_tx);
976 		break;
977 	case MLXSW_PCI_CQ_RDQ:
978 		netif_napi_add(mlxsw_pci->napi_dev_rx, &q->u.cq.napi,
979 			       mlxsw_pci_napi_poll_cq_rx);
980 		break;
981 	}
982 }
983 
984 static void mlxsw_pci_cq_napi_teardown(struct mlxsw_pci_queue *q)
985 {
986 	netif_napi_del(&q->u.cq.napi);
987 }
988 
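/* Only CQs that serve RDQs get a page pool; its size covers a full WQE ring
 * worth of packets times the per-packet scatter/gather entries.
 */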
989 static int mlxsw_pci_cq_page_pool_init(struct mlxsw_pci_queue *q,
990 				       enum mlxsw_pci_cq_type cq_type)
991 {
992 	struct page_pool_params pp_params = {};
993 	struct mlxsw_pci *mlxsw_pci = q->pci;
994 	struct page_pool *page_pool;
995 
996 	if (cq_type != MLXSW_PCI_CQ_RDQ)
997 		return 0;
998 
999 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
1000 	pp_params.pool_size = MLXSW_PCI_WQE_COUNT * mlxsw_pci->num_sg_entries;
1001 	pp_params.nid = dev_to_node(&mlxsw_pci->pdev->dev);
1002 	pp_params.dev = &mlxsw_pci->pdev->dev;
1003 	pp_params.napi = &q->u.cq.napi;
1004 	pp_params.dma_dir = DMA_FROM_DEVICE;
1005 	pp_params.max_len = PAGE_SIZE;
1006 
1007 	page_pool = page_pool_create(&pp_params);
1008 	if (IS_ERR(page_pool))
1009 		return PTR_ERR(page_pool);
1010 
1011 	q->u.cq.page_pool = page_pool;
1012 	return 0;
1013 }
1014 
1015 static void mlxsw_pci_cq_page_pool_fini(struct mlxsw_pci_queue *q,
1016 					enum mlxsw_pci_cq_type cq_type)
1017 {
1018 	if (cq_type != MLXSW_PCI_CQ_RDQ)
1019 		return;
1020 
1021 	page_pool_destroy(q->u.cq.page_pool);
1022 }
1023 
1024 static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
1025 			     struct mlxsw_pci_queue *q)
1026 {
1027 	enum mlxsw_pci_cq_type cq_type = mlxsw_pci_cq_type(mlxsw_pci, q);
1028 	int i;
1029 	int err;
1030 
1031 	q->consumer_counter = 0;
1032 
1033 	for (i = 0; i < q->count; i++) {
1034 		char *elem = mlxsw_pci_queue_elem_get(q, i);
1035 
1036 		mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
1037 	}
1038 
1039 	if (q->u.cq.v == MLXSW_PCI_CQE_V1)
1040 		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
1041 				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
1042 	else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
1043 		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
1044 				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);
1045 
1046 	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
1047 	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
1048 	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
1049 	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
1050 		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
1051 
1052 		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
1053 	}
1054 	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
1055 	if (err)
1056 		return err;
1057 	mlxsw_pci_cq_napi_setup(q, cq_type);
1058 
1059 	err = mlxsw_pci_cq_page_pool_init(q, cq_type);
1060 	if (err)
1061 		goto err_page_pool_init;
1062 
1063 	napi_enable(&q->u.cq.napi);
1064 	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
1065 	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
1066 	return 0;
1067 
1068 err_page_pool_init:
1069 	mlxsw_pci_cq_napi_teardown(q);
1070 	return err;
1071 }
1072 
1073 static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
1074 			      struct mlxsw_pci_queue *q)
1075 {
1076 	enum mlxsw_pci_cq_type cq_type = mlxsw_pci_cq_type(mlxsw_pci, q);
1077 
1078 	napi_disable(&q->u.cq.napi);
1079 	mlxsw_pci_cq_page_pool_fini(q, cq_type);
1080 	mlxsw_pci_cq_napi_teardown(q);
1081 	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
1082 }
1083 
1084 static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
1085 {
1086 	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
1087 					     MLXSW_PCI_CQE01_COUNT;
1088 }
1089 
1090 static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
1091 {
1092 	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
1093 					       MLXSW_PCI_CQE01_SIZE;
1094 }
1095 
1096 static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
1097 {
1098 	struct mlxsw_pci_queue_elem_info *elem_info;
1099 	char *elem;
1100 	bool owner_bit;
1101 
1102 	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
1103 	elem = elem_info->elem;
1104 	owner_bit = mlxsw_pci_eqe_owner_get(elem);
1105 	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
1106 		return NULL;
1107 	q->consumer_counter++;
1108 	rmb(); /* make sure we read owned bit before the rest of elem */
1109 	return elem;
1110 }
1111 
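/* The EQ tasklet drains up to half of the EQ entries per run, records which
 * CQs reported completions in a bitmap, acknowledges and re-arms the EQ, and
 * then schedules NAPI for every CQ that had activity.
 */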
1112 static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t)
1113 {
1114 	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
1115 	struct mlxsw_pci_queue *q = from_tasklet(q, t, u.eq.tasklet);
1116 	struct mlxsw_pci *mlxsw_pci = q->pci;
1117 	int credits = q->count >> 1;
1118 	u8 cqn, cq_count;
1119 	int items = 0;
1120 	char *eqe;
1121 
1122 	memset(&active_cqns, 0, sizeof(active_cqns));
1123 
1124 	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
1125 		cqn = mlxsw_pci_eqe_cqn_get(eqe);
1126 		set_bit(cqn, active_cqns);
1127 
1128 		if (++items == credits)
1129 			break;
1130 	}
1131 
1132 	if (!items)
1133 		return;
1134 
1135 	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
1136 	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
1137 
1138 	cq_count = mlxsw_pci->num_cqs;
1139 	for_each_set_bit(cqn, active_cqns, cq_count) {
1140 		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
1141 		napi_schedule(&q->u.cq.napi);
1142 	}
1143 }
1144 
1145 static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
1146 			     struct mlxsw_pci_queue *q)
1147 {
1148 	int i;
1149 	int err;
1150 
1151 	/* We expect to initialize only one EQ, which gets num=0 as it is
1152 	 * located at index zero. We use the EQ as EQ1, so set the number for
1153 	 * future use.
1154 	 */
1155 	WARN_ON_ONCE(q->num);
1156 	q->num = MLXSW_PCI_EQ_COMP_NUM;
1157 
1158 	q->consumer_counter = 0;
1159 
1160 	for (i = 0; i < q->count; i++) {
1161 		char *elem = mlxsw_pci_queue_elem_get(q, i);
1162 
1163 		mlxsw_pci_eqe_owner_set(elem, 1);
1164 	}
1165 
1166 	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
1167 	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
1168 	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
1169 	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
1170 		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
1171 
1172 		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
1173 	}
1174 	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
1175 	if (err)
1176 		return err;
1177 	tasklet_setup(&q->u.eq.tasklet, mlxsw_pci_eq_tasklet);
1178 	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
1179 	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
1180 	return 0;
1181 }
1182 
1183 static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
1184 			      struct mlxsw_pci_queue *q)
1185 {
1186 	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
1187 }
1188 
1189 struct mlxsw_pci_queue_ops {
1190 	const char *name;
1191 	enum mlxsw_pci_queue_type type;
1192 	void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
1193 			 struct mlxsw_pci_queue *q);
1194 	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
1195 		    struct mlxsw_pci_queue *q);
1196 	void (*fini)(struct mlxsw_pci *mlxsw_pci,
1197 		     struct mlxsw_pci_queue *q);
1198 	u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
1199 	u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
1200 	u16 elem_count;
1201 	u8 elem_size;
1202 };
1203 
1204 static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
1205 	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
1206 	.init		= mlxsw_pci_sdq_init,
1207 	.fini		= mlxsw_pci_sdq_fini,
1208 	.elem_count	= MLXSW_PCI_WQE_COUNT,
1209 	.elem_size	= MLXSW_PCI_WQE_SIZE,
1210 };
1211 
1212 static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
1213 	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
1214 	.init		= mlxsw_pci_rdq_init,
1215 	.fini		= mlxsw_pci_rdq_fini,
1216 	.elem_count	= MLXSW_PCI_WQE_COUNT,
1217 	.elem_size	= MLXSW_PCI_WQE_SIZE
1218 };
1219 
1220 static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
1221 	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
1222 	.pre_init	= mlxsw_pci_cq_pre_init,
1223 	.init		= mlxsw_pci_cq_init,
1224 	.fini		= mlxsw_pci_cq_fini,
1225 	.elem_count_f	= mlxsw_pci_cq_elem_count,
1226 	.elem_size_f	= mlxsw_pci_cq_elem_size
1227 };
1228 
1229 static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
1230 	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
1231 	.init		= mlxsw_pci_eq_init,
1232 	.fini		= mlxsw_pci_eq_fini,
1233 	.elem_count	= MLXSW_PCI_EQE_COUNT,
1234 	.elem_size	= MLXSW_PCI_EQE_SIZE
1235 };
1236 
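/* Common queue setup: each queue is backed by a single coherent DMA buffer of
 * MLXSW_PCI_AQ_SIZE bytes, and per-element bookkeeping is kept in the
 * elem_info array for quick access by index.
 */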
1237 static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
1238 				const struct mlxsw_pci_queue_ops *q_ops,
1239 				struct mlxsw_pci_queue *q, u8 q_num)
1240 {
1241 	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
1242 	int i;
1243 	int err;
1244 
1245 	q->num = q_num;
1246 	if (q_ops->pre_init)
1247 		q_ops->pre_init(mlxsw_pci, q);
1248 
1249 	spin_lock_init(&q->lock);
1250 	q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
1251 					 q_ops->elem_count;
1252 	q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
1253 					    q_ops->elem_size;
1254 	q->type = q_ops->type;
1255 	q->pci = mlxsw_pci;
1256 
1257 	mem_item->size = MLXSW_PCI_AQ_SIZE;
1258 	mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
1259 					   mem_item->size, &mem_item->mapaddr,
1260 					   GFP_KERNEL);
1261 	if (!mem_item->buf)
1262 		return -ENOMEM;
1263 
1264 	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
1265 	if (!q->elem_info) {
1266 		err = -ENOMEM;
1267 		goto err_elem_info_alloc;
1268 	}
1269 
1270 	/* Initialize the per-element info array (elem_info) that points into
1271 	 * the DMA mapped buffer, for easy access later.
1272 	 */
1273 	for (i = 0; i < q->count; i++) {
1274 		struct mlxsw_pci_queue_elem_info *elem_info;
1275 
1276 		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
1277 		elem_info->elem =
1278 			__mlxsw_pci_queue_elem_get(q, q->elem_size, i);
1279 	}
1280 
1281 	mlxsw_cmd_mbox_zero(mbox);
1282 	err = q_ops->init(mlxsw_pci, mbox, q);
1283 	if (err)
1284 		goto err_q_ops_init;
1285 	return 0;
1286 
1287 err_q_ops_init:
1288 	kfree(q->elem_info);
1289 err_elem_info_alloc:
1290 	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
1291 			  mem_item->buf, mem_item->mapaddr);
1292 	return err;
1293 }
1294 
1295 static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
1296 				 const struct mlxsw_pci_queue_ops *q_ops,
1297 				 struct mlxsw_pci_queue *q)
1298 {
1299 	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
1300 
1301 	q_ops->fini(mlxsw_pci, q);
1302 	kfree(q->elem_info);
1303 	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
1304 			  mem_item->buf, mem_item->mapaddr);
1305 }
1306 
1307 static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
1308 				      const struct mlxsw_pci_queue_ops *q_ops,
1309 				      u8 num_qs)
1310 {
1311 	struct mlxsw_pci_queue_type_group *queue_group;
1312 	int i;
1313 	int err;
1314 
1315 	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
1316 	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
1317 	if (!queue_group->q)
1318 		return -ENOMEM;
1319 
1320 	for (i = 0; i < num_qs; i++) {
1321 		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
1322 					   &queue_group->q[i], i);
1323 		if (err)
1324 			goto err_queue_init;
1325 	}
1326 	queue_group->count = num_qs;
1327 
1328 	return 0;
1329 
1330 err_queue_init:
1331 	for (i--; i >= 0; i--)
1332 		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
1333 	kfree(queue_group->q);
1334 	return err;
1335 }
1336 
1337 static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
1338 				       const struct mlxsw_pci_queue_ops *q_ops)
1339 {
1340 	struct mlxsw_pci_queue_type_group *queue_group;
1341 	int i;
1342 
1343 	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
1344 	for (i = 0; i < queue_group->count; i++)
1345 		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
1346 	kfree(queue_group->q);
1347 }
1348 
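/* Query the asynchronous queue capabilities, validate the queue counts and
 * sizes against the driver's fixed ring sizes, and then bring up the EQ, CQs,
 * SDQs and RDQs in that order.
 */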
1349 static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
1350 {
1351 	struct pci_dev *pdev = mlxsw_pci->pdev;
1352 	u8 num_sdqs;
1353 	u8 sdq_log2sz;
1354 	u8 num_rdqs;
1355 	u8 rdq_log2sz;
1356 	u8 num_cqs;
1357 	u8 cq_log2sz;
1358 	u8 cqv2_log2sz;
1359 	u8 num_eqs;
1360 	u8 eq_log2sz;
1361 	int err;
1362 
1363 	mlxsw_cmd_mbox_zero(mbox);
1364 	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
1365 	if (err)
1366 		return err;
1367 
1368 	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
1369 	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
1370 	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
1371 	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
1372 	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
1373 	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
1374 	cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
1375 	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
1376 	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
1377 
1378 	if (num_sdqs + num_rdqs > num_cqs ||
1379 	    num_sdqs < MLXSW_PCI_SDQS_MIN ||
1380 	    num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_MAX) {
1381 		dev_err(&pdev->dev, "Unsupported number of queues\n");
1382 		return -EINVAL;
1383 	}
1384 
1385 	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
1386 	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
1387 	    (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
1388 	    (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
1389 	     (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
1390 	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
1391 		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
1392 		return -EINVAL;
1393 	}
1394 
1395 	mlxsw_pci->num_cqs = num_cqs;
1396 	mlxsw_pci->num_sdqs = num_sdqs;
1397 
1398 	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
1399 					 MLXSW_PCI_EQS_COUNT);
1400 	if (err) {
1401 		dev_err(&pdev->dev, "Failed to initialize event queues\n");
1402 		return err;
1403 	}
1404 
1405 	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
1406 					 num_cqs);
1407 	if (err) {
1408 		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
1409 		goto err_cqs_init;
1410 	}
1411 
1412 	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
1413 					 num_sdqs);
1414 	if (err) {
1415 		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
1416 		goto err_sdqs_init;
1417 	}
1418 
1419 	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
1420 					 num_rdqs);
1421 	if (err) {
1422 		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
1423 		goto err_rdqs_init;
1424 	}
1425 
1426 	return 0;
1427 
1428 err_rdqs_init:
1429 	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
1430 err_sdqs_init:
1431 	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
1432 err_cqs_init:
1433 	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
1434 	return err;
1435 }
1436 
1437 static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
1438 {
1439 	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
1440 	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
1441 	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
1442 	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
1443 }
1444 
1445 static void
1446 mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
1447 				     char *mbox, int index,
1448 				     const struct mlxsw_swid_config *swid)
1449 {
1450 	u8 mask = 0;
1451 
1452 	if (swid->used_type) {
1453 		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
1454 			mbox, index, swid->type);
1455 		mask |= 1;
1456 	}
1457 	if (swid->used_properties) {
1458 		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
1459 			mbox, index, swid->properties);
1460 		mask |= 2;
1461 	}
1462 	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
1463 }
1464 
1465 static int
1466 mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
1467 				const struct mlxsw_config_profile *profile,
1468 				struct mlxsw_res *res)
1469 {
1470 	u64 single_size, double_size, linear_size;
1471 	int err;
1472 
1473 	err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
1474 				       &single_size, &double_size,
1475 				       &linear_size);
1476 	if (err)
1477 		return err;
1478 
1479 	MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
1480 	MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
1481 	MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);
1482 
1483 	return 0;
1484 }
1485 
1486 static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
1487 				    const struct mlxsw_config_profile *profile,
1488 				    struct mlxsw_res *res)
1489 {
1490 	int i;
1491 	int err;
1492 
1493 	mlxsw_cmd_mbox_zero(mbox);
1494 
1495 	if (profile->used_max_vepa_channels) {
1496 		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
1497 			mbox, 1);
1498 		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
1499 			mbox, profile->max_vepa_channels);
1500 	}
1501 	if (profile->used_max_lag) {
1502 		mlxsw_cmd_mbox_config_profile_set_max_lag_set(mbox, 1);
1503 		mlxsw_cmd_mbox_config_profile_max_lag_set(mbox,
1504 							  profile->max_lag);
1505 	}
1506 	if (profile->used_max_mid) {
1507 		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
1508 			mbox, 1);
1509 		mlxsw_cmd_mbox_config_profile_max_mid_set(
1510 			mbox, profile->max_mid);
1511 	}
1512 	if (profile->used_max_pgt) {
1513 		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
1514 			mbox, 1);
1515 		mlxsw_cmd_mbox_config_profile_max_pgt_set(
1516 			mbox, profile->max_pgt);
1517 	}
1518 	if (profile->used_max_system_port) {
1519 		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
1520 			mbox, 1);
1521 		mlxsw_cmd_mbox_config_profile_max_system_port_set(
1522 			mbox, profile->max_system_port);
1523 	}
1524 	if (profile->used_max_vlan_groups) {
1525 		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
1526 			mbox, 1);
1527 		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
1528 			mbox, profile->max_vlan_groups);
1529 	}
1530 	if (profile->used_max_regions) {
1531 		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
1532 			mbox, 1);
1533 		mlxsw_cmd_mbox_config_profile_max_regions_set(
1534 			mbox, profile->max_regions);
1535 	}
1536 	if (profile->used_flood_tables) {
1537 		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
1538 			mbox, 1);
1539 		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
1540 			mbox, profile->max_flood_tables);
1541 		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
1542 			mbox, profile->max_vid_flood_tables);
1543 		mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
1544 			mbox, profile->max_fid_offset_flood_tables);
1545 		mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
1546 			mbox, profile->fid_offset_flood_table_size);
1547 		mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
1548 			mbox, profile->max_fid_flood_tables);
1549 		mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
1550 			mbox, profile->fid_flood_table_size);
1551 	}
1552 	if (profile->flood_mode_prefer_cff && mlxsw_pci->cff_support) {
1553 		enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode =
1554 			MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CFF;
1555 
1556 		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(mbox, 1);
1557 		mlxsw_cmd_mbox_config_profile_flood_mode_set(mbox, flood_mode);
1558 		mlxsw_pci->flood_mode = flood_mode;
1559 	} else if (profile->used_flood_mode) {
1560 		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
1561 			mbox, 1);
1562 		mlxsw_cmd_mbox_config_profile_flood_mode_set(
1563 			mbox, profile->flood_mode);
1564 		mlxsw_pci->flood_mode = profile->flood_mode;
1565 	} else {
1566 		WARN_ON(1);
1567 		return -EINVAL;
1568 	}
1569 	if (profile->used_max_ib_mc) {
1570 		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
1571 			mbox, 1);
1572 		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
1573 			mbox, profile->max_ib_mc);
1574 	}
1575 	if (profile->used_max_pkey) {
1576 		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
1577 			mbox, 1);
1578 		mlxsw_cmd_mbox_config_profile_max_pkey_set(
1579 			mbox, profile->max_pkey);
1580 	}
1581 	if (profile->used_ar_sec) {
1582 		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
1583 			mbox, 1);
1584 		mlxsw_cmd_mbox_config_profile_ar_sec_set(
1585 			mbox, profile->ar_sec);
1586 	}
1587 	if (profile->used_adaptive_routing_group_cap) {
1588 		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
1589 			mbox, 1);
1590 		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
1591 			mbox, profile->adaptive_routing_group_cap);
1592 	}
1593 	if (profile->used_ubridge) {
1594 		mlxsw_cmd_mbox_config_profile_set_ubridge_set(mbox, 1);
1595 		mlxsw_cmd_mbox_config_profile_ubridge_set(mbox,
1596 							  profile->ubridge);
1597 	}
1598 	if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
1599 		err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
1600 		if (err)
1601 			return err;
1602 
1603 		mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
1604 		mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
1605 					MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
1606 		mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
1607 									   1);
1608 		mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
1609 					MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
1610 		mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
1611 								mbox, 1);
1612 		mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
1613 					MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
1614 	}
1615 
1616 	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
1617 		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
1618 						     &profile->swid_config[i]);
1619 
1620 	if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
1621 		mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
1622 		mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
1623 	}
1624 
1625 	if (profile->used_cqe_time_stamp_type) {
1626 		mlxsw_cmd_mbox_config_profile_set_cqe_time_stamp_type_set(mbox,
1627 									  1);
1628 		mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type_set(mbox,
1629 					profile->cqe_time_stamp_type);
1630 	}
1631 
1632 	if (profile->lag_mode_prefer_sw && mlxsw_pci->lag_mode_support) {
1633 		enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode =
1634 			MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW;
1635 
1636 		mlxsw_cmd_mbox_config_profile_set_lag_mode_set(mbox, 1);
1637 		mlxsw_cmd_mbox_config_profile_lag_mode_set(mbox, lag_mode);
1638 		mlxsw_pci->lag_mode = lag_mode;
1639 	} else {
1640 		mlxsw_pci->lag_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_FW;
1641 	}
1642 	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
1643 }
1644 
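/* Each optional CONFIG_PROFILE field above follows the same pairing: a
 * "set_<field>" flag marks the field as valid in the mailbox and the value
 * itself is written alongside it; fields whose flag stays zero are presumably
 * left at their firmware defaults. A minimal sketch of the pattern, using a
 * hypothetical field "foo" (these accessors are illustrative and do not exist
 * in this file):
 *
 *	if (profile->used_foo) {
 *		mlxsw_cmd_mbox_config_profile_set_foo_set(mbox, 1);
 *		mlxsw_cmd_mbox_config_profile_foo_set(mbox, profile->foo);
 *	}
 */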
1645 static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
1646 {
1647 	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
1648 	int err;
1649 
1650 	mlxsw_cmd_mbox_zero(mbox);
1651 	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
1652 	if (err)
1653 		return err;
1654 	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
1655 	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
1656 	return 0;
1657 }
1658 
1659 static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
1660 				  u16 num_pages)
1661 {
1662 	struct mlxsw_pci_mem_item *mem_item;
1663 	int nent = 0;
1664 	int i;
1665 	int err;
1666 
1667 	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
1668 					   GFP_KERNEL);
1669 	if (!mlxsw_pci->fw_area.items)
1670 		return -ENOMEM;
1671 	mlxsw_pci->fw_area.count = num_pages;
1672 
1673 	mlxsw_cmd_mbox_zero(mbox);
1674 	for (i = 0; i < num_pages; i++) {
1675 		mem_item = &mlxsw_pci->fw_area.items[i];
1676 
1677 		mem_item->size = MLXSW_PCI_PAGE_SIZE;
1678 		mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
1679 						   mem_item->size,
1680 						   &mem_item->mapaddr, GFP_KERNEL);
1681 		if (!mem_item->buf) {
1682 			err = -ENOMEM;
1683 			goto err_alloc;
1684 		}
1685 		mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
1686 		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
1687 		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
1688 			err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
1689 			if (err)
1690 				goto err_cmd_map_fa;
1691 			nent = 0;
1692 			mlxsw_cmd_mbox_zero(mbox);
1693 		}
1694 	}
1695 
1696 	if (nent) {
1697 		err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
1698 		if (err)
1699 			goto err_cmd_map_fa;
1700 	}
1701 
1702 	return 0;
1703 
1704 err_cmd_map_fa:
1705 err_alloc:
1706 	for (i--; i >= 0; i--) {
1707 		mem_item = &mlxsw_pci->fw_area.items[i];
1708 
1709 		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
1710 				  mem_item->buf, mem_item->mapaddr);
1711 	}
1712 	kfree(mlxsw_pci->fw_area.items);
1713 	return err;
1714 }
1715 
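/* mlxsw_pci_fw_area_init() above donates the number of host pages requested
 * by QUERY_FW to the device: every page is allocated DMA-coherently, queued
 * into the MAP_FA mailbox, and the command is issued whenever
 * MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX entries have accumulated, with one final
 * MAP_FA for the remainder. For example, if the per-command limit were 32,
 * mapping 100 pages would take three full commands plus a final one with
 * four entries.
 */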
1716 static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
1717 {
1718 	struct mlxsw_pci_mem_item *mem_item;
1719 	int i;
1720 
1721 	mlxsw_cmd_unmap_fa(mlxsw_pci->core);
1722 
1723 	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
1724 		mem_item = &mlxsw_pci->fw_area.items[i];
1725 
1726 		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
1727 				  mem_item->buf, mem_item->mapaddr);
1728 	}
1729 	kfree(mlxsw_pci->fw_area.items);
1730 }
1731 
1732 static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
1733 {
1734 	struct mlxsw_pci *mlxsw_pci = dev_id;
1735 	struct mlxsw_pci_queue *q;
1736 
1737 	q = mlxsw_pci_eq_get(mlxsw_pci);
1738 	tasklet_schedule(&q->u.eq.tasklet);
1739 	return IRQ_HANDLED;
1740 }
1741 
1742 static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
1743 				struct mlxsw_pci_mem_item *mbox)
1744 {
1745 	struct pci_dev *pdev = mlxsw_pci->pdev;
1746 	int err = 0;
1747 
1748 	mbox->size = MLXSW_CMD_MBOX_SIZE;
1749 	mbox->buf = dma_alloc_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE,
1750 				       &mbox->mapaddr, GFP_KERNEL);
1751 	if (!mbox->buf) {
1752 		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
1753 		err = -ENOMEM;
1754 	}
1755 
1756 	return err;
1757 }
1758 
1759 static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
1760 				struct mlxsw_pci_mem_item *mbox)
1761 {
1762 	struct pci_dev *pdev = mlxsw_pci->pdev;
1763 
1764 	dma_free_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
1765 			  mbox->mapaddr);
1766 }
1767 
1768 static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
1769 				    const struct pci_device_id *id,
1770 				    u32 *p_sys_status)
1771 {
1772 	unsigned long end;
1773 	u32 val;
1774 
1775 	/* We must wait for the HW to become responsive. */
1776 	msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
1777 
1778 	end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
1779 	do {
1780 		val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
1781 		if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
1782 			return 0;
1783 		cond_resched();
1784 	} while (time_before(jiffies, end));
1785 
1786 	*p_sys_status = val & MLXSW_PCI_FW_READY_MASK;
1787 
1788 	return -EBUSY;
1789 }
1790 
1791 static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci,
1792 					  bool pci_reset_sbr_supported)
1793 {
1794 	struct pci_dev *pdev = mlxsw_pci->pdev;
1795 	char mrsr_pl[MLXSW_REG_MRSR_LEN];
1796 	struct pci_dev *bridge;
1797 	int err;
1798 
1799 	if (!pci_reset_sbr_supported) {
1800 		pci_dbg(pdev, "Performing PCI hot reset instead of \"all reset\"\n");
1801 		goto sbr;
1802 	}
1803 
1804 	mlxsw_reg_mrsr_pack(mrsr_pl,
1805 			    MLXSW_REG_MRSR_COMMAND_RESET_AT_PCI_DISABLE);
1806 	err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
1807 	if (err)
1808 		return err;
1809 
1810 sbr:
1811 	device_lock_assert(&pdev->dev);
1812 
1813 	bridge = pci_upstream_bridge(pdev);
1814 	if (bridge)
1815 		pci_cfg_access_lock(bridge);
1816 	pci_cfg_access_lock(pdev);
1817 	pci_save_state(pdev);
1818 
1819 	err = __pci_reset_function_locked(pdev);
1820 	if (err)
1821 		pci_err(pdev, "PCI function reset failed with %d\n", err);
1822 
1823 	pci_restore_state(pdev);
1824 	pci_cfg_access_unlock(pdev);
1825 	if (bridge)
1826 		pci_cfg_access_unlock(bridge);
1827 
1828 	return err;
1829 }
1830 
1831 static int mlxsw_pci_reset_sw(struct mlxsw_pci *mlxsw_pci)
1832 {
1833 	char mrsr_pl[MLXSW_REG_MRSR_LEN];
1834 
1835 	mlxsw_reg_mrsr_pack(mrsr_pl, MLXSW_REG_MRSR_COMMAND_SOFTWARE_RESET);
1836 	return mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
1837 }
1838 
1839 static int
1840 mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
1841 {
1842 	struct pci_dev *pdev = mlxsw_pci->pdev;
1843 	bool pci_reset_sbr_supported = false;
1844 	char mcam_pl[MLXSW_REG_MCAM_LEN];
1845 	bool pci_reset_supported = false;
1846 	u32 sys_status;
1847 	int err;
1848 
1849 	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
1850 	if (err) {
1851 		dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
1852 			sys_status);
1853 		return err;
1854 	}
1855 
1856 	/* PCI core already issued a PCI reset, do not issue another reset. */
1857 	if (mlxsw_pci->skip_reset)
1858 		return 0;
1859 
1860 	mlxsw_reg_mcam_pack(mcam_pl,
1861 			    MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
1862 	err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl);
1863 	if (!err) {
1864 		mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
1865 				      &pci_reset_supported);
1866 		mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET_SBR,
1867 				      &pci_reset_sbr_supported);
1868 	}
1869 
1870 	if (pci_reset_supported) {
1871 		pci_dbg(pdev, "Starting PCI reset flow\n");
1872 		err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci,
1873 						     pci_reset_sbr_supported);
1874 	} else {
1875 		pci_dbg(pdev, "Starting software reset flow\n");
1876 		err = mlxsw_pci_reset_sw(mlxsw_pci);
1877 	}
1878 	if (err)
1879 		return err;
1880 
1881 	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
1882 	if (err) {
1883 		dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
1884 			sys_status);
1885 		return err;
1886 	}
1887 
1888 	return 0;
1889 }
1890 
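/* Reset flow summary: wait for FW_READY, return early if the PCI core has
 * already reset the function (skip_reset, see mlxsw_pci_reset_done() below),
 * query MCAM enhanced features for the PCI_RESET and PCI_RESET_SBR
 * capability bits, then either run the "reset at PCI disable" flow (MRSR
 * followed by a PCI function reset) or fall back to a plain MRSR software
 * reset, and finally wait for FW_READY again.
 */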
1891 static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
1892 {
1893 	int err;
1894 
1895 	err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
1896 	if (err < 0)
1897 		dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
1898 	return err;
1899 }
1900 
1901 static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
1902 {
1903 	pci_free_irq_vectors(mlxsw_pci->pdev);
1904 }
1905 
1906 static void mlxsw_pci_num_sg_entries_set(struct mlxsw_pci *mlxsw_pci)
1907 {
1908 	u8 num_sg_entries;
1909 
1910 	num_sg_entries = mlxsw_pci_num_sg_entries_get(MLXSW_PORT_MAX_MTU);
1911 	mlxsw_pci->num_sg_entries = min(num_sg_entries,
1912 					MLXSW_PCI_WQE_SG_ENTRIES);
1913 
1914 	WARN_ON(num_sg_entries > MLXSW_PCI_WQE_SG_ENTRIES);
1915 }
1916 
1917 static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
1918 			  const struct mlxsw_config_profile *profile,
1919 			  struct mlxsw_res *res)
1920 {
1921 	struct mlxsw_pci *mlxsw_pci = bus_priv;
1922 	struct pci_dev *pdev = mlxsw_pci->pdev;
1923 	char *mbox;
1924 	u16 num_pages;
1925 	int err;
1926 
1927 	mlxsw_pci->core = mlxsw_core;
1928 
1929 	mbox = mlxsw_cmd_mbox_alloc();
1930 	if (!mbox)
1931 		return -ENOMEM;
1932 
1933 	err = mlxsw_pci_reset(mlxsw_pci, mlxsw_pci->id);
1934 	if (err)
1935 		goto err_reset;
1936 
1937 	err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
1938 	if (err < 0) {
1939 		dev_err(&pdev->dev, "MSI-X init failed\n");
1940 		goto err_alloc_irq;
1941 	}
1942 
1943 	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
1944 	if (err)
1945 		goto err_query_fw;
1946 
1947 	mlxsw_pci->bus_info.fw_rev.major =
1948 		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
1949 	mlxsw_pci->bus_info.fw_rev.minor =
1950 		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
1951 	mlxsw_pci->bus_info.fw_rev.subminor =
1952 		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);
1953 
1954 	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
1955 		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
1956 		err = -EINVAL;
1957 		goto err_iface_rev;
1958 	}
1959 	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
1960 		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
1961 		err = -EINVAL;
1962 		goto err_doorbell_page_bar;
1963 	}
1964 
1965 	mlxsw_pci->doorbell_offset =
1966 		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);
1967 
1968 	if (mlxsw_cmd_mbox_query_fw_fr_rn_clk_bar_get(mbox) != 0) {
1969 		dev_err(&pdev->dev, "Unsupported free running clock BAR queried from hw\n");
1970 		err = -EINVAL;
1971 		goto err_fr_rn_clk_bar;
1972 	}
1973 
1974 	mlxsw_pci->free_running_clock_offset =
1975 		mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);
1976 
1977 	if (mlxsw_cmd_mbox_query_fw_utc_sec_bar_get(mbox) != 0) {
1978 		dev_err(&pdev->dev, "Unsupported UTC sec BAR queried from hw\n");
1979 		err = -EINVAL;
1980 		goto err_utc_sec_bar;
1981 	}
1982 
1983 	mlxsw_pci->utc_sec_offset =
1984 		mlxsw_cmd_mbox_query_fw_utc_sec_offset_get(mbox);
1985 
1986 	if (mlxsw_cmd_mbox_query_fw_utc_nsec_bar_get(mbox) != 0) {
1987 		dev_err(&pdev->dev, "Unsupported UTC nsec BAR queried from hw\n");
1988 		err = -EINVAL;
1989 		goto err_utc_nsec_bar;
1990 	}
1991 
1992 	mlxsw_pci->utc_nsec_offset =
1993 		mlxsw_cmd_mbox_query_fw_utc_nsec_offset_get(mbox);
1994 
1995 	mlxsw_pci->lag_mode_support =
1996 		mlxsw_cmd_mbox_query_fw_lag_mode_support_get(mbox);
1997 	mlxsw_pci->cff_support =
1998 		mlxsw_cmd_mbox_query_fw_cff_support_get(mbox);
1999 
2000 	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
2001 	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
2002 	if (err)
2003 		goto err_fw_area_init;
2004 
2005 	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
2006 	if (err)
2007 		goto err_boardinfo;
2008 
2009 	err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
2010 	if (err)
2011 		goto err_query_resources;
2012 
2013 	if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
2014 	    MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
2015 		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
2016 	else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
2017 		 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
2018 		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
2019 	else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
2020 		  MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
2021 		 !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
2022 		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
2023 	} else {
2024 		dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
2025 		goto err_cqe_v_check;
2026 	}
2027 
2028 	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
2029 	if (err)
2030 		goto err_config_profile;
2031 
2032 	/* Some resources depend on details of config_profile, such as unified
2033 	 * bridge model. Query the resources again to get correct values.
2034 	 */
2035 	err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
2036 	if (err)
2037 		goto err_requery_resources;
2038 
2039 	mlxsw_pci_num_sg_entries_set(mlxsw_pci);
2040 
2041 	err = mlxsw_pci_napi_devs_init(mlxsw_pci);
2042 	if (err)
2043 		goto err_napi_devs_init;
2044 
2045 	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
2046 	if (err)
2047 		goto err_aqs_init;
2048 
2049 	err = request_irq(pci_irq_vector(pdev, 0),
2050 			  mlxsw_pci_eq_irq_handler, 0,
2051 			  mlxsw_pci->bus_info.device_kind, mlxsw_pci);
2052 	if (err) {
2053 		dev_err(&pdev->dev, "IRQ request failed\n");
2054 		goto err_request_eq_irq;
2055 	}
2056 
2057 	goto mbox_put;
2058 
2059 err_request_eq_irq:
2060 	mlxsw_pci_aqs_fini(mlxsw_pci);
2061 err_aqs_init:
2062 	mlxsw_pci_napi_devs_fini(mlxsw_pci);
2063 err_napi_devs_init:
2064 err_requery_resources:
2065 err_config_profile:
2066 err_cqe_v_check:
2067 err_query_resources:
2068 err_boardinfo:
2069 	mlxsw_pci_fw_area_fini(mlxsw_pci);
2070 err_fw_area_init:
2071 err_utc_nsec_bar:
2072 err_utc_sec_bar:
2073 err_fr_rn_clk_bar:
2074 err_doorbell_page_bar:
2075 err_iface_rev:
2076 err_query_fw:
2077 	mlxsw_pci_free_irq_vectors(mlxsw_pci);
2078 err_alloc_irq:
2079 err_reset:
2080 mbox_put:
2081 	mlxsw_cmd_mbox_free(mbox);
2082 	return err;
2083 }
2084 
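/* mlxsw_pci_init() above is the bus ->init callback: it resets the device,
 * allocates the single MSI-X vector, runs QUERY_FW to validate the command
 * interface revision and the doorbell/free-running-clock/UTC BAR layout,
 * maps the firmware area, reads board info, queries resources, picks the
 * highest supported CQE version, applies the config profile and re-queries
 * resources, then sets up NAPI devices, the async queues and the EQ
 * interrupt. mlxsw_pci_fini() below tears this down in reverse order.
 */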
2085 static void mlxsw_pci_fini(void *bus_priv)
2086 {
2087 	struct mlxsw_pci *mlxsw_pci = bus_priv;
2088 
2089 	free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
2090 	mlxsw_pci_aqs_fini(mlxsw_pci);
2091 	mlxsw_pci_napi_devs_fini(mlxsw_pci);
2092 	mlxsw_pci_fw_area_fini(mlxsw_pci);
2093 	mlxsw_pci_free_irq_vectors(mlxsw_pci);
2094 }
2095 
2096 static struct mlxsw_pci_queue *
2097 mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
2098 		   const struct mlxsw_tx_info *tx_info)
2099 {
2100 	u8 ctl_sdq_count = mlxsw_pci->num_sdqs - 1;
2101 	u8 sdqn;
2102 
2103 	if (tx_info->is_emad) {
2104 		sdqn = MLXSW_PCI_SDQ_EMAD_INDEX;
2105 	} else {
2106 		BUILD_BUG_ON(MLXSW_PCI_SDQ_EMAD_INDEX != 0);
2107 		sdqn = 1 + (tx_info->local_port % ctl_sdq_count);
2108 	}
2109 
2110 	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
2111 }
2112 
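/* SDQ selection: EMADs always use SDQ 0 (MLXSW_PCI_SDQ_EMAD_INDEX); other
 * traffic is spread over the remaining SDQs by local port. For example,
 * with num_sdqs == 3 (ctl_sdq_count == 2), local port 5 maps to
 * sdqn == 1 + (5 % 2) == 2 and local port 6 maps to sdqn == 1.
 */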
2113 static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
2114 					const struct mlxsw_tx_info *tx_info)
2115 {
2116 	struct mlxsw_pci *mlxsw_pci = bus_priv;
2117 	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
2118 
2119 	return !mlxsw_pci_queue_elem_info_producer_get(q);
2120 }
2121 
2122 static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
2123 				  const struct mlxsw_tx_info *tx_info)
2124 {
2125 	struct mlxsw_pci *mlxsw_pci = bus_priv;
2126 	struct mlxsw_pci_queue *q;
2127 	struct mlxsw_pci_queue_elem_info *elem_info;
2128 	char *wqe;
2129 	int i;
2130 	int err;
2131 
2132 	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
2133 		err = skb_linearize(skb);
2134 		if (err)
2135 			return err;
2136 	}
2137 
2138 	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
2139 	spin_lock_bh(&q->lock);
2140 	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
2141 	if (!elem_info) {
2142 		/* queue is full */
2143 		err = -EAGAIN;
2144 		goto unlock;
2145 	}
2146 	mlxsw_skb_cb(skb)->tx_info = *tx_info;
2147 	elem_info->sdq.skb = skb;
2148 
2149 	wqe = elem_info->elem;
2150 	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
2151 	mlxsw_pci_wqe_lp_set(wqe, 0);
2152 	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);
2153 
2154 	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
2155 				     skb_headlen(skb), DMA_TO_DEVICE);
2156 	if (err)
2157 		goto unlock;
2158 
2159 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2160 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2161 
2162 		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
2163 					     skb_frag_address(frag),
2164 					     skb_frag_size(frag),
2165 					     DMA_TO_DEVICE);
2166 		if (err)
2167 			goto unmap_frags;
2168 	}
2169 
2170 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
2171 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2172 
2173 	/* Set unused sq entries byte count to zero. */
2174 	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
2175 		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
2176 
2177 	/* Everything is set up, ring producer doorbell to get HW going */
2178 	q->producer_counter++;
2179 	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
2180 
2181 	goto unlock;
2182 
2183 unmap_frags:
2184 	for (; i >= 0; i--)
2185 		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
2186 unlock:
2187 	spin_unlock_bh(&q->lock);
2188 	return err;
2189 }
2190 
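/* Transmit path summary: skbs with more fragments than the WQE has spare
 * scatter/gather entries are linearized first; the SDQ is then picked as
 * above, the next free WQE is claimed under the queue lock, the linear head
 * and each fragment are DMA-mapped into consecutive entries, unused entries
 * get a zero byte count, and the producer doorbell is rung. If mapping any
 * fragment fails, everything mapped so far is unmapped before returning.
 */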
2191 static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
2192 			      u32 in_mod, bool out_mbox_direct,
2193 			      char *in_mbox, size_t in_mbox_size,
2194 			      char *out_mbox, size_t out_mbox_size,
2195 			      u8 *p_status)
2196 {
2197 	struct mlxsw_pci *mlxsw_pci = bus_priv;
2198 	dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
2199 	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
2200 	unsigned long end;
2201 	bool wait_done;
2202 	int err;
2203 
2204 	*p_status = MLXSW_CMD_STATUS_OK;
2205 
2206 	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
2207 	if (err)
2208 		return err;
2209 
2210 	if (in_mbox) {
2211 		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
2212 		in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
2213 	}
2214 	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
2215 	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));
2216 
2217 	if (out_mbox)
2218 		out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
2219 	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
2220 	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));
2221 
2222 	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
2223 	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
2224 
2225 	wait_done = false;
2226 
2227 	wmb(); /* all needs to be written before we write control register */
2228 	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
2229 			  MLXSW_PCI_CIR_CTRL_GO_BIT |
2230 			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
2231 			  opcode);
2232 
2233 	end = jiffies + timeout;
2234 	do {
2235 		u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
2236 
2237 		if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
2238 			wait_done = true;
2239 			*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
2240 			break;
2241 		}
2242 		cond_resched();
2243 	} while (time_before(jiffies, end));
2244 
2245 	err = 0;
2246 	if (wait_done) {
2247 		if (*p_status)
2248 			err = -EIO;
2249 	} else {
2250 		err = -ETIMEDOUT;
2251 	}
2252 
2253 	if (!err && out_mbox && out_mbox_direct) {
2254 		/* Some commands don't use output param as address to mailbox
2255 		 * but they store output directly into registers. In that case,
2256 		 * copy registers into mbox buffer.
2257 		 */
2258 		__be32 tmp;
2259 
2260 		tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
2261 						   CIR_OUT_PARAM_HI));
2262 		memcpy(out_mbox, &tmp, sizeof(tmp));
2263 		tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
2264 						   CIR_OUT_PARAM_LO));
2265 		memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
2266 	} else if (!err && out_mbox) {
2267 		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
2268 	}
2269 
2270 	mutex_unlock(&mlxsw_pci->cmd.lock);
2271 
2272 	return err;
2273 }
2274 
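/* Command interface summary: the input mailbox (if any) is copied into a
 * DMA-coherent buffer whose address is programmed into CIR_IN_PARAM, the
 * output mailbox address into CIR_OUT_PARAM, and the opcode is then written
 * to CIR_CTRL with the GO bit set. The GO bit is polled until it clears or
 * the timeout expires; the firmware status lives in the upper bits of
 * CIR_CTRL. Commands with "direct" output return their result in the
 * CIR_OUT_PARAM registers themselves, so those are copied back instead of
 * the output mailbox.
 */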
2275 static u32 mlxsw_pci_read_frc_h(void *bus_priv)
2276 {
2277 	struct mlxsw_pci *mlxsw_pci = bus_priv;
2278 	u64 frc_offset_h;
2279 
2280 	frc_offset_h = mlxsw_pci->free_running_clock_offset;
2281 	return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_h);
2282 }
2283 
2284 static u32 mlxsw_pci_read_frc_l(void *bus_priv)
2285 {
2286 	struct mlxsw_pci *mlxsw_pci = bus_priv;
2287 	u64 frc_offset_l;
2288 
2289 	frc_offset_l = mlxsw_pci->free_running_clock_offset + 4;
2290 	return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_l);
2291 }
2292 
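/* The free-running clock is exposed as two 32-bit halves. A caller could
 * assemble a 64-bit value roughly as sketched below (illustrative only,
 * assuming the _h read holds the upper 32 bits; re-reading the high half
 * guards against the low half wrapping between the two reads):
 *
 *	u32 hi, lo;
 *
 *	do {
 *		hi = mlxsw_pci_read_frc_h(bus_priv);
 *		lo = mlxsw_pci_read_frc_l(bus_priv);
 *	} while (hi != mlxsw_pci_read_frc_h(bus_priv));
 *	return ((u64)hi << 32) | lo;
 */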
2293 static u32 mlxsw_pci_read_utc_sec(void *bus_priv)
2294 {
2295 	struct mlxsw_pci *mlxsw_pci = bus_priv;
2296 
2297 	return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_sec_offset);
2298 }
2299 
2300 static u32 mlxsw_pci_read_utc_nsec(void *bus_priv)
2301 {
2302 	struct mlxsw_pci *mlxsw_pci = bus_priv;
2303 
2304 	return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_nsec_offset);
2305 }
2306 
2307 static enum mlxsw_cmd_mbox_config_profile_lag_mode
2308 mlxsw_pci_lag_mode(void *bus_priv)
2309 {
2310 	struct mlxsw_pci *mlxsw_pci = bus_priv;
2311 
2312 	return mlxsw_pci->lag_mode;
2313 }
2314 
2315 static enum mlxsw_cmd_mbox_config_profile_flood_mode
2316 mlxsw_pci_flood_mode(void *bus_priv)
2317 {
2318 	struct mlxsw_pci *mlxsw_pci = bus_priv;
2319 
2320 	return mlxsw_pci->flood_mode;
2321 }
2322 
2323 static const struct mlxsw_bus mlxsw_pci_bus = {
2324 	.kind			= "pci",
2325 	.init			= mlxsw_pci_init,
2326 	.fini			= mlxsw_pci_fini,
2327 	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
2328 	.skb_transmit		= mlxsw_pci_skb_transmit,
2329 	.cmd_exec		= mlxsw_pci_cmd_exec,
2330 	.read_frc_h		= mlxsw_pci_read_frc_h,
2331 	.read_frc_l		= mlxsw_pci_read_frc_l,
2332 	.read_utc_sec		= mlxsw_pci_read_utc_sec,
2333 	.read_utc_nsec		= mlxsw_pci_read_utc_nsec,
2334 	.lag_mode		= mlxsw_pci_lag_mode,
2335 	.flood_mode		= mlxsw_pci_flood_mode,
2336 	.features		= MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
2337 };
2338 
2339 static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
2340 {
2341 	int err;
2342 
2343 	mutex_init(&mlxsw_pci->cmd.lock);
2344 
2345 	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
2346 	if (err)
2347 		goto err_in_mbox_alloc;
2348 
2349 	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
2350 	if (err)
2351 		goto err_out_mbox_alloc;
2352 
2353 	return 0;
2354 
2355 err_out_mbox_alloc:
2356 	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
2357 err_in_mbox_alloc:
2358 	mutex_destroy(&mlxsw_pci->cmd.lock);
2359 	return err;
2360 }
2361 
2362 static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci)
2363 {
2364 	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
2365 	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
2366 	mutex_destroy(&mlxsw_pci->cmd.lock);
2367 }
2368 
2369 static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2370 {
2371 	const char *driver_name = dev_driver_string(&pdev->dev);
2372 	struct mlxsw_pci *mlxsw_pci;
2373 	int err;
2374 
2375 	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
2376 	if (!mlxsw_pci)
2377 		return -ENOMEM;
2378 
2379 	err = pci_enable_device(pdev);
2380 	if (err) {
2381 		dev_err(&pdev->dev, "pci_enable_device failed\n");
2382 		goto err_pci_enable_device;
2383 	}
2384 
2385 	err = pci_request_regions(pdev, driver_name);
2386 	if (err) {
2387 		dev_err(&pdev->dev, "pci_request_regions failed\n");
2388 		goto err_pci_request_regions;
2389 	}
2390 
2391 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2392 	if (err) {
2393 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2394 		if (err) {
2395 			dev_err(&pdev->dev, "dma_set_mask failed\n");
2396 			goto err_pci_set_dma_mask;
2397 		}
2398 	}
2399 
2400 	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
2401 		dev_err(&pdev->dev, "invalid PCI region size\n");
2402 		err = -EINVAL;
2403 		goto err_pci_resource_len_check;
2404 	}
2405 
2406 	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
2407 				     pci_resource_len(pdev, 0));
2408 	if (!mlxsw_pci->hw_addr) {
2409 		dev_err(&pdev->dev, "ioremap failed\n");
2410 		err = -EIO;
2411 		goto err_ioremap;
2412 	}
2413 	pci_set_master(pdev);
2414 
2415 	mlxsw_pci->pdev = pdev;
2416 	pci_set_drvdata(pdev, mlxsw_pci);
2417 
2418 	err = mlxsw_pci_cmd_init(mlxsw_pci);
2419 	if (err)
2420 		goto err_pci_cmd_init;
2421 
2422 	mlxsw_pci->bus_info.device_kind = driver_name;
2423 	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
2424 	mlxsw_pci->bus_info.dev = &pdev->dev;
2425 	mlxsw_pci->bus_info.read_clock_capable = true;
2426 	mlxsw_pci->id = id;
2427 
2428 	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
2429 					     &mlxsw_pci_bus, mlxsw_pci, false,
2430 					     NULL, NULL);
2431 	if (err) {
2432 		dev_err(&pdev->dev, "cannot register bus device\n");
2433 		goto err_bus_device_register;
2434 	}
2435 
2436 	return 0;
2437 
2438 err_bus_device_register:
2439 	mlxsw_pci_cmd_fini(mlxsw_pci);
2440 err_pci_cmd_init:
2441 	iounmap(mlxsw_pci->hw_addr);
2442 err_ioremap:
2443 err_pci_resource_len_check:
2444 err_pci_set_dma_mask:
2445 	pci_release_regions(pdev);
2446 err_pci_request_regions:
2447 	pci_disable_device(pdev);
2448 err_pci_enable_device:
2449 	kfree(mlxsw_pci);
2450 	return err;
2451 }
2452 
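/* Probe summary: enable the PCI device, request its regions, set a 64-bit
 * DMA mask (falling back to 32-bit), sanity-check and ioremap BAR 0, enable
 * bus mastering, allocate the command mailboxes and register the device
 * with mlxsw core over the "pci" bus. The error path, like
 * mlxsw_pci_remove() below, unwinds each step in reverse order.
 */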
2453 static void mlxsw_pci_remove(struct pci_dev *pdev)
2454 {
2455 	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
2456 
2457 	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
2458 	mlxsw_pci_cmd_fini(mlxsw_pci);
2459 	iounmap(mlxsw_pci->hw_addr);
2460 	pci_release_regions(mlxsw_pci->pdev);
2461 	pci_disable_device(mlxsw_pci->pdev);
2462 	kfree(mlxsw_pci);
2463 }
2464 
2465 static void mlxsw_pci_reset_prepare(struct pci_dev *pdev)
2466 {
2467 	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
2468 
2469 	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
2470 }
2471 
2472 static void mlxsw_pci_reset_done(struct pci_dev *pdev)
2473 {
2474 	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
2475 
2476 	mlxsw_pci->skip_reset = true;
2477 	mlxsw_core_bus_device_register(&mlxsw_pci->bus_info, &mlxsw_pci_bus,
2478 				       mlxsw_pci, false, NULL, NULL);
2479 	mlxsw_pci->skip_reset = false;
2480 }
2481 
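/* When the PCI core resets the function (e.g. during error recovery),
 * .reset_prepare unregisters the bus device and .reset_done re-registers it
 * with skip_reset set, so mlxsw_pci_reset() does not issue a second reset
 * during re-initialization.
 */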
2482 static const struct pci_error_handlers mlxsw_pci_err_handler = {
2483 	.reset_prepare = mlxsw_pci_reset_prepare,
2484 	.reset_done = mlxsw_pci_reset_done,
2485 };
2486 
2487 int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
2488 {
2489 	pci_driver->probe = mlxsw_pci_probe;
2490 	pci_driver->remove = mlxsw_pci_remove;
2491 	pci_driver->shutdown = mlxsw_pci_remove;
2492 	pci_driver->err_handler = &mlxsw_pci_err_handler;
2493 	return pci_register_driver(pci_driver);
2494 }
2495 EXPORT_SYMBOL(mlxsw_pci_driver_register);
2496 
2497 void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
2498 {
2499 	pci_unregister_driver(pci_driver);
2500 }
2501 EXPORT_SYMBOL(mlxsw_pci_driver_unregister);
2502 
2503 static int __init mlxsw_pci_module_init(void)
2504 {
2505 	return 0;
2506 }
2507 
2508 static void __exit mlxsw_pci_module_exit(void)
2509 {
2510 }
2511 
2512 module_init(mlxsw_pci_module_init);
2513 module_exit(mlxsw_pci_module_exit);
2514 
2515 MODULE_LICENSE("Dual BSD/GPL");
2516 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
2517 MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
2518