// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/string.h>

#include "pci_hw.h"
#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"
#include "resources.h"

#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))

enum mlxsw_pci_queue_type {
	MLXSW_PCI_QUEUE_TYPE_SDQ,
	MLXSW_PCI_QUEUE_TYPE_RDQ,
	MLXSW_PCI_QUEUE_TYPE_CQ,
	MLXSW_PCI_QUEUE_TYPE_EQ,
};

#define MLXSW_PCI_QUEUE_TYPE_COUNT	4

enum mlxsw_pci_cq_type {
	MLXSW_PCI_CQ_SDQ,
	MLXSW_PCI_CQ_RDQ,
};

static const u16 mlxsw_pci_doorbell_type_offset[] = {
	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
	0, /* unused */
	0, /* unused */
	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

struct mlxsw_pci_mem_item {
	char *buf;
	dma_addr_t mapaddr;
	size_t size;
};

struct mlxsw_pci_queue_elem_info {
	char *elem; /* pointer to actual dma mapped element mem chunk */
	union {
		struct {
			struct sk_buff *skb;
		} sdq;
		struct {
			struct sk_buff *skb;
		} rdq;
	} u;
};

struct mlxsw_pci_queue {
	spinlock_t lock; /* for queue accesses */
	struct mlxsw_pci_mem_item mem_item;
	struct mlxsw_pci_queue_elem_info *elem_info;
	u16 producer_counter;
	u16 consumer_counter;
	u16 count; /* number of elements in queue */
	u8 num; /* queue number */
	u8 elem_size; /* size of one element */
	enum mlxsw_pci_queue_type type;
	struct tasklet_struct tasklet; /* queue processing tasklet */
	struct mlxsw_pci *pci;
	struct {
		enum mlxsw_pci_cqe_v v;
		struct mlxsw_pci_queue *dq;
	} cq;
};

struct mlxsw_pci_queue_type_group {
	struct mlxsw_pci_queue *q;
	u8 count; /* number of queues in group */
};

struct mlxsw_pci {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	u64 free_running_clock_offset;
	u64 utc_sec_offset;
	u64 utc_nsec_offset;
	bool lag_mode_support;
	bool cff_support;
	enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode;
	enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode;
	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
	u32 doorbell_offset;
	struct mlxsw_core *core;
	struct {
		struct mlxsw_pci_mem_item *items;
		unsigned int count;
	} fw_area;
	struct {
		struct mlxsw_pci_mem_item out_mbox;
		struct mlxsw_pci_mem_item in_mbox;
		struct mutex lock; /* Lock access to command registers */
		struct {
			u8 status;
			u64 out_param;
		} comp;
	} cmd;
	struct mlxsw_bus_info bus_info;
	const struct pci_device_id *id;
	enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
	u8 num_cqs; /* Number of CQs */
	u8 num_sdqs; /* Number of SDQs */
	bool skip_reset;
};

static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
{
	tasklet_schedule(&q->tasklet);
}

static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
					size_t elem_size, int elem_index)
{
	return q->mem_item.buf + (elem_size * elem_index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return &q->elem_info[elem_index];
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
	int index = q->producer_counter & (q->count - 1);

	if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
		return NULL;
	return mlxsw_pci_queue_elem_info_get(q, index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
{
	int index = q->consumer_counter & (q->count - 1);

	return mlxsw_pci_queue_elem_info_get(q, index);
}

static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
}

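/* Ring elements carry an ownership bit that the device toggles on every
 * pass through the queue. The consumer counter is free-running, so bit
 * 'count' (the queue size is a power of two) flips on every wrap as well
 * and tracks the value software expects to see once the device has handed
 * an element back. A mismatch therefore means the element is still owned
 * by the hardware and must not be consumed yet.
 */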
static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
	return owner_bit != !!(q->consumer_counter & q->count);
}

static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
			       enum mlxsw_pci_queue_type q_type)
{
	return &mlxsw_pci->queues[q_type];
}

static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
		      enum mlxsw_pci_queue_type q_type, u8 q_num)
{
	return &mlxsw_pci->queues[q_type].q[q_num];
}

static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci)
{
	/* There is only one EQ at index 0. */
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, 0);
}

static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q,
					   u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_type_offset[q->type],
				   q->num), val);
}

static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
					       struct mlxsw_pci_queue *q,
					       u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_arm_type_offset[q->type],
				   q->num), val);
}

static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}

static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
				       q->consumer_counter + q->count);
}

static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}

static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
					     int page_index)
{
	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
}

static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue *cq;
	int tclass;
	u8 cq_num;
	int lp;
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;
	tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
						      MLXSW_PCI_SDQ_CTL_TC;
	lp = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE :
						  MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE;

	/* Set the CQ with the same number as this SDQ. */
	cq_num = q->num;
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, cq_num);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num);
	cq->cq.dq = q;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = dma_map_single(&pdev->dev, frag_data, frag_len, direction);
	if (unlikely(dma_mapping_error(&pdev->dev, mapaddr))) {
		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}

static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	if (!frag_len)
		return;
	dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}

static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info,
				   gfp_t gfp)
{
	size_t buf_len = MLXSW_PORT_MAX_MTU;
	char *wqe = elem_info->elem;
	struct sk_buff *skb;
	int err;

	skb = __netdev_alloc_skb_ip_align(NULL, buf_len, gfp);
	if (!skb)
		return -ENOMEM;

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (err)
		goto err_frag_map;

	elem_info->u.rdq.skb = skb;
	return 0;

err_frag_map:
	dev_kfree_skb_any(skb);
	return err;
}

static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	struct sk_buff *skb;
	char *wqe;

	skb = elem_info->u.rdq.skb;
	wqe = elem_info->elem;

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}

static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	u8 sdq_count = mlxsw_pci->num_sdqs;
	struct mlxsw_pci_queue *cq;
	u8 cq_num;
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set the CQ with the same number as this RDQ, offset by the SDQ
	 * count, as the lower CQs are assigned to SDQs.
	 */
	cq_num = sdq_count + q->num;
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, cq_num);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num);
	cq->cq.dq = q;

	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
		BUG_ON(!elem_info);
		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_KERNEL);
		if (err)
			goto rollback;
		/* Everything is set up, ring doorbell to pass elem to HW */
		q->producer_counter++;
		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	}

	return 0;

rollback:
	for (i--; i >= 0; i--) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
	cq->cq.dq = NULL;
	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

	return err;
}

static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;

	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
}

static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
				  struct mlxsw_pci_queue *q)
{
	q->cq.v = mlxsw_pci->max_cqe_ver;

	if (q->cq.v == MLXSW_PCI_CQE_V2 &&
	    q->num < mlxsw_pci->num_sdqs &&
	    !mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core))
		q->cq.v = MLXSW_PCI_CQE_V1;
}

static unsigned int mlxsw_pci_read32_off(struct mlxsw_pci *mlxsw_pci,
					 ptrdiff_t off)
{
	return ioread32be(mlxsw_pci->hw_addr + off);
}

static void mlxsw_pci_skb_cb_ts_set(struct mlxsw_pci *mlxsw_pci,
				    struct sk_buff *skb,
				    enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
	u8 ts_type;

	if (cqe_v != MLXSW_PCI_CQE_V2)
		return;

	ts_type = mlxsw_pci_cqe2_time_stamp_type_get(cqe);

	if (ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC &&
	    ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_MIRROR_UTC)
		return;

	mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe);
	mlxsw_skb_cb(skb)->cqe_ts.nsec =
		mlxsw_pci_cqe2_time_stamp_nsec_get(cqe);
}

static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     enum mlxsw_pci_cqe_v cqe_v,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_tx_info tx_info;
	char *wqe;
	struct sk_buff *skb;
	int i;

	spin_lock(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
	skb = elem_info->u.sdq.skb;
	wqe = elem_info->elem;
	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);

	if (unlikely(!tx_info.is_emad &&
		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
		mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
					   tx_info.local_port);
		skb = NULL;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	elem_info->u.sdq.skb = NULL;

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
	spin_unlock(&q->lock);
}

static void mlxsw_pci_cqe_rdq_md_tx_port_init(struct sk_buff *skb,
					      const char *cqe)
{
	struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);

	if (mlxsw_pci_cqe2_tx_lag_get(cqe)) {
		cb->rx_md_info.tx_port_is_lag = true;
		cb->rx_md_info.tx_lag_id = mlxsw_pci_cqe2_tx_lag_id_get(cqe);
		cb->rx_md_info.tx_lag_port_index =
			mlxsw_pci_cqe2_tx_lag_subport_get(cqe);
	} else {
		cb->rx_md_info.tx_port_is_lag = false;
		cb->rx_md_info.tx_sys_port =
			mlxsw_pci_cqe2_tx_system_port_get(cqe);
	}

	if (cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT &&
	    cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_INVALID)
		cb->rx_md_info.tx_port_valid = 1;
	else
		cb->rx_md_info.tx_port_valid = 0;
}

static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe)
{
	struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);

	cb->rx_md_info.tx_congestion = mlxsw_pci_cqe2_mirror_cong_get(cqe);
	if (cb->rx_md_info.tx_congestion != MLXSW_PCI_CQE2_MIRROR_CONG_INVALID)
		cb->rx_md_info.tx_congestion_valid = 1;
	else
		cb->rx_md_info.tx_congestion_valid = 0;
	cb->rx_md_info.tx_congestion <<= MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT;

	cb->rx_md_info.latency = mlxsw_pci_cqe2_mirror_latency_get(cqe);
	if (cb->rx_md_info.latency != MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID)
		cb->rx_md_info.latency_valid = 1;
	else
		cb->rx_md_info.latency_valid = 0;

	cb->rx_md_info.tx_tc = mlxsw_pci_cqe2_mirror_tclass_get(cqe);
	if (cb->rx_md_info.tx_tc != MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID)
		cb->rx_md_info.tx_tc_valid = 1;
	else
		cb->rx_md_info.tx_tc_valid = 0;

	mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
}

static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_rx_info rx_info = {};
	char wqe[MLXSW_PCI_WQE_SIZE];
	struct sk_buff *skb;
	u16 byte_count;
	int err;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.rdq.skb;
	memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_ATOMIC);
	if (err) {
		dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
		goto out;
	}

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
		rx_info.is_lag = true;
		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
		rx_info.lag_port_index =
			mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
	} else {
		rx_info.is_lag = false;
		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
	}

	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

	if (rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_INGRESS_ACL ||
	    rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_EGRESS_ACL) {
		u32 cookie_index = 0;

		if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2)
			cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe);
		mlxsw_skb_cb(skb)->rx_md_info.cookie_index = cookie_index;
	} else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 &&
		   rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 &&
		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
		rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe);
		mlxsw_pci_cqe_rdq_md_init(skb, cqe);
	} else if (rx_info.trap_id == MLXSW_TRAP_ID_PKT_SAMPLE &&
		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
		mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
	}

	mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);

	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
	if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
		byte_count -= ETH_FCS_LEN;
	skb_put(skb, byte_count);
	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

out:
	/* Everything is set up, ring doorbell to pass elem to HW */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return;
}

static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_cqe_owner_get(q->cq.v, elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

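/* Completion queues are serviced by two tasklets, one for SDQ (TX)
 * completions and one for RDQ (RX) completions. Each run handles at most
 * half of the ring ("credits") before re-arming the CQ, so a single busy
 * queue cannot monopolize the CPU.
 */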
static void mlxsw_pci_cq_rx_tasklet(struct tasklet_struct *t)
{
	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
	struct mlxsw_pci_queue *rdq = q->cq.dq;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	int credits = q->count >> 1;
	int items = 0;
	char *cqe;

	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(q->cq.v, cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(q->cq.v, cqe);
		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];

		if (unlikely(sendq)) {
			WARN_ON_ONCE(1);
			continue;
		}

		if (unlikely(dqn != rdq->num)) {
			WARN_ON_ONCE(1);
			continue;
		}

		memcpy(ncqe, cqe, q->elem_size);
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);

		mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
					 wqe_counter, q->cq.v, ncqe);

		if (++items == credits)
			break;
	}

	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
}

static void mlxsw_pci_cq_tx_tasklet(struct tasklet_struct *t)
{
	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
	struct mlxsw_pci_queue *sdq = q->cq.dq;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	int credits = q->count >> 1;
	int items = 0;
	char *cqe;

	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(q->cq.v, cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(q->cq.v, cqe);
		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];

		if (unlikely(!sendq)) {
			WARN_ON_ONCE(1);
			continue;
		}

		if (unlikely(dqn != sdq->num)) {
			WARN_ON_ONCE(1);
			continue;
		}

		memcpy(ncqe, cqe, q->elem_size);
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);

		mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
					 wqe_counter, q->cq.v, ncqe);

		if (++items == credits)
			break;
	}

	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
}

static enum mlxsw_pci_cq_type
mlxsw_pci_cq_type(const struct mlxsw_pci *mlxsw_pci,
		  const struct mlxsw_pci_queue *q)
{
	/* Each CQ is mapped to one DQ. The first 'num_sdqs' queues are used
	 * for SDQs and the rest are used for RDQs.
	 */
	if (q->num < mlxsw_pci->num_sdqs)
		return MLXSW_PCI_CQ_SDQ;

	return MLXSW_PCI_CQ_RDQ;
}

static void mlxsw_pci_cq_tasklet_setup(struct mlxsw_pci_queue *q,
				       enum mlxsw_pci_cq_type cq_type)
{
	switch (cq_type) {
	case MLXSW_PCI_CQ_SDQ:
		tasklet_setup(&q->tasklet, mlxsw_pci_cq_tx_tasklet);
		break;
	case MLXSW_PCI_CQ_RDQ:
		tasklet_setup(&q->tasklet, mlxsw_pci_cq_rx_tasklet);
		break;
	}
}

static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_cqe_owner_set(q->cq.v, elem, 1);
	}

	if (q->cq.v == MLXSW_PCI_CQE_V1)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
	else if (q->cq.v == MLXSW_PCI_CQE_V2)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);

	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_cq_tasklet_setup(q, mlxsw_pci_cq_type(mlxsw_pci, q));
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}

static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
{
	return q->cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
					     MLXSW_PCI_CQE01_COUNT;
}

static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
{
	return q->cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
					     MLXSW_PCI_CQE01_SIZE;
}

static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_eqe_owner_get(elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t)
{
	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
	struct mlxsw_pci *mlxsw_pci = q->pci;
	int credits = q->count >> 1;
	u8 cqn, cq_count;
	int items = 0;
	char *eqe;

	memset(&active_cqns, 0, sizeof(active_cqns));

	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
		cqn = mlxsw_pci_eqe_cqn_get(eqe);
		set_bit(cqn, active_cqns);

		if (++items == credits)
			break;
	}

	if (!items)
		return;

	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);

	cq_count = mlxsw_pci->num_cqs;
	for_each_set_bit(cqn, active_cqns, cq_count) {
		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
}

static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	/* We expect to initialize only one EQ, which gets num=0 as it is
	 * located at index zero. We use the EQ as EQ1, so set the number for
	 * future use.
	 */
	WARN_ON_ONCE(q->num);
	q->num = MLXSW_PCI_EQ_COMP_NUM;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_eqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	tasklet_setup(&q->tasklet, mlxsw_pci_eq_tasklet);
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}

struct mlxsw_pci_queue_ops {
	const char *name;
	enum mlxsw_pci_queue_type type;
	void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
			 struct mlxsw_pci_queue *q);
	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
		    struct mlxsw_pci_queue *q);
	void (*fini)(struct mlxsw_pci *mlxsw_pci,
		     struct mlxsw_pci_queue *q);
	u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
	u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
	u16 elem_count;
	u8 elem_size;
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
	.init		= mlxsw_pci_sdq_init,
	.fini		= mlxsw_pci_sdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
	.init		= mlxsw_pci_rdq_init,
	.fini		= mlxsw_pci_rdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
	.pre_init	= mlxsw_pci_cq_pre_init,
	.init		= mlxsw_pci_cq_init,
	.fini		= mlxsw_pci_cq_fini,
	.elem_count_f	= mlxsw_pci_cq_elem_count,
	.elem_size_f	= mlxsw_pci_cq_elem_size,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
	.init		= mlxsw_pci_eq_init,
	.fini		= mlxsw_pci_eq_fini,
	.elem_count	= MLXSW_PCI_EQE_COUNT,
	.elem_size	= MLXSW_PCI_EQE_SIZE,
};

static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				const struct mlxsw_pci_queue_ops *q_ops,
				struct mlxsw_pci_queue *q, u8 q_num)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
	int i;
	int err;

	q->num = q_num;
	if (q_ops->pre_init)
		q_ops->pre_init(mlxsw_pci, q);

	spin_lock_init(&q->lock);
	q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
					 q_ops->elem_count;
	q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
					    q_ops->elem_size;
	q->type = q_ops->type;
	q->pci = mlxsw_pci;

	mem_item->size = MLXSW_PCI_AQ_SIZE;
	mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
					   mem_item->size, &mem_item->mapaddr,
					   GFP_KERNEL);
	if (!mem_item->buf)
		return -ENOMEM;

	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
	if (!q->elem_info) {
		err = -ENOMEM;
		goto err_elem_info_alloc;
	}

	/* Initialize each elem_info entry with a pointer to its DMA-mapped
	 * element for easy access later.
	 */
	for (i = 0; i < q->count; i++) {
		struct mlxsw_pci_queue_elem_info *elem_info;

		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		elem_info->elem =
			__mlxsw_pci_queue_elem_get(q, q->elem_size, i);
	}

	mlxsw_cmd_mbox_zero(mbox);
	err = q_ops->init(mlxsw_pci, mbox, q);
	if (err)
		goto err_q_ops_init;
	return 0;

err_q_ops_init:
	kfree(q->elem_info);
err_elem_info_alloc:
	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
			  mem_item->buf, mem_item->mapaddr);
	return err;
}

static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
				 const struct mlxsw_pci_queue_ops *q_ops,
				 struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;

	q_ops->fini(mlxsw_pci, q);
	kfree(q->elem_info);
	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
			  mem_item->buf, mem_item->mapaddr);
}

static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				      const struct mlxsw_pci_queue_ops *q_ops,
				      u8 num_qs)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;
	int err;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
	if (!queue_group->q)
		return -ENOMEM;

	for (i = 0; i < num_qs; i++) {
		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
					   &queue_group->q[i], i);
		if (err)
			goto err_queue_init;
	}
	queue_group->count = num_qs;

	return 0;

err_queue_init:
	for (i--; i >= 0; i--)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
	return err;
}

static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
				       const struct mlxsw_pci_queue_ops *q_ops)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	for (i = 0; i < queue_group->count; i++)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
}

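/* Bring up the asynchronous queues. The counts and sizes reported by
 * QUERY_AQ_CAP are validated against the driver's expectations, and the
 * queue groups are then created in dependency order: the EQ first, then
 * the CQs it reports to, and finally the SDQs and RDQs that complete into
 * those CQs.
 */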
static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	u8 num_sdqs;
	u8 sdq_log2sz;
	u8 num_rdqs;
	u8 rdq_log2sz;
	u8 num_cqs;
	u8 cq_log2sz;
	u8 cqv2_log2sz;
	u8 num_eqs;
	u8 eq_log2sz;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
	if (err)
		return err;

	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
	cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

	if (num_sdqs + num_rdqs > num_cqs ||
	    num_sdqs < MLXSW_PCI_SDQS_MIN ||
	    num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_MAX) {
		dev_err(&pdev->dev, "Unsupported number of queues\n");
		return -EINVAL;
	}

	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
	    (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
	     (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
		return -EINVAL;
	}

	mlxsw_pci->num_cqs = num_cqs;
	mlxsw_pci->num_sdqs = num_sdqs;

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
					 MLXSW_PCI_EQS_COUNT);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize event queues\n");
		return err;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
					 num_cqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
		goto err_cqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
					 num_sdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
		goto err_sdqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
					 num_rdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
		goto err_rdqs_init;
	}

	return 0;

err_rdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
	return err;
}

static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}

static void
mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
				     char *mbox, int index,
				     const struct mlxsw_swid_config *swid)
{
	u8 mask = 0;

	if (swid->used_type) {
		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
			mbox, index, swid->type);
		mask |= 1;
	}
	if (swid->used_properties) {
		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
			mbox, index, swid->properties);
		mask |= 2;
	}
	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}

static int
mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
				const struct mlxsw_config_profile *profile,
				struct mlxsw_res *res)
{
	u64 single_size, double_size, linear_size;
	int err;

	err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
				       &single_size, &double_size,
				       &linear_size);
	if (err)
		return err;

	MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
	MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
	MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);

	return 0;
}

static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
				    const struct mlxsw_config_profile *profile,
				    struct mlxsw_res *res)
{
	int i;
	int err;

	mlxsw_cmd_mbox_zero(mbox);

	if (profile->used_max_vepa_channels) {
		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
			mbox, profile->max_vepa_channels);
	}
	if (profile->used_max_lag) {
		mlxsw_cmd_mbox_config_profile_set_max_lag_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_lag_set(mbox,
							   profile->max_lag);
	}
	if (profile->used_max_mid) {
		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_mid_set(
			mbox, profile->max_mid);
	}
	if (profile->used_max_pgt) {
		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pgt_set(
			mbox, profile->max_pgt);
	}
	if (profile->used_max_system_port) {
		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_system_port_set(
			mbox, profile->max_system_port);
	}
	if (profile->used_max_vlan_groups) {
		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
			mbox, profile->max_vlan_groups);
	}
	if (profile->used_max_regions) {
		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_regions_set(
			mbox, profile->max_regions);
	}
	if (profile->used_flood_tables) {
		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
			mbox, profile->max_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
			mbox, profile->max_vid_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
			mbox, profile->max_fid_offset_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
			mbox, profile->fid_offset_flood_table_size);
		mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
			mbox, profile->max_fid_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
			mbox, profile->fid_flood_table_size);
	}
	if (profile->flood_mode_prefer_cff && mlxsw_pci->cff_support) {
		enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode =
			MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CFF;

		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(mbox, flood_mode);
		mlxsw_pci->flood_mode = flood_mode;
	} else if (profile->used_flood_mode) {
		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(
			mbox, profile->flood_mode);
		mlxsw_pci->flood_mode = profile->flood_mode;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}
	if (profile->used_max_ib_mc) {
		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
			mbox, profile->max_ib_mc);
	}
	if (profile->used_max_pkey) {
		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pkey_set(
			mbox, profile->max_pkey);
	}
	if (profile->used_ar_sec) {
		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_ar_sec_set(
			mbox, profile->ar_sec);
	}
	if (profile->used_adaptive_routing_group_cap) {
		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
			mbox, profile->adaptive_routing_group_cap);
	}
	if (profile->used_ubridge) {
		mlxsw_cmd_mbox_config_profile_set_ubridge_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_ubridge_set(mbox,
							  profile->ubridge);
	}
	if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
		err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
		if (err)
			return err;

		mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
					MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
									   1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
					MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
					MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
	}

	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
						     &profile->swid_config[i]);

	if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
		mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
	}

	if (profile->used_cqe_time_stamp_type) {
		mlxsw_cmd_mbox_config_profile_set_cqe_time_stamp_type_set(mbox,
									  1);
		mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type_set(mbox,
					profile->cqe_time_stamp_type);
	}

	if (profile->lag_mode_prefer_sw && mlxsw_pci->lag_mode_support) {
		enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode =
			MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW;

		mlxsw_cmd_mbox_config_profile_set_lag_mode_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_lag_mode_set(mbox, lag_mode);
		mlxsw_pci->lag_mode = lag_mode;
	} else {
		mlxsw_pci->lag_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_FW;
	}
	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}

static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
	if (err)
		return err;
	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
	return 0;
}

static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				  u16 num_pages)
{
	struct mlxsw_pci_mem_item *mem_item;
	int nent = 0;
	int i;
	int err;

	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
					   GFP_KERNEL);
	if (!mlxsw_pci->fw_area.items)
		return -ENOMEM;
	mlxsw_pci->fw_area.count = num_pages;

	mlxsw_cmd_mbox_zero(mbox);
	for (i = 0; i < num_pages; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		mem_item->size = MLXSW_PCI_PAGE_SIZE;
		mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
						   mem_item->size,
						   &mem_item->mapaddr, GFP_KERNEL);
		if (!mem_item->buf) {
			err = -ENOMEM;
			goto err_alloc;
		}
		mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
			err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
			if (err)
				goto err_cmd_map_fa;
			nent = 0;
			mlxsw_cmd_mbox_zero(mbox);
		}
	}

	if (nent) {
		err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
		if (err)
			goto err_cmd_map_fa;
	}

	return 0;

err_cmd_map_fa:
err_alloc:
	for (i--; i >= 0; i--) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
				  mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
	return err;
}

static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
{
	struct mlxsw_pci_mem_item *mem_item;
	int i;

	mlxsw_cmd_unmap_fa(mlxsw_pci->core);

	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
				  mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
}

static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
	struct mlxsw_pci *mlxsw_pci = dev_id;
	struct mlxsw_pci_queue *q;

	q = mlxsw_pci_eq_get(mlxsw_pci);
	mlxsw_pci_queue_tasklet_schedule(q);
	return IRQ_HANDLED;
}

static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	int err = 0;

	mbox->size = MLXSW_CMD_MBOX_SIZE;
	mbox->buf = dma_alloc_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE,
				       &mbox->mapaddr, GFP_KERNEL);
	if (!mbox->buf) {
		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
		err = -ENOMEM;
	}

	return err;
}

static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;

	dma_free_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
			  mbox->mapaddr);
}

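/* Wait for the device to leave reset: after an initial settling delay,
 * poll the FW_READY register until it reports the ready magic value or the
 * reset timeout expires.
 */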
static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
				    const struct pci_device_id *id,
				    u32 *p_sys_status)
{
	unsigned long end;
	u32 val;

	/* We must wait for the HW to become responsive. */
	msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);

	end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
	do {
		val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
		if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
			return 0;
		cond_resched();
	} while (time_before(jiffies, end));

	*p_sys_status = val & MLXSW_PCI_FW_READY_MASK;

	return -EBUSY;
}

static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char mrsr_pl[MLXSW_REG_MRSR_LEN];
	int err;

	mlxsw_reg_mrsr_pack(mrsr_pl,
			    MLXSW_REG_MRSR_COMMAND_RESET_AT_PCI_DISABLE);
	err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
	if (err)
		return err;

	device_lock_assert(&pdev->dev);

	pci_cfg_access_lock(pdev);
	pci_save_state(pdev);

	err = __pci_reset_function_locked(pdev);
	if (err)
		pci_err(pdev, "PCI function reset failed with %d\n", err);

	pci_restore_state(pdev);
	pci_cfg_access_unlock(pdev);

	return err;
}

static int mlxsw_pci_reset_sw(struct mlxsw_pci *mlxsw_pci)
{
	char mrsr_pl[MLXSW_REG_MRSR_LEN];

	mlxsw_reg_mrsr_pack(mrsr_pl, MLXSW_REG_MRSR_COMMAND_SOFTWARE_RESET);
	return mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
}

static int
mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char mcam_pl[MLXSW_REG_MCAM_LEN];
	bool pci_reset_supported;
	u32 sys_status;
	int err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	/* PCI core already issued a PCI reset, do not issue another reset. */
	if (mlxsw_pci->skip_reset)
		return 0;

	mlxsw_reg_mcam_pack(mcam_pl,
			    MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
	err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl);
	if (err)
		return err;

	mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
			      &pci_reset_supported);

	if (pci_reset_supported) {
		pci_dbg(pdev, "Starting PCI reset flow\n");
		err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci);
	} else {
		pci_dbg(pdev, "Starting software reset flow\n");
		err = mlxsw_pci_reset_sw(mlxsw_pci);
	}
	if (err)
		return err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	return 0;
}

static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
	if (err < 0)
		dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
	return err;
}

static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	pci_free_irq_vectors(mlxsw_pci->pdev);
}

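/* Bus init callback: reset the device, allocate the MSI-X vector, query
 * firmware for the doorbell, free-running clock and UTC offsets, map the
 * firmware area pages, query resources, apply the config profile, bring up
 * the asynchronous queues and request the EQ interrupt.
 */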
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_config_profile *profile,
			  struct mlxsw_res *res)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char *mbox;
	u16 num_pages;
	int err;

	mlxsw_pci->core = mlxsw_core;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	err = mlxsw_pci_reset(mlxsw_pci, mlxsw_pci->id);
	if (err)
		goto err_reset;

	err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
	if (err < 0) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_alloc_irq;
	}

	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	if (err)
		goto err_query_fw;

	mlxsw_pci->bus_info.fw_rev.major =
		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
	mlxsw_pci->bus_info.fw_rev.minor =
		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
	mlxsw_pci->bus_info.fw_rev.subminor =
		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
		err = -EINVAL;
		goto err_iface_rev;
	}
	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
		err = -EINVAL;
		goto err_doorbell_page_bar;
	}

	mlxsw_pci->doorbell_offset =
		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_fr_rn_clk_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported free running clock BAR queried from hw\n");
		err = -EINVAL;
		goto err_fr_rn_clk_bar;
	}

	mlxsw_pci->free_running_clock_offset =
		mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_utc_sec_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported UTC sec BAR queried from hw\n");
		err = -EINVAL;
		goto err_utc_sec_bar;
	}

	mlxsw_pci->utc_sec_offset =
		mlxsw_cmd_mbox_query_fw_utc_sec_offset_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_utc_nsec_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported UTC nsec BAR queried from hw\n");
		err = -EINVAL;
		goto err_utc_nsec_bar;
	}

	mlxsw_pci->utc_nsec_offset =
		mlxsw_cmd_mbox_query_fw_utc_nsec_offset_get(mbox);

	mlxsw_pci->lag_mode_support =
		mlxsw_cmd_mbox_query_fw_lag_mode_support_get(mbox);
	mlxsw_pci->cff_support =
		mlxsw_cmd_mbox_query_fw_cff_support_get(mbox);

	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
	if (err)
		goto err_fw_area_init;

	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
	if (err)
		goto err_boardinfo;

	err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
	if (err)
		goto err_query_resources;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
	    MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
	else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
		 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
	else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
		  MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
		 !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
	} else {
		dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
		err = -EINVAL;
		goto err_cqe_v_check;
	}

	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
	if (err)
		goto err_config_profile;

	/* Some resources depend on details of config_profile, such as unified
	 * bridge model. Query the resources again to get correct values.
	 */
	err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
	if (err)
		goto err_requery_resources;

	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
	if (err)
		goto err_aqs_init;

	err = request_irq(pci_irq_vector(pdev, 0),
			  mlxsw_pci_eq_irq_handler, 0,
			  mlxsw_pci->bus_info.device_kind, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "IRQ request failed\n");
		goto err_request_eq_irq;
	}

	goto mbox_put;

err_request_eq_irq:
	mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_requery_resources:
err_config_profile:
err_cqe_v_check:
err_query_resources:
err_boardinfo:
	mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
err_utc_nsec_bar:
err_utc_sec_bar:
err_fr_rn_clk_bar:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_alloc_irq:
err_reset:
mbox_put:
	mlxsw_cmd_mbox_free(mbox);
	return err;
}

static void mlxsw_pci_fini(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
	mlxsw_pci_aqs_fini(mlxsw_pci);
	mlxsw_pci_fw_area_fini(mlxsw_pci);
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
}

static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
		   const struct mlxsw_tx_info *tx_info)
{
	u8 ctl_sdq_count = mlxsw_pci->num_sdqs - 1;
	u8 sdqn;

	if (tx_info->is_emad) {
		sdqn = MLXSW_PCI_SDQ_EMAD_INDEX;
	} else {
		BUILD_BUG_ON(MLXSW_PCI_SDQ_EMAD_INDEX != 0);
		sdqn = 1 + (tx_info->local_port % ctl_sdq_count);
	}

	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}

static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
					const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);

	return !mlxsw_pci_queue_elem_info_producer_get(q);
}

static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	int i;
	int err;

	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
		err = skb_linearize(skb);
		if (err)
			return err;
	}

	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
	spin_lock_bh(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
	if (!elem_info) {
		/* queue is full */
		err = -EAGAIN;
		goto unlock;
	}
	mlxsw_skb_cb(skb)->tx_info = *tx_info;
	elem_info->u.sdq.skb = skb;

	wqe = elem_info->elem;
	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
	mlxsw_pci_wqe_lp_set(wqe, 0);
	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (err)
		goto unlock;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		if (err)
			goto unmap_frags;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	/* Set unused SG entries' byte count to zero. */
	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);

	/* Everything is set up, ring producer doorbell to get HW going */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	goto unlock;

unmap_frags:
	for (; i >= 0; i--)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
unlock:
	spin_unlock_bh(&q->lock);
	return err;
}

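/* Execute a command through the Command Interface Registers: program the
 * mailbox DMA addresses and the input modifier, set the GO bit together
 * with the opcode, then poll CIR_CTRL until the GO bit clears and report
 * the status returned by the firmware.
 */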
static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
			      u32 in_mod, bool out_mbox_direct,
			      char *in_mbox, size_t in_mbox_size,
			      char *out_mbox, size_t out_mbox_size,
			      u8 *p_status)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
	unsigned long end;
	bool wait_done;
	int err;

	*p_status = MLXSW_CMD_STATUS_OK;

	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
	if (err)
		return err;

	if (in_mbox) {
		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
		in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
	}
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));

	if (out_mbox)
		out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));

	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);

	wait_done = false;

	wmb(); /* all needs to be written before we write control register */
	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
			  MLXSW_PCI_CIR_CTRL_GO_BIT |
			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
			  opcode);

	end = jiffies + timeout;
	do {
		u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);

		if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
			wait_done = true;
			*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
			break;
		}
		cond_resched();
	} while (time_before(jiffies, end));

	err = 0;
	if (wait_done) {
		if (*p_status)
			err = -EIO;
	} else {
		err = -ETIMEDOUT;
	}

	if (!err && out_mbox && out_mbox_direct) {
		/* Some commands don't use output param as address to mailbox
		 * but they store output directly into registers. In that case,
		 * copy registers into mbox buffer.
		 */
		__be32 tmp;

		tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
						   CIR_OUT_PARAM_HI));
		memcpy(out_mbox, &tmp, sizeof(tmp));
		tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
						   CIR_OUT_PARAM_LO));
		memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
	} else if (!err && out_mbox) {
		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
	}

	mutex_unlock(&mlxsw_pci->cmd.lock);

	return err;
}

static u32 mlxsw_pci_read_frc_h(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	u64 frc_offset_h;

	frc_offset_h = mlxsw_pci->free_running_clock_offset;
	return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_h);
}

static u32 mlxsw_pci_read_frc_l(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	u64 frc_offset_l;

	frc_offset_l = mlxsw_pci->free_running_clock_offset + 4;
	return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_l);
}

static u32 mlxsw_pci_read_utc_sec(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_sec_offset);
}

static u32 mlxsw_pci_read_utc_nsec(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_nsec_offset);
}

static enum mlxsw_cmd_mbox_config_profile_lag_mode
mlxsw_pci_lag_mode(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	return mlxsw_pci->lag_mode;
}

static enum mlxsw_cmd_mbox_config_profile_flood_mode
mlxsw_pci_flood_mode(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	return mlxsw_pci->flood_mode;
}

static const struct mlxsw_bus mlxsw_pci_bus = {
	.kind = "pci",
	.init = mlxsw_pci_init,
	.fini = mlxsw_pci_fini,
	.skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
	.skb_transmit = mlxsw_pci_skb_transmit,
	.cmd_exec = mlxsw_pci_cmd_exec,
	.read_frc_h = mlxsw_pci_read_frc_h,
	.read_frc_l = mlxsw_pci_read_frc_l,
	.read_utc_sec = mlxsw_pci_read_utc_sec,
	.read_utc_nsec = mlxsw_pci_read_utc_nsec,
	.lag_mode = mlxsw_pci_lag_mode,
	.flood_mode = mlxsw_pci_flood_mode,
	.features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};

static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	mutex_init(&mlxsw_pci->cmd.lock);

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	if (err)
		goto err_in_mbox_alloc;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	if (err)
		goto err_out_mbox_alloc;

	return 0;

err_out_mbox_alloc:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
err_in_mbox_alloc:
	mutex_destroy(&mlxsw_pci->cmd.lock);
	return err;
}

static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	mutex_destroy(&mlxsw_pci->cmd.lock);
}
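/* PCI probe/remove. Probe only brings up the bare PCI function: enable the
 * device, claim its regions, set a 64-bit DMA mask (with a 32-bit fallback),
 * map BAR0 and allocate the command-interface mailboxes, then register the
 * device with the mlxsw core. The rest of the bring-up (firmware area,
 * queues, interrupts) is driven by the core through the bus .init callback,
 * mlxsw_pci_init().
 */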
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const char *driver_name = dev_driver_string(&pdev->dev);
	struct mlxsw_pci *mlxsw_pci;
	int err;

	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
	if (!mlxsw_pci)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "dma_set_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!mlxsw_pci->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	mlxsw_pci->pdev = pdev;
	pci_set_drvdata(pdev, mlxsw_pci);

	err = mlxsw_pci_cmd_init(mlxsw_pci);
	if (err)
		goto err_pci_cmd_init;

	mlxsw_pci->bus_info.device_kind = driver_name;
	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
	mlxsw_pci->bus_info.dev = &pdev->dev;
	mlxsw_pci->bus_info.read_clock_capable = true;
	mlxsw_pci->id = id;

	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
					     &mlxsw_pci_bus, mlxsw_pci, false,
					     NULL, NULL);
	if (err) {
		dev_err(&pdev->dev, "cannot register bus device\n");
		goto err_bus_device_register;
	}

	return 0;

err_bus_device_register:
	mlxsw_pci_cmd_fini(mlxsw_pci);
err_pci_cmd_init:
	iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(mlxsw_pci);
	return err;
}

static void mlxsw_pci_remove(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
	mlxsw_pci_cmd_fini(mlxsw_pci);
	iounmap(mlxsw_pci->hw_addr);
	pci_release_regions(mlxsw_pci->pdev);
	pci_disable_device(mlxsw_pci->pdev);
	kfree(mlxsw_pci);
}

static void mlxsw_pci_reset_prepare(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
}

static void mlxsw_pci_reset_done(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	/* The function was just reset by the PCI core; setting skip_reset
	 * tells the bus init path not to trigger another device reset while
	 * we re-register with the mlxsw core.
	 */
	mlxsw_pci->skip_reset = true;
	mlxsw_core_bus_device_register(&mlxsw_pci->bus_info, &mlxsw_pci_bus,
				       mlxsw_pci, false, NULL, NULL);
	mlxsw_pci->skip_reset = false;
}

static const struct pci_error_handlers mlxsw_pci_err_handler = {
	.reset_prepare = mlxsw_pci_reset_prepare,
	.reset_done = mlxsw_pci_reset_done,
};

int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
	pci_driver->probe = mlxsw_pci_probe;
	pci_driver->remove = mlxsw_pci_remove;
	pci_driver->shutdown = mlxsw_pci_remove;
	pci_driver->err_handler = &mlxsw_pci_err_handler;
	return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);
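/* Usage sketch (identifiers below are illustrative, not defined in this
 * file): an ASIC-specific driver supplies only its name and PCI ID table
 * and reuses the probe/remove/error handlers wired up above, e.g.:
 *
 *	static struct pci_driver mlxsw_example_pci_driver = {
 *		.name = "mlxsw_example",
 *		.id_table = mlxsw_example_pci_id_table,
 *	};
 *
 *	err = mlxsw_pci_driver_register(&mlxsw_example_pci_driver);
 *	...
 *	mlxsw_pci_driver_unregister(&mlxsw_example_pci_driver);
 */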
void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
{
	pci_unregister_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_unregister);

static int __init mlxsw_pci_module_init(void)
{
	return 0;
}

static void __exit mlxsw_pci_module_exit(void)
{
}

module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");