/* SPDX-License-Identifier: GPL-2.0+ */
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#ifndef __SPARX5_MAIN_H__
#define __SPARX5_MAIN_H__

#include <linux/types.h>
#include <linux/phy/phy.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/bitmap.h>
#include <linux/phylink.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/hrtimer.h>
#include <linux/debugfs.h>
#include <net/flow_offload.h>

#include <fdma_api.h>

#include "sparx5_main_regs.h"

/* Target chip type */
enum spx5_target_chiptype {
	SPX5_TARGET_CT_7546 = 0x7546,     /* SparX-5-64 Enterprise */
	SPX5_TARGET_CT_7549 = 0x7549,     /* SparX-5-90 Enterprise */
	SPX5_TARGET_CT_7552 = 0x7552,     /* SparX-5-128 Enterprise */
	SPX5_TARGET_CT_7556 = 0x7556,     /* SparX-5-160 Enterprise */
	SPX5_TARGET_CT_7558 = 0x7558,     /* SparX-5-200 Enterprise */
	SPX5_TARGET_CT_7546TSN = 0x47546, /* SparX-5-64i Industrial */
	SPX5_TARGET_CT_7549TSN = 0x47549, /* SparX-5-90i Industrial */
	SPX5_TARGET_CT_7552TSN = 0x47552, /* SparX-5-128i Industrial */
	SPX5_TARGET_CT_7556TSN = 0x47556, /* SparX-5-160i Industrial */
	SPX5_TARGET_CT_7558TSN = 0x47558, /* SparX-5-200i Industrial */
};

enum sparx5_port_max_tags {
	SPX5_PORT_MAX_TAGS_NONE, /* No extra tags allowed */
	SPX5_PORT_MAX_TAGS_ONE,  /* Single tag allowed */
	SPX5_PORT_MAX_TAGS_TWO   /* Single and double tag allowed */
};

enum sparx5_vlan_port_type {
	SPX5_VLAN_PORT_TYPE_UNAWARE, /* VLAN unaware port */
	SPX5_VLAN_PORT_TYPE_C,       /* C-port */
	SPX5_VLAN_PORT_TYPE_S,       /* S-port */
	SPX5_VLAN_PORT_TYPE_S_CUSTOM /* S-port using custom type */
};

#define SPX5_PORTS		65
#define SPX5_PORTS_ALL		70 /* Total number of ports */

#define SPX5_PORT_CPU_0		0 /* CPU Port 0 */
#define SPX5_PORT_CPU_1		1 /* CPU Port 1 */
#define SPX5_PORT_VD0		2 /* VD0/Port used for IPMC */
#define SPX5_PORT_VD1		3 /* VD1/Port used for AFI/OAM */
#define SPX5_PORT_VD2		4 /* VD2/Port used for IPinIP */
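
/* Usage sketch: the CPU and virtual device port defines above are offsets
 * that follow the front ports, so the absolute chip port number is typically
 * derived at runtime (illustrative only, using the sparx5_get_internal_port()
 * helper declared further below):
 *
 *	int cpu_port = sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0);
 */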

#define PGID_UC_FLOOD		0
#define PGID_MC_FLOOD		1
#define PGID_IPV4_MC_DATA	2
#define PGID_IPV4_MC_CTRL	3
#define PGID_IPV6_MC_DATA	4
#define PGID_IPV6_MC_CTRL	5
#define PGID_BCAST		6
#define PGID_CPU		7
#define PGID_MCAST_START	8

#define PGID_TABLE_SIZE		3290

#define IFH_LEN			9 /* 36 bytes */
#define NULL_VID		0
#define SPX5_MACT_PULL_DELAY	(2 * HZ)
#define SPX5_STATS_CHECK_DELAY	(1 * HZ)
#define SPX5_PRIOS		8 /* Number of priority queues */
#define SPX5_BUFFER_CELL_SZ	184 /* Cell size */
#define SPX5_BUFFER_MEMORY	4194280 /* 22795 words * 184 bytes */

#define XTR_QUEUE		0
#define INJ_QUEUE		0

#define FDMA_DCB_MAX		64
#define FDMA_RX_DCB_MAX_DBS	15
#define FDMA_TX_DCB_MAX_DBS	1

#define SPARX5_PHC_COUNT	3
#define SPARX5_PHC_PORT		0

#define IFH_REW_OP_NOOP		0x0
#define IFH_REW_OP_ONE_STEP_PTP	0x3
#define IFH_REW_OP_TWO_STEP_PTP	0x4

#define IFH_PDU_TYPE_NONE		0x0
#define IFH_PDU_TYPE_PTP		0x5
#define IFH_PDU_TYPE_IPV4_UDP_PTP	0x6
#define IFH_PDU_TYPE_IPV6_UDP_PTP	0x7

#define SPX5_DSM_CAL_LEN		64
#define SPX5_DSM_CAL_MAX_DEVS_PER_TAXI	13

struct sparx5;

struct sparx5_calendar_data {
	u32 schedule[SPX5_DSM_CAL_LEN];
	u32 avg_dist[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
	u32 taxi_ports[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
	u32 taxi_speeds[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
	u32 dev_slots[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
	u32 new_slots[SPX5_DSM_CAL_LEN];
	u32 temp_sched[SPX5_DSM_CAL_LEN];
	u32 indices[SPX5_DSM_CAL_LEN];
	u32 short_list[SPX5_DSM_CAL_LEN];
	u32 long_list[SPX5_DSM_CAL_LEN];
};

/* Frame DMA receive state:
 * For each DB, there is an SKB, and the skb data pointer is mapped in
 * the DB. Once a frame is received, the skb is given to the upper layers
 * and a new skb is added to the DCB.
 * When the db_index reaches FDMA_RX_DCB_MAX_DBS, the DCB is reused.
 */
struct sparx5_rx {
	struct fdma fdma;
	struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
	dma_addr_t dma;
	struct napi_struct napi;
	struct net_device *ndev;
	u64 packets;
};

/* Frame DMA transmit state:
 * DCBs are chained using the DCBs nextptr field.
 */
struct sparx5_tx {
	struct fdma fdma;
	u64 packets;
	u64 dropped;
};

struct sparx5_port_config {
	phy_interface_t portmode;
	u32 bandwidth;
	int speed;
	int duplex;
	enum phy_media media;
	bool inband;
	bool power_down;
	bool autoneg;
	bool serdes_reset;
	u32 pause;
	u32 pause_adv;
	phy_interface_t phy_mode;
	u32 sd_sgpio;
};

struct sparx5_port {
	struct net_device *ndev;
	struct sparx5 *sparx5;
	struct device_node *of_node;
	struct phy *serdes;
	struct sparx5_port_config conf;
	struct phylink_config phylink_config;
	struct phylink *phylink;
	struct phylink_pcs phylink_pcs;
	struct flow_stats mirror_stats;
	u16 portno;
	/* Ingress default VLAN (pvid) */
	u16 pvid;
	/* Egress default VLAN (vid) */
	u16 vid;
	bool signd_internal;
	bool signd_active_high;
	bool signd_enable;
	bool flow_control;
	enum sparx5_port_max_tags max_vlan_tags;
	enum sparx5_vlan_port_type vlan_type;
	u32 custom_etype;
	bool vlan_aware;
	struct hrtimer inj_timer;
	/* ptp */
	u8 ptp_cmd;
	u16 ts_id;
	struct sk_buff_head tx_skbs;
	bool is_mrouter;
	struct list_head tc_templates; /* list of TC templates on this port */
};

enum sparx5_core_clockfreq {
	SPX5_CORE_CLOCK_DEFAULT, /* Defaults to the highest supported frequency */
	SPX5_CORE_CLOCK_250MHZ,  /* 250MHZ core clock frequency */
	SPX5_CORE_CLOCK_500MHZ,  /* 500MHZ core clock frequency */
	SPX5_CORE_CLOCK_625MHZ,  /* 625MHZ core clock frequency */
};

struct sparx5_phc {
	struct ptp_clock *clock;
	struct ptp_clock_info info;
	struct kernel_hwtstamp_config hwtstamp_config;
	struct sparx5 *sparx5;
	u8 index;
};

struct sparx5_skb_cb {
	u8 rew_op;
	u8 pdu_type;
	u8 pdu_w16_offset;
	u16 ts_id;
	unsigned long jiffies;
};

struct sparx5_mdb_entry {
	struct list_head list;
	DECLARE_BITMAP(port_mask, SPX5_PORTS);
	unsigned char addr[ETH_ALEN];
	bool cpu_copy;
	u16 vid;
	u16 pgid_idx;
};

struct sparx5_mall_mirror_entry {
	u32 idx;
	struct sparx5_port *port;
};

struct sparx5_mall_entry {
	struct list_head list;
	struct sparx5_port *port;
	unsigned long cookie;
	enum flow_action_id type;
	bool ingress;
	union {
		struct sparx5_mall_mirror_entry mirror;
	};
};

#define SPARX5_PTP_TIMEOUT	msecs_to_jiffies(10)
#define SPARX5_SKB_CB(skb) \
	((struct sparx5_skb_cb *)((skb)->cb))
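
/* Usage sketch: a transmit path that wants a PTP timestamp typically stashes
 * the rewriter operation and PDU type in the skb control block through
 * SPARX5_SKB_CB() before the IFH is built (illustrative only):
 *
 *	SPARX5_SKB_CB(skb)->rew_op = IFH_REW_OP_TWO_STEP_PTP;
 *	SPARX5_SKB_CB(skb)->pdu_type = IFH_PDU_TYPE_PTP;
 *	SPARX5_SKB_CB(skb)->jiffies = jiffies;
 *
 * struct sparx5_skb_cb must stay within sizeof(skb->cb) (48 bytes).
 */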

struct sparx5_regs {
	const unsigned int *tsize;
	const unsigned int *gaddr;
	const unsigned int *gcnt;
	const unsigned int *gsize;
	const unsigned int *raddr;
	const unsigned int *rcnt;
	const unsigned int *fpos;
	const unsigned int *fsize;
};

struct sparx5_consts {
	u32 n_ports;             /* Number of front ports */
	u32 n_ports_all;         /* Number of front ports + internal ports */
	u32 n_hsch_l1_elems;     /* Number of HSCH layer 1 elements */
	u32 n_hsch_queues;       /* Number of HSCH queues */
	u32 n_lb_groups;         /* Number of leaky bucket groups */
	u32 n_pgids;             /* Number of PGIDs */
	u32 n_sio_clks;          /* Number of serial IO clocks */
	u32 n_own_upsids;        /* Number of own UPSIDs */
	u32 n_auto_cals;         /* Number of auto calendars */
	u32 n_filters;           /* Number of PSFP filters */
	u32 n_gates;             /* Number of PSFP gates */
	u32 n_sdlbs;             /* Number of service dual leaky buckets */
	u32 n_dsm_cal_taxis;     /* Number of DSM calendar taxis */
	u32 buf_size;            /* Amount of QLIM watermark memory */
	u32 qres_max_prio_idx;   /* Maximum QRES prio index */
	u32 qres_max_colour_idx; /* Maximum QRES colour index */
	u32 tod_pin;             /* PTP TOD pin */
};

struct sparx5_ops {
	bool (*is_port_2g5)(int portno);
	bool (*is_port_5g)(int portno);
	bool (*is_port_10g)(int portno);
	bool (*is_port_25g)(int portno);
	u32 (*get_port_dev_index)(struct sparx5 *sparx5, int port);
	u32 (*get_port_dev_bit)(struct sparx5 *sparx5, int port);
	u32 (*get_hsch_max_group_rate)(int grp);
	struct sparx5_sdlb_group *(*get_sdlb_group)(int idx);
	int (*set_port_mux)(struct sparx5 *sparx5, struct sparx5_port *port,
			    struct sparx5_port_config *conf);

	irqreturn_t (*ptp_irq_handler)(int irq, void *args);
	int (*dsm_calendar_calc)(struct sparx5 *sparx5, u32 taxi,
				 struct sparx5_calendar_data *data);
};

struct sparx5_main_io_resource {
	enum sparx5_target id;
	phys_addr_t offset;
	int range;
};

struct sparx5_match_data {
	const struct sparx5_regs *regs;
	const struct sparx5_consts *consts;
	const struct sparx5_ops *ops;
	const struct sparx5_main_io_resource *iomap;
	int ioranges;
	int iomap_size;
};
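
/* Example sketch: each supported SoC provides one sparx5_match_data instance
 * that the probe code picks up through OF device matching. The initializer
 * names below are illustrative placeholders, not the driver's actual tables:
 *
 *	static const struct sparx5_match_data sparx5_desc = {
 *		.iomap = sparx5_main_iomap,
 *		.iomap_size = ARRAY_SIZE(sparx5_main_iomap),
 *		.ioranges = 3,
 *		.regs = &sparx5_regs,
 *		.consts = &sparx5_consts,
 *		.ops = &sparx5_ops,
 *	};
 */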

struct sparx5 {
	struct platform_device *pdev;
	struct device *dev;
	u32 chip_id;
	enum spx5_target_chiptype target_ct;
	void __iomem *regs[NUM_TARGETS];
	int port_count;
	struct mutex lock; /* MAC reg lock */
	/* port structures are in net device */
	struct sparx5_port *ports[SPX5_PORTS];
	enum sparx5_core_clockfreq coreclock;
	/* Statistics */
	u32 num_stats;
	u32 num_ethtool_stats;
	const char * const *stats_layout;
	u64 *stats;
	/* Workqueue for reading stats */
	struct mutex queue_stats_lock;
	struct delayed_work stats_work;
	struct workqueue_struct *stats_queue;
	/* Notifiers */
	struct notifier_block netdevice_nb;
	struct notifier_block switchdev_nb;
	struct notifier_block switchdev_blocking_nb;
	/* Switch state */
	u8 base_mac[ETH_ALEN];
	/* Associated bridge device (when bridged) */
	struct net_device *hw_bridge_dev;
	/* Bridged interfaces */
	DECLARE_BITMAP(bridge_mask, SPX5_PORTS);
	DECLARE_BITMAP(bridge_fwd_mask, SPX5_PORTS);
	DECLARE_BITMAP(bridge_lrn_mask, SPX5_PORTS);
	DECLARE_BITMAP(vlan_mask[VLAN_N_VID], SPX5_PORTS);
	/* SW MAC table */
	struct list_head mact_entries;
	/* mac table list (mact_entries) mutex */
	struct mutex mact_lock;
	/* SW MDB table */
	struct list_head mdb_entries;
	/* mdb list mutex */
	struct mutex mdb_lock;
	struct delayed_work mact_work;
	struct workqueue_struct *mact_queue;
	/* Board specifics */
	bool sd_sgpio_remapping;
	/* Register based inj/xtr */
	int xtr_irq;
	/* Frame DMA */
	int fdma_irq;
	spinlock_t tx_lock; /* lock for frame transmission */
	struct sparx5_rx rx;
	struct sparx5_tx tx;
	/* PTP */
	bool ptp;
	struct sparx5_phc phc[SPARX5_PHC_COUNT];
	spinlock_t ptp_clock_lock; /* lock for phc */
	spinlock_t ptp_ts_id_lock; /* lock for ts_id */
	struct mutex ptp_lock; /* lock for ptp interface state */
	u16 ptp_skbs;
	int ptp_irq;
	/* VCAP */
	struct vcap_control *vcap_ctrl;
	/* PGID allocation map */
	u8 pgid_map[PGID_TABLE_SIZE];
	struct list_head mall_entries;
	/* Common root for debugfs */
	struct dentry *debugfs_root;
	const struct sparx5_match_data *data;
};

/* sparx5_main.c */
bool is_sparx5(struct sparx5 *sparx5);

/* sparx5_switchdev.c */
int sparx5_register_notifier_blocks(struct sparx5 *sparx5);
void sparx5_unregister_notifier_blocks(struct sparx5 *sparx5);

/* sparx5_packet.c */
struct frame_info {
	int src_port;
	u32 timestamp;
};

void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp);
void sparx5_ifh_parse(u32 *ifh, struct frame_info *info);
irqreturn_t sparx5_xtr_handler(int irq, void *_priv);
netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev);
int sparx5_manual_injection_mode(struct sparx5 *sparx5);
void sparx5_port_inj_timer_setup(struct sparx5_port *port);

/* sparx5_fdma.c */
int sparx5_fdma_start(struct sparx5 *sparx5);
int sparx5_fdma_stop(struct sparx5 *sparx5);
int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb);
irqreturn_t sparx5_fdma_handler(int irq, void *args);

/* sparx5_mactable.c */
void sparx5_mact_pull_work(struct work_struct *work);
int sparx5_mact_learn(struct sparx5 *sparx5, int port,
		      const unsigned char mac[ETH_ALEN], u16 vid);
bool sparx5_mact_getnext(struct sparx5 *sparx5,
			 unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2);
int sparx5_mact_find(struct sparx5 *sparx5,
		     const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2);
int sparx5_mact_forget(struct sparx5 *sparx5,
		       const unsigned char mac[ETH_ALEN], u16 vid);
int sparx5_add_mact_entry(struct sparx5 *sparx5,
			  struct net_device *dev,
			  u16 portno,
			  const unsigned char *addr, u16 vid);
int sparx5_del_mact_entry(struct sparx5 *sparx5,
			  const unsigned char *addr,
			  u16 vid);
int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr);
int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr);
void sparx5_set_ageing(struct sparx5 *sparx5, int msecs);
void sparx5_mact_init(struct sparx5 *sparx5);
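
/* Usage sketch: addresses terminated by the CPU are typically learned towards
 * the CPU port group, e.g. when a port is brought up (illustrative only; the
 * actual call sites live in the .c files):
 *
 *	sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, port->pvid);
 */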

/* sparx5_vlan.c */
void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable);
void sparx5_pgid_clear(struct sparx5 *spx5, int pgid);
void sparx5_pgid_read_mask(struct sparx5 *sparx5, int pgid, u32 portmask[3]);
void sparx5_update_fwd(struct sparx5 *sparx5);
void sparx5_vlan_init(struct sparx5 *sparx5);
void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno);
int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
			bool untagged);
int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid);
void sparx5_vlan_port_apply(struct sparx5 *sparx5, struct sparx5_port *port);

/* sparx5_calendar.c */
int sparx5_config_auto_calendar(struct sparx5 *sparx5);
int sparx5_config_dsm_calendar(struct sparx5 *sparx5);
int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
			     struct sparx5_calendar_data *data);

/* sparx5_ethtool.c */
void sparx5_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats);
int sparx_stats_init(struct sparx5 *sparx5);

/* sparx5_dcb.c */
#ifdef CONFIG_SPARX5_DCB
int sparx5_dcb_init(struct sparx5 *sparx5);
#else
static inline int sparx5_dcb_init(struct sparx5 *sparx5)
{
	return 0;
}
#endif

/* sparx5_netdev.c */
void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp);
void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op);
void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type);
void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset);
void sparx5_set_port_ifh(struct sparx5 *sparx5, void *ifh_hdr, u16 portno);
bool sparx5_netdevice_check(const struct net_device *dev);
struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno);
int sparx5_register_netdevs(struct sparx5 *sparx5);
void sparx5_destroy_netdevs(struct sparx5 *sparx5);
void sparx5_unregister_netdevs(struct sparx5 *sparx5);

/* sparx5_ptp.c */
int sparx5_ptp_init(struct sparx5 *sparx5);
void sparx5_ptp_deinit(struct sparx5 *sparx5);
int sparx5_ptp_hwtstamp_set(struct sparx5_port *port,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack);
void sparx5_ptp_hwtstamp_get(struct sparx5_port *port,
			     struct kernel_hwtstamp_config *cfg);
void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb,
			 u64 timestamp);
int sparx5_ptp_txtstamp_request(struct sparx5_port *port,
				struct sk_buff *skb);
void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
				 struct sk_buff *skb);
irqreturn_t sparx5_ptp_irq_handler(int irq, void *args);
int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);

/* sparx5_vcap_impl.c */
int sparx5_vcap_init(struct sparx5 *sparx5);
void sparx5_vcap_destroy(struct sparx5 *sparx5);

/* sparx5_pgid.c */
enum sparx5_pgid_type {
	SPX5_PGID_FREE,
	SPX5_PGID_RESERVED,
	SPX5_PGID_MULTICAST,
};

void sparx5_pgid_init(struct sparx5 *spx5);
int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx);
int sparx5_pgid_free(struct sparx5 *spx5, u16 idx);
int sparx5_get_pgid(struct sparx5 *sparx5, int pgid);
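
/* Usage sketch: a new MDB group typically allocates a multicast PGID here and
 * then programs the member port mask through the sparx5_vlan.c helpers above
 * (illustrative only):
 *
 *	u16 pgid_idx;
 *	int err;
 *
 *	err = sparx5_pgid_alloc_mcast(sparx5, &pgid_idx);
 *	if (err)
 *		return err;
 *	sparx5_pgid_update_mask(port, pgid_idx, true);
 */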

/* sparx5_pool.c */
struct sparx5_pool_entry {
	u16 ref_cnt;
	u32 idx; /* tc index */
};

u32 sparx5_pool_idx_to_id(u32 idx);
int sparx5_pool_put(struct sparx5_pool_entry *pool, int size, u32 id);
int sparx5_pool_get(struct sparx5_pool_entry *pool, int size, u32 *id);
int sparx5_pool_get_with_idx(struct sparx5_pool_entry *pool, int size, u32 idx,
			     u32 *id);

/* sparx5_port.c */
int sparx5_port_mux_set(struct sparx5 *sparx5, struct sparx5_port *port,
			struct sparx5_port_config *conf);
int sparx5_get_internal_port(struct sparx5 *sparx5, int port);

/* sparx5_sdlb.c */
#define SPX5_SDLB_PUP_TOKEN_DISABLE 0x1FFF
#define SPX5_SDLB_PUP_TOKEN_MAX (SPX5_SDLB_PUP_TOKEN_DISABLE - 1)
#define SPX5_SDLB_GROUP_RATE_MAX 25000000000ULL
#define SPX5_SDLB_2CYCLES_TYPE2_THRES_OFFSET 13
#define SPX5_SDLB_CNT 4096
#define SPX5_SDLB_GROUP_CNT 10
#define SPX5_CLK_PER_100PS_DEFAULT 16

struct sparx5_sdlb_group {
	u64 max_rate;
	u32 min_burst;
	u32 frame_size;
	u32 pup_interval;
	u32 nsets;
};

extern struct sparx5_sdlb_group sdlb_groups[SPX5_SDLB_GROUP_CNT];
struct sparx5_sdlb_group *sparx5_get_sdlb_group(int idx);
int sparx5_sdlb_pup_token_get(struct sparx5 *sparx5, u32 pup_interval,
			      u64 rate);

int sparx5_sdlb_clk_hz_get(struct sparx5 *sparx5);
int sparx5_sdlb_group_get_by_rate(struct sparx5 *sparx5, u32 rate, u32 burst);
int sparx5_sdlb_group_get_by_index(struct sparx5 *sparx5, u32 idx, u32 *group);

int sparx5_sdlb_group_add(struct sparx5 *sparx5, u32 group, u32 idx);
int sparx5_sdlb_group_del(struct sparx5 *sparx5, u32 group, u32 idx);

void sparx5_sdlb_group_init(struct sparx5 *sparx5, u64 max_rate, u32 min_burst,
			    u32 frame_size, u32 idx);

/* sparx5_police.c */
enum {
	/* More policer types will be added later */
	SPX5_POL_SERVICE
};

struct sparx5_policer {
	u32 type;
	u32 idx;
	u64 rate;
	u32 burst;
	u32 group;
	u8 event_mask;
};

int sparx5_policer_conf_set(struct sparx5 *sparx5, struct sparx5_policer *pol);
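
/* Usage sketch: a tc police action is typically mapped onto a service policer
 * roughly as below (illustrative only; rate is converted from bytes/s to
 * kbit/s, "act" is assumed to be a struct flow_action_entry and "fmid" a
 * previously allocated flow meter index):
 *
 *	struct sparx5_policer pol = {
 *		.type = SPX5_POL_SERVICE,
 *		.rate = div_u64(act->police.rate_bytes_ps, 1000) * 8,
 *		.burst = act->police.burst,
 *		.idx = fmid,
 *	};
 *
 *	err = sparx5_policer_conf_set(sparx5, &pol);
 */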

/* sparx5_psfp.c */
#define SPX5_PSFP_GCE_CNT 4
#define SPX5_PSFP_SG_CNT 1024
#define SPX5_PSFP_SG_MIN_CYCLE_TIME_NS (1 * NSEC_PER_USEC)
#define SPX5_PSFP_SG_MAX_CYCLE_TIME_NS ((1 * NSEC_PER_SEC) - 1)
#define SPX5_PSFP_SG_MAX_IPV (SPX5_PRIOS - 1)
#define SPX5_PSFP_SG_OPEN (SPX5_PSFP_SG_CNT - 1)
#define SPX5_PSFP_SG_CYCLE_TIME_DEFAULT 1000000
#define SPX5_PSFP_SF_MAX_SDU 16383

struct sparx5_psfp_fm {
	struct sparx5_policer pol;
};

struct sparx5_psfp_gce {
	bool gate_state; /* StreamGateState */
	u32 interval;    /* TimeInterval */
	u32 ipv;         /* InternalPriorityValue */
	u32 maxoctets;   /* IntervalOctetMax */
};

struct sparx5_psfp_sg {
	bool gate_state;            /* PSFPAdminGateStates */
	bool gate_enabled;          /* PSFPGateEnabled */
	u32 ipv;                    /* PSFPAdminIPV */
	struct timespec64 basetime; /* PSFPAdminBaseTime */
	u32 cycletime;              /* PSFPAdminCycleTime */
	u32 cycletimeext;           /* PSFPAdminCycleTimeExtension */
	u32 num_entries;            /* PSFPAdminControlListLength */
	struct sparx5_psfp_gce gce[SPX5_PSFP_GCE_CNT];
};

struct sparx5_psfp_sf {
	bool sblock_osize_ena;
	bool sblock_osize;
	u32 max_sdu;
	u32 sgid; /* Gate id */
	u32 fmid; /* Flow meter id */
};

int sparx5_psfp_fm_add(struct sparx5 *sparx5, u32 uidx,
		       struct sparx5_psfp_fm *fm, u32 *id);
int sparx5_psfp_fm_del(struct sparx5 *sparx5, u32 id);

int sparx5_psfp_sg_add(struct sparx5 *sparx5, u32 uidx,
		       struct sparx5_psfp_sg *sg, u32 *id);
int sparx5_psfp_sg_del(struct sparx5 *sparx5, u32 id);

int sparx5_psfp_sf_add(struct sparx5 *sparx5, const struct sparx5_psfp_sf *sf,
		       u32 *id);
int sparx5_psfp_sf_del(struct sparx5 *sparx5, u32 id);

u32 sparx5_psfp_isdx_get_sf(struct sparx5 *sparx5, u32 isdx);
u32 sparx5_psfp_isdx_get_fm(struct sparx5 *sparx5, u32 isdx);
u32 sparx5_psfp_sf_get_sg(struct sparx5 *sparx5, u32 sfid);
void sparx5_isdx_conf_set(struct sparx5 *sparx5, u32 isdx, u32 sfid, u32 fmid);

void sparx5_psfp_init(struct sparx5 *sparx5);

/* sparx5_qos.c */
void sparx5_new_base_time(struct sparx5 *sparx5, const u32 cycle_time,
			  const ktime_t org_base_time, ktime_t *new_base_time);

/* sparx5_mirror.c */
int sparx5_mirror_add(struct sparx5_mall_entry *entry);
void sparx5_mirror_del(struct sparx5_mall_entry *entry);
void sparx5_mirror_stats(struct sparx5_mall_entry *entry,
			 struct flow_stats *fstats);

/* Clock period in picoseconds */
static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock)
{
	switch (cclock) {
	case SPX5_CORE_CLOCK_250MHZ:
		return 4000;
	case SPX5_CORE_CLOCK_500MHZ:
		return 2000;
	case SPX5_CORE_CLOCK_625MHZ:
	default:
		return 1600;
	}
}

static inline bool sparx5_is_baser(phy_interface_t interface)
{
	return interface == PHY_INTERFACE_MODE_5GBASER ||
	       interface == PHY_INTERFACE_MODE_10GBASER ||
	       interface == PHY_INTERFACE_MODE_25GBASER;
}

extern const struct phylink_mac_ops sparx5_phylink_mac_ops;
extern const struct phylink_pcs_ops sparx5_phylink_pcs_ops;
extern const struct ethtool_ops sparx5_ethtool_ops;
extern const struct dcbnl_rtnl_ops sparx5_dcbnl_ops;

/* Calculate raw offset */
static inline __pure int spx5_offset(int id, int tinst, int tcnt,
				     int gbase, int ginst,
				     int gcnt, int gwidth,
				     int raddr, int rinst,
				     int rcnt, int rwidth)
{
	WARN_ON((tinst) >= tcnt);
	WARN_ON((ginst) >= gcnt);
	WARN_ON((rinst) >= rcnt);
	return gbase + ((ginst) * gwidth) +
		raddr + ((rinst) * rwidth);
}
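
/* Usage sketch: the per-register macros generated in sparx5_main_regs.h
 * expand into the full (id, tinst, tcnt, gbase, ginst, gcnt, gwidth, raddr,
 * rinst, rcnt, rwidth) argument list consumed by the accessors below, so a
 * caller only names the register and its instance, e.g. (assuming the
 * ANA_CL_VLAN_CTRL macros from sparx5_main_regs.h):
 *
 *	spx5_rmw(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(1),
 *		 ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA,
 *		 sparx5, ANA_CL_VLAN_CTRL(port->portno));
 */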

/* Read, Write and modify registers content.
 * The register definition macros start at the id
 */
static inline void __iomem *spx5_addr(void __iomem *base[],
				      int id, int tinst, int tcnt,
				      int gbase, int ginst,
				      int gcnt, int gwidth,
				      int raddr, int rinst,
				      int rcnt, int rwidth)
{
	WARN_ON((tinst) >= tcnt);
	WARN_ON((ginst) >= gcnt);
	WARN_ON((rinst) >= rcnt);
	return base[id + (tinst)] +
		gbase + ((ginst) * gwidth) +
		raddr + ((rinst) * rwidth);
}

static inline void __iomem *spx5_inst_addr(void __iomem *base,
					   int gbase, int ginst,
					   int gcnt, int gwidth,
					   int raddr, int rinst,
					   int rcnt, int rwidth)
{
	WARN_ON((ginst) >= gcnt);
	WARN_ON((rinst) >= rcnt);
	return base +
		gbase + ((ginst) * gwidth) +
		raddr + ((rinst) * rwidth);
}

static inline u32 spx5_rd(struct sparx5 *sparx5, int id, int tinst, int tcnt,
			  int gbase, int ginst, int gcnt, int gwidth,
			  int raddr, int rinst, int rcnt, int rwidth)
{
	return readl(spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
			       gcnt, gwidth, raddr, rinst, rcnt, rwidth));
}

static inline u32 spx5_inst_rd(void __iomem *iomem, int id, int tinst, int tcnt,
			       int gbase, int ginst, int gcnt, int gwidth,
			       int raddr, int rinst, int rcnt, int rwidth)
{
	return readl(spx5_inst_addr(iomem, gbase, ginst,
				    gcnt, gwidth, raddr, rinst, rcnt, rwidth));
}

static inline void spx5_wr(u32 val, struct sparx5 *sparx5,
			   int id, int tinst, int tcnt,
			   int gbase, int ginst, int gcnt, int gwidth,
			   int raddr, int rinst, int rcnt, int rwidth)
{
	writel(val, spx5_addr(sparx5->regs, id, tinst, tcnt,
			      gbase, ginst, gcnt, gwidth,
			      raddr, rinst, rcnt, rwidth));
}

static inline void spx5_inst_wr(u32 val, void __iomem *iomem,
				int id, int tinst, int tcnt,
				int gbase, int ginst, int gcnt, int gwidth,
				int raddr, int rinst, int rcnt, int rwidth)
{
	writel(val, spx5_inst_addr(iomem,
				   gbase, ginst, gcnt, gwidth,
				   raddr, rinst, rcnt, rwidth));
}

static inline void spx5_rmw(u32 val, u32 mask, struct sparx5 *sparx5,
			    int id, int tinst, int tcnt,
			    int gbase, int ginst, int gcnt, int gwidth,
			    int raddr, int rinst, int rcnt, int rwidth)
{
	u32 nval;

	nval = readl(spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
			       gcnt, gwidth, raddr, rinst, rcnt, rwidth));
	nval = (nval & ~mask) | (val & mask);
	writel(nval, spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
			       gcnt, gwidth, raddr, rinst, rcnt, rwidth));
}

static inline void spx5_inst_rmw(u32 val, u32 mask, void __iomem *iomem,
				 int id, int tinst, int tcnt,
				 int gbase, int ginst, int gcnt, int gwidth,
				 int raddr, int rinst, int rcnt, int rwidth)
{
	u32 nval;

	nval = readl(spx5_inst_addr(iomem, gbase, ginst, gcnt, gwidth, raddr,
				    rinst, rcnt, rwidth));
	nval = (nval & ~mask) | (val & mask);
	writel(nval, spx5_inst_addr(iomem, gbase, ginst, gcnt, gwidth, raddr,
				    rinst, rcnt, rwidth));
}

static inline void __iomem *spx5_inst_get(struct sparx5 *sparx5, int id, int tinst)
{
	return sparx5->regs[id + tinst];
}

static inline void __iomem *spx5_reg_get(struct sparx5 *sparx5,
					 int id, int tinst, int tcnt,
					 int gbase, int ginst, int gcnt, int gwidth,
					 int raddr, int rinst, int rcnt, int rwidth)
{
	return spx5_addr(sparx5->regs, id, tinst, tcnt,
			 gbase, ginst, gcnt, gwidth,
			 raddr, rinst, rcnt, rwidth);
}

#endif /* __SPARX5_MAIN_H__ */