// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

/* PPE HW initialization configs such as BM(buffer management),
 * QM(queue management) and scheduler configs.
 */

#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/regmap.h>

#include "ppe.h"
#include "ppe_config.h"
#include "ppe_regs.h"

#define PPE_QUEUE_SCH_PRI_NUM 8

/**
 * struct ppe_bm_port_config - PPE BM port configuration.
 * @port_id_start: The first BM port ID to configure.
 * @port_id_end: The last BM port ID to configure.
 * @pre_alloc: BM port dedicated buffer number.
 * @in_fly_buf: Buffer number for receiving the packet after pause frame sent.
 * @ceil: Ceil to generate the back pressure.
 * @weight: Weight value.
 * @resume_offset: Resume offset from the threshold value.
 * @resume_ceil: Ceil to resume from the back pressure state.
 * @dynamic: Dynamic threshold used or not.
 *
 * This is for configuring the threshold that impacts the port
 * flow control.
 */
struct ppe_bm_port_config {
	unsigned int port_id_start;
	unsigned int port_id_end;
	unsigned int pre_alloc;
	unsigned int in_fly_buf;
	unsigned int ceil;
	unsigned int weight;
	unsigned int resume_offset;
	unsigned int resume_ceil;
	bool dynamic;
};

/**
 * struct ppe_qm_queue_config - PPE queue config.
 * @queue_start: PPE start of queue ID.
 * @queue_end: PPE end of queue ID.
 * @prealloc_buf: Queue dedicated buffer number.
 * @ceil: Ceil to start drop packet from queue.
 * @weight: Weight value.
 * @resume_offset: Resume offset from the threshold.
 * @dynamic: Threshold value is decided dynamically or statically.
 *
 * Queue configuration decides the threshold to drop packet from PPE
 * hardware queue.
 */
struct ppe_qm_queue_config {
	unsigned int queue_start;
	unsigned int queue_end;
	unsigned int prealloc_buf;
	unsigned int ceil;
	unsigned int weight;
	unsigned int resume_offset;
	bool dynamic;
};

/**
 * enum ppe_scheduler_direction - PPE scheduler direction for packet.
 * @PPE_SCH_INGRESS: Scheduler for the packet on ingress.
 * @PPE_SCH_EGRESS: Scheduler for the packet on egress.
 */
enum ppe_scheduler_direction {
	PPE_SCH_INGRESS = 0,
	PPE_SCH_EGRESS = 1,
};

/**
 * struct ppe_scheduler_bm_config - PPE arbitration for buffer config.
 * @valid: Arbitration entry valid or not.
 * @dir: Arbitration entry for egress or ingress.
 * @port: Port ID to use arbitration entry.
 * @backup_port_valid: Backup port valid or not.
 * @backup_port: Backup port ID to use.
 *
 * Configure the scheduler settings for accessing and releasing the PPE buffers.
 */
struct ppe_scheduler_bm_config {
	bool valid;
	enum ppe_scheduler_direction dir;
	unsigned int port;
	bool backup_port_valid;
	unsigned int backup_port;
};

/**
 * struct ppe_scheduler_qm_config - PPE arbitration for scheduler config.
 * @ensch_port_bmp: Port bit map for enqueue scheduler.
 * @ensch_port: Port ID to enqueue scheduler.
 * @desch_port: Port ID to dequeue scheduler.
 * @desch_backup_port_valid: Dequeue for the backup port valid or not.
 * @desch_backup_port: Backup port ID to dequeue scheduler.
 *
 * Configure the scheduler settings for enqueuing and dequeuing packets on
 * the PPE port.
 */
struct ppe_scheduler_qm_config {
	unsigned int ensch_port_bmp;
	unsigned int ensch_port;
	unsigned int desch_port;
	bool desch_backup_port_valid;
	unsigned int desch_backup_port;
};

/**
 * struct ppe_scheduler_port_config - PPE port scheduler config.
 * @port: Port ID to be scheduled.
 * @flow_level: Scheduler flow level or not.
 * @node_id: Node ID, for level 0, queue ID is used.
 * @loop_num: Loop number of scheduler config.
 * @pri_max: Max priority configured.
 * @flow_id: Strict priority ID.
 * @drr_node_id: Node ID for scheduler.
 *
 * PPE port scheduler configuration which decides the priority in the
 * packet scheduler for the egress port.
 */
struct ppe_scheduler_port_config {
	unsigned int port;
	bool flow_level;
	unsigned int node_id;
	unsigned int loop_num;
	unsigned int pri_max;
	unsigned int flow_id;
	unsigned int drr_node_id;
};

/**
 * struct ppe_port_schedule_resource - PPE port scheduler resource.
 * @ucastq_start: Unicast queue start ID.
 * @ucastq_end: Unicast queue end ID.
 * @mcastq_start: Multicast queue start ID.
 * @mcastq_end: Multicast queue end ID.
 * @flow_id_start: Flow start ID.
 * @flow_id_end: Flow end ID.
 * @l0node_start: Scheduler node start ID for queue level.
 * @l0node_end: Scheduler node end ID for queue level.
 * @l1node_start: Scheduler node start ID for flow level.
 * @l1node_end: Scheduler node end ID for flow level.
 *
 * PPE scheduler resource allocated among the PPE ports.
 */
struct ppe_port_schedule_resource {
	unsigned int ucastq_start;
	unsigned int ucastq_end;
	unsigned int mcastq_start;
	unsigned int mcastq_end;
	unsigned int flow_id_start;
	unsigned int flow_id_end;
	unsigned int l0node_start;
	unsigned int l0node_end;
	unsigned int l1node_start;
	unsigned int l1node_end;
};

/* There are a total of 2048 buffers available in PPE, out of which some
 * buffers are reserved for specific purposes per PPE port. The remaining
 * pool of 1550 buffers is assigned to the general 'group0', which is
 * shared among all ports of the PPE.
 */
static const int ipq9574_ppe_bm_group_config = 1550;

/* The buffer configurations per PPE port. There are 15 BM ports and
 * 4 BM groups supported by PPE. BM port (0-7) is for EDMA port 0,
 * BM port (8-13) is for PPE physical port 1-6 and BM port 14 is for
 * EIP port.
 */
static const struct ppe_bm_port_config ipq9574_ppe_bm_port_config[] = {
	{
		/* Buffer configuration for the BM port ID 0 of EDMA. */
		.port_id_start = 0,
		.port_id_end = 0,
		.pre_alloc = 0,
		.in_fly_buf = 100,
		.ceil = 1146,
		.weight = 7,
		.resume_offset = 8,
		.resume_ceil = 0,
		.dynamic = true,
	},
	{
		/* Buffer configuration for the BM port ID 1-7 of EDMA. */
		.port_id_start = 1,
		.port_id_end = 7,
		.pre_alloc = 0,
		.in_fly_buf = 100,
		.ceil = 250,
		.weight = 4,
		.resume_offset = 36,
		.resume_ceil = 0,
		.dynamic = true,
	},
	{
		/* Buffer configuration for the BM port ID 8-13 of PPE ports. */
		.port_id_start = 8,
		.port_id_end = 13,
		.pre_alloc = 0,
		.in_fly_buf = 128,
		.ceil = 250,
		.weight = 4,
		.resume_offset = 36,
		.resume_ceil = 0,
		.dynamic = true,
	},
	{
		/* Buffer configuration for the BM port ID 14 of EIP. */
		.port_id_start = 14,
		.port_id_end = 14,
		.pre_alloc = 0,
		.in_fly_buf = 40,
		.ceil = 250,
		.weight = 4,
		.resume_offset = 36,
		.resume_ceil = 0,
		.dynamic = true,
	},
};

/* QM fetches the packet from PPE buffer management for transmitting the
 * packet out. The QM group configuration limits the total number of buffers
 * enqueued by all PPE hardware queues.
 * There are a total of 2048 buffers available, out of which some buffers are
 * dedicated to hardware exception handlers. The remaining buffers are
 * assigned to the general 'group0', which is the group assigned to all
 * queues by default.
 */
static const int ipq9574_ppe_qm_group_config = 2000;

/* Default QM settings for unicast and multicast queues for IPQ9574. */
static const struct ppe_qm_queue_config ipq9574_ppe_qm_queue_config[] = {
	{
		/* QM settings for unicast queues 0 to 255. */
		.queue_start = 0,
		.queue_end = 255,
		.prealloc_buf = 0,
		.ceil = 1200,
		.weight = 7,
		.resume_offset = 36,
		.dynamic = true,
	},
	{
		/* QM settings for multicast queues 256 to 299. */
		.queue_start = 256,
		.queue_end = 299,
		.prealloc_buf = 0,
		.ceil = 250,
		.weight = 0,
		.resume_offset = 36,
		.dynamic = false,
	},
};

/* PPE scheduler configuration for BM includes multiple entries. Each entry
 * indicates the primary port to be assigned the buffers for the ingress or
 * to release the buffers for the egress. Backup port ID will be used when
 * the primary port ID is down.
 */
static const struct ppe_scheduler_bm_config ipq9574_ppe_sch_bm_config[] = {
	{true, PPE_SCH_INGRESS, 0, false, 0},
	{true, PPE_SCH_EGRESS, 0, false, 0},
	{true, PPE_SCH_INGRESS, 5, false, 0},
	{true, PPE_SCH_EGRESS, 5, false, 0},
	{true, PPE_SCH_INGRESS, 6, false, 0},
	{true, PPE_SCH_EGRESS, 6, false, 0},
	{true, PPE_SCH_INGRESS, 1, false, 0},
	{true, PPE_SCH_EGRESS, 1, false, 0},
	{true, PPE_SCH_INGRESS, 0, false, 0},
	{true, PPE_SCH_EGRESS, 0, false, 0},
	{true, PPE_SCH_INGRESS, 5, false, 0},
	{true, PPE_SCH_EGRESS, 5, false, 0},
	{true, PPE_SCH_INGRESS, 6, false, 0},
	{true, PPE_SCH_EGRESS, 6, false, 0},
	{true, PPE_SCH_INGRESS, 7, false, 0},
	{true, PPE_SCH_EGRESS, 7, false, 0},
	{true, PPE_SCH_INGRESS, 0, false, 0},
	{true, PPE_SCH_EGRESS, 0, false, 0},
	{true, PPE_SCH_INGRESS, 1, false, 0},
	{true, PPE_SCH_EGRESS, 1, false, 0},
	{true, PPE_SCH_INGRESS, 5, false, 0},
	{true, PPE_SCH_EGRESS, 5, false, 0},
	{true, PPE_SCH_INGRESS, 6, false, 0},
	{true, PPE_SCH_EGRESS, 6, false, 0},
	{true, PPE_SCH_INGRESS, 2, false, 0},
	{true, PPE_SCH_EGRESS, 2, false, 0},
	{true, PPE_SCH_INGRESS, 0, false, 0},
	{true, PPE_SCH_EGRESS, 0, false, 0},
	{true, PPE_SCH_INGRESS, 5, false, 0},
	{true, PPE_SCH_EGRESS, 5, false, 0},
	{true, PPE_SCH_INGRESS, 6, false, 0},
	{true, PPE_SCH_EGRESS, 6, false, 0},
	{true, PPE_SCH_INGRESS, 1, false, 0},
	{true, PPE_SCH_EGRESS, 1, false, 0},
	{true, PPE_SCH_INGRESS, 3, false, 0},
	{true, PPE_SCH_EGRESS, 3, false, 0},
	{true, PPE_SCH_INGRESS, 0, false, 0},
	{true, PPE_SCH_EGRESS, 0, false, 0},
	{true, PPE_SCH_INGRESS, 5, false, 0},
	{true, PPE_SCH_EGRESS, 5, false, 0},
	{true, PPE_SCH_INGRESS, 6, false, 0},
	{true, PPE_SCH_EGRESS, 6, false, 0},
	{true, PPE_SCH_INGRESS, 7, false, 0},
	{true, PPE_SCH_EGRESS, 7, false, 0},
	{true, PPE_SCH_INGRESS, 0, false, 0},
	{true, PPE_SCH_EGRESS, 0, false, 0},
	{true, PPE_SCH_INGRESS, 1, false, 0},
	{true, PPE_SCH_EGRESS, 1, false, 0},
	{true, PPE_SCH_INGRESS, 5, false, 0},
	{true, PPE_SCH_EGRESS, 5, false, 0},
	{true, PPE_SCH_INGRESS, 6, false, 0},
	{true, PPE_SCH_EGRESS, 6, false, 0},
	{true, PPE_SCH_INGRESS, 4, false, 0},
	{true, PPE_SCH_EGRESS, 4, false, 0},
	{true, PPE_SCH_INGRESS, 0, false, 0},
	{true, PPE_SCH_EGRESS, 0, false, 0},
	{true, PPE_SCH_INGRESS, 5, false, 0},
	{true, PPE_SCH_EGRESS, 5, false, 0},
	{true, PPE_SCH_INGRESS, 6, false, 0},
	{true, PPE_SCH_EGRESS, 6, false, 0},
	{true, PPE_SCH_INGRESS, 1, false, 0},
	{true, PPE_SCH_EGRESS, 1, false, 0},
	{true, PPE_SCH_INGRESS, 0, false, 0},
	{true, PPE_SCH_EGRESS, 0, false, 0},
	{true, PPE_SCH_INGRESS, 5, false, 0},
	{true, PPE_SCH_EGRESS, 5, false, 0},
	{true, PPE_SCH_INGRESS, 6, false, 0},
	{true, PPE_SCH_EGRESS, 6, false, 0},
	{true, PPE_SCH_INGRESS, 2, false, 0},
	{true, PPE_SCH_EGRESS, 2, false, 0},
	{true, PPE_SCH_INGRESS, 0, false, 0},
	{true, PPE_SCH_EGRESS, 0, false, 0},
	{true, PPE_SCH_INGRESS, 7, false, 0},
	{true, PPE_SCH_EGRESS, 7, false, 0},
	{true, PPE_SCH_INGRESS, 5, false, 0},
	{true, PPE_SCH_EGRESS, 5, false, 0},
	{true, PPE_SCH_INGRESS, 6, false, 0},
	{true, PPE_SCH_EGRESS, 6, false, 0},
	{true, PPE_SCH_INGRESS, 1, false, 0},
	{true, PPE_SCH_EGRESS, 1, false, 0},
	{true, PPE_SCH_INGRESS, 0, false, 0},
	{true, PPE_SCH_EGRESS, 0, false, 0},
	{true, PPE_SCH_INGRESS, 5, false, 0},
	{true, PPE_SCH_EGRESS, 5, false, 0},
	{true, PPE_SCH_INGRESS, 6, false, 0},
	{true, PPE_SCH_EGRESS, 6, false, 0},
	{true, PPE_SCH_INGRESS, 3, false, 0},
	{true, PPE_SCH_EGRESS, 3, false, 0},
	{true, PPE_SCH_INGRESS, 1, false, 0},
	{true, PPE_SCH_EGRESS, 1, false, 0},
	{true, PPE_SCH_INGRESS, 0, false, 0},
	{true, PPE_SCH_EGRESS, 0, false, 0},
	{true, PPE_SCH_INGRESS, 5, false, 0},
	{true, PPE_SCH_EGRESS, 5, false, 0},
	{true, PPE_SCH_INGRESS, 6, false, 0},
	{true, PPE_SCH_EGRESS, 6, false, 0},
	{true, PPE_SCH_INGRESS, 4, false, 0},
	{true, PPE_SCH_EGRESS, 4, false, 0},
	{true, PPE_SCH_INGRESS, 7, false, 0},
	{true, PPE_SCH_EGRESS, 7, false, 0},
};

/* PPE scheduler configuration for QM includes multiple entries. Each entry
 * contains ports to be dispatched for enqueueing and dequeueing. A backup
 * dequeue port can be used when the primary dequeue port is down.
 */
static const struct ppe_scheduler_qm_config ipq9574_ppe_sch_qm_config[] = {
	{0x98, 6, 0, true, 1},
	{0x94, 5, 6, true, 3},
	{0x86, 0, 5, true, 4},
	{0x8C, 1, 6, true, 0},
	{0x1C, 7, 5, true, 1},
	{0x98, 2, 6, true, 0},
	{0x1C, 5, 7, true, 1},
	{0x34, 3, 6, true, 0},
	{0x8C, 4, 5, true, 1},
	{0x98, 2, 6, true, 0},
	{0x8C, 5, 4, true, 1},
	{0xA8, 0, 6, true, 2},
	{0x98, 5, 1, true, 0},
	{0x98, 6, 5, true, 2},
	{0x89, 1, 6, true, 4},
	{0xA4, 3, 0, true, 1},
	{0x8C, 5, 6, true, 4},
	{0xA8, 0, 2, true, 1},
	{0x98, 6, 5, true, 0},
	{0xC4, 4, 3, true, 1},
	{0x94, 6, 5, true, 0},
	{0x1C, 7, 6, true, 1},
	{0x98, 2, 5, true, 0},
	{0x1C, 6, 7, true, 1},
	{0x1C, 5, 6, true, 0},
	{0x94, 3, 5, true, 1},
	{0x8C, 4, 6, true, 0},
	{0x94, 1, 5, true, 3},
	{0x94, 6, 1, true, 0},
	{0xD0, 3, 5, true, 2},
	{0x98, 6, 0, true, 1},
	{0x94, 5, 6, true, 3},
	{0x94, 1, 5, true, 0},
	{0x98, 2, 6, true, 1},
	{0x8C, 4, 5, true, 0},
	{0x1C, 7, 6, true, 1},
	{0x8C, 0, 5, true, 4},
	{0x89, 1, 6, true, 2},
	{0x98, 5, 0, true, 1},
	{0x94, 6, 5, true, 3},
	{0x92, 0, 6, true, 2},
	{0x98, 1, 5, true, 0},
	{0x98, 6, 2, true, 1},
	{0xD0, 0, 5, true, 3},
	{0x94, 6, 0, true, 1},
	{0x8C, 5, 6, true, 4},
	{0x8C, 1, 5, true, 0},
	{0x1C, 6, 7, true, 1},
	{0x1C, 5, 6, true, 0},
	{0xB0, 2, 3, true, 1},
	{0xC4, 4, 5, true, 0},
	{0x8C, 6, 4, true, 1},
	{0xA4, 3, 6, true, 0},
	{0x1C, 5, 7, true, 1},
	{0x4C, 0, 5, true, 4},
	{0x8C, 6, 0, true, 1},
	{0x34, 7, 6, true, 3},
	{0x94, 5, 0, true, 1},
	{0x98, 6, 5, true, 2},
};

static const struct ppe_scheduler_port_config ppe_port_sch_config[] = {
	{
		.port = 0,
		.flow_level = true,
		.node_id = 0,
		.loop_num = 1,
		.pri_max = 1,
		.flow_id = 0,
		.drr_node_id = 0,
	},
	{
		.port = 0,
		.flow_level = false,
		.node_id = 0,
		.loop_num = 8,
		.pri_max = 8,
		.flow_id = 0,
		.drr_node_id = 0,
	},
	{
		.port = 0,
		.flow_level = false,
		.node_id = 8,
		.loop_num = 8,
		.pri_max = 8,
		.flow_id = 0,
		.drr_node_id = 0,
	},
	{
		.port = 0,
		.flow_level = false,
		.node_id = 16,
		.loop_num = 8,
		.pri_max = 8,
		.flow_id = 0,
		.drr_node_id = 0,
	},
	{
		.port = 0,
		.flow_level = false,
		.node_id = 24,
		.loop_num = 8,
		.pri_max = 8,
		.flow_id = 0,
		.drr_node_id = 0,
	},
	{
		.port = 0,
		.flow_level = false,
		.node_id = 32,
		.loop_num = 8,
		.pri_max = 8,
		.flow_id = 0,
		.drr_node_id = 0,
	},
	{
		.port = 0,
		.flow_level = false,
		.node_id = 40,
		.loop_num = 8,
		.pri_max = 8,
		.flow_id = 0,
		.drr_node_id = 0,
	},
	{
		.port = 0,
		.flow_level = false,
		.node_id = 48,
		.loop_num = 8,
		.pri_max = 8,
		.flow_id = 0,
		.drr_node_id = 0,
	},
	{
		.port = 0,
		.flow_level = false,
		.node_id = 56,
		.loop_num = 8,
		.pri_max = 8,
		.flow_id = 0,
		.drr_node_id = 0,
	},
	{
		.port = 0,
		.flow_level = false,
		.node_id = 256,
		.loop_num = 8,
		.pri_max = 8,
		.flow_id = 0,
		.drr_node_id = 0,
	},
	{
		.port = 0,
		.flow_level = false,
		.node_id = 264,
		.loop_num = 8,
		.pri_max = 8,
		.flow_id = 0,
		.drr_node_id = 0,
	},
	{
		.port = 1,
		.flow_level = true,
		.node_id = 36,
		.loop_num = 2,
		.pri_max = 0,
		.flow_id = 1,
		.drr_node_id = 8,
	},
	{
		.port = 1,
		.flow_level = false,
		.node_id = 144,
		.loop_num = 16,
		.pri_max = 8,
		.flow_id = 36,
		.drr_node_id = 48,
	},
	{
		.port = 1,
		.flow_level = false,
		.node_id = 272,
		.loop_num = 4,
		.pri_max = 4,
		.flow_id = 36,
		.drr_node_id = 48,
	},
	{
		.port = 2,
		.flow_level = true,
		.node_id = 40,
		.loop_num = 2,
		.pri_max = 0,
		.flow_id = 2,
		.drr_node_id = 12,
	},
	{
		.port = 2,
		.flow_level = false,
		.node_id = 160,
		.loop_num = 16,
		.pri_max = 8,
		.flow_id = 40,
		.drr_node_id = 64,
	},
	{
		.port = 2,
		.flow_level = false,
		.node_id = 276,
		.loop_num = 4,
		.pri_max = 4,
		.flow_id = 40,
		.drr_node_id = 64,
	},
	{
		.port = 3,
		.flow_level = true,
		.node_id = 44,
		.loop_num = 2,
		.pri_max = 0,
		.flow_id = 3,
		.drr_node_id = 16,
	},
	{
		.port = 3,
		.flow_level = false,
		.node_id = 176,
		.loop_num = 16,
		.pri_max = 8,
		.flow_id = 44,
		.drr_node_id = 80,
	},
	{
		.port = 3,
		.flow_level = false,
		.node_id = 280,
		.loop_num = 4,
		.pri_max = 4,
		.flow_id = 44,
		.drr_node_id = 80,
	},
	{
		.port = 4,
		.flow_level = true,
		.node_id = 48,
		.loop_num = 2,
		.pri_max = 0,
		.flow_id = 4,
		.drr_node_id = 20,
	},
	{
		.port = 4,
		.flow_level = false,
		.node_id = 192,
		.loop_num = 16,
		.pri_max = 8,
		.flow_id = 48,
		.drr_node_id = 96,
	},
	{
		.port = 4,
		.flow_level = false,
		.node_id = 284,
		.loop_num = 4,
		.pri_max = 4,
		.flow_id = 48,
		.drr_node_id = 96,
	},
	{
		.port = 5,
		.flow_level = true,
		.node_id = 52,
		.loop_num = 2,
		.pri_max = 0,
		.flow_id = 5,
		.drr_node_id = 24,
	},
	{
		.port = 5,
		.flow_level = false,
		.node_id = 208,
		.loop_num = 16,
		.pri_max = 8,
		.flow_id = 52,
		.drr_node_id = 112,
	},
	{
		.port = 5,
		.flow_level = false,
		.node_id = 288,
		.loop_num = 4,
		.pri_max = 4,
		.flow_id = 52,
		.drr_node_id = 112,
	},
	{
		.port = 6,
		.flow_level = true,
		.node_id = 56,
		.loop_num = 2,
		.pri_max = 0,
		.flow_id = 6,
		.drr_node_id = 28,
	},
	{
		.port = 6,
		.flow_level = false,
		.node_id = 224,
		.loop_num = 16,
		.pri_max = 8,
		.flow_id = 56,
		.drr_node_id = 128,
	},
	{
		.port = 6,
		.flow_level = false,
		.node_id = 292,
		.loop_num = 4,
		.pri_max = 4,
		.flow_id = 56,
		.drr_node_id = 128,
	},
	{
		.port = 7,
		.flow_level = true,
		.node_id = 60,
		.loop_num = 2,
		.pri_max = 0,
		.flow_id = 7,
		.drr_node_id = 32,
	},
	{
		.port = 7,
		.flow_level = false,
		.node_id = 240,
		.loop_num = 16,
		.pri_max = 8,
		.flow_id = 60,
		.drr_node_id = 144,
	},
	{
		.port = 7,
		.flow_level = false,
		.node_id = 296,
		.loop_num = 4,
		.pri_max = 4,
		.flow_id = 60,
		.drr_node_id = 144,
	},
};

/* The scheduler resource is assigned per PPE port. The resource
 * includes the unicast and multicast queues, flow nodes and DRR nodes.
 */
static const struct ppe_port_schedule_resource ppe_scheduler_res[] = {
	{	.ucastq_start = 0,
		.ucastq_end = 63,
		.mcastq_start = 256,
		.mcastq_end = 271,
		.flow_id_start = 0,
		.flow_id_end = 0,
		.l0node_start = 0,
		.l0node_end = 7,
		.l1node_start = 0,
		.l1node_end = 0,
	},
	{	.ucastq_start = 144,
		.ucastq_end = 159,
		.mcastq_start = 272,
		.mcastq_end = 275,
		.flow_id_start = 36,
		.flow_id_end = 39,
		.l0node_start = 48,
		.l0node_end = 63,
		.l1node_start = 8,
		.l1node_end = 11,
	},
	{	.ucastq_start = 160,
		.ucastq_end = 175,
		.mcastq_start = 276,
		.mcastq_end = 279,
		.flow_id_start = 40,
		.flow_id_end = 43,
		.l0node_start = 64,
		.l0node_end = 79,
		.l1node_start = 12,
		.l1node_end = 15,
	},
	{	.ucastq_start = 176,
		.ucastq_end = 191,
		.mcastq_start = 280,
		.mcastq_end = 283,
		.flow_id_start = 44,
		.flow_id_end = 47,
		.l0node_start = 80,
		.l0node_end = 95,
		.l1node_start = 16,
		.l1node_end = 19,
	},
	{	.ucastq_start = 192,
		.ucastq_end = 207,
		.mcastq_start = 284,
		.mcastq_end = 287,
		.flow_id_start = 48,
		.flow_id_end = 51,
		.l0node_start = 96,
		.l0node_end = 111,
		.l1node_start = 20,
		.l1node_end = 23,
	},
	{	.ucastq_start = 208,
		.ucastq_end = 223,
		.mcastq_start = 288,
		.mcastq_end = 291,
		.flow_id_start = 52,
		.flow_id_end = 55,
		.l0node_start = 112,
		.l0node_end = 127,
		.l1node_start = 24,
		.l1node_end = 27,
	},
	{	.ucastq_start = 224,
		.ucastq_end = 239,
		.mcastq_start = 292,
		.mcastq_end = 295,
		.flow_id_start = 56,
		.flow_id_end = 59,
		.l0node_start = 128,
		.l0node_end = 143,
		.l1node_start = 28,
		.l1node_end = 31,
	},
	{	.ucastq_start = 240,
		.ucastq_end = 255,
		.mcastq_start = 296,
		.mcastq_end = 299,
		.flow_id_start = 60,
		.flow_id_end = 63,
		.l0node_start = 144,
		.l0node_end = 159,
		.l1node_start = 32,
		.l1node_end = 35,
	},
	{	.ucastq_start = 64,
		.ucastq_end = 143,
		.mcastq_start = 0,
		.mcastq_end = 0,
		.flow_id_start = 1,
		.flow_id_end = 35,
		.l0node_start = 8,
		.l0node_end = 47,
		.l1node_start = 1,
		.l1node_end = 7,
	},
};

/* Set the PPE queue level scheduler configuration. */
static int ppe_scheduler_l0_queue_map_set(struct ppe_device *ppe_dev,
					  int node_id, int port,
					  struct ppe_scheduler_cfg scheduler_cfg)
{
	u32 val, reg;
	int ret;

	reg = PPE_L0_FLOW_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_MAP_TBL_INC;
	val = FIELD_PREP(PPE_L0_FLOW_MAP_TBL_FLOW_ID, scheduler_cfg.flow_id);
	val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_PRI, scheduler_cfg.pri);
	val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_PRI, scheduler_cfg.pri);
	val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_NODE_WT, scheduler_cfg.drr_node_wt);
	val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_NODE_WT, scheduler_cfg.drr_node_wt);

	ret = regmap_write(ppe_dev->regmap, reg, val);
	if (ret)
		return ret;

	reg = PPE_L0_C_FLOW_CFG_TBL_ADDR +
	      (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
	      PPE_L0_C_FLOW_CFG_TBL_INC;
	val = FIELD_PREP(PPE_L0_C_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
	val |= FIELD_PREP(PPE_L0_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);

	ret = regmap_write(ppe_dev->regmap, reg, val);
	if (ret)
		return ret;

	reg = PPE_L0_E_FLOW_CFG_TBL_ADDR +
	      (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
	      PPE_L0_E_FLOW_CFG_TBL_INC;
	val = FIELD_PREP(PPE_L0_E_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
	val |= FIELD_PREP(PPE_L0_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);

	ret = regmap_write(ppe_dev->regmap, reg, val);
	if (ret)
		return ret;

	reg = PPE_L0_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_PORT_MAP_TBL_INC;
	val = FIELD_PREP(PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM, port);

	ret = regmap_write(ppe_dev->regmap, reg, val);
	if (ret)
		return ret;

	reg = PPE_L0_COMP_CFG_TBL_ADDR + node_id * PPE_L0_COMP_CFG_TBL_INC;
	val = FIELD_PREP(PPE_L0_COMP_CFG_TBL_NODE_METER_LEN, scheduler_cfg.frame_mode);

	return regmap_update_bits(ppe_dev->regmap, reg,
				  PPE_L0_COMP_CFG_TBL_NODE_METER_LEN,
				  val);
}

/* Set the PPE flow level scheduler configuration. */
static int ppe_scheduler_l1_queue_map_set(struct ppe_device *ppe_dev,
					  int node_id, int port,
					  struct ppe_scheduler_cfg scheduler_cfg)
{
	u32 val, reg;
	int ret;

	val = FIELD_PREP(PPE_L1_FLOW_MAP_TBL_FLOW_ID, scheduler_cfg.flow_id);
	val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_PRI, scheduler_cfg.pri);
	val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_PRI, scheduler_cfg.pri);
	val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_NODE_WT, scheduler_cfg.drr_node_wt);
	val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_NODE_WT, scheduler_cfg.drr_node_wt);
	reg = PPE_L1_FLOW_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_MAP_TBL_INC;

	ret = regmap_write(ppe_dev->regmap, reg, val);
	if (ret)
		return ret;

	val = FIELD_PREP(PPE_L1_C_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
	val |= FIELD_PREP(PPE_L1_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
	reg = PPE_L1_C_FLOW_CFG_TBL_ADDR +
	      (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
	      PPE_L1_C_FLOW_CFG_TBL_INC;

	ret = regmap_write(ppe_dev->regmap, reg, val);
	if (ret)
		return ret;

	val = FIELD_PREP(PPE_L1_E_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
	val |= FIELD_PREP(PPE_L1_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
	reg = PPE_L1_E_FLOW_CFG_TBL_ADDR +
	      (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
	      PPE_L1_E_FLOW_CFG_TBL_INC;

	ret = regmap_write(ppe_dev->regmap, reg, val);
	if (ret)
		return ret;

	val = FIELD_PREP(PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM, port);
	reg = PPE_L1_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_PORT_MAP_TBL_INC;

	ret = regmap_write(ppe_dev->regmap, reg, val);
	if (ret)
		return ret;

	reg = PPE_L1_COMP_CFG_TBL_ADDR + node_id * PPE_L1_COMP_CFG_TBL_INC;
	val = FIELD_PREP(PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, scheduler_cfg.frame_mode);

	return regmap_update_bits(ppe_dev->regmap, reg, PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, val);
}

/**
 * ppe_queue_scheduler_set - Configure scheduler for PPE hardware queue
 * @ppe_dev: PPE device
 * @node_id: PPE queue ID or flow ID
 * @flow_level: Flow level scheduler or queue level scheduler
 * @port: PPE port ID on which the scheduler is configured
 * @scheduler_cfg: PPE scheduler configuration
 *
 * PPE scheduler configuration supports queue level and flow level on
 * the PPE egress port.
 *
 * Return: 0 on success, negative error code on failure.
 */
int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
			    int node_id, bool flow_level, int port,
			    struct ppe_scheduler_cfg scheduler_cfg)
{
	if (flow_level)
		return ppe_scheduler_l1_queue_map_set(ppe_dev, node_id,
						      port, scheduler_cfg);

	return ppe_scheduler_l0_queue_map_set(ppe_dev, node_id,
					      port, scheduler_cfg);
}
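
/* A minimal usage sketch (illustrative only, values borrowed from the port 1
 * entries of ppe_port_sch_config above): configure queue level (L0) node 144
 * of port 1 with priority 0, a DRR weight of 1 and packet based credit
 * units. struct ppe_scheduler_cfg comes from the included PPE headers; the
 * field values here are assumptions for the example, not a recommended
 * board setting.
 *
 *	struct ppe_scheduler_cfg cfg = {
 *		.flow_id = 36,
 *		.pri = 0,
 *		.drr_node_id = 48,
 *		.drr_node_wt = 1,
 *		.unit_is_packet = true,
 *		.frame_mode = 0,
 *	};
 *
 *	ret = ppe_queue_scheduler_set(ppe_dev, 144, false, 1, cfg);
 */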

/**
 * ppe_queue_ucast_base_set - Set PPE unicast queue base ID and profile ID
 * @ppe_dev: PPE device
 * @queue_dst: PPE queue destination configuration
 * @queue_base: PPE queue base ID
 * @profile_id: Profile ID
 *
 * The PPE unicast queue base ID and profile ID are configured based on the
 * destination information, which can be the service code, the CPU code or
 * the destination port.
 *
 * Return: 0 on success, negative error code on failure.
 */
int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
			     struct ppe_queue_ucast_dest queue_dst,
			     int queue_base, int profile_id)
{
	int index, profile_size;
	u32 val, reg;

	profile_size = queue_dst.src_profile << 8;
	if (queue_dst.service_code_en)
		index = PPE_QUEUE_BASE_SERVICE_CODE + profile_size +
			queue_dst.service_code;
	else if (queue_dst.cpu_code_en)
		index = PPE_QUEUE_BASE_CPU_CODE + profile_size +
			queue_dst.cpu_code;
	else
		index = profile_size + queue_dst.dest_port;

	val = FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID, profile_id);
	val |= FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID, queue_base);
	reg = PPE_UCAST_QUEUE_MAP_TBL_ADDR + index * PPE_UCAST_QUEUE_MAP_TBL_INC;

	return regmap_write(ppe_dev->regmap, reg, val);
}
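
/* Worked example of the table index computed above (values illustrative):
 * each source profile owns a block of 256 entries (src_profile << 8). With
 * src_profile = 1, destination port 3 and neither the service code nor the
 * CPU code lookup enabled, the entry written is 256 + 3 = 259. When the
 * service code lookup is enabled, the index is additionally offset by
 * PPE_QUEUE_BASE_SERVICE_CODE; for the CPU code lookup, by
 * PPE_QUEUE_BASE_CPU_CODE.
 */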

/**
 * ppe_queue_ucast_offset_pri_set - Set PPE unicast queue offset based on priority
 * @ppe_dev: PPE device
 * @profile_id: Profile ID
 * @priority: PPE internal priority to be used to set queue offset
 * @queue_offset: Queue offset used for calculating the destination queue ID
 *
 * The PPE unicast queue offset is configured based on the PPE
 * internal priority.
 *
 * Return: 0 on success, negative error code on failure.
 */
int ppe_queue_ucast_offset_pri_set(struct ppe_device *ppe_dev,
				   int profile_id,
				   int priority,
				   int queue_offset)
{
	u32 val, reg;
	int index;

	index = (profile_id << 4) + priority;
	val = FIELD_PREP(PPE_UCAST_PRIORITY_MAP_TBL_CLASS, queue_offset);
	reg = PPE_UCAST_PRIORITY_MAP_TBL_ADDR + index * PPE_UCAST_PRIORITY_MAP_TBL_INC;

	return regmap_write(ppe_dev->regmap, reg, val);
}

/**
 * ppe_queue_ucast_offset_hash_set - Set PPE unicast queue offset based on hash
 * @ppe_dev: PPE device
 * @profile_id: Profile ID
 * @rss_hash: Packet hash value to be used to set queue offset
 * @queue_offset: Queue offset used for calculating the destination queue ID
 *
 * The PPE unicast queue offset is configured based on the RSS hash value.
 *
 * Return: 0 on success, negative error code on failure.
 */
int ppe_queue_ucast_offset_hash_set(struct ppe_device *ppe_dev,
				    int profile_id,
				    int rss_hash,
				    int queue_offset)
{
	u32 val, reg;
	int index;

	index = (profile_id << 8) + rss_hash;
	val = FIELD_PREP(PPE_UCAST_HASH_MAP_TBL_HASH, queue_offset);
	reg = PPE_UCAST_HASH_MAP_TBL_ADDR + index * PPE_UCAST_HASH_MAP_TBL_INC;

	return regmap_write(ppe_dev->regmap, reg, val);
}
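
/* Note on the two offset tables above: the priority map holds 16 offsets per
 * profile (index = profile_id * 16 + priority) and the hash map holds 256
 * offsets per profile (index = profile_id * 256 + rss_hash). Per the
 * kernel-doc of these helpers, the selected offset is used together with the
 * queue base programmed by ppe_queue_ucast_base_set() when the hardware
 * derives the final destination queue ID.
 */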

/**
 * ppe_port_resource_get - Get PPE resource per port
 * @ppe_dev: PPE device
 * @port: PPE port
 * @type: Resource type
 * @res_start: Resource start ID returned
 * @res_end: Resource end ID returned
 *
 * PPE resource is assigned per PPE port, which is acquired for QoS scheduler.
 *
 * Return: 0 on success, negative error code on failure.
 */
int ppe_port_resource_get(struct ppe_device *ppe_dev, int port,
			  enum ppe_resource_type type,
			  int *res_start, int *res_end)
{
	struct ppe_port_schedule_resource res;

	/* The reserved resource with the maximum port ID of PPE is
	 * also allowed to be acquired.
	 */
	if (port > ppe_dev->num_ports)
		return -EINVAL;

	res = ppe_scheduler_res[port];
	switch (type) {
	case PPE_RES_UCAST:
		*res_start = res.ucastq_start;
		*res_end = res.ucastq_end;
		break;
	case PPE_RES_MCAST:
		*res_start = res.mcastq_start;
		*res_end = res.mcastq_end;
		break;
	case PPE_RES_FLOW_ID:
		*res_start = res.flow_id_start;
		*res_end = res.flow_id_end;
		break;
	case PPE_RES_L0_NODE:
		*res_start = res.l0node_start;
		*res_end = res.l0node_end;
		break;
	case PPE_RES_L1_NODE:
		*res_start = res.l1node_start;
		*res_end = res.l1node_end;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
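
/* A minimal usage sketch (hypothetical caller): fetch the unicast queue
 * range assigned to PPE port 1 before allocating queues for it.
 *
 *	int q_start, q_end, ret;
 *
 *	ret = ppe_port_resource_get(ppe_dev, 1, PPE_RES_UCAST,
 *				    &q_start, &q_end);
 *	if (ret)
 *		return ret;
 *
 * With the ppe_scheduler_res table above, this returns 144..159 for port 1.
 */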

/**
 * ppe_sc_config_set - Set PPE service code configuration
 * @ppe_dev: PPE device
 * @sc: Service ID, 0-255 supported by PPE
 * @cfg: Service code configuration
 *
 * PPE service code is used by the PPE during its packet processing stages,
 * to perform or bypass certain selected packet operations on the packet.
 *
 * Return: 0 on success, negative error code on failure.
 */
int ppe_sc_config_set(struct ppe_device *ppe_dev, int sc, struct ppe_sc_cfg cfg)
{
	u32 val, reg, servcode_val[2] = {};
	unsigned long bitmap_value;
	int ret;

	val = FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_PORT_ID_VALID, cfg.dest_port_valid);
	val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_PORT_ID, cfg.dest_port);
	val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_DIRECTION, cfg.is_src);

	bitmap_value = bitmap_read(cfg.bitmaps.egress, 0, PPE_SC_BYPASS_EGRESS_SIZE);
	val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_BYPASS_BITMAP, bitmap_value);
	val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_RX_CNT_EN,
			  test_bit(PPE_SC_BYPASS_COUNTER_RX, cfg.bitmaps.counter));
	val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_TX_CNT_EN,
			  test_bit(PPE_SC_BYPASS_COUNTER_TX, cfg.bitmaps.counter));
	reg = PPE_IN_L2_SERVICE_TBL_ADDR + PPE_IN_L2_SERVICE_TBL_INC * sc;

	ret = regmap_write(ppe_dev->regmap, reg, val);
	if (ret)
		return ret;

	bitmap_value = bitmap_read(cfg.bitmaps.ingress, 0, PPE_SC_BYPASS_INGRESS_SIZE);
	PPE_SERVICE_SET_BYPASS_BITMAP(servcode_val, bitmap_value);
	PPE_SERVICE_SET_RX_CNT_EN(servcode_val,
				  test_bit(PPE_SC_BYPASS_COUNTER_RX_VLAN, cfg.bitmaps.counter));
	reg = PPE_SERVICE_TBL_ADDR + PPE_SERVICE_TBL_INC * sc;

	ret = regmap_bulk_write(ppe_dev->regmap, reg,
				servcode_val, ARRAY_SIZE(servcode_val));
	if (ret)
		return ret;

	reg = PPE_EG_SERVICE_TBL_ADDR + PPE_EG_SERVICE_TBL_INC * sc;
	ret = regmap_bulk_read(ppe_dev->regmap, reg,
			       servcode_val, ARRAY_SIZE(servcode_val));
	if (ret)
		return ret;

	PPE_EG_SERVICE_SET_NEXT_SERVCODE(servcode_val, cfg.next_service_code);
	PPE_EG_SERVICE_SET_UPDATE_ACTION(servcode_val, cfg.eip_field_update_bitmap);
	PPE_EG_SERVICE_SET_HW_SERVICE(servcode_val, cfg.eip_hw_service);
	PPE_EG_SERVICE_SET_OFFSET_SEL(servcode_val, cfg.eip_offset_sel);
	PPE_EG_SERVICE_SET_TX_CNT_EN(servcode_val,
				     test_bit(PPE_SC_BYPASS_COUNTER_TX_VLAN, cfg.bitmaps.counter));

	ret = regmap_bulk_write(ppe_dev->regmap, reg,
				servcode_val, ARRAY_SIZE(servcode_val));
	if (ret)
		return ret;

	bitmap_value = bitmap_read(cfg.bitmaps.tunnel, 0, PPE_SC_BYPASS_TUNNEL_SIZE);
	val = FIELD_PREP(PPE_TL_SERVICE_TBL_BYPASS_BITMAP, bitmap_value);
	reg = PPE_TL_SERVICE_TBL_ADDR + PPE_TL_SERVICE_TBL_INC * sc;

	return regmap_write(ppe_dev->regmap, reg, val);
}
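
/* A minimal usage sketch (hypothetical values): bind service code 1 to
 * destination port 0 and enable its RX/TX counters. The bitmap members of
 * struct ppe_sc_cfg are assumed to be bitmaps sized by the
 * PPE_SC_BYPASS_*_SIZE constants, as used by ppe_sc_config_set() above.
 *
 *	struct ppe_sc_cfg cfg = {
 *		.dest_port_valid = true,
 *		.dest_port = 0,
 *	};
 *
 *	set_bit(PPE_SC_BYPASS_COUNTER_RX, cfg.bitmaps.counter);
 *	set_bit(PPE_SC_BYPASS_COUNTER_TX, cfg.bitmaps.counter);
 *	ret = ppe_sc_config_set(ppe_dev, 1, cfg);
 */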

/**
 * ppe_counter_enable_set - Set PPE port counter enabled
 * @ppe_dev: PPE device
 * @port: PPE port ID
 *
 * Enable PPE counters on the given port for the unicast packet, multicast
 * packet and VLAN packet received and transmitted by PPE.
 *
 * Return: 0 on success, negative error code on failure.
 */
int ppe_counter_enable_set(struct ppe_device *ppe_dev, int port)
{
	u32 reg, mru_mtu_val[3];
	int ret;

	reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * port;
	ret = regmap_bulk_read(ppe_dev->regmap, reg,
			       mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
	if (ret)
		return ret;

	PPE_MRU_MTU_CTRL_SET_RX_CNT_EN(mru_mtu_val, true);
	PPE_MRU_MTU_CTRL_SET_TX_CNT_EN(mru_mtu_val, true);
	ret = regmap_bulk_write(ppe_dev->regmap, reg,
				mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
	if (ret)
		return ret;

	reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * port;
	ret = regmap_set_bits(ppe_dev->regmap, reg, PPE_MC_MTU_CTRL_TBL_TX_CNT_EN);
	if (ret)
		return ret;

	reg = PPE_PORT_EG_VLAN_TBL_ADDR + PPE_PORT_EG_VLAN_TBL_INC * port;

	return regmap_set_bits(ppe_dev->regmap, reg, PPE_PORT_EG_VLAN_TBL_TX_COUNTING_EN);
}

static int ppe_rss_hash_ipv4_config(struct ppe_device *ppe_dev, int index,
				    struct ppe_rss_hash_cfg cfg)
{
	u32 reg, val;

	switch (index) {
	case 0:
		val = cfg.hash_sip_mix[0];
		break;
	case 1:
		val = cfg.hash_dip_mix[0];
		break;
	case 2:
		val = cfg.hash_protocol_mix;
		break;
	case 3:
		val = cfg.hash_dport_mix;
		break;
	case 4:
		val = cfg.hash_sport_mix;
		break;
	default:
		return -EINVAL;
	}

	reg = PPE_RSS_HASH_MIX_IPV4_ADDR + index * PPE_RSS_HASH_MIX_IPV4_INC;

	return regmap_update_bits(ppe_dev->regmap, reg,
				  PPE_RSS_HASH_MIX_IPV4_VAL,
				  FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, val));
}
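
/* The IPv6 hash mix table mirrors the IPv4 one above but takes 11 entries:
 * entries 0-3 mix the four 32-bit words of the source IP, entries 4-7 the
 * four words of the destination IP, and entries 8-10 the protocol,
 * destination port and source port, as selected by the switch below.
 */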
7: 12611c46c3c0SLuo Jie val = cfg.hash_dip_mix[index - 4]; 12621c46c3c0SLuo Jie break; 12631c46c3c0SLuo Jie case 8: 12641c46c3c0SLuo Jie val = cfg.hash_protocol_mix; 12651c46c3c0SLuo Jie break; 12661c46c3c0SLuo Jie case 9: 12671c46c3c0SLuo Jie val = cfg.hash_dport_mix; 12681c46c3c0SLuo Jie break; 12691c46c3c0SLuo Jie case 10: 12701c46c3c0SLuo Jie val = cfg.hash_sport_mix; 12711c46c3c0SLuo Jie break; 12721c46c3c0SLuo Jie default: 12731c46c3c0SLuo Jie return -EINVAL; 12741c46c3c0SLuo Jie } 12751c46c3c0SLuo Jie 12761c46c3c0SLuo Jie reg = PPE_RSS_HASH_MIX_ADDR + index * PPE_RSS_HASH_MIX_INC; 12771c46c3c0SLuo Jie 12781c46c3c0SLuo Jie return regmap_update_bits(ppe_dev->regmap, reg, 12791c46c3c0SLuo Jie PPE_RSS_HASH_MIX_VAL, 12801c46c3c0SLuo Jie FIELD_PREP(PPE_RSS_HASH_MIX_VAL, val)); 12811c46c3c0SLuo Jie } 12821c46c3c0SLuo Jie 12831c46c3c0SLuo Jie /** 12841c46c3c0SLuo Jie * ppe_rss_hash_config_set - Configure the PPE hash settings for the packet received. 12851c46c3c0SLuo Jie * @ppe_dev: PPE device. 12861c46c3c0SLuo Jie * @mode: Configure RSS hash for the packet type IPv4 and IPv6. 12871c46c3c0SLuo Jie * @cfg: RSS hash configuration. 12881c46c3c0SLuo Jie * 12891c46c3c0SLuo Jie * PPE RSS hash settings are configured for the packet type IPv4 and IPv6. 12901c46c3c0SLuo Jie * 12911c46c3c0SLuo Jie * Return: 0 on success, negative error code on failure. 12921c46c3c0SLuo Jie */ 12931c46c3c0SLuo Jie int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode, 12941c46c3c0SLuo Jie struct ppe_rss_hash_cfg cfg) 12951c46c3c0SLuo Jie { 12961c46c3c0SLuo Jie u32 val, reg; 12971c46c3c0SLuo Jie int i, ret; 12981c46c3c0SLuo Jie 12991c46c3c0SLuo Jie if (mode & PPE_RSS_HASH_MODE_IPV4) { 13001c46c3c0SLuo Jie val = FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_HASH_MASK, cfg.hash_mask); 13011c46c3c0SLuo Jie val |= FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_FRAGMENT, cfg.hash_fragment_mode); 13021c46c3c0SLuo Jie ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_MASK_IPV4_ADDR, val); 13031c46c3c0SLuo Jie if (ret) 13041c46c3c0SLuo Jie return ret; 13051c46c3c0SLuo Jie 13061c46c3c0SLuo Jie val = FIELD_PREP(PPE_RSS_HASH_SEED_IPV4_VAL, cfg.hash_seed); 13071c46c3c0SLuo Jie ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_SEED_IPV4_ADDR, val); 13081c46c3c0SLuo Jie if (ret) 13091c46c3c0SLuo Jie return ret; 13101c46c3c0SLuo Jie 13111c46c3c0SLuo Jie for (i = 0; i < PPE_RSS_HASH_MIX_IPV4_ENTRIES; i++) { 13121c46c3c0SLuo Jie ret = ppe_rss_hash_ipv4_config(ppe_dev, i, cfg); 13131c46c3c0SLuo Jie if (ret) 13141c46c3c0SLuo Jie return ret; 13151c46c3c0SLuo Jie } 13161c46c3c0SLuo Jie 13171c46c3c0SLuo Jie for (i = 0; i < PPE_RSS_HASH_FIN_IPV4_ENTRIES; i++) { 13181c46c3c0SLuo Jie val = FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_INNER, cfg.hash_fin_inner[i]); 13191c46c3c0SLuo Jie val |= FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_OUTER, cfg.hash_fin_outer[i]); 13201c46c3c0SLuo Jie reg = PPE_RSS_HASH_FIN_IPV4_ADDR + i * PPE_RSS_HASH_FIN_IPV4_INC; 13211c46c3c0SLuo Jie 13221c46c3c0SLuo Jie ret = regmap_write(ppe_dev->regmap, reg, val); 13231c46c3c0SLuo Jie if (ret) 13241c46c3c0SLuo Jie return ret; 13251c46c3c0SLuo Jie } 13261c46c3c0SLuo Jie } 13271c46c3c0SLuo Jie 13281c46c3c0SLuo Jie if (mode & PPE_RSS_HASH_MODE_IPV6) { 13291c46c3c0SLuo Jie val = FIELD_PREP(PPE_RSS_HASH_MASK_HASH_MASK, cfg.hash_mask); 13301c46c3c0SLuo Jie val |= FIELD_PREP(PPE_RSS_HASH_MASK_FRAGMENT, cfg.hash_fragment_mode); 13311c46c3c0SLuo Jie ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_MASK_ADDR, val); 13321c46c3c0SLuo Jie if (ret) 13331c46c3c0SLuo Jie return ret; 13341c46c3c0SLuo Jie 13351c46c3c0SLuo Jie 
val = FIELD_PREP(PPE_RSS_HASH_SEED_VAL, cfg.hash_seed); 13361c46c3c0SLuo Jie ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_SEED_ADDR, val); 13371c46c3c0SLuo Jie if (ret) 13381c46c3c0SLuo Jie return ret; 13391c46c3c0SLuo Jie 13401c46c3c0SLuo Jie for (i = 0; i < PPE_RSS_HASH_MIX_ENTRIES; i++) { 13411c46c3c0SLuo Jie ret = ppe_rss_hash_ipv6_config(ppe_dev, i, cfg); 13421c46c3c0SLuo Jie if (ret) 13431c46c3c0SLuo Jie return ret; 13441c46c3c0SLuo Jie } 13451c46c3c0SLuo Jie 13461c46c3c0SLuo Jie for (i = 0; i < PPE_RSS_HASH_FIN_ENTRIES; i++) { 13471c46c3c0SLuo Jie val = FIELD_PREP(PPE_RSS_HASH_FIN_INNER, cfg.hash_fin_inner[i]); 13481c46c3c0SLuo Jie val |= FIELD_PREP(PPE_RSS_HASH_FIN_OUTER, cfg.hash_fin_outer[i]); 13491c46c3c0SLuo Jie reg = PPE_RSS_HASH_FIN_ADDR + i * PPE_RSS_HASH_FIN_INC; 13501c46c3c0SLuo Jie 13511c46c3c0SLuo Jie ret = regmap_write(ppe_dev->regmap, reg, val); 13521c46c3c0SLuo Jie if (ret) 13531c46c3c0SLuo Jie return ret; 13541c46c3c0SLuo Jie } 13551c46c3c0SLuo Jie } 13561c46c3c0SLuo Jie 13571c46c3c0SLuo Jie return 0; 13581c46c3c0SLuo Jie } 13591c46c3c0SLuo Jie 1360fa99608aSLuo Jie /** 1361fa99608aSLuo Jie * ppe_ring_queue_map_set - Set the PPE queue to Ethernet DMA ring mapping 1362fa99608aSLuo Jie * @ppe_dev: PPE device 1363fa99608aSLuo Jie * @ring_id: Ethernet DMA ring ID 1364fa99608aSLuo Jie * @queue_map: Bit map of queue IDs to given Ethernet DMA ring 1365fa99608aSLuo Jie * 1366fa99608aSLuo Jie * Configure the mapping from a set of PPE queues to a given Ethernet DMA ring. 1367fa99608aSLuo Jie * 1368fa99608aSLuo Jie * Return: 0 on success, negative error code on failure. 1369fa99608aSLuo Jie */ 1370fa99608aSLuo Jie int ppe_ring_queue_map_set(struct ppe_device *ppe_dev, int ring_id, u32 *queue_map) 1371fa99608aSLuo Jie { 1372fa99608aSLuo Jie u32 reg, queue_bitmap_val[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT]; 1373fa99608aSLuo Jie 1374fa99608aSLuo Jie memcpy(queue_bitmap_val, queue_map, sizeof(queue_bitmap_val)); 1375fa99608aSLuo Jie reg = PPE_RING_Q_MAP_TBL_ADDR + PPE_RING_Q_MAP_TBL_INC * ring_id; 1376fa99608aSLuo Jie 1377fa99608aSLuo Jie return regmap_bulk_write(ppe_dev->regmap, reg, 1378fa99608aSLuo Jie queue_bitmap_val, 1379fa99608aSLuo Jie ARRAY_SIZE(queue_bitmap_val)); 1380fa99608aSLuo Jie } 1381fa99608aSLuo Jie 13828a971df9SLuo Jie static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id, 13838a971df9SLuo Jie const struct ppe_bm_port_config port_cfg) 13848a971df9SLuo Jie { 13858a971df9SLuo Jie u32 reg, val, bm_fc_val[2]; 13868a971df9SLuo Jie int ret; 13878a971df9SLuo Jie 13888a971df9SLuo Jie reg = PPE_BM_PORT_FC_CFG_TBL_ADDR + PPE_BM_PORT_FC_CFG_TBL_INC * bm_port_id; 13898a971df9SLuo Jie ret = regmap_bulk_read(ppe_dev->regmap, reg, 13908a971df9SLuo Jie bm_fc_val, ARRAY_SIZE(bm_fc_val)); 13918a971df9SLuo Jie if (ret) 13928a971df9SLuo Jie return ret; 13938a971df9SLuo Jie 13948a971df9SLuo Jie /* Configure BM flow control related threshold. */ 13958a971df9SLuo Jie PPE_BM_PORT_FC_SET_WEIGHT(bm_fc_val, port_cfg.weight); 13968a971df9SLuo Jie PPE_BM_PORT_FC_SET_RESUME_OFFSET(bm_fc_val, port_cfg.resume_offset); 13978a971df9SLuo Jie PPE_BM_PORT_FC_SET_RESUME_THRESHOLD(bm_fc_val, port_cfg.resume_ceil); 13988a971df9SLuo Jie PPE_BM_PORT_FC_SET_DYNAMIC(bm_fc_val, port_cfg.dynamic); 13998a971df9SLuo Jie PPE_BM_PORT_FC_SET_REACT_LIMIT(bm_fc_val, port_cfg.in_fly_buf); 14008a971df9SLuo Jie PPE_BM_PORT_FC_SET_PRE_ALLOC(bm_fc_val, port_cfg.pre_alloc); 14018a971df9SLuo Jie 14028a971df9SLuo Jie /* Configure low/high bits of the ceiling for the BM port. 
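 *
 * The ceiling is an 11-bit value split across two register fields: bits 2:0
 * are written to the CEILING_LOW field and bits 10:3 to the CEILING_HIGH
 * field. As an illustration with an arbitrary value, ceil = 250
 * (0b00011111010) gives CEILING_LOW = 2 and CEILING_HIGH = 31, and
 * (31 << 3) | 2 recovers the original 250.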
*/ 14038a971df9SLuo Jie val = FIELD_GET(GENMASK(2, 0), port_cfg.ceil); 14048a971df9SLuo Jie PPE_BM_PORT_FC_SET_CEILING_LOW(bm_fc_val, val); 14058a971df9SLuo Jie val = FIELD_GET(GENMASK(10, 3), port_cfg.ceil); 14068a971df9SLuo Jie PPE_BM_PORT_FC_SET_CEILING_HIGH(bm_fc_val, val); 14078a971df9SLuo Jie 14088a971df9SLuo Jie ret = regmap_bulk_write(ppe_dev->regmap, reg, 14098a971df9SLuo Jie bm_fc_val, ARRAY_SIZE(bm_fc_val)); 14108a971df9SLuo Jie if (ret) 14118a971df9SLuo Jie return ret; 14128a971df9SLuo Jie 14138a971df9SLuo Jie /* Assign the default group ID 0 to the BM port. */ 14148a971df9SLuo Jie val = FIELD_PREP(PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID, 0); 14158a971df9SLuo Jie reg = PPE_BM_PORT_GROUP_ID_ADDR + PPE_BM_PORT_GROUP_ID_INC * bm_port_id; 14168a971df9SLuo Jie ret = regmap_update_bits(ppe_dev->regmap, reg, 14178a971df9SLuo Jie PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID, 14188a971df9SLuo Jie val); 14198a971df9SLuo Jie if (ret) 14208a971df9SLuo Jie return ret; 14218a971df9SLuo Jie 14228a971df9SLuo Jie /* Enable BM port flow control. */ 14238a971df9SLuo Jie reg = PPE_BM_PORT_FC_MODE_ADDR + PPE_BM_PORT_FC_MODE_INC * bm_port_id; 14248a971df9SLuo Jie 14258a971df9SLuo Jie return regmap_set_bits(ppe_dev->regmap, reg, PPE_BM_PORT_FC_MODE_EN); 14268a971df9SLuo Jie } 14278a971df9SLuo Jie 14288a971df9SLuo Jie /* Configure the buffer threshold for the port flow control function. */ 14298a971df9SLuo Jie static int ppe_config_bm(struct ppe_device *ppe_dev) 14308a971df9SLuo Jie { 14318a971df9SLuo Jie const struct ppe_bm_port_config *port_cfg; 14328a971df9SLuo Jie unsigned int i, bm_port_id, port_cfg_cnt; 14338a971df9SLuo Jie u32 reg, val; 14348a971df9SLuo Jie int ret; 14358a971df9SLuo Jie 14368a971df9SLuo Jie /* Configure the allocated buffer number only for group 0. 14378a971df9SLuo Jie * The buffer number of group 1-3 is already cleared to 0 14388a971df9SLuo Jie * after PPE reset during the probe of PPE driver. 14398a971df9SLuo Jie */ 14408a971df9SLuo Jie reg = PPE_BM_SHARED_GROUP_CFG_ADDR; 14418a971df9SLuo Jie val = FIELD_PREP(PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT, 14428a971df9SLuo Jie ipq9574_ppe_bm_group_config); 14438a971df9SLuo Jie ret = regmap_update_bits(ppe_dev->regmap, reg, 14448a971df9SLuo Jie PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT, 14458a971df9SLuo Jie val); 14468a971df9SLuo Jie if (ret) 14478a971df9SLuo Jie goto bm_config_fail; 14488a971df9SLuo Jie 14498a971df9SLuo Jie /* Configure buffer thresholds for the BM ports. */ 14508a971df9SLuo Jie port_cfg = ipq9574_ppe_bm_port_config; 14518a971df9SLuo Jie port_cfg_cnt = ARRAY_SIZE(ipq9574_ppe_bm_port_config); 14528a971df9SLuo Jie for (i = 0; i < port_cfg_cnt; i++) { 14538a971df9SLuo Jie for (bm_port_id = port_cfg[i].port_id_start; 14548a971df9SLuo Jie bm_port_id <= port_cfg[i].port_id_end; bm_port_id++) { 14558a971df9SLuo Jie ret = ppe_config_bm_threshold(ppe_dev, bm_port_id, 14568a971df9SLuo Jie port_cfg[i]); 14578a971df9SLuo Jie if (ret) 14588a971df9SLuo Jie goto bm_config_fail; 14598a971df9SLuo Jie } 14608a971df9SLuo Jie } 14618a971df9SLuo Jie 14628a971df9SLuo Jie return 0; 14638a971df9SLuo Jie 14648a971df9SLuo Jie bm_config_fail: 14658a971df9SLuo Jie dev_err(ppe_dev->dev, "PPE BM config error %d\n", ret); 14668a971df9SLuo Jie return ret; 14678a971df9SLuo Jie } 14688a971df9SLuo Jie 1469806268dcSLuo Jie /* Configure PPE hardware queue depth, which is decided by the threshold 1470806268dcSLuo Jie * of queue. 
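 *
 * The helper below first programs the buffer limit for group 0 of the
 * admission control (AC) group table, then walks
 * ipq9574_ppe_qm_queue_config and, for every queue ID in each entry's
 * queue_start..queue_end range, programs either the unicast or the
 * multicast AC table before enabling enqueue and dequeue for that queue.
 * Multicast queues only take the static (green) group, pre-allocated
 * buffer, ceiling and resume values, while unicast queues additionally
 * carry the dynamic flag and the weight.
 *
 * Purely for illustration (not the actual table contents), a hypothetical
 * entry such as
 *	{ .queue_start = 0, .queue_end = 255, .prealloc_buf = 0,
 *	  .ceil = 400, .weight = 4, .resume_offset = 36, .dynamic = true }
 * would configure queues 0..255 with a dynamic threshold, a ceiling of
 * 400 buffers and a resume offset of 36.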
1471806268dcSLuo Jie */ 1472806268dcSLuo Jie static int ppe_config_qm(struct ppe_device *ppe_dev) 1473806268dcSLuo Jie { 1474806268dcSLuo Jie const struct ppe_qm_queue_config *queue_cfg; 1475806268dcSLuo Jie int ret, i, queue_id, queue_cfg_count; 1476806268dcSLuo Jie u32 reg, multicast_queue_cfg[5]; 1477806268dcSLuo Jie u32 unicast_queue_cfg[4]; 1478806268dcSLuo Jie u32 group_cfg[3]; 1479806268dcSLuo Jie 1480806268dcSLuo Jie /* Assign the buffer number to the group 0 by default. */ 1481806268dcSLuo Jie reg = PPE_AC_GRP_CFG_TBL_ADDR; 1482806268dcSLuo Jie ret = regmap_bulk_read(ppe_dev->regmap, reg, 1483806268dcSLuo Jie group_cfg, ARRAY_SIZE(group_cfg)); 1484806268dcSLuo Jie if (ret) 1485806268dcSLuo Jie goto qm_config_fail; 1486806268dcSLuo Jie 1487806268dcSLuo Jie PPE_AC_GRP_SET_BUF_LIMIT(group_cfg, ipq9574_ppe_qm_group_config); 1488806268dcSLuo Jie 1489806268dcSLuo Jie ret = regmap_bulk_write(ppe_dev->regmap, reg, 1490806268dcSLuo Jie group_cfg, ARRAY_SIZE(group_cfg)); 1491806268dcSLuo Jie if (ret) 1492806268dcSLuo Jie goto qm_config_fail; 1493806268dcSLuo Jie 1494806268dcSLuo Jie queue_cfg = ipq9574_ppe_qm_queue_config; 1495806268dcSLuo Jie queue_cfg_count = ARRAY_SIZE(ipq9574_ppe_qm_queue_config); 1496806268dcSLuo Jie for (i = 0; i < queue_cfg_count; i++) { 1497806268dcSLuo Jie queue_id = queue_cfg[i].queue_start; 1498806268dcSLuo Jie 1499806268dcSLuo Jie /* Configure threshold for dropping packets separately for 1500806268dcSLuo Jie * unicast and multicast PPE queues. 1501806268dcSLuo Jie */ 1502806268dcSLuo Jie while (queue_id <= queue_cfg[i].queue_end) { 1503806268dcSLuo Jie if (queue_id < PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES) { 1504806268dcSLuo Jie reg = PPE_AC_UNICAST_QUEUE_CFG_TBL_ADDR + 1505806268dcSLuo Jie PPE_AC_UNICAST_QUEUE_CFG_TBL_INC * queue_id; 1506806268dcSLuo Jie 1507806268dcSLuo Jie ret = regmap_bulk_read(ppe_dev->regmap, reg, 1508806268dcSLuo Jie unicast_queue_cfg, 1509806268dcSLuo Jie ARRAY_SIZE(unicast_queue_cfg)); 1510806268dcSLuo Jie if (ret) 1511806268dcSLuo Jie goto qm_config_fail; 1512806268dcSLuo Jie 1513806268dcSLuo Jie PPE_AC_UNICAST_QUEUE_SET_EN(unicast_queue_cfg, true); 1514806268dcSLuo Jie PPE_AC_UNICAST_QUEUE_SET_GRP_ID(unicast_queue_cfg, 0); 1515806268dcSLuo Jie PPE_AC_UNICAST_QUEUE_SET_PRE_LIMIT(unicast_queue_cfg, 1516806268dcSLuo Jie queue_cfg[i].prealloc_buf); 1517806268dcSLuo Jie PPE_AC_UNICAST_QUEUE_SET_DYNAMIC(unicast_queue_cfg, 1518806268dcSLuo Jie queue_cfg[i].dynamic); 1519806268dcSLuo Jie PPE_AC_UNICAST_QUEUE_SET_WEIGHT(unicast_queue_cfg, 1520806268dcSLuo Jie queue_cfg[i].weight); 1521806268dcSLuo Jie PPE_AC_UNICAST_QUEUE_SET_THRESHOLD(unicast_queue_cfg, 1522806268dcSLuo Jie queue_cfg[i].ceil); 1523806268dcSLuo Jie PPE_AC_UNICAST_QUEUE_SET_GRN_RESUME(unicast_queue_cfg, 1524806268dcSLuo Jie queue_cfg[i].resume_offset); 1525806268dcSLuo Jie 1526806268dcSLuo Jie ret = regmap_bulk_write(ppe_dev->regmap, reg, 1527806268dcSLuo Jie unicast_queue_cfg, 1528806268dcSLuo Jie ARRAY_SIZE(unicast_queue_cfg)); 1529806268dcSLuo Jie if (ret) 1530806268dcSLuo Jie goto qm_config_fail; 1531806268dcSLuo Jie } else { 1532806268dcSLuo Jie reg = PPE_AC_MULTICAST_QUEUE_CFG_TBL_ADDR + 1533806268dcSLuo Jie PPE_AC_MULTICAST_QUEUE_CFG_TBL_INC * queue_id; 1534806268dcSLuo Jie 1535806268dcSLuo Jie ret = regmap_bulk_read(ppe_dev->regmap, reg, 1536806268dcSLuo Jie multicast_queue_cfg, 1537806268dcSLuo Jie ARRAY_SIZE(multicast_queue_cfg)); 1538806268dcSLuo Jie if (ret) 1539806268dcSLuo Jie goto qm_config_fail; 1540806268dcSLuo Jie 1541806268dcSLuo Jie 
PPE_AC_MULTICAST_QUEUE_SET_EN(multicast_queue_cfg, true); 1542806268dcSLuo Jie PPE_AC_MULTICAST_QUEUE_SET_GRN_GRP_ID(multicast_queue_cfg, 0); 1543806268dcSLuo Jie PPE_AC_MULTICAST_QUEUE_SET_GRN_PRE_LIMIT(multicast_queue_cfg, 1544806268dcSLuo Jie queue_cfg[i].prealloc_buf); 1545806268dcSLuo Jie PPE_AC_MULTICAST_QUEUE_SET_GRN_THRESHOLD(multicast_queue_cfg, 1546806268dcSLuo Jie queue_cfg[i].ceil); 1547806268dcSLuo Jie PPE_AC_MULTICAST_QUEUE_SET_GRN_RESUME(multicast_queue_cfg, 1548806268dcSLuo Jie queue_cfg[i].resume_offset); 1549806268dcSLuo Jie 1550806268dcSLuo Jie ret = regmap_bulk_write(ppe_dev->regmap, reg, 1551806268dcSLuo Jie multicast_queue_cfg, 1552806268dcSLuo Jie ARRAY_SIZE(multicast_queue_cfg)); 1553806268dcSLuo Jie if (ret) 1554806268dcSLuo Jie goto qm_config_fail; 1555806268dcSLuo Jie } 1556806268dcSLuo Jie 1557806268dcSLuo Jie /* Enable enqueue. */ 1558806268dcSLuo Jie reg = PPE_ENQ_OPR_TBL_ADDR + PPE_ENQ_OPR_TBL_INC * queue_id; 1559806268dcSLuo Jie ret = regmap_clear_bits(ppe_dev->regmap, reg, 1560806268dcSLuo Jie PPE_ENQ_OPR_TBL_ENQ_DISABLE); 1561806268dcSLuo Jie if (ret) 1562806268dcSLuo Jie goto qm_config_fail; 1563806268dcSLuo Jie 1564806268dcSLuo Jie /* Enable dequeue. */ 1565806268dcSLuo Jie reg = PPE_DEQ_OPR_TBL_ADDR + PPE_DEQ_OPR_TBL_INC * queue_id; 1566806268dcSLuo Jie ret = regmap_clear_bits(ppe_dev->regmap, reg, 1567806268dcSLuo Jie PPE_DEQ_OPR_TBL_DEQ_DISABLE); 1568806268dcSLuo Jie if (ret) 1569806268dcSLuo Jie goto qm_config_fail; 1570806268dcSLuo Jie 1571806268dcSLuo Jie queue_id++; 1572806268dcSLuo Jie } 1573806268dcSLuo Jie } 1574806268dcSLuo Jie 1575806268dcSLuo Jie /* Enable queue counter for all PPE hardware queues. */ 1576806268dcSLuo Jie ret = regmap_set_bits(ppe_dev->regmap, PPE_EG_BRIDGE_CONFIG_ADDR, 1577806268dcSLuo Jie PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN); 1578806268dcSLuo Jie if (ret) 1579806268dcSLuo Jie goto qm_config_fail; 1580806268dcSLuo Jie 1581806268dcSLuo Jie return 0; 1582806268dcSLuo Jie 1583806268dcSLuo Jie qm_config_fail: 1584806268dcSLuo Jie dev_err(ppe_dev->dev, "PPE QM config error %d\n", ret); 1585806268dcSLuo Jie return ret; 1586806268dcSLuo Jie } 1587806268dcSLuo Jie 158833122798SLuo Jie static int ppe_node_scheduler_config(struct ppe_device *ppe_dev, 158933122798SLuo Jie const struct ppe_scheduler_port_config config) 159033122798SLuo Jie { 159133122798SLuo Jie struct ppe_scheduler_cfg sch_cfg; 159233122798SLuo Jie int ret, i; 159333122798SLuo Jie 159433122798SLuo Jie for (i = 0; i < config.loop_num; i++) { 159533122798SLuo Jie if (!config.pri_max) { 159633122798SLuo Jie /* Round robin scheduler without priority. */ 159733122798SLuo Jie sch_cfg.flow_id = config.flow_id; 159833122798SLuo Jie sch_cfg.pri = 0; 159933122798SLuo Jie sch_cfg.drr_node_id = config.drr_node_id; 160033122798SLuo Jie } else { 160133122798SLuo Jie sch_cfg.flow_id = config.flow_id + (i / config.pri_max); 160233122798SLuo Jie sch_cfg.pri = i % config.pri_max; 160333122798SLuo Jie sch_cfg.drr_node_id = config.drr_node_id + i; 160433122798SLuo Jie } 160533122798SLuo Jie 160633122798SLuo Jie /* Scheduler weight, must be more than 0. */ 160733122798SLuo Jie sch_cfg.drr_node_wt = 1; 160833122798SLuo Jie /* Byte based to be scheduled. */ 160933122798SLuo Jie sch_cfg.unit_is_packet = false; 161033122798SLuo Jie /* Frame + CRC calculated. 
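 *
 * In other words, every node programmed by this loop currently uses a DRR
 * weight of 1, byte based accounting and the frame plus CRC length mode;
 * only flow_id, pri and drr_node_id vary, either kept fixed (pure round
 * robin when pri_max is 0) or derived from the loop index for the
 * priority-based nodes.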
*/ 161133122798SLuo Jie sch_cfg.frame_mode = PPE_SCH_WITH_FRAME_CRC; 161233122798SLuo Jie 161333122798SLuo Jie ret = ppe_queue_scheduler_set(ppe_dev, config.node_id + i, 161433122798SLuo Jie config.flow_level, 161533122798SLuo Jie config.port, 161633122798SLuo Jie sch_cfg); 161733122798SLuo Jie if (ret) 161833122798SLuo Jie return ret; 161933122798SLuo Jie } 162033122798SLuo Jie 162133122798SLuo Jie return 0; 162233122798SLuo Jie } 162333122798SLuo Jie 162433122798SLuo Jie /* Initialize scheduler settings for PPE buffer utilization and dispatching 162533122798SLuo Jie * packets on PPE queues. 162633122798SLuo Jie */ 162733122798SLuo Jie static int ppe_config_scheduler(struct ppe_device *ppe_dev) 162833122798SLuo Jie { 162933122798SLuo Jie const struct ppe_scheduler_port_config *port_cfg; 163033122798SLuo Jie const struct ppe_scheduler_qm_config *qm_cfg; 163133122798SLuo Jie const struct ppe_scheduler_bm_config *bm_cfg; 163233122798SLuo Jie int ret, i, count; 163333122798SLuo Jie u32 val, reg; 163433122798SLuo Jie 163533122798SLuo Jie count = ARRAY_SIZE(ipq9574_ppe_sch_bm_config); 163633122798SLuo Jie bm_cfg = ipq9574_ppe_sch_bm_config; 163733122798SLuo Jie 163833122798SLuo Jie /* Configure the depth of BM scheduler entries. */ 163933122798SLuo Jie val = FIELD_PREP(PPE_BM_SCH_CTRL_SCH_DEPTH, count); 164033122798SLuo Jie val |= FIELD_PREP(PPE_BM_SCH_CTRL_SCH_OFFSET, 0); 164133122798SLuo Jie val |= FIELD_PREP(PPE_BM_SCH_CTRL_SCH_EN, 1); 164233122798SLuo Jie 164333122798SLuo Jie ret = regmap_write(ppe_dev->regmap, PPE_BM_SCH_CTRL_ADDR, val); 164433122798SLuo Jie if (ret) 164533122798SLuo Jie goto sch_config_fail; 164633122798SLuo Jie 164733122798SLuo Jie /* Configure each BM scheduler entry with the valid ingress port and 164833122798SLuo Jie * egress port; the second port takes effect when the specified port 164933122798SLuo Jie * is in the inactive state. 165033122798SLuo Jie */ 165133122798SLuo Jie for (i = 0; i < count; i++) { 165233122798SLuo Jie val = FIELD_PREP(PPE_BM_SCH_CFG_TBL_VALID, bm_cfg[i].valid); 165333122798SLuo Jie val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_DIR, bm_cfg[i].dir); 165433122798SLuo Jie val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_PORT_NUM, bm_cfg[i].port); 165533122798SLuo Jie val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_SECOND_PORT_VALID, 165633122798SLuo Jie bm_cfg[i].backup_port_valid); 165733122798SLuo Jie val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_SECOND_PORT, 165833122798SLuo Jie bm_cfg[i].backup_port); 165933122798SLuo Jie 166033122798SLuo Jie reg = PPE_BM_SCH_CFG_TBL_ADDR + i * PPE_BM_SCH_CFG_TBL_INC; 166133122798SLuo Jie ret = regmap_write(ppe_dev->regmap, reg, val); 166233122798SLuo Jie if (ret) 166333122798SLuo Jie goto sch_config_fail; 166433122798SLuo Jie } 166533122798SLuo Jie 166633122798SLuo Jie count = ARRAY_SIZE(ipq9574_ppe_sch_qm_config); 166733122798SLuo Jie qm_cfg = ipq9574_ppe_sch_qm_config; 166833122798SLuo Jie 166933122798SLuo Jie /* Configure the depth of QM scheduler entries. */ 167033122798SLuo Jie val = FIELD_PREP(PPE_PSCH_SCH_DEPTH_CFG_SCH_DEPTH, count); 167133122798SLuo Jie ret = regmap_write(ppe_dev->regmap, PPE_PSCH_SCH_DEPTH_CFG_ADDR, val); 167233122798SLuo Jie if (ret) 167333122798SLuo Jie goto sch_config_fail; 167433122798SLuo Jie 167533122798SLuo Jie /* Configure each QM scheduler entry with enqueue port and dequeue 167633122798SLuo Jie * port; the second port takes effect when the specified dequeue 167733122798SLuo Jie * port is in the inactive state.
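 *
 * Each entry written to PPE_PSCH_SCH_CFG_TBL carries a port bitmap and a
 * port ID for the enqueue side, a port ID for the dequeue side and an
 * optional backup dequeue port. As a purely illustrative entry (not taken
 * from the real ipq9574_ppe_sch_qm_config table),
 *	{ .ensch_port_bmp = 0x3f, .ensch_port = 4, .desch_port = 4,
 *	  .desch_backup_port_valid = false, .desch_backup_port = 0 }
 * would describe a slot in which ports 0..5 may enqueue and port 4 is
 * served for both enqueue and dequeue, with no backup dequeue port.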
167833122798SLuo Jie */ 167933122798SLuo Jie for (i = 0; i < count; i++) { 168033122798SLuo Jie val = FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_ENS_PORT_BITMAP, 168133122798SLuo Jie qm_cfg[i].ensch_port_bmp); 168233122798SLuo Jie val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_ENS_PORT, 168333122798SLuo Jie qm_cfg[i].ensch_port); 168433122798SLuo Jie val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_PORT, 168533122798SLuo Jie qm_cfg[i].desch_port); 168633122798SLuo Jie val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT_EN, 168733122798SLuo Jie qm_cfg[i].desch_backup_port_valid); 168833122798SLuo Jie val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT, 168933122798SLuo Jie qm_cfg[i].desch_backup_port); 169033122798SLuo Jie 169133122798SLuo Jie reg = PPE_PSCH_SCH_CFG_TBL_ADDR + i * PPE_PSCH_SCH_CFG_TBL_INC; 169233122798SLuo Jie ret = regmap_write(ppe_dev->regmap, reg, val); 169333122798SLuo Jie if (ret) 169433122798SLuo Jie goto sch_config_fail; 169533122798SLuo Jie } 169633122798SLuo Jie 169733122798SLuo Jie count = ARRAY_SIZE(ppe_port_sch_config); 169833122798SLuo Jie port_cfg = ppe_port_sch_config; 169933122798SLuo Jie 170033122798SLuo Jie /* Configure scheduler per PPE queue or flow. */ 170133122798SLuo Jie for (i = 0; i < count; i++) { 170233122798SLuo Jie if (port_cfg[i].port >= ppe_dev->num_ports) 170333122798SLuo Jie break; 170433122798SLuo Jie 170533122798SLuo Jie ret = ppe_node_scheduler_config(ppe_dev, port_cfg[i]); 170633122798SLuo Jie if (ret) 170733122798SLuo Jie goto sch_config_fail; 170833122798SLuo Jie } 170933122798SLuo Jie 171033122798SLuo Jie return 0; 171133122798SLuo Jie 171233122798SLuo Jie sch_config_fail: 171333122798SLuo Jie dev_err(ppe_dev->dev, "PPE scheduler arbitration config error %d\n", ret); 171433122798SLuo Jie return ret; 171533122798SLuo Jie }; 171633122798SLuo Jie 17177a23a8afSLuo Jie /* Configure PPE queue destination of each PPE port. */ 17187a23a8afSLuo Jie static int ppe_queue_dest_init(struct ppe_device *ppe_dev) 17197a23a8afSLuo Jie { 17207a23a8afSLuo Jie int ret, port_id, index, q_base, q_offset, res_start, res_end, pri_max; 17217a23a8afSLuo Jie struct ppe_queue_ucast_dest queue_dst; 17227a23a8afSLuo Jie 17237a23a8afSLuo Jie for (port_id = 0; port_id < ppe_dev->num_ports; port_id++) { 17247a23a8afSLuo Jie memset(&queue_dst, 0, sizeof(queue_dst)); 17257a23a8afSLuo Jie 17267a23a8afSLuo Jie ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_UCAST, 17277a23a8afSLuo Jie &res_start, &res_end); 17287a23a8afSLuo Jie if (ret) 17297a23a8afSLuo Jie return ret; 17307a23a8afSLuo Jie 17317a23a8afSLuo Jie q_base = res_start; 17327a23a8afSLuo Jie queue_dst.dest_port = port_id; 17337a23a8afSLuo Jie 17347a23a8afSLuo Jie /* Configure queue base ID and profile ID that is same as 17357a23a8afSLuo Jie * physical port ID. 
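 *
 * For illustration only (made-up numbers, not the real resource table): if
 * ppe_port_resource_get() reported unicast queues 256..263 for port 1, then
 * q_base would be 256 and profile ID 1 would be used, and the per-priority
 * and per-hash offsets programmed below would be added on top of that base
 * to pick the final unicast destination queue.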
17367a23a8afSLuo Jie */ 17377a23a8afSLuo Jie ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst, 17387a23a8afSLuo Jie q_base, port_id); 17397a23a8afSLuo Jie if (ret) 17407a23a8afSLuo Jie return ret; 17417a23a8afSLuo Jie 17427a23a8afSLuo Jie /* Queue priority range supported by each PPE port */ 17437a23a8afSLuo Jie ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_L0_NODE, 17447a23a8afSLuo Jie &res_start, &res_end); 17457a23a8afSLuo Jie if (ret) 17467a23a8afSLuo Jie return ret; 17477a23a8afSLuo Jie 17487a23a8afSLuo Jie pri_max = res_end - res_start; 17497a23a8afSLuo Jie 17507a23a8afSLuo Jie /* Redirect ARP reply packet with the max priority on CPU port, 17517a23a8afSLuo Jie * which keeps the ARP reply directed to CPU (CPU code is 101) 17527a23a8afSLuo Jie * with highest priority queue of EDMA. 17537a23a8afSLuo Jie */ 17547a23a8afSLuo Jie if (port_id == 0) { 17557a23a8afSLuo Jie memset(&queue_dst, 0, sizeof(queue_dst)); 17567a23a8afSLuo Jie 17577a23a8afSLuo Jie queue_dst.cpu_code_en = true; 17587a23a8afSLuo Jie queue_dst.cpu_code = 101; 17597a23a8afSLuo Jie ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst, 17607a23a8afSLuo Jie q_base + pri_max, 17617a23a8afSLuo Jie 0); 17627a23a8afSLuo Jie if (ret) 17637a23a8afSLuo Jie return ret; 17647a23a8afSLuo Jie } 17657a23a8afSLuo Jie 17667a23a8afSLuo Jie /* Initialize the queue offset of internal priority. */ 17677a23a8afSLuo Jie for (index = 0; index < PPE_QUEUE_INTER_PRI_NUM; index++) { 17687a23a8afSLuo Jie q_offset = index > pri_max ? pri_max : index; 17697a23a8afSLuo Jie 17707a23a8afSLuo Jie ret = ppe_queue_ucast_offset_pri_set(ppe_dev, port_id, 17717a23a8afSLuo Jie index, q_offset); 17727a23a8afSLuo Jie if (ret) 17737a23a8afSLuo Jie return ret; 17747a23a8afSLuo Jie } 17757a23a8afSLuo Jie 17767a23a8afSLuo Jie /* Initialize the queue offset of RSS hash as 0 to avoid the 17777a23a8afSLuo Jie * random hardware value that will lead to the unexpected 17787a23a8afSLuo Jie * destination queue generated. 17797a23a8afSLuo Jie */ 17807a23a8afSLuo Jie for (index = 0; index < PPE_QUEUE_HASH_NUM; index++) { 17817a23a8afSLuo Jie ret = ppe_queue_ucast_offset_hash_set(ppe_dev, port_id, 17827a23a8afSLuo Jie index, 0); 17837a23a8afSLuo Jie if (ret) 17847a23a8afSLuo Jie return ret; 17857a23a8afSLuo Jie } 17867a23a8afSLuo Jie } 17877a23a8afSLuo Jie 17887a23a8afSLuo Jie return 0; 17897a23a8afSLuo Jie } 17907a23a8afSLuo Jie 179173d05bdaSLuo Jie /* Initialize the service code 1 used by CPU port. 
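 *
 * The configuration built below leaves the counter and tunnel bypass
 * bitmaps empty, sets every ingress bypass bit except the fake MAC header,
 * service code and fake L2 protocol checks, sets every egress bypass bit
 * except the ACL post-routing check, and then applies it all with
 * ppe_sc_config_set() for PPE_EDMA_SC_BYPASS_ID.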
*/ 179273d05bdaSLuo Jie static int ppe_servcode_init(struct ppe_device *ppe_dev) 179373d05bdaSLuo Jie { 179473d05bdaSLuo Jie struct ppe_sc_cfg sc_cfg = {}; 179573d05bdaSLuo Jie 179673d05bdaSLuo Jie bitmap_zero(sc_cfg.bitmaps.counter, PPE_SC_BYPASS_COUNTER_SIZE); 179773d05bdaSLuo Jie bitmap_zero(sc_cfg.bitmaps.tunnel, PPE_SC_BYPASS_TUNNEL_SIZE); 179873d05bdaSLuo Jie 179973d05bdaSLuo Jie bitmap_fill(sc_cfg.bitmaps.ingress, PPE_SC_BYPASS_INGRESS_SIZE); 180073d05bdaSLuo Jie clear_bit(PPE_SC_BYPASS_INGRESS_FAKE_MAC_HEADER, sc_cfg.bitmaps.ingress); 180173d05bdaSLuo Jie clear_bit(PPE_SC_BYPASS_INGRESS_SERVICE_CODE, sc_cfg.bitmaps.ingress); 180273d05bdaSLuo Jie clear_bit(PPE_SC_BYPASS_INGRESS_FAKE_L2_PROTO, sc_cfg.bitmaps.ingress); 180373d05bdaSLuo Jie 180473d05bdaSLuo Jie bitmap_fill(sc_cfg.bitmaps.egress, PPE_SC_BYPASS_EGRESS_SIZE); 180573d05bdaSLuo Jie clear_bit(PPE_SC_BYPASS_EGRESS_ACL_POST_ROUTING_CHECK, sc_cfg.bitmaps.egress); 180673d05bdaSLuo Jie 180773d05bdaSLuo Jie return ppe_sc_config_set(ppe_dev, PPE_EDMA_SC_BYPASS_ID, sc_cfg); 180873d05bdaSLuo Jie } 180973d05bdaSLuo Jie 18108821bb0fSLuo Jie /* Initialize PPE port configurations. */ 18118821bb0fSLuo Jie static int ppe_port_config_init(struct ppe_device *ppe_dev) 18128821bb0fSLuo Jie { 18138821bb0fSLuo Jie u32 reg, val, mru_mtu_val[3]; 18148821bb0fSLuo Jie int i, ret; 18158821bb0fSLuo Jie 18168821bb0fSLuo Jie /* MTU and MRU settings are not required for CPU port 0. */ 18178821bb0fSLuo Jie for (i = 1; i < ppe_dev->num_ports; i++) { 18188821bb0fSLuo Jie /* Enable Ethernet port counter */ 18198821bb0fSLuo Jie ret = ppe_counter_enable_set(ppe_dev, i); 18208821bb0fSLuo Jie if (ret) 18218821bb0fSLuo Jie return ret; 18228821bb0fSLuo Jie 18238821bb0fSLuo Jie reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * i; 18248821bb0fSLuo Jie ret = regmap_bulk_read(ppe_dev->regmap, reg, 18258821bb0fSLuo Jie mru_mtu_val, ARRAY_SIZE(mru_mtu_val)); 18268821bb0fSLuo Jie if (ret) 18278821bb0fSLuo Jie return ret; 18288821bb0fSLuo Jie 18298821bb0fSLuo Jie /* Drop the packet when the packet size is more than the MTU 18308821bb0fSLuo Jie * and redirect the packet to the CPU port when the received 18318821bb0fSLuo Jie * packet size is more than the MRU of the physical interface. 18328821bb0fSLuo Jie */ 18338821bb0fSLuo Jie PPE_MRU_MTU_CTRL_SET_MRU_CMD(mru_mtu_val, PPE_ACTION_REDIRECT_TO_CPU); 18348821bb0fSLuo Jie PPE_MRU_MTU_CTRL_SET_MTU_CMD(mru_mtu_val, PPE_ACTION_DROP); 18358821bb0fSLuo Jie ret = regmap_bulk_write(ppe_dev->regmap, reg, 18368821bb0fSLuo Jie mru_mtu_val, ARRAY_SIZE(mru_mtu_val)); 18378821bb0fSLuo Jie if (ret) 18388821bb0fSLuo Jie return ret; 18398821bb0fSLuo Jie 18408821bb0fSLuo Jie reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * i; 18418821bb0fSLuo Jie val = FIELD_PREP(PPE_MC_MTU_CTRL_TBL_MTU_CMD, PPE_ACTION_DROP); 18428821bb0fSLuo Jie ret = regmap_update_bits(ppe_dev->regmap, reg, 18438821bb0fSLuo Jie PPE_MC_MTU_CTRL_TBL_MTU_CMD, 18448821bb0fSLuo Jie val); 18458821bb0fSLuo Jie if (ret) 18468821bb0fSLuo Jie return ret; 18478821bb0fSLuo Jie } 18488821bb0fSLuo Jie 18498821bb0fSLuo Jie /* Enable CPU port counters. */ 18508821bb0fSLuo Jie return ppe_counter_enable_set(ppe_dev, 0); 18518821bb0fSLuo Jie } 18528821bb0fSLuo Jie 18531c46c3c0SLuo Jie /* Initialize the PPE RSS configuration for IPv4 and IPv6 packet receive. 18541c46c3c0SLuo Jie * RSS settings are to calculate the random RSS hash value generated during 18551c46c3c0SLuo Jie * packet receive. 
This hash is then used to generate the queue offset that 18561c46c3c0SLuo Jie * determines the queue used to transmit the packet. 18571c46c3c0SLuo Jie */ 18581c46c3c0SLuo Jie static int ppe_rss_hash_init(struct ppe_device *ppe_dev) 18591c46c3c0SLuo Jie { 18601c46c3c0SLuo Jie u16 fins[PPE_RSS_HASH_TUPLES] = { 0x205, 0x264, 0x227, 0x245, 0x201 }; 18611c46c3c0SLuo Jie u8 ips[PPE_RSS_HASH_IP_LENGTH] = { 0x13, 0xb, 0x13, 0xb }; 18621c46c3c0SLuo Jie struct ppe_rss_hash_cfg hash_cfg; 18631c46c3c0SLuo Jie int i, ret; 18641c46c3c0SLuo Jie 18651c46c3c0SLuo Jie hash_cfg.hash_seed = get_random_u32(); 18661c46c3c0SLuo Jie hash_cfg.hash_mask = 0xfff; 18671c46c3c0SLuo Jie 18681c46c3c0SLuo Jie /* Use the 5-tuple as the RSS hash key for the first fragment of TCP, UDP 18691c46c3c0SLuo Jie * and UDP-Lite packets. 18701c46c3c0SLuo Jie */ 18711c46c3c0SLuo Jie hash_cfg.hash_fragment_mode = false; 18721c46c3c0SLuo Jie 18731c46c3c0SLuo Jie /* The final common seed configs used to calculate the RSS hash value, 18741c46c3c0SLuo Jie * which are available for both IPv4 and IPv6 packets. 18751c46c3c0SLuo Jie */ 18761c46c3c0SLuo Jie for (i = 0; i < ARRAY_SIZE(fins); i++) { 18771c46c3c0SLuo Jie hash_cfg.hash_fin_inner[i] = fins[i] & 0x1f; 18781c46c3c0SLuo Jie hash_cfg.hash_fin_outer[i] = fins[i] >> 5; 18791c46c3c0SLuo Jie } 18801c46c3c0SLuo Jie 18811c46c3c0SLuo Jie /* RSS seeds for IP protocol, L4 destination & source port and 18821c46c3c0SLuo Jie * destination & source IP used to calculate the RSS hash value. 18831c46c3c0SLuo Jie */ 18841c46c3c0SLuo Jie hash_cfg.hash_protocol_mix = 0x13; 18851c46c3c0SLuo Jie hash_cfg.hash_dport_mix = 0xb; 18861c46c3c0SLuo Jie hash_cfg.hash_sport_mix = 0x13; 18871c46c3c0SLuo Jie hash_cfg.hash_dip_mix[0] = 0xb; 18881c46c3c0SLuo Jie hash_cfg.hash_sip_mix[0] = 0x13; 18891c46c3c0SLuo Jie 18901c46c3c0SLuo Jie /* Configure RSS seed configs for IPv4 packet. */ 18911c46c3c0SLuo Jie ret = ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV4, hash_cfg); 18921c46c3c0SLuo Jie if (ret) 18931c46c3c0SLuo Jie return ret; 18941c46c3c0SLuo Jie 18951c46c3c0SLuo Jie for (i = 0; i < ARRAY_SIZE(ips); i++) { 18961c46c3c0SLuo Jie hash_cfg.hash_sip_mix[i] = ips[i]; 18971c46c3c0SLuo Jie hash_cfg.hash_dip_mix[i] = ips[i]; 18981c46c3c0SLuo Jie } 18991c46c3c0SLuo Jie 19001c46c3c0SLuo Jie /* Configure RSS seed configs for IPv6 packet. */ 19011c46c3c0SLuo Jie return ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV6, hash_cfg); 19021c46c3c0SLuo Jie } 19031c46c3c0SLuo Jie 1904fa99608aSLuo Jie /* Initialize mapping of the PPE queues assigned to CPU port 0 1905fa99608aSLuo Jie * to Ethernet DMA ring 0. 1906fa99608aSLuo Jie */ 1907fa99608aSLuo Jie static int ppe_queues_to_ring_init(struct ppe_device *ppe_dev) 1908fa99608aSLuo Jie { 1909fa99608aSLuo Jie u32 queue_bmap[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT] = {}; 1910fa99608aSLuo Jie int ret, queue_id, queue_max; 1911fa99608aSLuo Jie 1912fa99608aSLuo Jie ret = ppe_port_resource_get(ppe_dev, 0, PPE_RES_UCAST, 1913fa99608aSLuo Jie &queue_id, &queue_max); 1914fa99608aSLuo Jie if (ret) 1915fa99608aSLuo Jie return ret; 1916fa99608aSLuo Jie 1917fa99608aSLuo Jie for (; queue_id <= queue_max; queue_id++) 1918fa99608aSLuo Jie queue_bmap[queue_id / 32] |= BIT_MASK(queue_id % 32); 1919fa99608aSLuo Jie 1920fa99608aSLuo Jie return ppe_ring_queue_map_set(ppe_dev, 0, queue_bmap); 1921fa99608aSLuo Jie } 1922fa99608aSLuo Jie 1923*8cc72c6cSLei Wei /* Initialize PPE bridge settings to only enable L2 frame receive and 1924*8cc72c6cSLei Wei * transmit between CPU port and PPE Ethernet ports.
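 *
 * Concretely, the function below keeps only the TX MAC enable bit set in
 * the CPU port0 bridge control register (clearing new-address and
 * station-move learning within the same mask), forwards traffic received
 * on a physical port without a valid VSI to port0, and initializes every
 * VSI table entry so that its member, unknown-unicast, unknown-multicast
 * and broadcast port bitmaps contain only BIT(0), i.e. CPU port0.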
1925*8cc72c6cSLei Wei */ 1926*8cc72c6cSLei Wei static int ppe_bridge_init(struct ppe_device *ppe_dev) 1927*8cc72c6cSLei Wei { 1928*8cc72c6cSLei Wei u32 reg, mask, port_cfg[4], vsi_cfg[2]; 1929*8cc72c6cSLei Wei int ret, i; 1930*8cc72c6cSLei Wei 1931*8cc72c6cSLei Wei /* Configure the following settings for CPU port0: 1932*8cc72c6cSLei Wei * a.) Enable Bridge TX 1933*8cc72c6cSLei Wei * b.) Disable FDB new address learning 1934*8cc72c6cSLei Wei * c.) Disable station move address learning 1935*8cc72c6cSLei Wei */ 1936*8cc72c6cSLei Wei mask = PPE_PORT_BRIDGE_TXMAC_EN; 1937*8cc72c6cSLei Wei mask |= PPE_PORT_BRIDGE_NEW_LRN_EN; 1938*8cc72c6cSLei Wei mask |= PPE_PORT_BRIDGE_STA_MOVE_LRN_EN; 1939*8cc72c6cSLei Wei ret = regmap_update_bits(ppe_dev->regmap, 1940*8cc72c6cSLei Wei PPE_PORT_BRIDGE_CTRL_ADDR, 1941*8cc72c6cSLei Wei mask, 1942*8cc72c6cSLei Wei PPE_PORT_BRIDGE_TXMAC_EN); 1943*8cc72c6cSLei Wei if (ret) 1944*8cc72c6cSLei Wei return ret; 1945*8cc72c6cSLei Wei 1946*8cc72c6cSLei Wei for (i = 1; i < ppe_dev->num_ports; i++) { 1947*8cc72c6cSLei Wei /* Enable invalid VSI forwarding for all the physical ports 1948*8cc72c6cSLei Wei * to CPU port0, in case no VSI is assigned to the physical 1949*8cc72c6cSLei Wei * port. 1950*8cc72c6cSLei Wei */ 1951*8cc72c6cSLei Wei reg = PPE_L2_VP_PORT_TBL_ADDR + PPE_L2_VP_PORT_TBL_INC * i; 1952*8cc72c6cSLei Wei ret = regmap_bulk_read(ppe_dev->regmap, reg, 1953*8cc72c6cSLei Wei port_cfg, ARRAY_SIZE(port_cfg)); 1954*8cc72c6cSLei Wei 1955*8cc72c6cSLei Wei if (ret) 1956*8cc72c6cSLei Wei return ret; 1957*8cc72c6cSLei Wei 1958*8cc72c6cSLei Wei PPE_L2_PORT_SET_INVALID_VSI_FWD_EN(port_cfg, true); 1959*8cc72c6cSLei Wei PPE_L2_PORT_SET_DST_INFO(port_cfg, 0); 1960*8cc72c6cSLei Wei 1961*8cc72c6cSLei Wei ret = regmap_bulk_write(ppe_dev->regmap, reg, 1962*8cc72c6cSLei Wei port_cfg, ARRAY_SIZE(port_cfg)); 1963*8cc72c6cSLei Wei if (ret) 1964*8cc72c6cSLei Wei return ret; 1965*8cc72c6cSLei Wei } 1966*8cc72c6cSLei Wei 1967*8cc72c6cSLei Wei for (i = 0; i < PPE_VSI_TBL_ENTRIES; i++) { 1968*8cc72c6cSLei Wei /* Set the VSI forward membership to include only CPU port0. 1969*8cc72c6cSLei Wei * FDB learning and forwarding take place only after switchdev 1970*8cc72c6cSLei Wei * is supported later to create the VSI and join the physical 1971*8cc72c6cSLei Wei * ports to the VSI port member. 
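 *
 * As a sketch of that later, not yet implemented, step: joining physical
 * port 1 to a VSI would amount to re-reading the VSI entry and extending
 * its bitmaps before writing it back, e.g.
 *	PPE_VSI_SET_MEMBER_PORT_BITMAP(vsi_cfg, BIT(0) | BIT(1));
 *	PPE_VSI_SET_UUC_BITMAP(vsi_cfg, BIT(0) | BIT(1));
 * followed by the same regmap_bulk_write() as used below.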
1972*8cc72c6cSLei Wei */ 1973*8cc72c6cSLei Wei reg = PPE_VSI_TBL_ADDR + PPE_VSI_TBL_INC * i; 1974*8cc72c6cSLei Wei ret = regmap_bulk_read(ppe_dev->regmap, reg, 1975*8cc72c6cSLei Wei vsi_cfg, ARRAY_SIZE(vsi_cfg)); 1976*8cc72c6cSLei Wei if (ret) 1977*8cc72c6cSLei Wei return ret; 1978*8cc72c6cSLei Wei 1979*8cc72c6cSLei Wei PPE_VSI_SET_MEMBER_PORT_BITMAP(vsi_cfg, BIT(0)); 1980*8cc72c6cSLei Wei PPE_VSI_SET_UUC_BITMAP(vsi_cfg, BIT(0)); 1981*8cc72c6cSLei Wei PPE_VSI_SET_UMC_BITMAP(vsi_cfg, BIT(0)); 1982*8cc72c6cSLei Wei PPE_VSI_SET_BC_BITMAP(vsi_cfg, BIT(0)); 1983*8cc72c6cSLei Wei PPE_VSI_SET_NEW_ADDR_LRN_EN(vsi_cfg, true); 1984*8cc72c6cSLei Wei PPE_VSI_SET_NEW_ADDR_FWD_CMD(vsi_cfg, PPE_ACTION_FORWARD); 1985*8cc72c6cSLei Wei PPE_VSI_SET_STATION_MOVE_LRN_EN(vsi_cfg, true); 1986*8cc72c6cSLei Wei PPE_VSI_SET_STATION_MOVE_FWD_CMD(vsi_cfg, PPE_ACTION_FORWARD); 1987*8cc72c6cSLei Wei 1988*8cc72c6cSLei Wei ret = regmap_bulk_write(ppe_dev->regmap, reg, 1989*8cc72c6cSLei Wei vsi_cfg, ARRAY_SIZE(vsi_cfg)); 1990*8cc72c6cSLei Wei if (ret) 1991*8cc72c6cSLei Wei return ret; 1992*8cc72c6cSLei Wei } 1993*8cc72c6cSLei Wei 1994*8cc72c6cSLei Wei return 0; 1995*8cc72c6cSLei Wei } 1996*8cc72c6cSLei Wei 19978a971df9SLuo Jie int ppe_hw_config(struct ppe_device *ppe_dev) 19988a971df9SLuo Jie { 1999806268dcSLuo Jie int ret; 2000806268dcSLuo Jie 2001806268dcSLuo Jie ret = ppe_config_bm(ppe_dev); 2002806268dcSLuo Jie if (ret) 2003806268dcSLuo Jie return ret; 2004806268dcSLuo Jie 200533122798SLuo Jie ret = ppe_config_qm(ppe_dev); 200633122798SLuo Jie if (ret) 200733122798SLuo Jie return ret; 200833122798SLuo Jie 20097a23a8afSLuo Jie ret = ppe_config_scheduler(ppe_dev); 20107a23a8afSLuo Jie if (ret) 20117a23a8afSLuo Jie return ret; 20127a23a8afSLuo Jie 201373d05bdaSLuo Jie ret = ppe_queue_dest_init(ppe_dev); 201473d05bdaSLuo Jie if (ret) 201573d05bdaSLuo Jie return ret; 201673d05bdaSLuo Jie 20178821bb0fSLuo Jie ret = ppe_servcode_init(ppe_dev); 20188821bb0fSLuo Jie if (ret) 20198821bb0fSLuo Jie return ret; 20208821bb0fSLuo Jie 20211c46c3c0SLuo Jie ret = ppe_port_config_init(ppe_dev); 20221c46c3c0SLuo Jie if (ret) 20231c46c3c0SLuo Jie return ret; 20241c46c3c0SLuo Jie 2025fa99608aSLuo Jie ret = ppe_rss_hash_init(ppe_dev); 2026fa99608aSLuo Jie if (ret) 2027fa99608aSLuo Jie return ret; 2028fa99608aSLuo Jie 2029*8cc72c6cSLei Wei ret = ppe_queues_to_ring_init(ppe_dev); 2030*8cc72c6cSLei Wei if (ret) 2031*8cc72c6cSLei Wei return ret; 2032*8cc72c6cSLei Wei 2033*8cc72c6cSLei Wei return ppe_bridge_init(ppe_dev); 20348a971df9SLuo Jie } 2035
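
/* Usage sketch, illustrative only and not part of the driver: the PPE
 * platform driver is expected to call ppe_hw_config() once after the regmap
 * and the port resources have been set up, and an EDMA driver could later
 * map further queues to additional RX rings. Assuming a hypothetical caller
 * that owns a valid struct ppe_device pointer, mapping queues 8..15 to
 * ring 1 could look like:
 *
 *	u32 qmap[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT] = {};
 *	int i, ret;
 *
 *	ret = ppe_hw_config(ppe_dev);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 8; i <= 15; i++)
 *		qmap[i / 32] |= BIT_MASK(i % 32);
 *
 *	ret = ppe_ring_queue_map_set(ppe_dev, 1, qmap);
 *	if (ret)
 *		return ret;
 */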