xref: /linux/drivers/net/ethernet/qualcomm/ppe/ppe_config.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
4  */
5 
6 /* PPE HW initialization configs such as BM (buffer management),
7  * QM (queue management) and scheduler configs.
8  */
9 
10 #include <linux/bitfield.h>
11 #include <linux/bitmap.h>
12 #include <linux/bits.h>
13 #include <linux/device.h>
14 #include <linux/regmap.h>
15 
16 #include "ppe.h"
17 #include "ppe_config.h"
18 #include "ppe_regs.h"
19 
20 #define PPE_QUEUE_SCH_PRI_NUM		8
21 
22 /**
23  * struct ppe_bm_port_config - PPE BM port configuration.
24  * @port_id_start: The first BM port ID to configure.
25  * @port_id_end: The last BM port ID to configure.
26  * @pre_alloc: BM port dedicated buffer number.
27  * @in_fly_buf: Buffer number for receiving the packet after the pause frame is sent.
28  * @ceil: Ceil to generate the back pressure.
29  * @weight: Weight value.
30  * @resume_offset: Resume offset from the threshold value.
31  * @resume_ceil: Ceil to resume from the back pressure state.
32  * @dynamic: Dynamic threshold used or not.
33  *
34  * This is for configuring the threshold that impacts the port
35  * flow control.
36  */
37 struct ppe_bm_port_config {
38 	unsigned int port_id_start;
39 	unsigned int port_id_end;
40 	unsigned int pre_alloc;
41 	unsigned int in_fly_buf;
42 	unsigned int ceil;
43 	unsigned int weight;
44 	unsigned int resume_offset;
45 	unsigned int resume_ceil;
46 	bool dynamic;
47 };
48 
49 /**
50  * struct ppe_qm_queue_config - PPE queue config.
51  * @queue_start: PPE start of queue ID.
52  * @queue_end: PPE end of queue ID.
53  * @prealloc_buf: Queue dedicated buffer number.
54  * @ceil: Ceil to start drop packet from queue.
55  * @weight: Weight value.
56  * @resume_offset: Resume offset from the threshold.
57  * @dynamic: Threshold value is decided dynamically or statically.
58  *
59  * Queue configuration decides the threshold to drop packets from the PPE
60  * hardware queue.
61  */
62 struct ppe_qm_queue_config {
63 	unsigned int queue_start;
64 	unsigned int queue_end;
65 	unsigned int prealloc_buf;
66 	unsigned int ceil;
67 	unsigned int weight;
68 	unsigned int resume_offset;
69 	bool dynamic;
70 };
71 
72 /**
73  * enum ppe_scheduler_direction - PPE scheduler direction for packet.
74  * @PPE_SCH_INGRESS: Scheduler for the packet on ingress.
75  * @PPE_SCH_EGRESS: Scheduler for the packet on egress.
76  */
77 enum ppe_scheduler_direction {
78 	PPE_SCH_INGRESS = 0,
79 	PPE_SCH_EGRESS = 1,
80 };
81 
82 /**
83  * struct ppe_scheduler_bm_config - PPE arbitration for buffer config.
84  * @valid: Arbitration entry valid or not.
85  * @dir: Arbitration entry for egress or ingress.
86  * @port: Port ID to use arbitration entry.
87  * @backup_port_valid: Backup port valid or not.
88  * @backup_port: Backup port ID to use.
89  *
90  * Configure the scheduler settings for accessing and releasing the PPE buffers.
91  */
92 struct ppe_scheduler_bm_config {
93 	bool valid;
94 	enum ppe_scheduler_direction dir;
95 	unsigned int port;
96 	bool backup_port_valid;
97 	unsigned int backup_port;
98 };
99 
100 /**
101  * struct ppe_scheduler_qm_config - PPE arbitration for scheduler config.
102  * @ensch_port_bmp: Port bit map for enqueue scheduler.
103  * @ensch_port: Port ID to enqueue scheduler.
104  * @desch_port: Port ID to dequeue scheduler.
105  * @desch_backup_port_valid: Dequeue for the backup port valid or not.
106  * @desch_backup_port: Backup port ID to dequeue scheduler.
107  *
108  * Configure the scheduler settings for enqueuing and dequeuing packets on
109  * the PPE port.
110  */
111 struct ppe_scheduler_qm_config {
112 	unsigned int ensch_port_bmp;
113 	unsigned int ensch_port;
114 	unsigned int desch_port;
115 	bool desch_backup_port_valid;
116 	unsigned int desch_backup_port;
117 };
118 
119 /**
120  * struct ppe_scheduler_port_config - PPE port scheduler config.
121  * @port: Port ID to be scheduled.
122  * @flow_level: Scheduler flow level or not.
123  * @node_id: Node ID; for level 0, the queue ID is used.
124  * @loop_num: Loop number of scheduler config.
125  * @pri_max: Max priority configured.
126  * @flow_id: Strict priority ID.
127  * @drr_node_id: Node ID for scheduler.
128  *
129  * PPE port scheduler configuration which decides the priority in the
130  * packet scheduler for the egress port.
131  */
132 struct ppe_scheduler_port_config {
133 	unsigned int port;
134 	bool flow_level;
135 	unsigned int node_id;
136 	unsigned int loop_num;
137 	unsigned int pri_max;
138 	unsigned int flow_id;
139 	unsigned int drr_node_id;
140 };
141 
142 /**
143  * struct ppe_port_schedule_resource - PPE port scheduler resource.
144  * @ucastq_start: Unicast queue start ID.
145  * @ucastq_end: Unicast queue end ID.
146  * @mcastq_start: Multicast queue start ID.
147  * @mcastq_end: Multicast queue end ID.
148  * @flow_id_start: Flow start ID.
149  * @flow_id_end: Flow end ID.
150  * @l0node_start: Scheduler node start ID for queue level.
151  * @l0node_end: Scheduler node end ID for queue level.
152  * @l1node_start: Scheduler node start ID for flow level.
153  * @l1node_end: Scheduler node end ID for flow level.
154  *
155  * PPE scheduler resource allocated among the PPE ports.
156  */
157 struct ppe_port_schedule_resource {
158 	unsigned int ucastq_start;
159 	unsigned int ucastq_end;
160 	unsigned int mcastq_start;
161 	unsigned int mcastq_end;
162 	unsigned int flow_id_start;
163 	unsigned int flow_id_end;
164 	unsigned int l0node_start;
165 	unsigned int l0node_end;
166 	unsigned int l1node_start;
167 	unsigned int l1node_end;
168 };
169 
170 /* There are a total of 2048 buffers available in the PPE, out of which
171  * some buffers are reserved for specific purposes per PPE port. The
172  * remaining pool of 1550 buffers is assigned to the general 'group0',
173  * which is shared among all ports of the PPE.
174  */
175 static const int ipq9574_ppe_bm_group_config = 1550;
176 
177 /* The buffer configurations per PPE port. There are 15 BM ports and
178  * 4 BM groups supported by the PPE. BM ports 0-7 are for EDMA port 0,
179  * BM ports 8-13 are for the PPE physical ports 1-6 and BM port 14 is
180  * for the EIP port.
181  */
182 static const struct ppe_bm_port_config ipq9574_ppe_bm_port_config[] = {
183 	{
184 		/* Buffer configuration for the BM port ID 0 of EDMA. */
185 		.port_id_start	= 0,
186 		.port_id_end	= 0,
187 		.pre_alloc	= 0,
188 		.in_fly_buf	= 100,
189 		.ceil		= 1146,
190 		.weight		= 7,
191 		.resume_offset	= 8,
192 		.resume_ceil	= 0,
193 		.dynamic	= true,
194 	},
195 	{
196 		/* Buffer configuration for the BM port ID 1-7 of EDMA. */
197 		.port_id_start	= 1,
198 		.port_id_end	= 7,
199 		.pre_alloc	= 0,
200 		.in_fly_buf	= 100,
201 		.ceil		= 250,
202 		.weight		= 4,
203 		.resume_offset	= 36,
204 		.resume_ceil	= 0,
205 		.dynamic	= true,
206 	},
207 	{
208 		/* Buffer configuration for the BM port ID 8-13 of PPE ports. */
209 		.port_id_start	= 8,
210 		.port_id_end	= 13,
211 		.pre_alloc	= 0,
212 		.in_fly_buf	= 128,
213 		.ceil		= 250,
214 		.weight		= 4,
215 		.resume_offset	= 36,
216 		.resume_ceil	= 0,
217 		.dynamic	= true,
218 	},
219 	{
220 		/* Buffer configuration for the BM port ID 14 of EIP. */
221 		.port_id_start	= 14,
222 		.port_id_end	= 14,
223 		.pre_alloc	= 0,
224 		.in_fly_buf	= 40,
225 		.ceil		= 250,
226 		.weight		= 4,
227 		.resume_offset	= 36,
228 		.resume_ceil	= 0,
229 		.dynamic	= true,
230 	},
231 };
232 
233 /* QM fetches the packet from PPE buffer management for transmitting the
234  * packet out. The QM group configuration limits the total number of buffers
235  * enqueued by all PPE hardware queues.
236  * There are a total of 2048 buffers available, out of which some buffers are
237  * dedicated to hardware exception handlers. The remaining buffers are
238  * assigned to the general 'group0', which is the group assigned to all
239  * queues by default.
240  */
241 static const int ipq9574_ppe_qm_group_config = 2000;
242 
243 /* Default QM settings for unicast and multicast queues for IPQ9574. */
244 static const struct ppe_qm_queue_config ipq9574_ppe_qm_queue_config[] = {
245 	{
246 		/* QM settings for unicast queues 0 to 255. */
247 		.queue_start	= 0,
248 		.queue_end	= 255,
249 		.prealloc_buf	= 0,
250 		.ceil		= 1200,
251 		.weight		= 7,
252 		.resume_offset	= 36,
253 		.dynamic	= true,
254 	},
255 	{
256 		/* QM settings for multicast queues 256 to 299. */
257 		.queue_start	= 256,
258 		.queue_end	= 299,
259 		.prealloc_buf	= 0,
260 		.ceil		= 250,
261 		.weight		= 0,
262 		.resume_offset	= 36,
263 		.dynamic	= false,
264 	},
265 };
266 
267 /* PPE scheduler configuration for BM includes multiple entries. Each entry
268  * indicates the primary port to be assigned the buffers for the ingress or
269  * to release the buffers for the egress. The backup port ID is used when
270  * the primary port is down.
271  */
272 static const struct ppe_scheduler_bm_config ipq9574_ppe_sch_bm_config[] = {
273 	{true, PPE_SCH_INGRESS, 0, false, 0},
274 	{true, PPE_SCH_EGRESS,  0, false, 0},
275 	{true, PPE_SCH_INGRESS, 5, false, 0},
276 	{true, PPE_SCH_EGRESS,  5, false, 0},
277 	{true, PPE_SCH_INGRESS, 6, false, 0},
278 	{true, PPE_SCH_EGRESS,  6, false, 0},
279 	{true, PPE_SCH_INGRESS, 1, false, 0},
280 	{true, PPE_SCH_EGRESS,  1, false, 0},
281 	{true, PPE_SCH_INGRESS, 0, false, 0},
282 	{true, PPE_SCH_EGRESS,  0, false, 0},
283 	{true, PPE_SCH_INGRESS, 5, false, 0},
284 	{true, PPE_SCH_EGRESS,  5, false, 0},
285 	{true, PPE_SCH_INGRESS, 6, false, 0},
286 	{true, PPE_SCH_EGRESS,  6, false, 0},
287 	{true, PPE_SCH_INGRESS, 7, false, 0},
288 	{true, PPE_SCH_EGRESS,  7, false, 0},
289 	{true, PPE_SCH_INGRESS, 0, false, 0},
290 	{true, PPE_SCH_EGRESS,  0, false, 0},
291 	{true, PPE_SCH_INGRESS, 1, false, 0},
292 	{true, PPE_SCH_EGRESS,  1, false, 0},
293 	{true, PPE_SCH_INGRESS, 5, false, 0},
294 	{true, PPE_SCH_EGRESS,  5, false, 0},
295 	{true, PPE_SCH_INGRESS, 6, false, 0},
296 	{true, PPE_SCH_EGRESS,  6, false, 0},
297 	{true, PPE_SCH_INGRESS, 2, false, 0},
298 	{true, PPE_SCH_EGRESS,  2, false, 0},
299 	{true, PPE_SCH_INGRESS, 0, false, 0},
300 	{true, PPE_SCH_EGRESS,  0, false, 0},
301 	{true, PPE_SCH_INGRESS, 5, false, 0},
302 	{true, PPE_SCH_EGRESS,  5, false, 0},
303 	{true, PPE_SCH_INGRESS, 6, false, 0},
304 	{true, PPE_SCH_EGRESS,  6, false, 0},
305 	{true, PPE_SCH_INGRESS, 1, false, 0},
306 	{true, PPE_SCH_EGRESS,  1, false, 0},
307 	{true, PPE_SCH_INGRESS, 3, false, 0},
308 	{true, PPE_SCH_EGRESS,  3, false, 0},
309 	{true, PPE_SCH_INGRESS, 0, false, 0},
310 	{true, PPE_SCH_EGRESS,  0, false, 0},
311 	{true, PPE_SCH_INGRESS, 5, false, 0},
312 	{true, PPE_SCH_EGRESS,  5, false, 0},
313 	{true, PPE_SCH_INGRESS, 6, false, 0},
314 	{true, PPE_SCH_EGRESS,  6, false, 0},
315 	{true, PPE_SCH_INGRESS, 7, false, 0},
316 	{true, PPE_SCH_EGRESS,  7, false, 0},
317 	{true, PPE_SCH_INGRESS, 0, false, 0},
318 	{true, PPE_SCH_EGRESS,  0, false, 0},
319 	{true, PPE_SCH_INGRESS, 1, false, 0},
320 	{true, PPE_SCH_EGRESS,  1, false, 0},
321 	{true, PPE_SCH_INGRESS, 5, false, 0},
322 	{true, PPE_SCH_EGRESS,  5, false, 0},
323 	{true, PPE_SCH_INGRESS, 6, false, 0},
324 	{true, PPE_SCH_EGRESS,  6, false, 0},
325 	{true, PPE_SCH_INGRESS, 4, false, 0},
326 	{true, PPE_SCH_EGRESS,  4, false, 0},
327 	{true, PPE_SCH_INGRESS, 0, false, 0},
328 	{true, PPE_SCH_EGRESS,  0, false, 0},
329 	{true, PPE_SCH_INGRESS, 5, false, 0},
330 	{true, PPE_SCH_EGRESS,  5, false, 0},
331 	{true, PPE_SCH_INGRESS, 6, false, 0},
332 	{true, PPE_SCH_EGRESS,  6, false, 0},
333 	{true, PPE_SCH_INGRESS, 1, false, 0},
334 	{true, PPE_SCH_EGRESS,  1, false, 0},
335 	{true, PPE_SCH_INGRESS, 0, false, 0},
336 	{true, PPE_SCH_EGRESS,  0, false, 0},
337 	{true, PPE_SCH_INGRESS, 5, false, 0},
338 	{true, PPE_SCH_EGRESS,  5, false, 0},
339 	{true, PPE_SCH_INGRESS, 6, false, 0},
340 	{true, PPE_SCH_EGRESS,  6, false, 0},
341 	{true, PPE_SCH_INGRESS, 2, false, 0},
342 	{true, PPE_SCH_EGRESS,  2, false, 0},
343 	{true, PPE_SCH_INGRESS, 0, false, 0},
344 	{true, PPE_SCH_EGRESS,  0, false, 0},
345 	{true, PPE_SCH_INGRESS, 7, false, 0},
346 	{true, PPE_SCH_EGRESS,  7, false, 0},
347 	{true, PPE_SCH_INGRESS, 5, false, 0},
348 	{true, PPE_SCH_EGRESS,  5, false, 0},
349 	{true, PPE_SCH_INGRESS, 6, false, 0},
350 	{true, PPE_SCH_EGRESS,  6, false, 0},
351 	{true, PPE_SCH_INGRESS, 1, false, 0},
352 	{true, PPE_SCH_EGRESS,  1, false, 0},
353 	{true, PPE_SCH_INGRESS, 0, false, 0},
354 	{true, PPE_SCH_EGRESS,  0, false, 0},
355 	{true, PPE_SCH_INGRESS, 5, false, 0},
356 	{true, PPE_SCH_EGRESS,  5, false, 0},
357 	{true, PPE_SCH_INGRESS, 6, false, 0},
358 	{true, PPE_SCH_EGRESS,  6, false, 0},
359 	{true, PPE_SCH_INGRESS, 3, false, 0},
360 	{true, PPE_SCH_EGRESS,  3, false, 0},
361 	{true, PPE_SCH_INGRESS, 1, false, 0},
362 	{true, PPE_SCH_EGRESS,  1, false, 0},
363 	{true, PPE_SCH_INGRESS, 0, false, 0},
364 	{true, PPE_SCH_EGRESS,  0, false, 0},
365 	{true, PPE_SCH_INGRESS, 5, false, 0},
366 	{true, PPE_SCH_EGRESS,  5, false, 0},
367 	{true, PPE_SCH_INGRESS, 6, false, 0},
368 	{true, PPE_SCH_EGRESS,  6, false, 0},
369 	{true, PPE_SCH_INGRESS, 4, false, 0},
370 	{true, PPE_SCH_EGRESS,  4, false, 0},
371 	{true, PPE_SCH_INGRESS, 7, false, 0},
372 	{true, PPE_SCH_EGRESS,  7, false, 0},
373 };
374 
375 /* PPE scheduler configuration for QM includes multiple entries. Each entry
376  * contains ports to be dispatched for enqueueing and dequeueing. The backup
377  * port for dequeueing is used when the primary port for dequeueing
378  * is down.
379  */
380 static const struct ppe_scheduler_qm_config ipq9574_ppe_sch_qm_config[] = {
381 	{0x98, 6, 0, true, 1},
382 	{0x94, 5, 6, true, 3},
383 	{0x86, 0, 5, true, 4},
384 	{0x8C, 1, 6, true, 0},
385 	{0x1C, 7, 5, true, 1},
386 	{0x98, 2, 6, true, 0},
387 	{0x1C, 5, 7, true, 1},
388 	{0x34, 3, 6, true, 0},
389 	{0x8C, 4, 5, true, 1},
390 	{0x98, 2, 6, true, 0},
391 	{0x8C, 5, 4, true, 1},
392 	{0xA8, 0, 6, true, 2},
393 	{0x98, 5, 1, true, 0},
394 	{0x98, 6, 5, true, 2},
395 	{0x89, 1, 6, true, 4},
396 	{0xA4, 3, 0, true, 1},
397 	{0x8C, 5, 6, true, 4},
398 	{0xA8, 0, 2, true, 1},
399 	{0x98, 6, 5, true, 0},
400 	{0xC4, 4, 3, true, 1},
401 	{0x94, 6, 5, true, 0},
402 	{0x1C, 7, 6, true, 1},
403 	{0x98, 2, 5, true, 0},
404 	{0x1C, 6, 7, true, 1},
405 	{0x1C, 5, 6, true, 0},
406 	{0x94, 3, 5, true, 1},
407 	{0x8C, 4, 6, true, 0},
408 	{0x94, 1, 5, true, 3},
409 	{0x94, 6, 1, true, 0},
410 	{0xD0, 3, 5, true, 2},
411 	{0x98, 6, 0, true, 1},
412 	{0x94, 5, 6, true, 3},
413 	{0x94, 1, 5, true, 0},
414 	{0x98, 2, 6, true, 1},
415 	{0x8C, 4, 5, true, 0},
416 	{0x1C, 7, 6, true, 1},
417 	{0x8C, 0, 5, true, 4},
418 	{0x89, 1, 6, true, 2},
419 	{0x98, 5, 0, true, 1},
420 	{0x94, 6, 5, true, 3},
421 	{0x92, 0, 6, true, 2},
422 	{0x98, 1, 5, true, 0},
423 	{0x98, 6, 2, true, 1},
424 	{0xD0, 0, 5, true, 3},
425 	{0x94, 6, 0, true, 1},
426 	{0x8C, 5, 6, true, 4},
427 	{0x8C, 1, 5, true, 0},
428 	{0x1C, 6, 7, true, 1},
429 	{0x1C, 5, 6, true, 0},
430 	{0xB0, 2, 3, true, 1},
431 	{0xC4, 4, 5, true, 0},
432 	{0x8C, 6, 4, true, 1},
433 	{0xA4, 3, 6, true, 0},
434 	{0x1C, 5, 7, true, 1},
435 	{0x4C, 0, 5, true, 4},
436 	{0x8C, 6, 0, true, 1},
437 	{0x34, 7, 6, true, 3},
438 	{0x94, 5, 0, true, 1},
439 	{0x98, 6, 5, true, 2},
440 };
441 
442 static const struct ppe_scheduler_port_config ppe_port_sch_config[] = {
443 	{
444 		.port		= 0,
445 		.flow_level	= true,
446 		.node_id	= 0,
447 		.loop_num	= 1,
448 		.pri_max	= 1,
449 		.flow_id	= 0,
450 		.drr_node_id	= 0,
451 	},
452 	{
453 		.port		= 0,
454 		.flow_level	= false,
455 		.node_id	= 0,
456 		.loop_num	= 8,
457 		.pri_max	= 8,
458 		.flow_id	= 0,
459 		.drr_node_id	= 0,
460 	},
461 	{
462 		.port		= 0,
463 		.flow_level	= false,
464 		.node_id	= 8,
465 		.loop_num	= 8,
466 		.pri_max	= 8,
467 		.flow_id	= 0,
468 		.drr_node_id	= 0,
469 	},
470 	{
471 		.port		= 0,
472 		.flow_level	= false,
473 		.node_id	= 16,
474 		.loop_num	= 8,
475 		.pri_max	= 8,
476 		.flow_id	= 0,
477 		.drr_node_id	= 0,
478 	},
479 	{
480 		.port		= 0,
481 		.flow_level	= false,
482 		.node_id	= 24,
483 		.loop_num	= 8,
484 		.pri_max	= 8,
485 		.flow_id	= 0,
486 		.drr_node_id	= 0,
487 	},
488 	{
489 		.port		= 0,
490 		.flow_level	= false,
491 		.node_id	= 32,
492 		.loop_num	= 8,
493 		.pri_max	= 8,
494 		.flow_id	= 0,
495 		.drr_node_id	= 0,
496 	},
497 	{
498 		.port		= 0,
499 		.flow_level	= false,
500 		.node_id	= 40,
501 		.loop_num	= 8,
502 		.pri_max	= 8,
503 		.flow_id	= 0,
504 		.drr_node_id	= 0,
505 	},
506 	{
507 		.port		= 0,
508 		.flow_level	= false,
509 		.node_id	= 48,
510 		.loop_num	= 8,
511 		.pri_max	= 8,
512 		.flow_id	= 0,
513 		.drr_node_id	= 0,
514 	},
515 	{
516 		.port		= 0,
517 		.flow_level	= false,
518 		.node_id	= 56,
519 		.loop_num	= 8,
520 		.pri_max	= 8,
521 		.flow_id	= 0,
522 		.drr_node_id	= 0,
523 	},
524 	{
525 		.port		= 0,
526 		.flow_level	= false,
527 		.node_id	= 256,
528 		.loop_num	= 8,
529 		.pri_max	= 8,
530 		.flow_id	= 0,
531 		.drr_node_id	= 0,
532 	},
533 	{
534 		.port		= 0,
535 		.flow_level	= false,
536 		.node_id	= 264,
537 		.loop_num	= 8,
538 		.pri_max	= 8,
539 		.flow_id	= 0,
540 		.drr_node_id	= 0,
541 	},
542 	{
543 		.port		= 1,
544 		.flow_level	= true,
545 		.node_id	= 36,
546 		.loop_num	= 2,
547 		.pri_max	= 0,
548 		.flow_id	= 1,
549 		.drr_node_id	= 8,
550 	},
551 	{
552 		.port		= 1,
553 		.flow_level	= false,
554 		.node_id	= 144,
555 		.loop_num	= 16,
556 		.pri_max	= 8,
557 		.flow_id	= 36,
558 		.drr_node_id	= 48,
559 	},
560 	{
561 		.port		= 1,
562 		.flow_level	= false,
563 		.node_id	= 272,
564 		.loop_num	= 4,
565 		.pri_max	= 4,
566 		.flow_id	= 36,
567 		.drr_node_id	= 48,
568 	},
569 	{
570 		.port		= 2,
571 		.flow_level	= true,
572 		.node_id	= 40,
573 		.loop_num	= 2,
574 		.pri_max	= 0,
575 		.flow_id	= 2,
576 		.drr_node_id	= 12,
577 	},
578 	{
579 		.port		= 2,
580 		.flow_level	= false,
581 		.node_id	= 160,
582 		.loop_num	= 16,
583 		.pri_max	= 8,
584 		.flow_id	= 40,
585 		.drr_node_id	= 64,
586 	},
587 	{
588 		.port		= 2,
589 		.flow_level	= false,
590 		.node_id	= 276,
591 		.loop_num	= 4,
592 		.pri_max	= 4,
593 		.flow_id	= 40,
594 		.drr_node_id	= 64,
595 	},
596 	{
597 		.port		= 3,
598 		.flow_level	= true,
599 		.node_id	= 44,
600 		.loop_num	= 2,
601 		.pri_max	= 0,
602 		.flow_id	= 3,
603 		.drr_node_id	= 16,
604 	},
605 	{
606 		.port		= 3,
607 		.flow_level	= false,
608 		.node_id	= 176,
609 		.loop_num	= 16,
610 		.pri_max	= 8,
611 		.flow_id	= 44,
612 		.drr_node_id	= 80,
613 	},
614 	{
615 		.port		= 3,
616 		.flow_level	= false,
617 		.node_id	= 280,
618 		.loop_num	= 4,
619 		.pri_max	= 4,
620 		.flow_id	= 44,
621 		.drr_node_id	= 80,
622 	},
623 	{
624 		.port		= 4,
625 		.flow_level	= true,
626 		.node_id	= 48,
627 		.loop_num	= 2,
628 		.pri_max	= 0,
629 		.flow_id	= 4,
630 		.drr_node_id	= 20,
631 	},
632 	{
633 		.port		= 4,
634 		.flow_level	= false,
635 		.node_id	= 192,
636 		.loop_num	= 16,
637 		.pri_max	= 8,
638 		.flow_id	= 48,
639 		.drr_node_id	= 96,
640 	},
641 	{
642 		.port		= 4,
643 		.flow_level	= false,
644 		.node_id	= 284,
645 		.loop_num	= 4,
646 		.pri_max	= 4,
647 		.flow_id	= 48,
648 		.drr_node_id	= 96,
649 	},
650 	{
651 		.port		= 5,
652 		.flow_level	= true,
653 		.node_id	= 52,
654 		.loop_num	= 2,
655 		.pri_max	= 0,
656 		.flow_id	= 5,
657 		.drr_node_id	= 24,
658 	},
659 	{
660 		.port		= 5,
661 		.flow_level	= false,
662 		.node_id	= 208,
663 		.loop_num	= 16,
664 		.pri_max	= 8,
665 		.flow_id	= 52,
666 		.drr_node_id	= 112,
667 	},
668 	{
669 		.port		= 5,
670 		.flow_level	= false,
671 		.node_id	= 288,
672 		.loop_num	= 4,
673 		.pri_max	= 4,
674 		.flow_id	= 52,
675 		.drr_node_id	= 112,
676 	},
677 	{
678 		.port		= 6,
679 		.flow_level	= true,
680 		.node_id	= 56,
681 		.loop_num	= 2,
682 		.pri_max	= 0,
683 		.flow_id	= 6,
684 		.drr_node_id	= 28,
685 	},
686 	{
687 		.port		= 6,
688 		.flow_level	= false,
689 		.node_id	= 224,
690 		.loop_num	= 16,
691 		.pri_max	= 8,
692 		.flow_id	= 56,
693 		.drr_node_id	= 128,
694 	},
695 	{
696 		.port		= 6,
697 		.flow_level	= false,
698 		.node_id	= 292,
699 		.loop_num	= 4,
700 		.pri_max	= 4,
701 		.flow_id	= 56,
702 		.drr_node_id	= 128,
703 	},
704 	{
705 		.port		= 7,
706 		.flow_level	= true,
707 		.node_id	= 60,
708 		.loop_num	= 2,
709 		.pri_max	= 0,
710 		.flow_id	= 7,
711 		.drr_node_id	= 32,
712 	},
713 	{
714 		.port		= 7,
715 		.flow_level	= false,
716 		.node_id	= 240,
717 		.loop_num	= 16,
718 		.pri_max	= 8,
719 		.flow_id	= 60,
720 		.drr_node_id	= 144,
721 	},
722 	{
723 		.port		= 7,
724 		.flow_level	= false,
725 		.node_id	= 296,
726 		.loop_num	= 4,
727 		.pri_max	= 4,
728 		.flow_id	= 60,
729 		.drr_node_id	= 144,
730 	},
731 };
732 
733 /* The scheduler resource is assigned per PPE port. The resource
734  * includes the unicast and multicast queues, flow nodes and DRR nodes.
735  */
736 static const struct ppe_port_schedule_resource ppe_scheduler_res[] = {
737 	{	.ucastq_start	= 0,
738 		.ucastq_end	= 63,
739 		.mcastq_start	= 256,
740 		.mcastq_end	= 271,
741 		.flow_id_start	= 0,
742 		.flow_id_end	= 0,
743 		.l0node_start	= 0,
744 		.l0node_end	= 7,
745 		.l1node_start	= 0,
746 		.l1node_end	= 0,
747 	},
748 	{	.ucastq_start	= 144,
749 		.ucastq_end	= 159,
750 		.mcastq_start	= 272,
751 		.mcastq_end	= 275,
752 		.flow_id_start	= 36,
753 		.flow_id_end	= 39,
754 		.l0node_start	= 48,
755 		.l0node_end	= 63,
756 		.l1node_start	= 8,
757 		.l1node_end	= 11,
758 	},
759 	{	.ucastq_start	= 160,
760 		.ucastq_end	= 175,
761 		.mcastq_start	= 276,
762 		.mcastq_end	= 279,
763 		.flow_id_start	= 40,
764 		.flow_id_end	= 43,
765 		.l0node_start	= 64,
766 		.l0node_end	= 79,
767 		.l1node_start	= 12,
768 		.l1node_end	= 15,
769 	},
770 	{	.ucastq_start	= 176,
771 		.ucastq_end	= 191,
772 		.mcastq_start	= 280,
773 		.mcastq_end	= 283,
774 		.flow_id_start	= 44,
775 		.flow_id_end	= 47,
776 		.l0node_start	= 80,
777 		.l0node_end	= 95,
778 		.l1node_start	= 16,
779 		.l1node_end	= 19,
780 	},
781 	{	.ucastq_start	= 192,
782 		.ucastq_end	= 207,
783 		.mcastq_start	= 284,
784 		.mcastq_end	= 287,
785 		.flow_id_start	= 48,
786 		.flow_id_end	= 51,
787 		.l0node_start	= 96,
788 		.l0node_end	= 111,
789 		.l1node_start	= 20,
790 		.l1node_end	= 23,
791 	},
792 	{	.ucastq_start	= 208,
793 		.ucastq_end	= 223,
794 		.mcastq_start	= 288,
795 		.mcastq_end	= 291,
796 		.flow_id_start	= 52,
797 		.flow_id_end	= 55,
798 		.l0node_start	= 112,
799 		.l0node_end	= 127,
800 		.l1node_start	= 24,
801 		.l1node_end	= 27,
802 	},
803 	{	.ucastq_start	= 224,
804 		.ucastq_end	= 239,
805 		.mcastq_start	= 292,
806 		.mcastq_end	= 295,
807 		.flow_id_start	= 56,
808 		.flow_id_end	= 59,
809 		.l0node_start	= 128,
810 		.l0node_end	= 143,
811 		.l1node_start	= 28,
812 		.l1node_end	= 31,
813 	},
814 	{	.ucastq_start	= 240,
815 		.ucastq_end	= 255,
816 		.mcastq_start	= 296,
817 		.mcastq_end	= 299,
818 		.flow_id_start	= 60,
819 		.flow_id_end	= 63,
820 		.l0node_start	= 144,
821 		.l0node_end	= 159,
822 		.l1node_start	= 32,
823 		.l1node_end	= 35,
824 	},
825 	{	.ucastq_start	= 64,
826 		.ucastq_end	= 143,
827 		.mcastq_start	= 0,
828 		.mcastq_end	= 0,
829 		.flow_id_start	= 1,
830 		.flow_id_end	= 35,
831 		.l0node_start	= 8,
832 		.l0node_end	= 47,
833 		.l1node_start	= 1,
834 		.l1node_end	= 7,
835 	},
836 };
837 
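/* Cross-reference note (illustrative, derived from the tables above): entry 1
 * of this table describes PPE port 1 and lines up with the port 1 entries of
 * ppe_port_sch_config[]: unicast queues 144-159 and multicast queues 272-275
 * correspond to the queue level entries with node_id 144 and 272, flow IDs
 * 36-39 to the flow level node_id 36, and the L0/L1 node ranges 48-63 and
 * 8-11 to the DRR node IDs 48 and 8.
 */
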
838 /* Set the PPE queue level scheduler configuration. */
839 static int ppe_scheduler_l0_queue_map_set(struct ppe_device *ppe_dev,
840 					  int node_id, int port,
841 					  struct ppe_scheduler_cfg scheduler_cfg)
842 {
843 	u32 val, reg;
844 	int ret;
845 
846 	reg = PPE_L0_FLOW_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_MAP_TBL_INC;
847 	val = FIELD_PREP(PPE_L0_FLOW_MAP_TBL_FLOW_ID, scheduler_cfg.flow_id);
848 	val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_PRI, scheduler_cfg.pri);
849 	val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_PRI, scheduler_cfg.pri);
850 	val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_NODE_WT, scheduler_cfg.drr_node_wt);
851 	val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_NODE_WT, scheduler_cfg.drr_node_wt);
852 
853 	ret = regmap_write(ppe_dev->regmap, reg, val);
854 	if (ret)
855 		return ret;
856 
857 	reg = PPE_L0_C_FLOW_CFG_TBL_ADDR +
858 	      (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
859 	      PPE_L0_C_FLOW_CFG_TBL_INC;
860 	val = FIELD_PREP(PPE_L0_C_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
861 	val |= FIELD_PREP(PPE_L0_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
862 
863 	ret = regmap_write(ppe_dev->regmap, reg, val);
864 	if (ret)
865 		return ret;
866 
867 	reg = PPE_L0_E_FLOW_CFG_TBL_ADDR +
868 	      (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
869 	      PPE_L0_E_FLOW_CFG_TBL_INC;
870 	val = FIELD_PREP(PPE_L0_E_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
871 	val |= FIELD_PREP(PPE_L0_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
872 
873 	ret = regmap_write(ppe_dev->regmap, reg, val);
874 	if (ret)
875 		return ret;
876 
877 	reg = PPE_L0_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_PORT_MAP_TBL_INC;
878 	val = FIELD_PREP(PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM, port);
879 
880 	ret = regmap_write(ppe_dev->regmap, reg, val);
881 	if (ret)
882 		return ret;
883 
884 	reg = PPE_L0_COMP_CFG_TBL_ADDR + node_id * PPE_L0_COMP_CFG_TBL_INC;
885 	val = FIELD_PREP(PPE_L0_COMP_CFG_TBL_NODE_METER_LEN, scheduler_cfg.frame_mode);
886 
887 	return regmap_update_bits(ppe_dev->regmap, reg,
888 				  PPE_L0_COMP_CFG_TBL_NODE_METER_LEN,
889 				  val);
890 }
891 
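/* Worked example (illustrative only): with flow_id 36 and pri 1, the consumer
 * flow table index is 36 * PPE_QUEUE_SCH_PRI_NUM + 1 = 289, so the entry is
 * written at PPE_L0_C_FLOW_CFG_TBL_ADDR + 289 * PPE_L0_C_FLOW_CFG_TBL_INC;
 * the egress flow table uses the same index against
 * PPE_L0_E_FLOW_CFG_TBL_ADDR.
 */
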
892 /* Set the PPE flow level scheduler configuration. */
893 static int ppe_scheduler_l1_queue_map_set(struct ppe_device *ppe_dev,
894 					  int node_id, int port,
895 					  struct ppe_scheduler_cfg scheduler_cfg)
896 {
897 	u32 val, reg;
898 	int ret;
899 
900 	val = FIELD_PREP(PPE_L1_FLOW_MAP_TBL_FLOW_ID, scheduler_cfg.flow_id);
901 	val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_PRI, scheduler_cfg.pri);
902 	val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_PRI, scheduler_cfg.pri);
903 	val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_NODE_WT, scheduler_cfg.drr_node_wt);
904 	val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_NODE_WT, scheduler_cfg.drr_node_wt);
905 	reg = PPE_L1_FLOW_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_MAP_TBL_INC;
906 
907 	ret = regmap_write(ppe_dev->regmap, reg, val);
908 	if (ret)
909 		return ret;
910 
911 	val = FIELD_PREP(PPE_L1_C_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
912 	val |= FIELD_PREP(PPE_L1_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
913 	reg = PPE_L1_C_FLOW_CFG_TBL_ADDR +
914 	      (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
915 	      PPE_L1_C_FLOW_CFG_TBL_INC;
916 
917 	ret = regmap_write(ppe_dev->regmap, reg, val);
918 	if (ret)
919 		return ret;
920 
921 	val = FIELD_PREP(PPE_L1_E_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
922 	val |= FIELD_PREP(PPE_L1_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
923 	reg = PPE_L1_E_FLOW_CFG_TBL_ADDR +
924 		(scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
925 		PPE_L1_E_FLOW_CFG_TBL_INC;
926 
927 	ret = regmap_write(ppe_dev->regmap, reg, val);
928 	if (ret)
929 		return ret;
930 
931 	val = FIELD_PREP(PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM, port);
932 	reg = PPE_L1_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_PORT_MAP_TBL_INC;
933 
934 	ret = regmap_write(ppe_dev->regmap, reg, val);
935 	if (ret)
936 		return ret;
937 
938 	reg = PPE_L1_COMP_CFG_TBL_ADDR + node_id * PPE_L1_COMP_CFG_TBL_INC;
939 	val = FIELD_PREP(PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, scheduler_cfg.frame_mode);
940 
941 	return regmap_update_bits(ppe_dev->regmap, reg, PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, val);
942 }
943 
944 /**
945  * ppe_queue_scheduler_set - Configure scheduler for PPE hardware queue
946  * @ppe_dev: PPE device
947  * @node_id: PPE queue ID or flow ID
948  * @flow_level: Flow level scheduler or queue level scheduler
949  * @port: PPE port ID for which the scheduler configuration is set
950  * @scheduler_cfg: PPE scheduler configuration
951  *
952  * PPE scheduler configuration supports queue level and flow level on
953  * the PPE egress port.
954  *
955  * Return: 0 on success, negative error code on failure.
956  */
957 int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
958 			    int node_id, bool flow_level, int port,
959 			    struct ppe_scheduler_cfg scheduler_cfg)
960 {
961 	if (flow_level)
962 		return ppe_scheduler_l1_queue_map_set(ppe_dev, node_id,
963 						      port, scheduler_cfg);
964 
965 	return ppe_scheduler_l0_queue_map_set(ppe_dev, node_id,
966 					      port, scheduler_cfg);
967 }
968 
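/* Usage sketch (illustrative only; the values are taken from the port 1 queue
 * level entry of ppe_port_sch_config[] and are not a required configuration):
 * a caller attaching queue 153 of PPE port 1 to flow 37 with byte based DRR
 * scheduling might do:
 *
 *	struct ppe_scheduler_cfg cfg = {
 *		.flow_id = 37,
 *		.pri = 1,
 *		.drr_node_id = 57,
 *		.drr_node_wt = 1,
 *		.unit_is_packet = false,
 *		.frame_mode = PPE_SCH_WITH_FRAME_CRC,
 *	};
 *
 *	ret = ppe_queue_scheduler_set(ppe_dev, 153, false, 1, cfg);
 */
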
969 /**
970  * ppe_queue_ucast_base_set - Set PPE unicast queue base ID and profile ID
971  * @ppe_dev: PPE device
972  * @queue_dst: PPE queue destination configuration
973  * @queue_base: PPE queue base ID
974  * @profile_id: Profile ID
975  *
976  * The PPE unicast queue base ID and profile ID are configured based on the
977  * destination information, which can be the service code, the CPU code or the
978  * destination port.
979  *
980  * Return: 0 on success, negative error code on failure.
981  */
982 int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
983 			     struct ppe_queue_ucast_dest queue_dst,
984 			     int queue_base, int profile_id)
985 {
986 	int index, profile_size;
987 	u32 val, reg;
988 
989 	profile_size = queue_dst.src_profile << 8;
990 	if (queue_dst.service_code_en)
991 		index = PPE_QUEUE_BASE_SERVICE_CODE + profile_size +
992 			queue_dst.service_code;
993 	else if (queue_dst.cpu_code_en)
994 		index = PPE_QUEUE_BASE_CPU_CODE + profile_size +
995 			queue_dst.cpu_code;
996 	else
997 		index = profile_size + queue_dst.dest_port;
998 
999 	val = FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID, profile_id);
1000 	val |= FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID, queue_base);
1001 	reg = PPE_UCAST_QUEUE_MAP_TBL_ADDR + index * PPE_UCAST_QUEUE_MAP_TBL_INC;
1002 
1003 	return regmap_write(ppe_dev->regmap, reg, val);
1004 }
1005 
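/* Index example (illustrative only): with src_profile 0 and neither the
 * service code nor the CPU code enabled, dest_port 3 selects entry 3 of
 * PPE_UCAST_QUEUE_MAP_TBL; still with src_profile 0 but cpu_code_en set and
 * cpu_code 101 (the CPU code used for ARP replies in ppe_queue_dest_init()
 * below), the entry is PPE_QUEUE_BASE_CPU_CODE + 101.
 */
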
1006 /**
1007  * ppe_queue_ucast_offset_pri_set - Set PPE unicast queue offset based on priority
1008  * @ppe_dev: PPE device
1009  * @profile_id: Profile ID
1010  * @priority: PPE internal priority to be used to set queue offset
1011  * @queue_offset: Queue offset used for calculating the destination queue ID
1012  *
1013  * The PPE unicast queue offset is configured based on the PPE
1014  * internal priority.
1015  *
1016  * Return: 0 on success, negative error code on failure.
1017  */
1018 int ppe_queue_ucast_offset_pri_set(struct ppe_device *ppe_dev,
1019 				   int profile_id,
1020 				   int priority,
1021 				   int queue_offset)
1022 {
1023 	u32 val, reg;
1024 	int index;
1025 
1026 	index = (profile_id << 4) + priority;
1027 	val = FIELD_PREP(PPE_UCAST_PRIORITY_MAP_TBL_CLASS, queue_offset);
1028 	reg = PPE_UCAST_PRIORITY_MAP_TBL_ADDR + index * PPE_UCAST_PRIORITY_MAP_TBL_INC;
1029 
1030 	return regmap_write(ppe_dev->regmap, reg, val);
1031 }
1032 
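/* Index example (illustrative only): each profile owns 16 consecutive
 * entries, so profile_id 2 with priority 5 selects entry (2 << 4) + 5 = 37
 * of PPE_UCAST_PRIORITY_MAP_TBL.
 */
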
1033 /**
1034  * ppe_queue_ucast_offset_hash_set - Set PPE unicast queue offset based on hash
1035  * @ppe_dev: PPE device
1036  * @profile_id: Profile ID
1037  * @rss_hash: Packet hash value to be used to set queue offset
1038  * @queue_offset: Queue offset used for calculating the destination queue ID
1039  *
1040  * The PPE unicast queue offset is configured based on the RSS hash value.
1041  *
1042  * Return: 0 on success, negative error code on failure.
1043  */
1044 int ppe_queue_ucast_offset_hash_set(struct ppe_device *ppe_dev,
1045 				    int profile_id,
1046 				    int rss_hash,
1047 				    int queue_offset)
1048 {
1049 	u32 val, reg;
1050 	int index;
1051 
1052 	index = (profile_id << 8) + rss_hash;
1053 	val = FIELD_PREP(PPE_UCAST_HASH_MAP_TBL_HASH, queue_offset);
1054 	reg = PPE_UCAST_HASH_MAP_TBL_ADDR + index * PPE_UCAST_HASH_MAP_TBL_INC;
1055 
1056 	return regmap_write(ppe_dev->regmap, reg, val);
1057 }
1058 
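/* Index example (illustrative only): each profile owns 256 consecutive
 * entries, so profile_id 1 with rss_hash 0x2a selects entry
 * (1 << 8) + 0x2a = 298 of PPE_UCAST_HASH_MAP_TBL.
 */
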
1059 /**
1060  * ppe_port_resource_get - Get PPE resource per port
1061  * @ppe_dev: PPE device
1062  * @port: PPE port
1063  * @type: Resource type
1064  * @res_start: Resource start ID returned
1065  * @res_end: Resource end ID returned
1066  *
1067  * The PPE resource is assigned per PPE port and is acquired for the QoS scheduler.
1068  *
1069  * Return: 0 on success, negative error code on failure.
1070  */
1071 int ppe_port_resource_get(struct ppe_device *ppe_dev, int port,
1072 			  enum ppe_resource_type type,
1073 			  int *res_start, int *res_end)
1074 {
1075 	struct ppe_port_schedule_resource res;
1076 
1077 	/* The reserved resource, indexed by the maximum PPE port ID, is
1078 	 * also allowed to be acquired.
1079 	 */
1080 	if (port > ppe_dev->num_ports)
1081 		return -EINVAL;
1082 
1083 	res = ppe_scheduler_res[port];
1084 	switch (type) {
1085 	case PPE_RES_UCAST:
1086 		*res_start = res.ucastq_start;
1087 		*res_end = res.ucastq_end;
1088 		break;
1089 	case PPE_RES_MCAST:
1090 		*res_start = res.mcastq_start;
1091 		*res_end = res.mcastq_end;
1092 		break;
1093 	case PPE_RES_FLOW_ID:
1094 		*res_start = res.flow_id_start;
1095 		*res_end = res.flow_id_end;
1096 		break;
1097 	case PPE_RES_L0_NODE:
1098 		*res_start = res.l0node_start;
1099 		*res_end = res.l0node_end;
1100 		break;
1101 	case PPE_RES_L1_NODE:
1102 		*res_start = res.l1node_start;
1103 		*res_end = res.l1node_end;
1104 		break;
1105 	default:
1106 		return -EINVAL;
1107 	}
1108 
1109 	return 0;
1110 }
1111 
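/* Usage sketch (illustrative only): a caller looking up the unicast queue
 * range of PPE port 1 gets 144-159 back, per ppe_scheduler_res[] above:
 *
 *	int uq_start, uq_end;
 *	int ret;
 *
 *	ret = ppe_port_resource_get(ppe_dev, 1, PPE_RES_UCAST,
 *				    &uq_start, &uq_end);
 */
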
1112 /**
1113  * ppe_sc_config_set - Set PPE service code configuration
1114  * @ppe_dev: PPE device
1115  * @sc: Service ID, 0-255 supported by PPE
1116  * @cfg: Service code configuration
1117  *
1118  * The service code is used by the PPE during its packet processing
1119  * stages to perform or bypass certain selected operations on the packet.
1120  *
1121  * Return: 0 on success, negative error code on failure.
1122  */
1123 int ppe_sc_config_set(struct ppe_device *ppe_dev, int sc, struct ppe_sc_cfg cfg)
1124 {
1125 	u32 val, reg, servcode_val[2] = {};
1126 	unsigned long bitmap_value;
1127 	int ret;
1128 
1129 	val = FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_PORT_ID_VALID, cfg.dest_port_valid);
1130 	val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_PORT_ID, cfg.dest_port);
1131 	val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_DIRECTION, cfg.is_src);
1132 
1133 	bitmap_value = bitmap_read(cfg.bitmaps.egress, 0, PPE_SC_BYPASS_EGRESS_SIZE);
1134 	val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_BYPASS_BITMAP, bitmap_value);
1135 	val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_RX_CNT_EN,
1136 			  test_bit(PPE_SC_BYPASS_COUNTER_RX, cfg.bitmaps.counter));
1137 	val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_TX_CNT_EN,
1138 			  test_bit(PPE_SC_BYPASS_COUNTER_TX, cfg.bitmaps.counter));
1139 	reg = PPE_IN_L2_SERVICE_TBL_ADDR + PPE_IN_L2_SERVICE_TBL_INC * sc;
1140 
1141 	ret = regmap_write(ppe_dev->regmap, reg, val);
1142 	if (ret)
1143 		return ret;
1144 
1145 	bitmap_value = bitmap_read(cfg.bitmaps.ingress, 0, PPE_SC_BYPASS_INGRESS_SIZE);
1146 	PPE_SERVICE_SET_BYPASS_BITMAP(servcode_val, bitmap_value);
1147 	PPE_SERVICE_SET_RX_CNT_EN(servcode_val,
1148 				  test_bit(PPE_SC_BYPASS_COUNTER_RX_VLAN, cfg.bitmaps.counter));
1149 	reg = PPE_SERVICE_TBL_ADDR + PPE_SERVICE_TBL_INC * sc;
1150 
1151 	ret = regmap_bulk_write(ppe_dev->regmap, reg,
1152 				servcode_val, ARRAY_SIZE(servcode_val));
1153 	if (ret)
1154 		return ret;
1155 
1156 	reg = PPE_EG_SERVICE_TBL_ADDR + PPE_EG_SERVICE_TBL_INC * sc;
1157 	ret = regmap_bulk_read(ppe_dev->regmap, reg,
1158 			       servcode_val, ARRAY_SIZE(servcode_val));
1159 	if (ret)
1160 		return ret;
1161 
1162 	PPE_EG_SERVICE_SET_NEXT_SERVCODE(servcode_val, cfg.next_service_code);
1163 	PPE_EG_SERVICE_SET_UPDATE_ACTION(servcode_val, cfg.eip_field_update_bitmap);
1164 	PPE_EG_SERVICE_SET_HW_SERVICE(servcode_val, cfg.eip_hw_service);
1165 	PPE_EG_SERVICE_SET_OFFSET_SEL(servcode_val, cfg.eip_offset_sel);
1166 	PPE_EG_SERVICE_SET_TX_CNT_EN(servcode_val,
1167 				     test_bit(PPE_SC_BYPASS_COUNTER_TX_VLAN, cfg.bitmaps.counter));
1168 
1169 	ret = regmap_bulk_write(ppe_dev->regmap, reg,
1170 				servcode_val, ARRAY_SIZE(servcode_val));
1171 	if (ret)
1172 		return ret;
1173 
1174 	bitmap_value = bitmap_read(cfg.bitmaps.tunnel, 0, PPE_SC_BYPASS_TUNNEL_SIZE);
1175 	val = FIELD_PREP(PPE_TL_SERVICE_TBL_BYPASS_BITMAP, bitmap_value);
1176 	reg = PPE_TL_SERVICE_TBL_ADDR + PPE_TL_SERVICE_TBL_INC * sc;
1177 
1178 	return regmap_write(ppe_dev->regmap, reg, val);
1179 }
1180 
1181 /**
1182  * ppe_counter_enable_set - Set PPE port counter enabled
1183  * @ppe_dev: PPE device
1184  * @port: PPE port ID
1185  *
1186  * Enable PPE counters on the given port for the unicast, multicast and
1187  * VLAN packets received and transmitted by the PPE.
1188  *
1189  * Return: 0 on success, negative error code on failure.
1190  */
1191 int ppe_counter_enable_set(struct ppe_device *ppe_dev, int port)
1192 {
1193 	u32 reg, mru_mtu_val[3];
1194 	int ret;
1195 
1196 	reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * port;
1197 	ret = regmap_bulk_read(ppe_dev->regmap, reg,
1198 			       mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
1199 	if (ret)
1200 		return ret;
1201 
1202 	PPE_MRU_MTU_CTRL_SET_RX_CNT_EN(mru_mtu_val, true);
1203 	PPE_MRU_MTU_CTRL_SET_TX_CNT_EN(mru_mtu_val, true);
1204 	ret = regmap_bulk_write(ppe_dev->regmap, reg,
1205 				mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
1206 	if (ret)
1207 		return ret;
1208 
1209 	reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * port;
1210 	ret = regmap_set_bits(ppe_dev->regmap, reg, PPE_MC_MTU_CTRL_TBL_TX_CNT_EN);
1211 	if (ret)
1212 		return ret;
1213 
1214 	reg = PPE_PORT_EG_VLAN_TBL_ADDR + PPE_PORT_EG_VLAN_TBL_INC * port;
1215 
1216 	return regmap_set_bits(ppe_dev->regmap, reg, PPE_PORT_EG_VLAN_TBL_TX_COUNTING_EN);
1217 }
1218 
1219 static int ppe_rss_hash_ipv4_config(struct ppe_device *ppe_dev, int index,
1220 				    struct ppe_rss_hash_cfg cfg)
1221 {
1222 	u32 reg, val;
1223 
1224 	switch (index) {
1225 	case 0:
1226 		val = cfg.hash_sip_mix[0];
1227 		break;
1228 	case 1:
1229 		val = cfg.hash_dip_mix[0];
1230 		break;
1231 	case 2:
1232 		val = cfg.hash_protocol_mix;
1233 		break;
1234 	case 3:
1235 		val = cfg.hash_dport_mix;
1236 		break;
1237 	case 4:
1238 		val = cfg.hash_sport_mix;
1239 		break;
1240 	default:
1241 		return -EINVAL;
1242 	}
1243 
1244 	reg = PPE_RSS_HASH_MIX_IPV4_ADDR + index * PPE_RSS_HASH_MIX_IPV4_INC;
1245 
1246 	return regmap_update_bits(ppe_dev->regmap, reg,
1247 				  PPE_RSS_HASH_MIX_IPV4_VAL,
1248 				  FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, val));
1249 }
1250 
1251 static int ppe_rss_hash_ipv6_config(struct ppe_device *ppe_dev, int index,
1252 				    struct ppe_rss_hash_cfg cfg)
1253 {
1254 	u32 reg, val;
1255 
1256 	switch (index) {
1257 	case 0 ... 3:
1258 		val = cfg.hash_sip_mix[index];
1259 		break;
1260 	case 4 ... 7:
1261 		val = cfg.hash_dip_mix[index - 4];
1262 		break;
1263 	case 8:
1264 		val = cfg.hash_protocol_mix;
1265 		break;
1266 	case 9:
1267 		val = cfg.hash_dport_mix;
1268 		break;
1269 	case 10:
1270 		val = cfg.hash_sport_mix;
1271 		break;
1272 	default:
1273 		return -EINVAL;
1274 	}
1275 
1276 	reg = PPE_RSS_HASH_MIX_ADDR + index * PPE_RSS_HASH_MIX_INC;
1277 
1278 	return regmap_update_bits(ppe_dev->regmap, reg,
1279 				  PPE_RSS_HASH_MIX_VAL,
1280 				  FIELD_PREP(PPE_RSS_HASH_MIX_VAL, val));
1281 }
1282 
1283 /**
1284  * ppe_rss_hash_config_set - Configure the PPE hash settings for received packets.
1285  * @ppe_dev: PPE device.
1286  * @mode: Configure RSS hash for the IPv4 and IPv6 packet types.
1287  * @cfg: RSS hash configuration.
1288  *
1289  * PPE RSS hash settings are configured for the IPv4 and IPv6 packet types.
1290  *
1291  * Return: 0 on success, negative error code on failure.
1292  */
1293 int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode,
1294 			    struct ppe_rss_hash_cfg cfg)
1295 {
1296 	u32 val, reg;
1297 	int i, ret;
1298 
1299 	if (mode & PPE_RSS_HASH_MODE_IPV4) {
1300 		val = FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_HASH_MASK, cfg.hash_mask);
1301 		val |= FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_FRAGMENT, cfg.hash_fragment_mode);
1302 		ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_MASK_IPV4_ADDR, val);
1303 		if (ret)
1304 			return ret;
1305 
1306 		val = FIELD_PREP(PPE_RSS_HASH_SEED_IPV4_VAL, cfg.hash_seed);
1307 		ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_SEED_IPV4_ADDR, val);
1308 		if (ret)
1309 			return ret;
1310 
1311 		for (i = 0; i < PPE_RSS_HASH_MIX_IPV4_ENTRIES; i++) {
1312 			ret = ppe_rss_hash_ipv4_config(ppe_dev, i, cfg);
1313 			if (ret)
1314 				return ret;
1315 		}
1316 
1317 		for (i = 0; i < PPE_RSS_HASH_FIN_IPV4_ENTRIES; i++) {
1318 			val = FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_INNER, cfg.hash_fin_inner[i]);
1319 			val |= FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_OUTER, cfg.hash_fin_outer[i]);
1320 			reg = PPE_RSS_HASH_FIN_IPV4_ADDR + i * PPE_RSS_HASH_FIN_IPV4_INC;
1321 
1322 			ret = regmap_write(ppe_dev->regmap, reg, val);
1323 			if (ret)
1324 				return ret;
1325 		}
1326 	}
1327 
1328 	if (mode & PPE_RSS_HASH_MODE_IPV6) {
1329 		val = FIELD_PREP(PPE_RSS_HASH_MASK_HASH_MASK, cfg.hash_mask);
1330 		val |= FIELD_PREP(PPE_RSS_HASH_MASK_FRAGMENT, cfg.hash_fragment_mode);
1331 		ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_MASK_ADDR, val);
1332 		if (ret)
1333 			return ret;
1334 
1335 		val = FIELD_PREP(PPE_RSS_HASH_SEED_VAL, cfg.hash_seed);
1336 		ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_SEED_ADDR, val);
1337 		if (ret)
1338 			return ret;
1339 
1340 		for (i = 0; i < PPE_RSS_HASH_MIX_ENTRIES; i++) {
1341 			ret = ppe_rss_hash_ipv6_config(ppe_dev, i, cfg);
1342 			if (ret)
1343 				return ret;
1344 		}
1345 
1346 		for (i = 0; i < PPE_RSS_HASH_FIN_ENTRIES; i++) {
1347 			val = FIELD_PREP(PPE_RSS_HASH_FIN_INNER, cfg.hash_fin_inner[i]);
1348 			val |= FIELD_PREP(PPE_RSS_HASH_FIN_OUTER, cfg.hash_fin_outer[i]);
1349 			reg = PPE_RSS_HASH_FIN_ADDR + i * PPE_RSS_HASH_FIN_INC;
1350 
1351 			ret = regmap_write(ppe_dev->regmap, reg, val);
1352 			if (ret)
1353 				return ret;
1354 		}
1355 	}
1356 
1357 	return 0;
1358 }
1359 
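/* Usage sketch (illustrative only; the field values below are arbitrary and
 * the remaining mix/final hash fields are omitted): a caller enabling the
 * IPv4 hash might do:
 *
 *	struct ppe_rss_hash_cfg cfg = {
 *		.hash_seed = 0x6b5a6b5a,
 *		.hash_mask = 0xfff,
 *		.hash_fragment_mode = false,
 *	};
 *
 *	ret = ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV4, cfg);
 */
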
1360 /**
1361  * ppe_ring_queue_map_set - Set the PPE queue to Ethernet DMA ring mapping
1362  * @ppe_dev: PPE device
1363  * @ring_id: Ethernet DMA ring ID
1364  * @queue_map: Bitmap of the queue IDs mapped to the given Ethernet DMA ring
1365  *
1366  * Configure the mapping from a set of PPE queues to a given Ethernet DMA ring.
1367  *
1368  * Return: 0 on success, negative error code on failure.
1369  */
1370 int ppe_ring_queue_map_set(struct ppe_device *ppe_dev, int ring_id, u32 *queue_map)
1371 {
1372 	u32 reg, queue_bitmap_val[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT];
1373 
1374 	memcpy(queue_bitmap_val, queue_map, sizeof(queue_bitmap_val));
1375 	reg = PPE_RING_Q_MAP_TBL_ADDR + PPE_RING_Q_MAP_TBL_INC * ring_id;
1376 
1377 	return regmap_bulk_write(ppe_dev->regmap, reg,
1378 				 queue_bitmap_val,
1379 				 ARRAY_SIZE(queue_bitmap_val));
1380 }
1381 
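/* Usage sketch (illustrative only, assuming bit n of the bitmap selects PPE
 * queue n): a caller mapping PPE queues 0-7 to Ethernet DMA ring 0 might do:
 *
 *	u32 qmap[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT] = {};
 *
 *	qmap[0] = GENMASK(7, 0);
 *	ret = ppe_ring_queue_map_set(ppe_dev, 0, qmap);
 */
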
1382 static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
1383 				   const struct ppe_bm_port_config port_cfg)
1384 {
1385 	u32 reg, val, bm_fc_val[2];
1386 	int ret;
1387 
1388 	reg = PPE_BM_PORT_FC_CFG_TBL_ADDR + PPE_BM_PORT_FC_CFG_TBL_INC * bm_port_id;
1389 	ret = regmap_bulk_read(ppe_dev->regmap, reg,
1390 			       bm_fc_val, ARRAY_SIZE(bm_fc_val));
1391 	if (ret)
1392 		return ret;
1393 
1394 	/* Configure BM flow control related threshold. */
1395 	PPE_BM_PORT_FC_SET_WEIGHT(bm_fc_val, port_cfg.weight);
1396 	PPE_BM_PORT_FC_SET_RESUME_OFFSET(bm_fc_val, port_cfg.resume_offset);
1397 	PPE_BM_PORT_FC_SET_RESUME_THRESHOLD(bm_fc_val, port_cfg.resume_ceil);
1398 	PPE_BM_PORT_FC_SET_DYNAMIC(bm_fc_val, port_cfg.dynamic);
1399 	PPE_BM_PORT_FC_SET_REACT_LIMIT(bm_fc_val, port_cfg.in_fly_buf);
1400 	PPE_BM_PORT_FC_SET_PRE_ALLOC(bm_fc_val, port_cfg.pre_alloc);
1401 
1402 	/* Configure low/high bits of the ceiling for the BM port. */
1403 	val = FIELD_GET(GENMASK(2, 0), port_cfg.ceil);
1404 	PPE_BM_PORT_FC_SET_CEILING_LOW(bm_fc_val, val);
1405 	val = FIELD_GET(GENMASK(10, 3), port_cfg.ceil);
1406 	PPE_BM_PORT_FC_SET_CEILING_HIGH(bm_fc_val, val);
1407 
1408 	ret = regmap_bulk_write(ppe_dev->regmap, reg,
1409 				bm_fc_val, ARRAY_SIZE(bm_fc_val));
1410 	if (ret)
1411 		return ret;
1412 
1413 	/* Assign the default group ID 0 to the BM port. */
1414 	val = FIELD_PREP(PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID, 0);
1415 	reg = PPE_BM_PORT_GROUP_ID_ADDR + PPE_BM_PORT_GROUP_ID_INC * bm_port_id;
1416 	ret = regmap_update_bits(ppe_dev->regmap, reg,
1417 				 PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID,
1418 				 val);
1419 	if (ret)
1420 		return ret;
1421 
1422 	/* Enable BM port flow control. */
1423 	reg = PPE_BM_PORT_FC_MODE_ADDR + PPE_BM_PORT_FC_MODE_INC * bm_port_id;
1424 
1425 	return regmap_set_bits(ppe_dev->regmap, reg, PPE_BM_PORT_FC_MODE_EN);
1426 }
1427 
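/* Ceiling split example (illustrative only): for the EDMA BM port 0 entry of
 * ipq9574_ppe_bm_port_config[], ceil is 1146 (binary 100_0111_1010), so the
 * low ceiling field is written with bits [2:0] = 2 and the high ceiling
 * field with bits [10:3] = 143 (143 * 8 + 2 = 1146).
 */
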
1428 /* Configure the buffer threshold for the port flow control function. */
1429 static int ppe_config_bm(struct ppe_device *ppe_dev)
1430 {
1431 	const struct ppe_bm_port_config *port_cfg;
1432 	unsigned int i, bm_port_id, port_cfg_cnt;
1433 	u32 reg, val;
1434 	int ret;
1435 
1436 	/* Configure the allocated buffer number only for group 0.
1437 	 * The buffer number of groups 1-3 is already cleared to 0
1438 	 * after PPE reset during the probe of the PPE driver.
1439 	 */
1440 	reg = PPE_BM_SHARED_GROUP_CFG_ADDR;
1441 	val = FIELD_PREP(PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT,
1442 			 ipq9574_ppe_bm_group_config);
1443 	ret = regmap_update_bits(ppe_dev->regmap, reg,
1444 				 PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT,
1445 				 val);
1446 	if (ret)
1447 		goto bm_config_fail;
1448 
1449 	/* Configure buffer thresholds for the BM ports. */
1450 	port_cfg = ipq9574_ppe_bm_port_config;
1451 	port_cfg_cnt = ARRAY_SIZE(ipq9574_ppe_bm_port_config);
1452 	for (i = 0; i < port_cfg_cnt; i++) {
1453 		for (bm_port_id = port_cfg[i].port_id_start;
1454 		     bm_port_id <= port_cfg[i].port_id_end; bm_port_id++) {
1455 			ret = ppe_config_bm_threshold(ppe_dev, bm_port_id,
1456 						      port_cfg[i]);
1457 			if (ret)
1458 				goto bm_config_fail;
1459 		}
1460 	}
1461 
1462 	return 0;
1463 
1464 bm_config_fail:
1465 	dev_err(ppe_dev->dev, "PPE BM config error %d\n", ret);
1466 	return ret;
1467 }
1468 
1469 /* Configure the PPE hardware queue depth, which is decided by the
1470  * queue threshold.
1471  */
1472 static int ppe_config_qm(struct ppe_device *ppe_dev)
1473 {
1474 	const struct ppe_qm_queue_config *queue_cfg;
1475 	int ret, i, queue_id, queue_cfg_count;
1476 	u32 reg, multicast_queue_cfg[5];
1477 	u32 unicast_queue_cfg[4];
1478 	u32 group_cfg[3];
1479 
1480 	/* Assign the buffer number to the group 0 by default. */
1481 	reg = PPE_AC_GRP_CFG_TBL_ADDR;
1482 	ret = regmap_bulk_read(ppe_dev->regmap, reg,
1483 			       group_cfg, ARRAY_SIZE(group_cfg));
1484 	if (ret)
1485 		goto qm_config_fail;
1486 
1487 	PPE_AC_GRP_SET_BUF_LIMIT(group_cfg, ipq9574_ppe_qm_group_config);
1488 
1489 	ret = regmap_bulk_write(ppe_dev->regmap, reg,
1490 				group_cfg, ARRAY_SIZE(group_cfg));
1491 	if (ret)
1492 		goto qm_config_fail;
1493 
1494 	queue_cfg = ipq9574_ppe_qm_queue_config;
1495 	queue_cfg_count = ARRAY_SIZE(ipq9574_ppe_qm_queue_config);
1496 	for (i = 0; i < queue_cfg_count; i++) {
1497 		queue_id = queue_cfg[i].queue_start;
1498 
1499 		/* Configure threshold for dropping packets separately for
1500 		 * unicast and multicast PPE queues.
1501 		 */
1502 		while (queue_id <= queue_cfg[i].queue_end) {
1503 			if (queue_id < PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES) {
1504 				reg = PPE_AC_UNICAST_QUEUE_CFG_TBL_ADDR +
1505 				      PPE_AC_UNICAST_QUEUE_CFG_TBL_INC * queue_id;
1506 
1507 				ret = regmap_bulk_read(ppe_dev->regmap, reg,
1508 						       unicast_queue_cfg,
1509 						       ARRAY_SIZE(unicast_queue_cfg));
1510 				if (ret)
1511 					goto qm_config_fail;
1512 
1513 				PPE_AC_UNICAST_QUEUE_SET_EN(unicast_queue_cfg, true);
1514 				PPE_AC_UNICAST_QUEUE_SET_GRP_ID(unicast_queue_cfg, 0);
1515 				PPE_AC_UNICAST_QUEUE_SET_PRE_LIMIT(unicast_queue_cfg,
1516 								   queue_cfg[i].prealloc_buf);
1517 				PPE_AC_UNICAST_QUEUE_SET_DYNAMIC(unicast_queue_cfg,
1518 								 queue_cfg[i].dynamic);
1519 				PPE_AC_UNICAST_QUEUE_SET_WEIGHT(unicast_queue_cfg,
1520 								queue_cfg[i].weight);
1521 				PPE_AC_UNICAST_QUEUE_SET_THRESHOLD(unicast_queue_cfg,
1522 								   queue_cfg[i].ceil);
1523 				PPE_AC_UNICAST_QUEUE_SET_GRN_RESUME(unicast_queue_cfg,
1524 								    queue_cfg[i].resume_offset);
1525 
1526 				ret = regmap_bulk_write(ppe_dev->regmap, reg,
1527 							unicast_queue_cfg,
1528 							ARRAY_SIZE(unicast_queue_cfg));
1529 				if (ret)
1530 					goto qm_config_fail;
1531 			} else {
1532 				reg = PPE_AC_MULTICAST_QUEUE_CFG_TBL_ADDR +
1533 				      PPE_AC_MULTICAST_QUEUE_CFG_TBL_INC * queue_id;
1534 
1535 				ret = regmap_bulk_read(ppe_dev->regmap, reg,
1536 						       multicast_queue_cfg,
1537 						       ARRAY_SIZE(multicast_queue_cfg));
1538 				if (ret)
1539 					goto qm_config_fail;
1540 
1541 				PPE_AC_MULTICAST_QUEUE_SET_EN(multicast_queue_cfg, true);
1542 				PPE_AC_MULTICAST_QUEUE_SET_GRN_GRP_ID(multicast_queue_cfg, 0);
1543 				PPE_AC_MULTICAST_QUEUE_SET_GRN_PRE_LIMIT(multicast_queue_cfg,
1544 									 queue_cfg[i].prealloc_buf);
1545 				PPE_AC_MULTICAST_QUEUE_SET_GRN_THRESHOLD(multicast_queue_cfg,
1546 									 queue_cfg[i].ceil);
1547 				PPE_AC_MULTICAST_QUEUE_SET_GRN_RESUME(multicast_queue_cfg,
1548 								      queue_cfg[i].resume_offset);
1549 
1550 				ret = regmap_bulk_write(ppe_dev->regmap, reg,
1551 							multicast_queue_cfg,
1552 							ARRAY_SIZE(multicast_queue_cfg));
1553 				if (ret)
1554 					goto qm_config_fail;
1555 			}
1556 
1557 			/* Enable enqueue. */
1558 			reg = PPE_ENQ_OPR_TBL_ADDR + PPE_ENQ_OPR_TBL_INC * queue_id;
1559 			ret = regmap_clear_bits(ppe_dev->regmap, reg,
1560 						PPE_ENQ_OPR_TBL_ENQ_DISABLE);
1561 			if (ret)
1562 				goto qm_config_fail;
1563 
1564 			/* Enable dequeue. */
1565 			reg = PPE_DEQ_OPR_TBL_ADDR + PPE_DEQ_OPR_TBL_INC * queue_id;
1566 			ret = regmap_clear_bits(ppe_dev->regmap, reg,
1567 						PPE_DEQ_OPR_TBL_DEQ_DISABLE);
1568 			if (ret)
1569 				goto qm_config_fail;
1570 
1571 			queue_id++;
1572 		}
1573 	}
1574 
1575 	/* Enable queue counter for all PPE hardware queues. */
1576 	ret = regmap_set_bits(ppe_dev->regmap, PPE_EG_BRIDGE_CONFIG_ADDR,
1577 			      PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN);
1578 	if (ret)
1579 		goto qm_config_fail;
1580 
1581 	return 0;
1582 
1583 qm_config_fail:
1584 	dev_err(ppe_dev->dev, "PPE QM config error %d\n", ret);
1585 	return ret;
1586 }
1587 
1588 static int ppe_node_scheduler_config(struct ppe_device *ppe_dev,
1589 				     const struct ppe_scheduler_port_config config)
1590 {
1591 	struct ppe_scheduler_cfg sch_cfg;
1592 	int ret, i;
1593 
1594 	for (i = 0; i < config.loop_num; i++) {
1595 		if (!config.pri_max) {
1596 			/* Round robin scheduler without priority. */
1597 			sch_cfg.flow_id = config.flow_id;
1598 			sch_cfg.pri = 0;
1599 			sch_cfg.drr_node_id = config.drr_node_id;
1600 		} else {
1601 			sch_cfg.flow_id = config.flow_id + (i / config.pri_max);
1602 			sch_cfg.pri = i % config.pri_max;
1603 			sch_cfg.drr_node_id = config.drr_node_id + i;
1604 		}
1605 
1606 		/* Scheduler weight, must be greater than 0. */
1607 		sch_cfg.drr_node_wt = 1;
1608 		/* Byte based scheduling. */
1609 		sch_cfg.unit_is_packet = false;
1610 		/* Frame + CRC calculated. */
1611 		sch_cfg.frame_mode = PPE_SCH_WITH_FRAME_CRC;
1612 
1613 		ret = ppe_queue_scheduler_set(ppe_dev, config.node_id + i,
1614 					      config.flow_level,
1615 					      config.port,
1616 					      sch_cfg);
1617 		if (ret)
1618 			return ret;
1619 	}
1620 
1621 	return 0;
1622 }
1623 
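/* Loop mapping example (illustrative only): the queue level entry of PPE
 * port 1 in ppe_port_sch_config[] (node_id 144, loop_num 16, pri_max 8,
 * flow_id 36, drr_node_id 48) programs queues 144-159. For iteration i = 9,
 * the node is 144 + 9 = 153, attached to flow_id 36 + 9 / 8 = 37 with
 * pri 9 % 8 = 1 and drr_node_id 48 + 9 = 57.
 */
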
1624 /* Initialize scheduler settings for PPE buffer utilization and dispatching
1625  * packet on PPE queue.
1626  */
1627 static int ppe_config_scheduler(struct ppe_device *ppe_dev)
1628 {
1629 	const struct ppe_scheduler_port_config *port_cfg;
1630 	const struct ppe_scheduler_qm_config *qm_cfg;
1631 	const struct ppe_scheduler_bm_config *bm_cfg;
1632 	int ret, i, count;
1633 	u32 val, reg;
1634 
1635 	count = ARRAY_SIZE(ipq9574_ppe_sch_bm_config);
1636 	bm_cfg = ipq9574_ppe_sch_bm_config;
1637 
1638 	/* Configure the depth of BM scheduler entries. */
1639 	val = FIELD_PREP(PPE_BM_SCH_CTRL_SCH_DEPTH, count);
1640 	val |= FIELD_PREP(PPE_BM_SCH_CTRL_SCH_OFFSET, 0);
1641 	val |= FIELD_PREP(PPE_BM_SCH_CTRL_SCH_EN, 1);
1642 
1643 	ret = regmap_write(ppe_dev->regmap, PPE_BM_SCH_CTRL_ADDR, val);
1644 	if (ret)
1645 		goto sch_config_fail;
1646 
1647 	/* Configure each BM scheduler entry with the valid ingress port and
1648 	 * egress port; the second port takes effect when the specified port
1649 	 * is in the inactive state.
1650 	 */
1651 	for (i = 0; i < count; i++) {
1652 		val = FIELD_PREP(PPE_BM_SCH_CFG_TBL_VALID, bm_cfg[i].valid);
1653 		val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_DIR, bm_cfg[i].dir);
1654 		val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_PORT_NUM, bm_cfg[i].port);
1655 		val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_SECOND_PORT_VALID,
1656 				  bm_cfg[i].backup_port_valid);
1657 		val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_SECOND_PORT,
1658 				  bm_cfg[i].backup_port);
1659 
1660 		reg = PPE_BM_SCH_CFG_TBL_ADDR + i * PPE_BM_SCH_CFG_TBL_INC;
1661 		ret = regmap_write(ppe_dev->regmap, reg, val);
1662 		if (ret)
1663 			goto sch_config_fail;
1664 	}
1665 
1666 	count = ARRAY_SIZE(ipq9574_ppe_sch_qm_config);
1667 	qm_cfg = ipq9574_ppe_sch_qm_config;
1668 
1669 	/* Configure the depth of QM scheduler entries. */
1670 	val = FIELD_PREP(PPE_PSCH_SCH_DEPTH_CFG_SCH_DEPTH, count);
1671 	ret = regmap_write(ppe_dev->regmap, PPE_PSCH_SCH_DEPTH_CFG_ADDR, val);
1672 	if (ret)
1673 		goto sch_config_fail;
1674 
1675 	/* Configure each QM scheduler entry with enqueue port and dequeue
1676 	 * port; the second port takes effect when the specified dequeue
1677 	 * port is in the inactive state.
1678 	 */
1679 	for (i = 0; i < count; i++) {
1680 		val = FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_ENS_PORT_BITMAP,
1681 				 qm_cfg[i].ensch_port_bmp);
1682 		val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_ENS_PORT,
1683 				  qm_cfg[i].ensch_port);
1684 		val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_PORT,
1685 				  qm_cfg[i].desch_port);
1686 		val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT_EN,
1687 				  qm_cfg[i].desch_backup_port_valid);
1688 		val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT,
1689 				  qm_cfg[i].desch_backup_port);
1690 
1691 		reg = PPE_PSCH_SCH_CFG_TBL_ADDR + i * PPE_PSCH_SCH_CFG_TBL_INC;
1692 		ret = regmap_write(ppe_dev->regmap, reg, val);
1693 		if (ret)
1694 			goto sch_config_fail;
1695 	}
1696 
1697 	count = ARRAY_SIZE(ppe_port_sch_config);
1698 	port_cfg = ppe_port_sch_config;
1699 
1700 	/* Configure scheduler per PPE queue or flow. */
1701 	for (i = 0; i < count; i++) {
1702 		if (port_cfg[i].port >= ppe_dev->num_ports)
1703 			break;
1704 
1705 		ret = ppe_node_scheduler_config(ppe_dev, port_cfg[i]);
1706 		if (ret)
1707 			goto sch_config_fail;
1708 	}
1709 
1710 	return 0;
1711 
1712 sch_config_fail:
1713 	dev_err(ppe_dev->dev, "PPE scheduler arbitration config error %d\n", ret);
1714 	return ret;
1715 }
1716 
1717 /* Configure PPE queue destination of each PPE port. */
1718 static int ppe_queue_dest_init(struct ppe_device *ppe_dev)
1719 {
1720 	int ret, port_id, index, q_base, q_offset, res_start, res_end, pri_max;
1721 	struct ppe_queue_ucast_dest queue_dst;
1722 
1723 	for (port_id = 0; port_id < ppe_dev->num_ports; port_id++) {
1724 		memset(&queue_dst, 0, sizeof(queue_dst));
1725 
1726 		ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_UCAST,
1727 					    &res_start, &res_end);
1728 		if (ret)
1729 			return ret;
1730 
1731 		q_base = res_start;
1732 		queue_dst.dest_port = port_id;
1733 
1734 		/* Configure the queue base ID and the profile ID, which is the
1735 		 * same as the physical port ID.
1736 		 */
1737 		ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
1738 					       q_base, port_id);
1739 		if (ret)
1740 			return ret;
1741 
1742 		/* Queue priority range supported by each PPE port */
1743 		ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_L0_NODE,
1744 					    &res_start, &res_end);
1745 		if (ret)
1746 			return ret;
1747 
1748 		pri_max = res_end - res_start;
1749 
1750 		/* Redirect ARP reply packets to the CPU port with the maximum
1751 		 * priority, which keeps ARP replies directed to the CPU (CPU
1752 		 * code 101) on the highest priority queue of the EDMA.
1753 		 */
1754 		if (port_id == 0) {
1755 			memset(&queue_dst, 0, sizeof(queue_dst));
1756 
1757 			queue_dst.cpu_code_en = true;
1758 			queue_dst.cpu_code = 101;
1759 			ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
1760 						       q_base + pri_max,
1761 						       0);
1762 			if (ret)
1763 				return ret;
1764 		}
1765 
1766 		/* Initialize the queue offset of internal priority. */
1767 		for (index = 0; index < PPE_QUEUE_INTER_PRI_NUM; index++) {
1768 			q_offset = index > pri_max ? pri_max : index;
1769 
1770 			ret = ppe_queue_ucast_offset_pri_set(ppe_dev, port_id,
1771 							     index, q_offset);
1772 			if (ret)
1773 				return ret;
1774 		}
1775 
1776 		/* Initialize the queue offset of the RSS hash to 0 to avoid
1777 		 * the random hardware value leading to an unexpected
1778 		 * destination queue.
1779 		 */
1780 		for (index = 0; index < PPE_QUEUE_HASH_NUM; index++) {
1781 			ret = ppe_queue_ucast_offset_hash_set(ppe_dev, port_id,
1782 							      index, 0);
1783 			if (ret)
1784 				return ret;
1785 		}
1786 	}
1787 
1788 	return 0;
1789 }
1790 
1791 /* Initialize the service code 1 used by the CPU port. */
1792 static int ppe_servcode_init(struct ppe_device *ppe_dev)
1793 {
1794 	struct ppe_sc_cfg sc_cfg = {};
1795 
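	/* Leave the counter and tunnel bypass bitmaps empty. */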
1796 	bitmap_zero(sc_cfg.bitmaps.counter, PPE_SC_BYPASS_COUNTER_SIZE);
1797 	bitmap_zero(sc_cfg.bitmaps.tunnel, PPE_SC_BYPASS_TUNNEL_SIZE);
1798 
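	/* Set all ingress bypass bits except the fake MAC header, the
	 * service code and the fake L2 protocol bits.
	 */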
1799 	bitmap_fill(sc_cfg.bitmaps.ingress, PPE_SC_BYPASS_INGRESS_SIZE);
1800 	clear_bit(PPE_SC_BYPASS_INGRESS_FAKE_MAC_HEADER, sc_cfg.bitmaps.ingress);
1801 	clear_bit(PPE_SC_BYPASS_INGRESS_SERVICE_CODE, sc_cfg.bitmaps.ingress);
1802 	clear_bit(PPE_SC_BYPASS_INGRESS_FAKE_L2_PROTO, sc_cfg.bitmaps.ingress);
1803 
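	/* Set all egress bypass bits except the ACL post-routing check bit. */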
1804 	bitmap_fill(sc_cfg.bitmaps.egress, PPE_SC_BYPASS_EGRESS_SIZE);
1805 	clear_bit(PPE_SC_BYPASS_EGRESS_ACL_POST_ROUTING_CHECK, sc_cfg.bitmaps.egress);
1806 
1807 	return ppe_sc_config_set(ppe_dev, PPE_EDMA_SC_BYPASS_ID, sc_cfg);
1808 }
1809 
1810 /* Initialize PPE port configurations. */
1811 static int ppe_port_config_init(struct ppe_device *ppe_dev)
1812 {
1813 	u32 reg, val, mru_mtu_val[3];
1814 	int i, ret;
1815 
1816 	/* MTU and MRU settings are not required for CPU port 0. */
1817 	for (i = 1; i < ppe_dev->num_ports; i++) {
1818 		/* Enable the Ethernet port counter. */
1819 		ret = ppe_counter_enable_set(ppe_dev, i);
1820 		if (ret)
1821 			return ret;
1822 
1823 		reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * i;
1824 		ret = regmap_bulk_read(ppe_dev->regmap, reg,
1825 				       mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
1826 		if (ret)
1827 			return ret;
1828 
1829 		/* Drop the packet when its size exceeds the MTU, and redirect
1830 		 * the packet to the CPU port when the received packet size
1831 		 * exceeds the MRU of the physical interface.
1832 		 */
1833 		PPE_MRU_MTU_CTRL_SET_MRU_CMD(mru_mtu_val, PPE_ACTION_REDIRECT_TO_CPU);
1834 		PPE_MRU_MTU_CTRL_SET_MTU_CMD(mru_mtu_val, PPE_ACTION_DROP);
1835 		ret = regmap_bulk_write(ppe_dev->regmap, reg,
1836 					mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
1837 		if (ret)
1838 			return ret;
1839 
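		/* Drop the multicast packet when its size exceeds the
		 * multicast MTU.
		 */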
1840 		reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * i;
1841 		val = FIELD_PREP(PPE_MC_MTU_CTRL_TBL_MTU_CMD, PPE_ACTION_DROP);
1842 		ret = regmap_update_bits(ppe_dev->regmap, reg,
1843 					 PPE_MC_MTU_CTRL_TBL_MTU_CMD,
1844 					 val);
1845 		if (ret)
1846 			return ret;
1847 	}
1848 
1849 	/* Enable CPU port counters. */
1850 	return ppe_counter_enable_set(ppe_dev, 0);
1851 }
1852 
1853 /* Initialize the PPE RSS configuration for IPv4 and IPv6 packet receive.
1854  * The RSS settings control how the RSS hash value is calculated for a
1855  * received packet. This hash is then used to generate the queue offset
1856  * that determines the destination queue of the packet.
1857  */
1858 static int ppe_rss_hash_init(struct ppe_device *ppe_dev)
1859 {
1860 	u16 fins[PPE_RSS_HASH_TUPLES] = { 0x205, 0x264, 0x227, 0x245, 0x201 };
1861 	u8 ips[PPE_RSS_HASH_IP_LENGTH] = { 0x13, 0xb, 0x13, 0xb };
1862 	struct ppe_rss_hash_cfg hash_cfg;
1863 	int i, ret;
1864 
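	/* Use a random hash seed with a 12 bit hash mask. */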
1865 	hash_cfg.hash_seed = get_random_u32();
1866 	hash_cfg.hash_mask = 0xfff;
1867 
1868 	/* Use the 5-tuple as the RSS hash key for the first fragment of
1869 	 * TCP, UDP and UDP-Lite packets.
1870 	 */
1871 	hash_cfg.hash_fragment_mode = false;
1872 
1873 	/* The final common seed configs used to calculate the RSS hash
1874 	 * value, which are available for both IPv4 and IPv6 packets.
1875 	 */
1876 	for (i = 0; i < ARRAY_SIZE(fins); i++) {
1877 		hash_cfg.hash_fin_inner[i] = fins[i] & 0x1f;
1878 		hash_cfg.hash_fin_outer[i] = fins[i] >> 5;
1879 	}
1880 
1881 	/* RSS seeds for IP protocol, L4 destination & source port and
1882 	 * destination & source IP used to calculate the RSS hash value.
1883 	 */
1884 	hash_cfg.hash_protocol_mix = 0x13;
1885 	hash_cfg.hash_dport_mix = 0xb;
1886 	hash_cfg.hash_sport_mix = 0x13;
1887 	hash_cfg.hash_dip_mix[0] = 0xb;
1888 	hash_cfg.hash_sip_mix[0] = 0x13;
1889 
1890 	/* Configure RSS seed configs for IPv4 packets. */
1891 	ret = ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV4, hash_cfg);
1892 	if (ret)
1893 		return ret;
1894 
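	/* Seed each source and destination IP mix value used for the
	 * IPv6 hash.
	 */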
1895 	for (i = 0; i < ARRAY_SIZE(ips); i++) {
1896 		hash_cfg.hash_sip_mix[i] = ips[i];
1897 		hash_cfg.hash_dip_mix[i] = ips[i];
1898 	}
1899 
1900 	/* Configure RSS seed configs for IPv6 packets. */
1901 	return ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV6, hash_cfg);
1902 }
1903 
1904 /* Initialize the mapping of the PPE queues assigned to CPU port 0
1905  * to Ethernet DMA ring 0.
1906  */
1907 static int ppe_queues_to_ring_init(struct ppe_device *ppe_dev)
1908 {
1909 	u32 queue_bmap[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT] = {};
1910 	int ret, queue_id, queue_max;
1911 
1912 	ret = ppe_port_resource_get(ppe_dev, 0, PPE_RES_UCAST,
1913 				    &queue_id, &queue_max);
1914 	if (ret)
1915 		return ret;
1916 
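	/* Set the bit of each unicast queue assigned to CPU port 0, with
	 * 32 queues tracked per bitmap word.
	 */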
1917 	for (; queue_id <= queue_max; queue_id++)
1918 		queue_bmap[queue_id / 32] |= BIT_MASK(queue_id % 32);
1919 
1920 	return ppe_ring_queue_map_set(ppe_dev, 0, queue_bmap);
1921 }
1922 
1923 /* Initialize PPE bridge settings to only enable L2 frame receive and
1924  * transmit between the CPU port and the PPE Ethernet ports.
1925  */
1926 static int ppe_bridge_init(struct ppe_device *ppe_dev)
1927 {
1928 	u32 reg, mask, port_cfg[4], vsi_cfg[2];
1929 	int ret, i;
1930 
1931 	/* Configure the following settings for CPU port 0:
1932 	 * a.) Enable Bridge TX
1933 	 * b.) Disable FDB new address learning
1934 	 * c.) Disable station move address learning
1935 	 */
1936 	mask = PPE_PORT_BRIDGE_TXMAC_EN;
1937 	mask |= PPE_PORT_BRIDGE_NEW_LRN_EN;
1938 	mask |= PPE_PORT_BRIDGE_STA_MOVE_LRN_EN;
1939 	ret = regmap_update_bits(ppe_dev->regmap,
1940 				 PPE_PORT_BRIDGE_CTRL_ADDR,
1941 				 mask,
1942 				 PPE_PORT_BRIDGE_TXMAC_EN);
1943 	if (ret)
1944 		return ret;
1945 
1946 	for (i = 1; i < ppe_dev->num_ports; i++) {
1947 		/* Enable invalid VSI forwarding for all the physical ports
1948 		 * to CPU port 0, in case no VSI is assigned to the physical
1949 		 * port.
1950 		 */
1951 		reg = PPE_L2_VP_PORT_TBL_ADDR + PPE_L2_VP_PORT_TBL_INC * i;
1952 		ret = regmap_bulk_read(ppe_dev->regmap, reg,
1953 				       port_cfg, ARRAY_SIZE(port_cfg));
1955 		if (ret)
1956 			return ret;
1957 
1958 		PPE_L2_PORT_SET_INVALID_VSI_FWD_EN(port_cfg, true);
1959 		PPE_L2_PORT_SET_DST_INFO(port_cfg, 0);
1960 
1961 		ret = regmap_bulk_write(ppe_dev->regmap, reg,
1962 					port_cfg, ARRAY_SIZE(port_cfg));
1963 		if (ret)
1964 			return ret;
1965 	}
1966 
1967 		/* Set the VSI forward membership to include only CPU port 0.
1968 		 * FDB learning and forwarding take place only after switchdev
1969 		 * support is added later to create the VSI and to join the
1970 		 * physical ports to the VSI as port members.
1971 		 */
1973 		reg = PPE_VSI_TBL_ADDR + PPE_VSI_TBL_INC * i;
1974 		ret = regmap_bulk_read(ppe_dev->regmap, reg,
1975 				       vsi_cfg, ARRAY_SIZE(vsi_cfg));
1976 		if (ret)
1977 			return ret;
1978 
1979 		PPE_VSI_SET_MEMBER_PORT_BITMAP(vsi_cfg, BIT(0));
1980 		PPE_VSI_SET_UUC_BITMAP(vsi_cfg, BIT(0));
1981 		PPE_VSI_SET_UMC_BITMAP(vsi_cfg, BIT(0));
1982 		PPE_VSI_SET_BC_BITMAP(vsi_cfg, BIT(0));
1983 		PPE_VSI_SET_NEW_ADDR_LRN_EN(vsi_cfg, true);
1984 		PPE_VSI_SET_NEW_ADDR_FWD_CMD(vsi_cfg, PPE_ACTION_FORWARD);
1985 		PPE_VSI_SET_STATION_MOVE_LRN_EN(vsi_cfg, true);
1986 		PPE_VSI_SET_STATION_MOVE_FWD_CMD(vsi_cfg, PPE_ACTION_FORWARD);
1987 
1988 		ret = regmap_bulk_write(ppe_dev->regmap, reg,
1989 					vsi_cfg, ARRAY_SIZE(vsi_cfg));
1990 		if (ret)
1991 			return ret;
1992 	}
1993 
1994 	return 0;
1995 }
1996 
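/**
 * ppe_hw_config - Configure the PPE hardware.
 * @ppe_dev: PPE device.
 *
 * Initialize the PPE hardware blocks: buffer management (BM), queue
 * management (QM), the scheduler, the queue destinations, the service
 * code, the port settings, the RSS hash, the queue to EDMA ring mapping
 * and the bridge settings.
 *
 * Return: 0 on success, negative error code on failure.
 */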
1997 int ppe_hw_config(struct ppe_device *ppe_dev)
1998 {
1999 	int ret;
2000 
2001 	ret = ppe_config_bm(ppe_dev);
2002 	if (ret)
2003 		return ret;
2004 
2005 	ret = ppe_config_qm(ppe_dev);
2006 	if (ret)
2007 		return ret;
2008 
2009 	ret = ppe_config_scheduler(ppe_dev);
2010 	if (ret)
2011 		return ret;
2012 
2013 	ret = ppe_queue_dest_init(ppe_dev);
2014 	if (ret)
2015 		return ret;
2016 
2017 	ret = ppe_servcode_init(ppe_dev);
2018 	if (ret)
2019 		return ret;
2020 
2021 	ret = ppe_port_config_init(ppe_dev);
2022 	if (ret)
2023 		return ret;
2024 
2025 	ret = ppe_rss_hash_init(ppe_dev);
2026 	if (ret)
2027 		return ret;
2028 
2029 	ret = ppe_queues_to_ring_init(ppe_dev);
2030 	if (ret)
2031 		return ret;
2032 
2033 	return ppe_bridge_init(ppe_dev);
2034 }
2035