xref: /linux/drivers/net/ethernet/qualcomm/ppe/ppe.c (revision 5de6c855e23e99d76c143ee2a29766e7f7f9fe65)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

/* PPE platform device probe, devicetree parsing and PPE clock initialization. */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interconnect.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include "ppe.h"
#include "ppe_config.h"
#include "ppe_debugfs.h"

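/* The IPQ9574 PPE exposes 8 ports; its core clock is configured to 353 MHz. */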
#define PPE_PORT_MAX		8
#define PPE_CLK_RATE		353000000

/* Interconnect (ICC) paths required to enable the PPE device. Entries with
 * an avg_bw and peak_bw of 0 are updated at runtime based on the PPE clock
 * rate.
 */
static const struct icc_bulk_data ppe_icc_data[] = {
	{
		.name = "ppe",
		.avg_bw = 0,
		.peak_bw = 0,
	},
	{
		.name = "ppe_cfg",
		.avg_bw = 0,
		.peak_bw = 0,
	},
	{
		.name = "qos_gen",
		.avg_bw = 6000,
		.peak_bw = 6000,
	},
	{
		.name = "timeout_ref",
		.avg_bw = 6000,
		.peak_bw = 6000,
	},
	{
		.name = "nssnoc_memnoc",
		.avg_bw = 533333,
		.peak_bw = 533333,
	},
	{
		.name = "memnoc_nssnoc",
		.avg_bw = 533333,
		.peak_bw = 533333,
	},
	{
		.name = "memnoc_nssnoc_1",
		.avg_bw = 533333,
		.peak_bw = 533333,
	},
};

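/* Only these PPE register windows are readable and writable through the
 * regmap; accesses outside them are rejected.
 */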
static const struct regmap_range ppe_readable_ranges[] = {
	regmap_reg_range(0x0, 0x1ff),		/* Global */
	regmap_reg_range(0x400, 0x5ff),		/* LPI CSR */
	regmap_reg_range(0x1000, 0x11ff),	/* GMAC0 */
	regmap_reg_range(0x1200, 0x13ff),	/* GMAC1 */
	regmap_reg_range(0x1400, 0x15ff),	/* GMAC2 */
	regmap_reg_range(0x1600, 0x17ff),	/* GMAC3 */
	regmap_reg_range(0x1800, 0x19ff),	/* GMAC4 */
	regmap_reg_range(0x1a00, 0x1bff),	/* GMAC5 */
	regmap_reg_range(0xb000, 0xefff),	/* PRX CSR */
	regmap_reg_range(0xf000, 0x1efff),	/* IPE */
	regmap_reg_range(0x20000, 0x5ffff),	/* PTX CSR */
	regmap_reg_range(0x60000, 0x9ffff),	/* IPE L2 CSR */
	regmap_reg_range(0xb0000, 0xeffff),	/* IPO CSR */
	regmap_reg_range(0x100000, 0x17ffff),	/* IPE PC */
	regmap_reg_range(0x180000, 0x1bffff),	/* PRE IPO CSR */
	regmap_reg_range(0x1d0000, 0x1dffff),	/* Tunnel parser */
	regmap_reg_range(0x1e0000, 0x1effff),	/* Ingress parse */
	regmap_reg_range(0x200000, 0x2fffff),	/* IPE L3 */
	regmap_reg_range(0x300000, 0x3fffff),	/* IPE tunnel */
	regmap_reg_range(0x400000, 0x4fffff),	/* Scheduler */
	regmap_reg_range(0x500000, 0x503fff),	/* XGMAC0 */
	regmap_reg_range(0x504000, 0x507fff),	/* XGMAC1 */
	regmap_reg_range(0x508000, 0x50bfff),	/* XGMAC2 */
	regmap_reg_range(0x50c000, 0x50ffff),	/* XGMAC3 */
	regmap_reg_range(0x510000, 0x513fff),	/* XGMAC4 */
	regmap_reg_range(0x514000, 0x517fff),	/* XGMAC5 */
	regmap_reg_range(0x600000, 0x6fffff),	/* BM */
	regmap_reg_range(0x800000, 0x9fffff),	/* QM */
	regmap_reg_range(0xb00000, 0xbef800),	/* EDMA */
};

static const struct regmap_access_table ppe_reg_table = {
	.yes_ranges = ppe_readable_ranges,
	.n_yes_ranges = ARRAY_SIZE(ppe_readable_ranges),
};

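/* MMIO regmap configuration for the IPQ9574 PPE: 32-bit registers at a
 * 4-byte stride, with reads and writes limited to the ranges listed above.
 */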
static const struct regmap_config regmap_config_ipq9574 = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.rd_table = &ppe_reg_table,
	.wr_table = &ppe_reg_table,
	.max_register = 0xbef800,
	.fast_io = true,
};

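/* Request the interconnect paths and vote their bandwidth, set the common
 * "ppe" parent clock to the PPE clock rate, enable all PPE clocks, then
 * perform an assert/deassert cycle of the PPE reset.
 */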
static int ppe_clock_init_and_reset(struct ppe_device *ppe_dev)
{
	unsigned long ppe_rate = ppe_dev->clk_rate;
	struct device *dev = ppe_dev->dev;
	struct reset_control *rstc;
	struct clk_bulk_data *clks;
	struct clk *clk;
	int ret, i;

	for (i = 0; i < ppe_dev->num_icc_paths; i++) {
		ppe_dev->icc_paths[i].name = ppe_icc_data[i].name;
		ppe_dev->icc_paths[i].avg_bw = ppe_icc_data[i].avg_bw ? :
					       Bps_to_icc(ppe_rate);

		/* PPE does not have an explicit peak bandwidth requirement,
		 * so set the peak bandwidth to be equal to the average
		 * bandwidth.
		 */
		ppe_dev->icc_paths[i].peak_bw = ppe_icc_data[i].peak_bw ? :
						Bps_to_icc(ppe_rate);
	}

	ret = devm_of_icc_bulk_get(dev, ppe_dev->num_icc_paths,
				   ppe_dev->icc_paths);
	if (ret)
		return ret;

	ret = icc_bulk_set_bw(ppe_dev->num_icc_paths, ppe_dev->icc_paths);
	if (ret)
		return ret;

	/* The PPE clocks have a common parent clock. Setting the clock
	 * rate of "ppe" ensures the clock rate of all PPE clocks is
	 * configured to the same rate.
	 */
	clk = devm_clk_get(dev, "ppe");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_set_rate(clk, ppe_rate);
	if (ret)
		return ret;

	ret = devm_clk_bulk_get_all_enabled(dev, &clks);
	if (ret < 0)
		return ret;

	/* Reset the PPE. */
	rstc = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	ret = reset_control_assert(rstc);
	if (ret)
		return ret;

	/* Holding the reset asserted for 10 ms is required to reset the PPE. */
	usleep_range(10000, 11000);

	return reset_control_deassert(rstc);
}

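/* Probe the PPE: map the register space and create the regmap, bring up the
 * interconnect paths, clocks and reset, then apply the hardware configuration
 * and set up debugfs.
 */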
static int qcom_ppe_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ppe_device *ppe_dev;
	void __iomem *base;
	int ret, num_icc;

	num_icc = ARRAY_SIZE(ppe_icc_data);
	ppe_dev = devm_kzalloc(dev, struct_size(ppe_dev, icc_paths, num_icc),
			       GFP_KERNEL);
	if (!ppe_dev)
		return -ENOMEM;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return dev_err_probe(dev, PTR_ERR(base), "PPE ioremap failed\n");

	ppe_dev->regmap = devm_regmap_init_mmio(dev, base, &regmap_config_ipq9574);
	if (IS_ERR(ppe_dev->regmap))
		return dev_err_probe(dev, PTR_ERR(ppe_dev->regmap),
				     "PPE regmap initialization failed\n");

	ppe_dev->dev = dev;
	ppe_dev->clk_rate = PPE_CLK_RATE;
	ppe_dev->num_ports = PPE_PORT_MAX;
	ppe_dev->num_icc_paths = num_icc;

	ret = ppe_clock_init_and_reset(ppe_dev);
	if (ret)
		return dev_err_probe(dev, ret, "PPE clock config failed\n");

	ret = ppe_hw_config(ppe_dev);
	if (ret)
		return dev_err_probe(dev, ret, "PPE HW config failed\n");

	ppe_debugfs_setup(ppe_dev);
	platform_set_drvdata(pdev, ppe_dev);

	return 0;
}

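/* Only debugfs needs explicit teardown; the clocks, interconnect paths,
 * reset control, regmap and MMIO mapping are devres-managed and released
 * automatically when the device is removed.
 */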
static void qcom_ppe_remove(struct platform_device *pdev)
{
	struct ppe_device *ppe_dev;

	ppe_dev = platform_get_drvdata(pdev);
	ppe_debugfs_teardown(ppe_dev);
}

static const struct of_device_id qcom_ppe_of_match[] = {
	{ .compatible = "qcom,ipq9574-ppe" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_ppe_of_match);

static struct platform_driver qcom_ppe_driver = {
	.driver = {
		.name = "qcom_ppe",
		.of_match_table = qcom_ppe_of_match,
	},
	.probe	= qcom_ppe_probe,
	.remove = qcom_ppe_remove,
};
module_platform_driver(qcom_ppe_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPQ PPE driver");