1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright (c) 2012-2017 ASPEED Technology Inc.
3 // Copyright (c) 2018-2021 Intel Corporation
4
5 #include <linux/unaligned.h>
6
7 #include <linux/bitfield.h>
8 #include <linux/clk.h>
9 #include <linux/clkdev.h>
10 #include <linux/clk-provider.h>
11 #include <linux/delay.h>
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/iopoll.h>
15 #include <linux/jiffies.h>
16 #include <linux/math.h>
17 #include <linux/module.h>
18 #include <linux/of.h>
19 #include <linux/peci.h>
20 #include <linux/platform_device.h>
21 #include <linux/reset.h>
22
/* ASPEED PECI Registers */
/* Control Register */
#define ASPEED_PECI_CTRL 0x00
#define ASPEED_PECI_CTRL_SAMPLING_MASK GENMASK(19, 16)
#define ASPEED_PECI_CTRL_RD_MODE_MASK GENMASK(13, 12)
#define ASPEED_PECI_CTRL_RD_MODE_DBG BIT(13)
#define ASPEED_PECI_CTRL_RD_MODE_COUNT BIT(12)
#define ASPEED_PECI_CTRL_CLK_SRC_HCLK BIT(11)
#define ASPEED_PECI_CTRL_CLK_DIV_MASK GENMASK(10, 8)
#define ASPEED_PECI_CTRL_INVERT_OUT BIT(7)
#define ASPEED_PECI_CTRL_INVERT_IN BIT(6)
#define ASPEED_PECI_CTRL_BUS_CONTENTION_EN BIT(5)
#define ASPEED_PECI_CTRL_PECI_EN BIT(4)
#define ASPEED_PECI_CTRL_PECI_CLK_EN BIT(0)

/* Timing Negotiation Register */
#define ASPEED_PECI_TIMING_NEGOTIATION 0x04
#define ASPEED_PECI_T_NEGO_MSG_MASK GENMASK(15, 8)
#define ASPEED_PECI_T_NEGO_ADDR_MASK GENMASK(7, 0)

/* Command Register */
#define ASPEED_PECI_CMD 0x08
#define ASPEED_PECI_CMD_PIN_MONITORING BIT(31)
#define ASPEED_PECI_CMD_STS_MASK GENMASK(27, 24)
#define ASPEED_PECI_CMD_STS_ADDR_T_NEGO 0x3
/* Controller is idle when both the status field and pin monitoring read 0 */
#define ASPEED_PECI_CMD_IDLE_MASK \
	(ASPEED_PECI_CMD_STS_MASK | ASPEED_PECI_CMD_PIN_MONITORING)
#define ASPEED_PECI_CMD_FIRE BIT(0)

/* Read/Write Length Register */
#define ASPEED_PECI_RW_LENGTH 0x0c
#define ASPEED_PECI_AW_FCS_EN BIT(31)
#define ASPEED_PECI_RD_LEN_MASK GENMASK(23, 16)
#define ASPEED_PECI_WR_LEN_MASK GENMASK(15, 8)
#define ASPEED_PECI_TARGET_ADDR_MASK GENMASK(7, 0)

/* Expected FCS Data Register */
#define ASPEED_PECI_EXPECTED_FCS 0x10
#define ASPEED_PECI_EXPECTED_RD_FCS_MASK GENMASK(23, 16)
#define ASPEED_PECI_EXPECTED_AW_FCS_AUTO_MASK GENMASK(15, 8)
#define ASPEED_PECI_EXPECTED_WR_FCS_MASK GENMASK(7, 0)

/* Captured FCS Data Register */
#define ASPEED_PECI_CAPTURED_FCS 0x14
#define ASPEED_PECI_CAPTURED_RD_FCS_MASK GENMASK(23, 16)
#define ASPEED_PECI_CAPTURED_WR_FCS_MASK GENMASK(7, 0)

/* Interrupt Register */
#define ASPEED_PECI_INT_CTRL 0x18
#define ASPEED_PECI_TIMING_NEGO_SEL_MASK GENMASK(31, 30)
#define ASPEED_PECI_1ST_BIT_OF_ADDR_NEGO 0
#define ASPEED_PECI_2ND_BIT_OF_ADDR_NEGO 1
#define ASPEED_PECI_MESSAGE_NEGO 2
#define ASPEED_PECI_INT_MASK GENMASK(4, 0)
#define ASPEED_PECI_INT_BUS_TIMEOUT BIT(4)
#define ASPEED_PECI_INT_BUS_CONTENTION BIT(3)
#define ASPEED_PECI_INT_WR_FCS_BAD BIT(2)
#define ASPEED_PECI_INT_WR_FCS_ABORT BIT(1)
#define ASPEED_PECI_INT_CMD_DONE BIT(0)

/* Interrupt Status Register */
#define ASPEED_PECI_INT_STS 0x1c
#define ASPEED_PECI_INT_TIMING_RESULT_MASK GENMASK(29, 16)
/* bits[4..0]: Same bit fields in the 'Interrupt Register' */

/*
 * Rx/Tx Data Buffer Registers
 * Note the non-contiguous layout: the first 16 bytes of each direction live
 * at 0x20/0x30 and the second 16 bytes at 0x40/0x50.
 */
#define ASPEED_PECI_WR_DATA0 0x20
#define ASPEED_PECI_WR_DATA1 0x24
#define ASPEED_PECI_WR_DATA2 0x28
#define ASPEED_PECI_WR_DATA3 0x2c
#define ASPEED_PECI_RD_DATA0 0x30
#define ASPEED_PECI_RD_DATA1 0x34
#define ASPEED_PECI_RD_DATA2 0x38
#define ASPEED_PECI_RD_DATA3 0x3c
#define ASPEED_PECI_WR_DATA4 0x40
#define ASPEED_PECI_WR_DATA5 0x44
#define ASPEED_PECI_WR_DATA6 0x48
#define ASPEED_PECI_WR_DATA7 0x4c
#define ASPEED_PECI_RD_DATA4 0x50
#define ASPEED_PECI_RD_DATA5 0x54
#define ASPEED_PECI_RD_DATA6 0x58
#define ASPEED_PECI_RD_DATA7 0x5c
#define ASPEED_PECI_DATA_BUF_SIZE_MAX 32

/* Timing Negotiation */
#define ASPEED_PECI_CLK_FREQUENCY_MIN 2000
#define ASPEED_PECI_CLK_FREQUENCY_DEFAULT 1000000
#define ASPEED_PECI_CLK_FREQUENCY_MAX 2000000
#define ASPEED_PECI_RD_SAMPLING_POINT_DEFAULT 8
/* Timeout */
#define ASPEED_PECI_IDLE_CHECK_TIMEOUT_US (50 * USEC_PER_MSEC)
#define ASPEED_PECI_IDLE_CHECK_INTERVAL_US (10 * USEC_PER_MSEC)
#define ASPEED_PECI_CMD_TIMEOUT_MS_DEFAULT 1000
#define ASPEED_PECI_CMD_TIMEOUT_MS_MAX 1000

/*
 * Clock divider components (see the comment above
 * devm_aspeed_peci_register_clk_div() for how they combine).
 */
#define ASPEED_PECI_CLK_DIV1(msg_timing) (4 * (msg_timing) + 1)
#define ASPEED_PECI_CLK_DIV2(clk_div_exp) BIT(clk_div_exp)
#define ASPEED_PECI_CLK_DIV(msg_timing, clk_div_exp) \
	(4 * ASPEED_PECI_CLK_DIV1(msg_timing) * ASPEED_PECI_CLK_DIV2(clk_div_exp))
122
/* Per-device driver state. */
struct aspeed_peci {
	struct peci_controller *controller;	/* handle returned by the PECI core */
	struct device *dev;
	void __iomem *base;			/* mapped controller registers */
	struct reset_control *rst;
	int irq;
	spinlock_t lock; /* to sync completion status handling */
	struct completion xfer_complete;	/* completed on CMD_DONE interrupt */
	struct clk *clk;			/* divider clock registered by this driver */
	u32 clk_frequency;			/* requested PECI clock rate (Hz) */
	u32 status;				/* interrupt status accumulated by the ISR */
	u32 cmd_timeout_ms;			/* per-transfer completion timeout */
};
136
/* Wrapper tying the registered clk_hw back to the owning controller. */
struct clk_aspeed_peci {
	struct clk_hw hw;
	struct aspeed_peci *aspeed_peci;
};
141
aspeed_peci_controller_enable(struct aspeed_peci * priv)142 static void aspeed_peci_controller_enable(struct aspeed_peci *priv)
143 {
144 u32 val = readl(priv->base + ASPEED_PECI_CTRL);
145
146 val |= ASPEED_PECI_CTRL_PECI_CLK_EN;
147 val |= ASPEED_PECI_CTRL_PECI_EN;
148
149 writel(val, priv->base + ASPEED_PECI_CTRL);
150 }
151
aspeed_peci_init_regs(struct aspeed_peci * priv)152 static void aspeed_peci_init_regs(struct aspeed_peci *priv)
153 {
154 u32 val;
155
156 /* Clear interrupts */
157 writel(ASPEED_PECI_INT_MASK, priv->base + ASPEED_PECI_INT_STS);
158
159 /* Set timing negotiation mode and enable interrupts */
160 val = FIELD_PREP(ASPEED_PECI_TIMING_NEGO_SEL_MASK, ASPEED_PECI_1ST_BIT_OF_ADDR_NEGO);
161 val |= ASPEED_PECI_INT_MASK;
162 writel(val, priv->base + ASPEED_PECI_INT_CTRL);
163
164 val = FIELD_PREP(ASPEED_PECI_CTRL_SAMPLING_MASK, ASPEED_PECI_RD_SAMPLING_POINT_DEFAULT);
165 writel(val, priv->base + ASPEED_PECI_CTRL);
166 }
167
/*
 * Make sure the controller is idle before a new command is fired.
 *
 * If the command status field is non-zero the hardware is recovered with a
 * full reset/reinit sequence (mirroring probe: regs, clock rate, enable).
 * Finally poll until both the status field and the pin-monitoring bit read
 * back as zero.
 *
 * Returns 0 when idle, a reset/clk error code on recovery failure, or the
 * readl_poll_timeout() error (-ETIMEDOUT) if the controller stays busy.
 */
static int aspeed_peci_check_idle(struct aspeed_peci *priv)
{
	u32 cmd_sts = readl(priv->base + ASPEED_PECI_CMD);
	int ret;

	/*
	 * Under normal circumstances, we expect to be idle here.
	 * In case there were any errors/timeouts that led to the situation
	 * where the hardware is not in idle state - we need to reset and
	 * reinitialize it to avoid potential controller hang.
	 */
	if (FIELD_GET(ASPEED_PECI_CMD_STS_MASK, cmd_sts)) {
		ret = reset_control_assert(priv->rst);
		if (ret) {
			dev_err(priv->dev, "cannot assert reset control\n");
			return ret;
		}

		ret = reset_control_deassert(priv->rst);
		if (ret) {
			dev_err(priv->dev, "cannot deassert reset control\n");
			return ret;
		}

		/* Reset wiped all registers - redo the probe-time setup. */
		aspeed_peci_init_regs(priv);

		ret = clk_set_rate(priv->clk, priv->clk_frequency);
		if (ret < 0) {
			dev_err(priv->dev, "cannot set clock frequency\n");
			return ret;
		}

		aspeed_peci_controller_enable(priv);
	}

	return readl_poll_timeout(priv->base + ASPEED_PECI_CMD,
				  cmd_sts,
				  !(cmd_sts & ASPEED_PECI_CMD_IDLE_MASK),
				  ASPEED_PECI_IDLE_CHECK_INTERVAL_US,
				  ASPEED_PECI_IDLE_CHECK_TIMEOUT_US);
}
209
/*
 * Execute one PECI transaction: program the header and Tx data, fire the
 * command, wait for the CMD_DONE interrupt, then read back the Rx data.
 *
 * priv->lock serializes access to priv->status against the interrupt
 * handler; the hardware registers themselves are written only while the
 * lock is held or after completion.
 *
 * Returns 0 on success, -EINVAL for oversized buffers, -ETIMEDOUT when no
 * completion arrives in time, -EIO when the controller flagged an error,
 * or -ERESTARTSYS if the wait was interrupted.
 */
static int aspeed_peci_xfer(struct peci_controller *controller,
			    u8 addr, struct peci_request *req)
{
	struct aspeed_peci *priv = dev_get_drvdata(controller->dev.parent);
	unsigned long timeout = msecs_to_jiffies(priv->cmd_timeout_ms);
	u32 peci_head;
	int ret, i;

	/* Hardware data buffers hold at most 32 bytes per direction. */
	if (req->tx.len > ASPEED_PECI_DATA_BUF_SIZE_MAX ||
	    req->rx.len > ASPEED_PECI_DATA_BUF_SIZE_MAX)
		return -EINVAL;

	/* Check command sts and bus idle state */
	ret = aspeed_peci_check_idle(priv);
	if (ret)
		return ret; /* -ETIMEDOUT */

	spin_lock_irq(&priv->lock);
	reinit_completion(&priv->xfer_complete);

	peci_head = FIELD_PREP(ASPEED_PECI_TARGET_ADDR_MASK, addr) |
		    FIELD_PREP(ASPEED_PECI_WR_LEN_MASK, req->tx.len) |
		    FIELD_PREP(ASPEED_PECI_RD_LEN_MASK, req->rx.len);

	writel(peci_head, priv->base + ASPEED_PECI_RW_LENGTH);

	/* Tx buffer is split: bytes 0-15 at WR_DATA0..3, bytes 16-31 at WR_DATA4..7. */
	for (i = 0; i < req->tx.len; i += 4) {
		u32 reg = (i < 16 ? ASPEED_PECI_WR_DATA0 : ASPEED_PECI_WR_DATA4) + i % 16;

		writel(get_unaligned_le32(&req->tx.buf[i]), priv->base + reg);
	}

#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
	dev_dbg(priv->dev, "HEAD : %#08x\n", peci_head);
	print_hex_dump_bytes("TX : ", DUMP_PREFIX_NONE, req->tx.buf, req->tx.len);
#endif

	/* Reset accumulated status before firing; the ISR will fill it in. */
	priv->status = 0;
	writel(ASPEED_PECI_CMD_FIRE, priv->base + ASPEED_PECI_CMD);
	spin_unlock_irq(&priv->lock);

	ret = wait_for_completion_interruptible_timeout(&priv->xfer_complete, timeout);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		dev_dbg(priv->dev, "timeout waiting for a response\n");
		return -ETIMEDOUT;
	}

	spin_lock_irq(&priv->lock);

	/* Any status bit besides CMD_DONE indicates a failed transaction. */
	if (priv->status != ASPEED_PECI_INT_CMD_DONE) {
		spin_unlock_irq(&priv->lock);
		dev_dbg(priv->dev, "no valid response, status: %#02x\n", priv->status);
		return -EIO;
	}

	spin_unlock_irq(&priv->lock);

	/*
	 * We need to use dword reads for register access, make sure that the
	 * buffer size is multiple of 4-bytes.
	 */
	BUILD_BUG_ON(PECI_REQUEST_MAX_BUF_SIZE % 4);

	/* Rx buffer is split the same way as Tx: RD_DATA0..3 then RD_DATA4..7. */
	for (i = 0; i < req->rx.len; i += 4) {
		u32 reg = (i < 16 ? ASPEED_PECI_RD_DATA0 : ASPEED_PECI_RD_DATA4) + i % 16;
		u32 rx_data = readl(priv->base + reg);

		put_unaligned_le32(rx_data, &req->rx.buf[i]);
	}

#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
	print_hex_dump_bytes("RX : ", DUMP_PREFIX_NONE, req->rx.buf, req->rx.len);
#endif
	return 0;
}
288
/*
 * Interrupt handler: acknowledge and accumulate status bits into
 * priv->status (under priv->lock, shared with aspeed_peci_xfer()) and
 * signal completion when the command finished.
 */
static irqreturn_t aspeed_peci_irq_handler(int irq, void *arg)
{
	struct aspeed_peci *priv = arg;
	u32 status;

	spin_lock(&priv->lock);
	/* Ack everything we saw by writing the status bits back. */
	status = readl(priv->base + ASPEED_PECI_INT_STS);
	writel(status, priv->base + ASPEED_PECI_INT_STS);
	priv->status |= (status & ASPEED_PECI_INT_MASK);

	/*
	 * All commands should be ended up with a ASPEED_PECI_INT_CMD_DONE bit
	 * set even in an error case.
	 */
	if (status & ASPEED_PECI_INT_CMD_DONE)
		complete(&priv->xfer_complete);

	/* Clear the command register so the next transfer starts clean. */
	writel(0, priv->base + ASPEED_PECI_CMD);

	spin_unlock(&priv->lock);

	return IRQ_HANDLED;
}
312
/*
 * Find the (msg_timing, clk_div_exp) pair whose combined divider
 * (4 * msg_timing + 1) * (1 << clk_div_exp) is closest to @rate.
 *
 * @rate: target divider value (parent rate / (4 * requested rate))
 * @msg_timing: out - 8-bit timing value (1-255) for the negotiation register
 * @clk_div_exp: out - 3-bit exponent (0-7) for the CTRL clock divider field
 *
 * Uses an explicit absolute difference instead of abs() on an unsigned
 * subtraction, initializes the outputs defensively (the first iteration
 * always overwrites them), and stops early once an exact divider is found
 * since nothing can beat a zero difference.
 */
static void clk_aspeed_peci_find_div_values(unsigned long rate, int *msg_timing, int *clk_div_exp)
{
	unsigned long best_diff = ~0ul, diff, div;
	int i, j;

	*msg_timing = 1;
	*clk_div_exp = 0;

	for (i = 1; i <= 255; i++) {
		for (j = 0; j < 8; j++) {
			/* (4 * i + 1) << j, kept in unsigned long arithmetic */
			div = (4UL * i + 1) << j;
			diff = (rate > div) ? rate - div : div - rate;
			if (diff < best_diff) {
				*msg_timing = i;
				*clk_div_exp = j;
				if (!diff)	/* exact match - can't do better */
					return;
				best_diff = diff;
			}
		}
	}
}
331
/*
 * Translate a requested output rate into the full hardware divider
 * (including the fixed /4 stage) for the given parent rate.
 */
static int clk_aspeed_peci_get_div(unsigned long rate, const unsigned long *prate)
{
	int msg_timing, clk_div_exp;
	unsigned long target = *prate / (4 * rate);

	clk_aspeed_peci_find_div_values(target, &msg_timing, &clk_div_exp);

	return ASPEED_PECI_CLK_DIV(msg_timing, clk_div_exp);
}
341
/*
 * clk_ops.set_rate: program the divider exponent into CTRL and the
 * negotiated message/address timing into the timing negotiation register.
 * The same timing value is used for both message and address phases.
 */
static int clk_aspeed_peci_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long prate)
{
	struct clk_aspeed_peci *peci_clk = container_of(hw, struct clk_aspeed_peci, hw);
	struct aspeed_peci *aspeed_peci = peci_clk->aspeed_peci;
	unsigned long this_rate = prate / (4 * rate);
	int clk_div_exp, msg_timing;
	u32 val;

	clk_aspeed_peci_find_div_values(this_rate, &msg_timing, &clk_div_exp);

	/* Update only the clock-divider field, preserving the other CTRL bits. */
	val = readl(aspeed_peci->base + ASPEED_PECI_CTRL);
	val &= ~ASPEED_PECI_CTRL_CLK_DIV_MASK;
	val |= FIELD_PREP(ASPEED_PECI_CTRL_CLK_DIV_MASK, clk_div_exp);
	writel(val, aspeed_peci->base + ASPEED_PECI_CTRL);

	val = FIELD_PREP(ASPEED_PECI_T_NEGO_MSG_MASK, msg_timing);
	val |= FIELD_PREP(ASPEED_PECI_T_NEGO_ADDR_MASK, msg_timing);
	writel(val, aspeed_peci->base + ASPEED_PECI_TIMING_NEGOTIATION);

	return 0;
}
364
clk_aspeed_peci_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)365 static int clk_aspeed_peci_determine_rate(struct clk_hw *hw,
366 struct clk_rate_request *req)
367 {
368 int div = clk_aspeed_peci_get_div(req->rate, &req->best_parent_rate);
369
370 req->rate = DIV_ROUND_UP_ULL(req->best_parent_rate, div);
371
372 return 0;
373 }
374
/*
 * clk_ops.recalc_rate: reconstruct the output rate from the timing and
 * divider values currently programmed in hardware. Returns 0 if the
 * message/address timings disagree (a state this driver never programs).
 */
static unsigned long clk_aspeed_peci_recalc_rate(struct clk_hw *hw, unsigned long prate)
{
	struct clk_aspeed_peci *peci_clk = container_of(hw, struct clk_aspeed_peci, hw);
	struct aspeed_peci *aspeed_peci = peci_clk->aspeed_peci;
	int div, msg_timing, addr_timing, clk_div_exp;
	u32 reg;

	reg = readl(aspeed_peci->base + ASPEED_PECI_TIMING_NEGOTIATION);
	msg_timing = FIELD_GET(ASPEED_PECI_T_NEGO_MSG_MASK, reg);
	addr_timing = FIELD_GET(ASPEED_PECI_T_NEGO_ADDR_MASK, reg);

	/* set_rate always writes identical timings; anything else is unexpected. */
	if (msg_timing != addr_timing)
		return 0;

	reg = readl(aspeed_peci->base + ASPEED_PECI_CTRL);
	clk_div_exp = FIELD_GET(ASPEED_PECI_CTRL_CLK_DIV_MASK, reg);

	div = ASPEED_PECI_CLK_DIV(msg_timing, clk_div_exp);

	return DIV_ROUND_UP_ULL(prate, div);
}
396
/* Ops for the divider clock driving the PECI bus timing. */
static const struct clk_ops clk_aspeed_peci_ops = {
	.set_rate = clk_aspeed_peci_set_rate,
	.determine_rate = clk_aspeed_peci_determine_rate,
	.recalc_rate = clk_aspeed_peci_recalc_rate,
};
402
403 /*
404 * PECI HW contains a clock divider which is a combination of:
405 * div0: 4 (fixed divider)
406 * div1: x + 1
407 * div2: 1 << y
408 * In other words, out_clk = in_clk / (div0 * div1 * div2)
409 * The resulting frequency is used by PECI Controller to drive the PECI bus to
410 * negotiate optimal transfer rate.
411 */
devm_aspeed_peci_register_clk_div(struct device * dev,struct clk * parent,struct aspeed_peci * priv)412 static struct clk *devm_aspeed_peci_register_clk_div(struct device *dev, struct clk *parent,
413 struct aspeed_peci *priv)
414 {
415 struct clk_aspeed_peci *peci_clk;
416 struct clk_init_data init;
417 const char *parent_name;
418 char name[32];
419 int ret;
420
421 snprintf(name, sizeof(name), "%s_div", dev_name(dev));
422
423 parent_name = __clk_get_name(parent);
424
425 init.ops = &clk_aspeed_peci_ops;
426 init.name = name;
427 init.parent_names = (const char* []) { parent_name };
428 init.num_parents = 1;
429 init.flags = 0;
430
431 peci_clk = devm_kzalloc(dev, sizeof(struct clk_aspeed_peci), GFP_KERNEL);
432 if (!peci_clk)
433 return ERR_PTR(-ENOMEM);
434
435 peci_clk->hw.init = &init;
436 peci_clk->aspeed_peci = priv;
437
438 ret = devm_clk_hw_register(dev, &peci_clk->hw);
439 if (ret)
440 return ERR_PTR(ret);
441
442 return peci_clk->hw.clk;
443 }
444
/*
 * Read a u32 device property into @propval, falling back to @default_val
 * when the property is absent (silently) or out of [@min, @max] range
 * (with a warning).
 */
static void aspeed_peci_property_sanitize(struct device *dev, const char *propname,
					  u32 min, u32 max, u32 default_val, u32 *propval)
{
	u32 val;

	if (device_property_read_u32(dev, propname, &val)) {
		/* Property not provided - quietly use the default. */
		*propval = default_val;
		return;
	}

	if (val < min || val > max) {
		dev_warn(dev, "invalid %s: %u, falling back to: %u\n",
			 propname, val, default_val);
		*propval = default_val;
		return;
	}

	*propval = val;
}
463
aspeed_peci_property_setup(struct aspeed_peci * priv)464 static void aspeed_peci_property_setup(struct aspeed_peci *priv)
465 {
466 aspeed_peci_property_sanitize(priv->dev, "clock-frequency",
467 ASPEED_PECI_CLK_FREQUENCY_MIN, ASPEED_PECI_CLK_FREQUENCY_MAX,
468 ASPEED_PECI_CLK_FREQUENCY_DEFAULT, &priv->clk_frequency);
469 aspeed_peci_property_sanitize(priv->dev, "cmd-timeout-ms",
470 1, ASPEED_PECI_CMD_TIMEOUT_MS_MAX,
471 ASPEED_PECI_CMD_TIMEOUT_MS_DEFAULT, &priv->cmd_timeout_ms);
472 }
473
/* Controller ops handed to the PECI core. */
static const struct peci_controller_ops aspeed_ops = {
	.xfer = aspeed_peci_xfer,
};
477
/* devm action: put the controller back into reset on driver teardown. */
static void aspeed_peci_reset_control_release(void *data)
{
	struct reset_control *rst = data;

	reset_control_assert(rst);
}
482
devm_aspeed_peci_reset_control_deassert(struct device * dev,struct reset_control * rst)483 static int devm_aspeed_peci_reset_control_deassert(struct device *dev, struct reset_control *rst)
484 {
485 int ret;
486
487 ret = reset_control_deassert(rst);
488 if (ret)
489 return ret;
490
491 return devm_add_action_or_reset(dev, aspeed_peci_reset_control_release, rst);
492 }
493
/* devm action: undo clk_prepare_enable() on driver teardown. */
static void aspeed_peci_clk_release(void *data)
{
	struct clk *clk = data;

	clk_disable_unprepare(clk);
}
498
devm_aspeed_peci_clk_enable(struct device * dev,struct clk * clk)499 static int devm_aspeed_peci_clk_enable(struct device *dev, struct clk *clk)
500 {
501 int ret;
502
503 ret = clk_prepare_enable(clk);
504 if (ret)
505 return ret;
506
507 return devm_add_action_or_reset(dev, aspeed_peci_clk_release, clk);
508 }
509
/*
 * Probe: map registers, hook the interrupt, take the block out of reset,
 * set up the divider clock at the requested rate, enable the controller
 * and register it with the PECI core. All resources are device-managed,
 * so there is no remove callback.
 */
static int aspeed_peci_probe(struct platform_device *pdev)
{
	struct peci_controller *controller;
	struct aspeed_peci *priv;
	struct clk *ref_clk;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = &pdev->dev;
	dev_set_drvdata(priv->dev, priv);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	ret = devm_request_irq(&pdev->dev, priv->irq, aspeed_peci_irq_handler,
			       0, "peci-aspeed", priv);
	if (ret)
		return ret;

	/* Must exist before the IRQ can fire meaningfully and before xfer runs. */
	init_completion(&priv->xfer_complete);
	spin_lock_init(&priv->lock);

	priv->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(priv->rst))
		return dev_err_probe(priv->dev, PTR_ERR(priv->rst),
				     "failed to get reset control\n");

	ret = devm_aspeed_peci_reset_control_deassert(priv->dev, priv->rst);
	if (ret)
		return dev_err_probe(priv->dev, ret, "cannot deassert reset control\n");

	/* Read optional clock-frequency / cmd-timeout-ms properties. */
	aspeed_peci_property_setup(priv);

	aspeed_peci_init_regs(priv);

	ref_clk = devm_clk_get(priv->dev, NULL);
	if (IS_ERR(ref_clk))
		return dev_err_probe(priv->dev, PTR_ERR(ref_clk), "failed to get ref clock\n");

	/* Divider clock derived from the reference clock; see comment above. */
	priv->clk = devm_aspeed_peci_register_clk_div(priv->dev, ref_clk, priv);
	if (IS_ERR(priv->clk))
		return dev_err_probe(priv->dev, PTR_ERR(priv->clk), "cannot register clock\n");

	ret = clk_set_rate(priv->clk, priv->clk_frequency);
	if (ret < 0)
		return dev_err_probe(priv->dev, ret, "cannot set clock frequency\n");

	ret = devm_aspeed_peci_clk_enable(priv->dev, priv->clk);
	if (ret)
		return dev_err_probe(priv->dev, ret, "failed to enable clock\n");

	aspeed_peci_controller_enable(priv);

	controller = devm_peci_controller_add(priv->dev, &aspeed_ops);
	if (IS_ERR(controller))
		return dev_err_probe(priv->dev, PTR_ERR(controller),
				     "failed to add aspeed peci controller\n");

	priv->controller = controller;

	return 0;
}
580
/* Supported ASPEED SoC generations. */
static const struct of_device_id aspeed_peci_of_table[] = {
	{ .compatible = "aspeed,ast2400-peci", },
	{ .compatible = "aspeed,ast2500-peci", },
	{ .compatible = "aspeed,ast2600-peci", },
	{ }
};
MODULE_DEVICE_TABLE(of, aspeed_peci_of_table);
588
static struct platform_driver aspeed_peci_driver = {
	.probe = aspeed_peci_probe,
	.driver = {
		.name = "peci-aspeed",
		.of_match_table = aspeed_peci_of_table,
	},
};
module_platform_driver(aspeed_peci_driver);

MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>");
MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
MODULE_DESCRIPTION("ASPEED PECI driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("PECI");
603