/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>
#include <linux/acpi.h>
#include <linux/dmi.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_acp.h"

#include "acp_gfx_if.h"

#define ST_JADEITE 1
#define ACP_TILE_ON_MASK 0x03
#define ACP_TILE_OFF_MASK 0x02
#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20

#define ACP_TILE_P1_MASK 0x3e
#define ACP_TILE_P2_MASK 0x3d
#define ACP_TILE_DSP0_MASK 0x3b
#define ACP_TILE_DSP1_MASK 0x37

#define ACP_TILE_DSP2_MASK 0x2f

#define ACP_DMA_REGS_END 0x146c0
#define ACP_I2S_PLAY_REGS_START 0x14840
#define ACP_I2S_PLAY_REGS_END 0x148b4
#define ACP_I2S_CAP_REGS_START 0x148b8
#define ACP_I2S_CAP_REGS_END 0x1496c

#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
#define ACP_BT_PLAY_REGS_START 0x14970
#define ACP_BT_PLAY_REGS_END 0x14a24
#define ACP_BT_COMP1_REG_OFFSET 0xac
#define ACP_BT_COMP2_REG_OFFSET 0xa8

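/*
 * ACP MMIO register offsets and bit masks: power-gating state machine
 * (PGFSM), memory shutdown, clock enable and soft reset controls.
 */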
#define mmACP_PGFSM_RETAIN_REG 0x51c9
#define mmACP_PGFSM_CONFIG_REG 0x51ca
#define mmACP_PGFSM_READ_REG_0 0x51cc

#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb

#define mmACP_CONTROL 0x5131
#define mmACP_STATUS 0x5133
#define mmACP_SOFT_RESET 0x5134
#define ACP_CONTROL__ClkEn_MASK 0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK 0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK 0x1000000
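/*
 * Poll counts for the soft-reset and clock-enable handshakes below; each
 * iteration waits 100 us, so 0xFF gives roughly a 25 ms timeout.
 */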
#define ACP_CLOCK_EN_TIME_OUT_VALUE 0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF

#define ACP_TIMEOUT_LOOP 0x000000FF
#define ACP_DEVS 4
#define ACP_SRC_ID 162

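/*
 * Non-zero when one of the DMI quirk entries below matches; selects the
 * Jadeite resource layout in acp_hw_init().
 */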
static unsigned long acp_machine_id;

enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};

static int acp_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->acp.parent = adev->dev;

	adev->acp.cgs_device =
		amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
		return -EINVAL;

	return 0;
}

static int acp_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);

	return 0;
}

struct acp_pm_domain {
	void *adev;
	struct generic_pm_domain gpd;
};

static int acp_poweroff(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	adev = apd->adev;
	/* call smu to POWER GATE ACP block
	 * smu will
	 * 1. turn off the acp clock
	 * 2. power off the acp tiles
	 * 3. check and enter ulv state
	 */
	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
	return 0;
}

static int acp_poweron(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	adev = apd->adev;
	/* call smu to UNGATE ACP block
	 * smu will
	 * 1. exit ulv
	 * 2. turn on acp clock
	 * 3. power on acp tiles
	 */
	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
	return 0;
}

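/*
 * device_for_each_child() callbacks used to attach the MFD children to,
 * and detach them from, the ACP generic power domain.
 */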
static int acp_genpd_add_device(struct device *dev, void *data)
{
	struct generic_pm_domain *gpd = data;
	int ret;

	ret = pm_genpd_add_device(gpd, dev);
	if (ret)
		dev_err(dev, "Failed to add dev to genpd %d\n", ret);

	return ret;
}

static int acp_genpd_remove_device(struct device *dev, void *data)
{
	int ret;

	ret = pm_genpd_remove_device(dev);
	if (ret)
		dev_err(dev, "Failed to remove dev from genpd %d\n", ret);

	/* Continue to remove */
	return 0;
}

static int acp_quirk_cb(const struct dmi_system_id *id)
{
	acp_machine_id = ST_JADEITE;
	return 1;
}

static const struct dmi_system_id acp_quirk_table[] = {
	{
		.callback = acp_quirk_cb,
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMD"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jadeite"),
		}
	},
	{
		.callback = acp_quirk_cb,
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "IP3 Technology CO.,Ltd."),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ASN1D"),
		},
	},
	{
		.callback = acp_quirk_cb,
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Standard"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ASN10"),
		},
	},
	{}
};

/**
 * acp_hw_init - start and test ACP block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
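 * Return: 0 on success or a negative error code on failure.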
 */
static int acp_hw_init(struct amdgpu_ip_block *ip_block)
{
	int r;
	u64 acp_base;
	u32 val = 0;
	u32 count = 0;
	struct i2s_platform_data *i2s_pdata = NULL;

	struct amdgpu_device *adev = ip_block->adev;

	r = amd_acp_hw_init(adev->acp.cgs_device,
			    ip_block->version->major, ip_block->version->minor);
	/* -ENODEV means board uses AZ rather than ACP */
	if (r == -ENODEV) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
		return 0;
	} else if (r) {
		return r;
	}

	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
		return -EINVAL;

	acp_base = adev->rmmio_base;
	adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
	if (!adev->acp.acp_genpd)
		return -ENOMEM;

	adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
	adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
	adev->acp.acp_genpd->gpd.power_on = acp_poweron;
	adev->acp.acp_genpd->adev = adev;

	pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
	dmi_check_system(acp_quirk_table);
	switch (acp_machine_id) {
	case ST_JADEITE:
	{
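		/*
		 * Jadeite-class boards expose a single full-duplex I2S
		 * controller: three resources (DMA registers, I2S registers,
		 * DMA IRQ) and two MFD cells.
		 */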
		adev->acp.acp_cell = kcalloc(2, sizeof(struct mfd_cell),
					     GFP_KERNEL);
		if (!adev->acp.acp_cell) {
			r = -ENOMEM;
			goto failure;
		}

		adev->acp.acp_res = kcalloc(3, sizeof(struct resource), GFP_KERNEL);
		if (!adev->acp.acp_res) {
			r = -ENOMEM;
			goto failure;
		}

		i2s_pdata = kcalloc(1, sizeof(struct i2s_platform_data), GFP_KERNEL);
		if (!i2s_pdata) {
			r = -ENOMEM;
			goto failure;
		}

		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
				      DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		i2s_pdata[0].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
		i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
		i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
		i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

		adev->acp.acp_res[0].name = "acp2x_dma";
		adev->acp.acp_res[0].flags = IORESOURCE_MEM;
		adev->acp.acp_res[0].start = acp_base;
		adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

		adev->acp.acp_res[1].name = "acp2x_dw_i2s_play_cap";
		adev->acp.acp_res[1].flags = IORESOURCE_MEM;
		adev->acp.acp_res[1].start = acp_base + ACP_I2S_CAP_REGS_START;
		adev->acp.acp_res[1].end = acp_base + ACP_I2S_CAP_REGS_END;

		adev->acp.acp_res[2].name = "acp2x_dma_irq";
		adev->acp.acp_res[2].flags = IORESOURCE_IRQ;
		adev->acp.acp_res[2].start = amdgpu_irq_create_mapping(adev, 162);
		adev->acp.acp_res[2].end = adev->acp.acp_res[2].start;

		adev->acp.acp_cell[0].name = "acp_audio_dma";
		adev->acp.acp_cell[0].num_resources = 3;
		adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
		adev->acp.acp_cell[0].platform_data = &adev->asic_type;
		adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

		adev->acp.acp_cell[1].name = "designware-i2s";
		adev->acp.acp_cell[1].num_resources = 1;
		adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
		adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
		adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
		r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, 2);
		if (r)
			goto failure;
		r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
					  acp_genpd_add_device);
		if (r)
			goto failure;
		break;
	}
	default:
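		/*
		 * Default layout: five resources (DMA registers, I2S playback,
		 * I2S capture, BT I2S play/capture, DMA IRQ) and four MFD
		 * cells: the ACP DMA engine plus three DesignWare I2S
		 * controllers.
		 */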
		adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
					     GFP_KERNEL);

		if (!adev->acp.acp_cell) {
			r = -ENOMEM;
			goto failure;
		}

		adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
		if (!adev->acp.acp_res) {
			r = -ENOMEM;
			goto failure;
		}

		i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
		if (!i2s_pdata) {
			r = -ENOMEM;
			goto failure;
		}

		switch (adev->asic_type) {
		case CHIP_STONEY:
			i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
					      DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
			break;
		default:
			i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
		}
		i2s_pdata[0].cap = DWC_I2S_PLAY;
		i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
		i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
		i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
		switch (adev->asic_type) {
		case CHIP_STONEY:
			i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
					      DW_I2S_QUIRK_COMP_PARAM1 |
					      DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
			break;
		default:
			i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
					      DW_I2S_QUIRK_COMP_PARAM1;
		}

		i2s_pdata[1].cap = DWC_I2S_RECORD;
		i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
		i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
		i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

		i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
		switch (adev->asic_type) {
		case CHIP_STONEY:
			i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
			break;
		default:
			break;
		}

		i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
		i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
		i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
		i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;

		adev->acp.acp_res[0].name = "acp2x_dma";
		adev->acp.acp_res[0].flags = IORESOURCE_MEM;
		adev->acp.acp_res[0].start = acp_base;
		adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

		adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
		adev->acp.acp_res[1].flags = IORESOURCE_MEM;
		adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
		adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

		adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
		adev->acp.acp_res[2].flags = IORESOURCE_MEM;
		adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
		adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

		adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
		adev->acp.acp_res[3].flags = IORESOURCE_MEM;
		adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
		adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;

		adev->acp.acp_res[4].name = "acp2x_dma_irq";
		adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
		adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
		adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;

		adev->acp.acp_cell[0].name = "acp_audio_dma";
		adev->acp.acp_cell[0].num_resources = 5;
		adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
		adev->acp.acp_cell[0].platform_data = &adev->asic_type;
		adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

		adev->acp.acp_cell[1].name = "designware-i2s";
		adev->acp.acp_cell[1].num_resources = 1;
		adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
		adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
		adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

		adev->acp.acp_cell[2].name = "designware-i2s";
		adev->acp.acp_cell[2].num_resources = 1;
		adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
		adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
		adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

		adev->acp.acp_cell[3].name = "designware-i2s";
		adev->acp.acp_cell[3].num_resources = 1;
		adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
		adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
		adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);

		r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, ACP_DEVS);
		if (r)
			goto failure;

		r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
					  acp_genpd_add_device);
		if (r)
			goto failure;
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			r = -ETIMEDOUT;
			goto failure;
		}
		udelay(100);
	}
	/* Enable clock to ACP and wait until the clock is enabled */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val = val | ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to enable ACP clock\n");
			r = -ETIMEDOUT;
			goto failure;
		}
		udelay(100);
	}
	/* Deassert the SOFT RESET flags */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
	return 0;

failure:
	kfree(i2s_pdata);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_cell);
	kfree(adev->acp.acp_genpd);
	return r;
}

/**
 * acp_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
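 * Return: 0 on success or a negative error code on failure.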
 */
static int acp_hw_fini(struct amdgpu_ip_block *ip_block)
{
	u32 val = 0;
	u32 count = 0;
	struct amdgpu_device *adev = ip_block->adev;

	/* return early if no ACP */
	if (!adev->acp.acp_genpd) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
		return 0;
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Disable ACP clock */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val &= ~ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	device_for_each_child(adev->acp.parent, NULL,
			      acp_genpd_remove_device);

	mfd_remove_devices(adev->acp.parent);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_genpd);
	kfree(adev->acp.acp_cell);

	return 0;
}

static int acp_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* power up on suspend */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
	return 0;
}

static int acp_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* power down again on resume */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
	return 0;
}

static bool acp_is_idle(void *handle)
{
	return true;
}

static int acp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int acp_set_powergating_state(struct amdgpu_ip_block *ip_block,
				     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = (state == AMD_PG_STATE_GATE);

	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable, 0);

	return 0;
}

static const struct amd_ip_funcs acp_ip_funcs = {
	.name = "acp_ip",
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};

const struct amdgpu_ip_block_version acp_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
	.rev = 0,
	.funcs = &acp_ip_funcs,
};