// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2020 Intel Corporation
//
// Author: Cezary Rojewski <cezary.rojewski@intel.com>
//

#include <linux/devcoredump.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/pxa2xx_ssp.h>
#include "core.h"
#include "messages.h"
#include "registers.h"

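/*
 * dma_request_channel() filter callback: accept only channels exposed by
 * the DMA controller registered for this ADSP device.
 */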
static bool catpt_dma_filter(struct dma_chan *chan, void *param)
{
	return param == chan->device->dev;
}

/*
 * Either engine 0 or 1 can be used for image loading.
 * Align with Windows driver equivalent and stick to engine 1.
 */
#define CATPT_DMA_DEVID		1
#define CATPT_DMA_DSP_ADDR_MASK	GENMASK(31, 20)

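/*
 * Request a DMA_MEMCPY-capable channel from the on-board DMA controller
 * and configure it for 32-bit bus width and a maximum burst of 16 in both
 * directions.
 */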
struct dma_chan *catpt_dma_request_config_chan(struct catpt_dev *cdev)
{
	struct dma_slave_config config;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_channel(mask, catpt_dma_filter, cdev->dev);
	if (!chan) {
		dev_err(cdev->dev, "request channel failed\n");
		return ERR_PTR(-ENODEV);
	}

	memset(&config, 0, sizeof(config));
	config.direction = DMA_MEM_TO_DEV;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.src_maxburst = 16;
	config.dst_maxburst = 16;

	ret = dmaengine_slave_config(chan, &config);
	if (ret) {
		dev_err(cdev->dev, "slave config failed: %d\n", ret);
		dma_release_channel(chan);
		return ERR_PTR(ret);
	}

	return chan;
}

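/*
 * Perform a single memcpy transfer on the provided channel. Host memory
 * access in demand mode (HMDC.HDDA) is enabled only for the duration of
 * the transfer and cleared again regardless of its outcome.
 */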
static int catpt_dma_memcpy(struct catpt_dev *cdev, struct dma_chan *chan,
			    dma_addr_t dst_addr, dma_addr_t src_addr,
			    size_t size)
{
	struct dma_async_tx_descriptor *desc;
	enum dma_status status;
	int ret;

	desc = dmaengine_prep_dma_memcpy(chan, dst_addr, src_addr, size,
					 DMA_CTRL_ACK);
	if (!desc) {
		dev_err(cdev->dev, "prep dma memcpy failed\n");
		return -EIO;
	}

	/* enable demand mode for dma channel */
	catpt_updatel_shim(cdev, HMDC,
			   CATPT_HMDC_HDDA(CATPT_DMA_DEVID, chan->chan_id),
			   CATPT_HMDC_HDDA(CATPT_DMA_DEVID, chan->chan_id));

	ret = dma_submit_error(dmaengine_submit(desc));
	if (ret) {
		dev_err(cdev->dev, "submit tx failed: %d\n", ret);
		goto clear_hdda;
	}

	status = dma_wait_for_async_tx(desc);
	ret = (status == DMA_COMPLETE) ? 0 : -EPROTO;

clear_hdda:
	/* regardless of status, disable access to HOST memory in demand mode */
	catpt_updatel_shim(cdev, HMDC,
			   CATPT_HMDC_HDDA(CATPT_DMA_DEVID, chan->chan_id), 0);

	return ret;
}

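/*
 * For transfers to and from DSP memory, the DSP-side address is OR-ed with
 * CATPT_DMA_DSP_ADDR_MASK, which distinguishes LPE-local memory from host
 * addresses for the DMA engine.
 */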
int catpt_dma_memcpy_todsp(struct catpt_dev *cdev, struct dma_chan *chan,
			   dma_addr_t dst_addr, dma_addr_t src_addr,
			   size_t size)
{
	return catpt_dma_memcpy(cdev, chan, dst_addr | CATPT_DMA_DSP_ADDR_MASK,
				src_addr, size);
}

int catpt_dma_memcpy_fromdsp(struct catpt_dev *cdev, struct dma_chan *chan,
			     dma_addr_t dst_addr, dma_addr_t src_addr,
			     size_t size)
{
	return catpt_dma_memcpy(cdev, chan, dst_addr,
				src_addr | CATPT_DMA_DSP_ADDR_MASK, size);
}

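/*
 * Probe the DesignWare DMA controller whose registers live within the LPE
 * BAR at the offset selected by CATPT_DMA_DEVID.
 */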
int catpt_dmac_probe(struct catpt_dev *cdev)
{
	struct dw_dma_chip *dmac;
	int ret;

	dmac = devm_kzalloc(cdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->regs = cdev->lpe_ba + cdev->spec->host_dma_offset[CATPT_DMA_DEVID];
	dmac->dev = cdev->dev;
	dmac->irq = cdev->irq;

	/*
	 * Caller is responsible for putting device in D0 to allow
	 * for I/O and memory access before probing DW.
	 */
	ret = dw_dma_probe(dmac);
	if (ret)
		return ret;

	cdev->dmac = dmac;
	return 0;
}

void catpt_dmac_remove(struct catpt_dev *cdev)
{
	/*
	 * As dw_dma_remove() juggles with pm_runtime_get_xxx() and
	 * pm_runtime_put_xxx() while both ADSP and DW 'devices' are part of
	 * the same module, caller makes sure pm_runtime_disable() is invoked
	 * before removing DW to prevent postmortem resume and suspend.
	 */
	dw_dma_remove(cdev->dmac);
}

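/*
 * Update the SRAMPGE bits in VDRTCTL0 for the given SRAM region. A cleared
 * bit means the corresponding memory block is powered on; each newly
 * enabled block gets a single dummy read before regular use.
 */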
static void catpt_dsp_set_srampge(struct catpt_dev *cdev, struct resource *sram,
				  unsigned long mask, unsigned long new)
{
	unsigned long old;
	u32 off = sram->start;
	unsigned long b = __ffs(mask);

	old = catpt_readl_pci(cdev, VDRTCTL0) & mask;
	dev_dbg(cdev->dev, "SRAMPGE [0x%08lx] 0x%08lx -> 0x%08lx",
		mask, old, new);

	if (old == new)
		return;

	catpt_updatel_pci(cdev, VDRTCTL0, mask, new);
	/* wait for SRAM power gating to propagate */
	udelay(60);

	/*
	 * Dummy read as the very first access after block enable
	 * to prevent byte loss in future operations.
	 */
	for_each_clear_bit_from(b, &new, fls_long(mask)) {
		u8 buf[4];

		/* newly enabled: new bit=0 while old bit=1 */
		if (test_bit(b, &old)) {
			dev_dbg(cdev->dev, "sanitize block %ld: off 0x%08x\n",
				b - __ffs(mask), off);
			memcpy_fromio(buf, cdev->lpe_ba + off, sizeof(buf));
		}
		off += CATPT_MEMBLOCK_SIZE;
	}
}

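/*
 * Walk the region's child resources (allocations currently in use), build a
 * bitmap of busy blocks and power-gate everything else. Core clock gating
 * is disabled for the duration of the update.
 */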
void catpt_dsp_update_srampge(struct catpt_dev *cdev, struct resource *sram,
			      unsigned long mask)
{
	struct resource *res;
	unsigned long new = 0;

	/* flag all busy blocks */
	for (res = sram->child; res; res = res->sibling) {
		u32 h, l;

		h = (res->end - sram->start) / CATPT_MEMBLOCK_SIZE;
		l = (res->start - sram->start) / CATPT_MEMBLOCK_SIZE;
		new |= GENMASK(h, l);
	}

	/* shift by the mask's start bit and invert, as powered-on blocks are marked with 0 */
	new = ~(new << __ffs(mask)) & mask;

	/* disable core clock gating */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE, 0);

	catpt_dsp_set_srampge(cdev, sram, mask, new);

	/* enable core clock gating */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE,
			  CATPT_VDRTCTL2_DCLCGE);
}

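/*
 * Stall or unstall the DSP core via the CS1.STALL bit and poll until the
 * register reflects the requested state.
 */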
int catpt_dsp_stall(struct catpt_dev *cdev, bool stall)
{
	u32 reg, val;

	val = stall ? CATPT_CS_STALL : 0;
	catpt_updatel_shim(cdev, CS1, CATPT_CS_STALL, val);

	return catpt_readl_poll_shim(cdev, CS1,
				     reg, (reg & CATPT_CS_STALL) == val,
				     500, 10000);
}

static int catpt_dsp_reset(struct catpt_dev *cdev, bool reset)
{
	u32 reg, val;

	val = reset ? CATPT_CS_RST : 0;
	catpt_updatel_shim(cdev, CS1, CATPT_CS_RST, val);

	return catpt_readl_poll_shim(cdev, CS1,
				     reg, (reg & CATPT_CS_RST) == val,
				     500, 10000);
}

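/*
 * PLL shutdown control differs between platform variants: LPT toggles the
 * APLLSE bit in VDRTCTL0 while WPT toggles the one in VDRTCTL2.
 */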
void lpt_dsp_pll_shutdown(struct catpt_dev *cdev, bool enable)
{
	u32 val;

	val = enable ? LPT_VDRTCTL0_APLLSE : 0;
	catpt_updatel_pci(cdev, VDRTCTL0, LPT_VDRTCTL0_APLLSE, val);
}

void wpt_dsp_pll_shutdown(struct catpt_dev *cdev, bool enable)
{
	u32 val;

	val = enable ? WPT_VDRTCTL2_APLLSE : 0;
	catpt_updatel_pci(cdev, VDRTCTL2, WPT_VDRTCTL2_APLLSE, val);
}

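/*
 * Switch between low-power and high-frequency clock sources. When @waiti is
 * set, first wait for the DSP to signal the WAIT (idle) state; a switch to
 * the low-power clock is abandoned if that signal never arrives. Clock
 * change completion is tracked through CLKCTL.CFCIP.
 */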
static int catpt_dsp_select_lpclock(struct catpt_dev *cdev, bool lp, bool waiti)
{
	u32 mask, reg, val;
	int ret;

	mutex_lock(&cdev->clk_mutex);

	val = lp ? CATPT_CS_LPCS : 0;
	reg = catpt_readl_shim(cdev, CS1) & CATPT_CS_LPCS;
	dev_dbg(cdev->dev, "LPCS [0x%08lx] 0x%08x -> 0x%08x",
		CATPT_CS_LPCS, reg, val);

	if (reg == val) {
		mutex_unlock(&cdev->clk_mutex);
		return 0;
	}

	if (waiti) {
		/* wait for DSP to signal WAIT state */
		ret = catpt_readl_poll_shim(cdev, ISD,
					    reg, (reg & CATPT_ISD_DCPWM),
					    500, 10000);
		if (ret) {
			dev_warn(cdev->dev, "await WAITI timeout\n");
			/* no signal - only high clock selection allowed */
			if (lp) {
				mutex_unlock(&cdev->clk_mutex);
				return 0;
			}
		}
	}

	ret = catpt_readl_poll_shim(cdev, CLKCTL,
				    reg, !(reg & CATPT_CLKCTL_CFCIP),
				    500, 10000);
	if (ret)
		dev_warn(cdev->dev, "clock change still in progress\n");

	/* default to DSP core & audio fabric high clock */
	val |= CATPT_CS_DCS_HIGH;
	mask = CATPT_CS_LPCS | CATPT_CS_DCS;
	catpt_updatel_shim(cdev, CS1, mask, val);

	ret = catpt_readl_poll_shim(cdev, CLKCTL,
				    reg, !(reg & CATPT_CLKCTL_CFCIP),
				    500, 10000);
	if (ret)
		dev_warn(cdev->dev, "clock change still in progress\n");

	/* update PLL accordingly */
	cdev->spec->pll_shutdown(cdev, lp);

	mutex_unlock(&cdev->clk_mutex);
	return 0;
}

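/*
 * Keep the high-frequency clock selected as long as any stream is prepared;
 * fall back to the low-power clock once none remain.
 */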
int catpt_dsp_update_lpclock(struct catpt_dev *cdev)
{
	struct catpt_stream_runtime *stream;

	list_for_each_entry(stream, &cdev->stream_list, node)
		if (stream->prepared)
			return catpt_dsp_select_lpclock(cdev, false, true);

	return catpt_dsp_select_lpclock(cdev, true, true);
}

/* bring registers to their defaults as HW won't reset itself */
static void catpt_dsp_set_regs_defaults(struct catpt_dev *cdev)
{
	int i;

	catpt_writel_shim(cdev, CS1, CATPT_CS_DEFAULT);
	catpt_writel_shim(cdev, ISC, CATPT_ISC_DEFAULT);
	catpt_writel_shim(cdev, ISD, CATPT_ISD_DEFAULT);
	catpt_writel_shim(cdev, IMC, CATPT_IMC_DEFAULT);
	catpt_writel_shim(cdev, IMD, CATPT_IMD_DEFAULT);
	catpt_writel_shim(cdev, IPCC, CATPT_IPCC_DEFAULT);
	catpt_writel_shim(cdev, IPCD, CATPT_IPCD_DEFAULT);
	catpt_writel_shim(cdev, CLKCTL, CATPT_CLKCTL_DEFAULT);
	catpt_writel_shim(cdev, CS2, CATPT_CS2_DEFAULT);
	catpt_writel_shim(cdev, LTRC, CATPT_LTRC_DEFAULT);
	catpt_writel_shim(cdev, HMDC, CATPT_HMDC_DEFAULT);

	for (i = 0; i < CATPT_SSP_COUNT; i++) {
		catpt_writel_ssp(cdev, i, SSCR0, CATPT_SSC0_DEFAULT);
		catpt_writel_ssp(cdev, i, SSCR1, CATPT_SSC1_DEFAULT);
		catpt_writel_ssp(cdev, i, SSSR, CATPT_SSS_DEFAULT);
		catpt_writel_ssp(cdev, i, SSITR, CATPT_SSIT_DEFAULT);
		catpt_writel_ssp(cdev, i, SSDR, CATPT_SSD_DEFAULT);
		catpt_writel_ssp(cdev, i, SSTO, CATPT_SSTO_DEFAULT);
		catpt_writel_ssp(cdev, i, SSPSP, CATPT_SSPSP_DEFAULT);
		catpt_writel_ssp(cdev, i, SSTSA, CATPT_SSTSA_DEFAULT);
		catpt_writel_ssp(cdev, i, SSRSA, CATPT_SSRSA_DEFAULT);
		catpt_writel_ssp(cdev, i, SSTSS, CATPT_SSTSS_DEFAULT);
		catpt_writel_ssp(cdev, i, SSCR2, CATPT_SSCR2_DEFAULT);
		catpt_writel_ssp(cdev, i, SSPSP2, CATPT_SSPSP2_DEFAULT);
	}
}

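/*
 * Power-down sequence: put the core in reset, switch SSPs to the 24 MHz
 * clock, select the low-power clock and disable MCLK output, restore
 * register defaults, adjust clock/trunk gating, power-gate all SRAM blocks
 * and finally move the device to D3hot.
 */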
int catpt_dsp_power_down(struct catpt_dev *cdev)
{
	u32 mask, val;

	/* disable core clock gating */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE, 0);

	catpt_dsp_reset(cdev, true);
	/* set 24 MHz clock for both SSPs */
	catpt_updatel_shim(cdev, CS1, CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1),
			   CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1));
	catpt_dsp_select_lpclock(cdev, true, false);
	/* disable MCLK */
	catpt_updatel_shim(cdev, CLKCTL, CATPT_CLKCTL_SMOS, 0);

	catpt_dsp_set_regs_defaults(cdev);

	/* switch clock gating */
	mask = CATPT_VDRTCTL2_CGEALL & (~CATPT_VDRTCTL2_DCLCGE);
	val = mask & (~CATPT_VDRTCTL2_DTCGE);
	catpt_updatel_pci(cdev, VDRTCTL2, mask, val);
	/* enable DTCGE separately */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DTCGE,
			  CATPT_VDRTCTL2_DTCGE);

	/* SRAM power gating all */
	catpt_dsp_set_srampge(cdev, &cdev->dram, cdev->spec->dram_mask,
			      cdev->spec->dram_mask);
	catpt_dsp_set_srampge(cdev, &cdev->iram, cdev->spec->iram_mask,
			      cdev->spec->iram_mask);
	mask = cdev->spec->d3srampgd_bit | cdev->spec->d3pgd_bit;
	catpt_updatel_pci(cdev, VDRTCTL0, mask, cdev->spec->d3pgd_bit);

	catpt_updatel_pci(cdev, PMCS, PCI_PM_CTRL_STATE_MASK, (__force u32)PCI_D3hot);
	/* give hw time to drop off */
	udelay(50);

	/* enable core clock gating */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE,
			  CATPT_VDRTCTL2_DCLCGE);
	udelay(50);

	return 0;
}

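/*
 * Power-up sequence, mirroring power_down: move the device to D0, ungate
 * all SRAM blocks, restore register defaults, re-enable MCLK and the
 * high-frequency clock, switch SSPs to the 24 MHz clock and release the
 * core from reset.
 */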
int catpt_dsp_power_up(struct catpt_dev *cdev)
{
	u32 mask, val;

	/* disable core clock gating */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE, 0);

	/* switch clock gating */
	mask = CATPT_VDRTCTL2_CGEALL & (~CATPT_VDRTCTL2_DCLCGE);
	val = mask & (~CATPT_VDRTCTL2_DTCGE);
	catpt_updatel_pci(cdev, VDRTCTL2, mask, val);

	catpt_updatel_pci(cdev, PMCS, PCI_PM_CTRL_STATE_MASK, (__force u32)PCI_D0);

	/* SRAM power gating none */
	mask = cdev->spec->d3srampgd_bit | cdev->spec->d3pgd_bit;
	catpt_updatel_pci(cdev, VDRTCTL0, mask, mask);
	catpt_dsp_set_srampge(cdev, &cdev->dram, cdev->spec->dram_mask, 0);
	catpt_dsp_set_srampge(cdev, &cdev->iram, cdev->spec->iram_mask, 0);

	catpt_dsp_set_regs_defaults(cdev);

	/* restore MCLK */
	catpt_updatel_shim(cdev, CLKCTL, CATPT_CLKCTL_SMOS, CATPT_CLKCTL_SMOS);
	catpt_dsp_select_lpclock(cdev, false, false);
	/* set 24 MHz clock for both SSPs */
	catpt_updatel_shim(cdev, CS1, CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1),
			   CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1));
	catpt_dsp_reset(cdev, false);

	/* enable core clock gating */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE,
			  CATPT_VDRTCTL2_DCLCGE);

	/* generate interrupt deassert message to fix inverted interrupt logic */
	catpt_updatel_shim(cdev, IMC, CATPT_IMC_IPCDB | CATPT_IMC_IPCCD, 0);

	return 0;
}

#define CATPT_DUMP_MAGIC		0xcd42
#define CATPT_DUMP_SECTION_ID_FILE	0x00
#define CATPT_DUMP_SECTION_ID_IRAM	0x01
#define CATPT_DUMP_SECTION_ID_DRAM	0x02
#define CATPT_DUMP_SECTION_ID_REGS	0x03
#define CATPT_DUMP_HASH_SIZE		20

struct catpt_dump_section_hdr {
	u16 magic;
	u8 core_id;
	u8 section_id;
	u32 size;
};

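/*
 * Gather a device coredump consisting of a FILE header (followed by the
 * firmware hash extracted from fw_info), an IRAM section, a DRAM section
 * and a REGS section covering the SHIM, SSP and DMA register blocks, each
 * preceded by a catpt_dump_section_hdr.
 */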
int catpt_coredump(struct catpt_dev *cdev)
{
	struct catpt_dump_section_hdr *hdr;
	size_t dump_size, regs_size;
	u8 *dump, *pos;
	const char *eof;
	char *info;
	int i;

	regs_size = CATPT_SHIM_REGS_SIZE;
	regs_size += CATPT_DMA_COUNT * CATPT_DMA_REGS_SIZE;
	regs_size += CATPT_SSP_COUNT * CATPT_SSP_REGS_SIZE;
	dump_size = resource_size(&cdev->dram);
	dump_size += resource_size(&cdev->iram);
	dump_size += regs_size;
	/* account for header of each section and hash chunk */
	dump_size += 4 * sizeof(*hdr) + CATPT_DUMP_HASH_SIZE;

	dump = vzalloc(dump_size);
	if (!dump)
		return -ENOMEM;

	pos = dump;

	hdr = (struct catpt_dump_section_hdr *)pos;
	hdr->magic = CATPT_DUMP_MAGIC;
	hdr->core_id = cdev->spec->core_id;
	hdr->section_id = CATPT_DUMP_SECTION_ID_FILE;
	hdr->size = dump_size - sizeof(*hdr);
	pos += sizeof(*hdr);

	info = cdev->ipc.config.fw_info;
	eof = info + FW_INFO_SIZE_MAX;
	/* navigate to fifth info segment (fw hash) */
	for (i = 0; i < 4 && info < eof; i++, info++) {
		/* info segments are separated by a single space each */
		info = strnchr(info, eof - info, ' ');
		if (!info)
			break;
	}

	if (i == 4 && info)
		memcpy(pos, info, min_t(u32, eof - info, CATPT_DUMP_HASH_SIZE));
	pos += CATPT_DUMP_HASH_SIZE;

	hdr = (struct catpt_dump_section_hdr *)pos;
	hdr->magic = CATPT_DUMP_MAGIC;
	hdr->core_id = cdev->spec->core_id;
	hdr->section_id = CATPT_DUMP_SECTION_ID_IRAM;
	hdr->size = resource_size(&cdev->iram);
	pos += sizeof(*hdr);

	memcpy_fromio(pos, cdev->lpe_ba + cdev->iram.start, hdr->size);
	pos += hdr->size;

	hdr = (struct catpt_dump_section_hdr *)pos;
	hdr->magic = CATPT_DUMP_MAGIC;
	hdr->core_id = cdev->spec->core_id;
	hdr->section_id = CATPT_DUMP_SECTION_ID_DRAM;
	hdr->size = resource_size(&cdev->dram);
	pos += sizeof(*hdr);

	memcpy_fromio(pos, cdev->lpe_ba + cdev->dram.start, hdr->size);
	pos += hdr->size;

	hdr = (struct catpt_dump_section_hdr *)pos;
	hdr->magic = CATPT_DUMP_MAGIC;
	hdr->core_id = cdev->spec->core_id;
	hdr->section_id = CATPT_DUMP_SECTION_ID_REGS;
	hdr->size = regs_size;
	pos += sizeof(*hdr);

	memcpy_fromio(pos, catpt_shim_addr(cdev), CATPT_SHIM_REGS_SIZE);
	pos += CATPT_SHIM_REGS_SIZE;

	for (i = 0; i < CATPT_SSP_COUNT; i++) {
		memcpy_fromio(pos, catpt_ssp_addr(cdev, i),
			      CATPT_SSP_REGS_SIZE);
		pos += CATPT_SSP_REGS_SIZE;
	}
	for (i = 0; i < CATPT_DMA_COUNT; i++) {
		memcpy_fromio(pos, catpt_dma_addr(cdev, i),
			      CATPT_DMA_REGS_SIZE);
		pos += CATPT_DMA_REGS_SIZE;
	}

	dev_coredumpv(cdev->dev, dump, dump_size, GFP_KERNEL);

	return 0;
}