/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2024 Intel Corporation */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/overflow.h>
#include <linux/regmap.h>
#include <linux/scatterlist.h>

#include "intel-thc-dev.h"
#include "intel-thc-dma.h"
#include "intel-thc-hw.h"

static void dma_set_prd_base_addr(struct thc_device *dev, u64 physical_addr,
				  struct thc_dma_configuration *dma_config)
{
	u32 addr_high, addr_low;

	if (!dma_config->is_enabled)
		return;

	addr_high = upper_32_bits(physical_addr);
	addr_low = lower_32_bits(physical_addr);

	regmap_write(dev->thc_regmap, dma_config->prd_base_addr_high, addr_high);
	regmap_write(dev->thc_regmap, dma_config->prd_base_addr_low, addr_low);
}

static void dma_set_start_bit(struct thc_device *dev,
			      struct thc_dma_configuration *dma_config)
{
	u32 ctrl, mask, mbits, data, offset;

	if (!dma_config->is_enabled)
		return;

	switch (dma_config->dma_channel) {
	case THC_RXDMA1:
	case THC_RXDMA2:
		if (dma_config->dma_channel == THC_RXDMA2) {
			mbits = FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL,
					   THC_BITMASK_INTERRUPT_TYPE_DATA);
			mask = THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL;
			regmap_write_bits(dev->thc_regmap,
					  THC_M_PRT_DEVINT_CFG_1_OFFSET, mask, mbits);
		}

		mbits = THC_M_PRT_READ_DMA_CNTRL_IE_EOF |
			THC_M_PRT_READ_DMA_CNTRL_SOO |
			THC_M_PRT_READ_DMA_CNTRL_IE_STALL |
			THC_M_PRT_READ_DMA_CNTRL_IE_ERROR |
			THC_M_PRT_READ_DMA_CNTRL_START;

		mask = THC_M_PRT_READ_DMA_CNTRL_TPCWP | mbits;
		mask |= THC_M_PRT_READ_DMA_CNTRL_INT_SW_DMA_EN;
		ctrl = FIELD_PREP(THC_M_PRT_READ_DMA_CNTRL_TPCWP, THC_POINTER_WRAPAROUND) | mbits;
		offset = dma_config->dma_channel == THC_RXDMA1 ?
			 THC_M_PRT_READ_DMA_CNTRL_1_OFFSET : THC_M_PRT_READ_DMA_CNTRL_2_OFFSET;
		regmap_write_bits(dev->thc_regmap, offset, mask, ctrl);
		break;

	case THC_SWDMA:
		mbits = THC_M_PRT_READ_DMA_CNTRL_IE_DMACPL |
			THC_M_PRT_READ_DMA_CNTRL_IE_IOC |
			THC_M_PRT_READ_DMA_CNTRL_SOO |
			THC_M_PRT_READ_DMA_CNTRL_START;

		mask = THC_M_PRT_READ_DMA_CNTRL_TPCWP | mbits;
		ctrl = FIELD_PREP(THC_M_PRT_READ_DMA_CNTRL_TPCWP, THC_POINTER_WRAPAROUND) | mbits;
		regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET,
				  mask, ctrl);
		break;

	case THC_TXDMA:
		regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_INT_STS_OFFSET,
				  THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS,
				  THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS);

		/* Select the interrupt or polling method upon write completion */
		if (dev->dma_ctx->use_write_interrupts)
			data = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL;
		else
			data = 0;

		data |= THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START;
		mask = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL |
		       THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START;
		regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_DMA_CNTRL_OFFSET,
				  mask, data);
		break;

	default:
		break;
	}
}

static void dma_set_prd_control(struct thc_device *dev, u8 entry_count, u8 cb_depth,
				struct thc_dma_configuration *dma_config)
{
	u32 ctrl, mask;

	if (!dma_config->is_enabled)
		return;

	if (dma_config->dma_channel == THC_TXDMA) {
		mask = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC;
		ctrl = FIELD_PREP(THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC, entry_count);
	} else {
		mask = THC_M_PRT_RPRD_CNTRL_PTEC | THC_M_PRT_RPRD_CNTRL_PCD;
		ctrl = FIELD_PREP(THC_M_PRT_RPRD_CNTRL_PTEC, entry_count) |
		       FIELD_PREP(THC_M_PRT_RPRD_CNTRL_PCD, cb_depth);
	}

	regmap_write_bits(dev->thc_regmap, dma_config->prd_cntrl, mask, ctrl);
}

static void dma_clear_prd_control(struct thc_device *dev,
				  struct thc_dma_configuration *dma_config)
{
	u32 mask;

	if (!dma_config->is_enabled)
		return;

	if (dma_config->dma_channel == THC_TXDMA)
		mask = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC;
	else
		mask = THC_M_PRT_RPRD_CNTRL_PTEC | THC_M_PRT_RPRD_CNTRL_PCD;

	regmap_write_bits(dev->thc_regmap, dma_config->prd_cntrl, mask, 0);
}

static u8 dma_get_read_pointer(struct thc_device *dev,
			       struct thc_dma_configuration *dma_config)
{
	u32 ctrl, read_pointer;

	regmap_read(dev->thc_regmap, dma_config->dma_cntrl, &ctrl);
	read_pointer = FIELD_GET(THC_M_PRT_READ_DMA_CNTRL_TPCRP, ctrl);

	dev_dbg(dev->dev, "THC_M_PRT_READ_DMA_CNTRL 0x%x offset 0x%x TPCRP 0x%x\n",
		ctrl, dma_config->dma_cntrl, read_pointer);

	return read_pointer;
}

static u8 dma_get_write_pointer(struct thc_device *dev,
				struct thc_dma_configuration *dma_config)
{
	u32 ctrl, write_pointer;

	regmap_read(dev->thc_regmap, dma_config->dma_cntrl, &ctrl);
	write_pointer = FIELD_GET(THC_M_PRT_READ_DMA_CNTRL_TPCWP, ctrl);

	dev_dbg(dev->dev, "THC_M_PRT_READ_DMA_CNTRL 0x%x offset 0x%x TPCWP 0x%x\n",
		ctrl, dma_config->dma_cntrl, write_pointer);

	return write_pointer;
}

static void dma_set_write_pointer(struct thc_device *dev, u8 value,
				  struct thc_dma_configuration *dma_config)
{
	u32 ctrl, mask;

	mask = THC_M_PRT_READ_DMA_CNTRL_TPCWP;
	ctrl = FIELD_PREP(THC_M_PRT_READ_DMA_CNTRL_TPCWP, value);
	regmap_write_bits(dev->thc_regmap, dma_config->dma_cntrl, mask, ctrl);
}

static size_t dma_get_max_packet_size(struct thc_device *dev,
				      struct thc_dma_configuration *dma_config)
{
	return dma_config->max_packet_size;
}

static void dma_set_max_packet_size(struct thc_device *dev, size_t size,
				    struct thc_dma_configuration *dma_config)
{
	if (size) {
		dma_config->max_packet_size = ALIGN(size, SZ_4K);
		dma_config->is_enabled = true;
	}
}

static void thc_copy_one_sgl_to_prd(struct thc_device *dev,
				    struct thc_dma_configuration *config,
				    unsigned int ind)
{
	struct thc_prd_table *prd_tbl;
	struct scatterlist *sg;
	int j;

	prd_tbl = &config->prd_tbls[ind];

	for_each_sg(config->sgls[ind], sg, config->sgls_nent[ind], j) {
		prd_tbl->entries[j].dest_addr =
				sg_dma_address(sg) >> THC_ADDRESS_SHIFT;
		prd_tbl->entries[j].len = sg_dma_len(sg);
		prd_tbl->entries[j].hw_status = 0;
		prd_tbl->entries[j].end_of_prd = 0;
	}

	/* Set the end_of_prd flag in the last filled entry */
	if (j > 0)
		prd_tbl->entries[j - 1].end_of_prd = 1;
}

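/*
 * PRD entry layout note (inferred from the copy above, not from a
 * datasheet): the hardware consumes DMA addresses right-shifted by
 * THC_ADDRESS_SHIFT, so the low-order address bits are presumably
 * implied to be zero, and only the final entry of a table carries
 * end_of_prd = 1 to mark the table boundary.
 */
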
static void thc_copy_sgls_to_prd(struct thc_device *dev,
				 struct thc_dma_configuration *config)
{
	unsigned int i;

	memset(config->prd_tbls, 0, array_size(PRD_TABLE_SIZE, config->prd_tbl_num));

	for (i = 0; i < config->prd_tbl_num; i++)
		thc_copy_one_sgl_to_prd(dev, config, i);
}

static int setup_dma_buffers(struct thc_device *dev,
			     struct thc_dma_configuration *config,
			     enum dma_data_direction dir)
{
	size_t prd_tbls_size = array_size(PRD_TABLE_SIZE, config->prd_tbl_num);
	unsigned int i, nent = PRD_ENTRIES_NUM;
	dma_addr_t dma_handle;
	void *cpu_addr;
	size_t buf_sz;
	int count;

	if (!config->is_enabled)
		return 0;

	memset(config->sgls, 0, sizeof(config->sgls));
	memset(config->sgls_nent_pages, 0, sizeof(config->sgls_nent_pages));
	memset(config->sgls_nent, 0, sizeof(config->sgls_nent));

	cpu_addr = dma_alloc_coherent(dev->dev, prd_tbls_size,
				      &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	config->prd_tbls = cpu_addr;
	config->prd_tbls_dma_handle = dma_handle;

	buf_sz = dma_get_max_packet_size(dev, config);

	/* Allocate and map the scatter-gather lists, one for each PRD table */
	for (i = 0; i < config->prd_tbl_num; i++) {
		config->sgls[i] = sgl_alloc(buf_sz, GFP_KERNEL, &nent);
		if (!config->sgls[i] || nent > PRD_ENTRIES_NUM) {
			dev_err_once(dev->dev, "sgl_alloc failed for table %u, nent %u\n",
				     i, nent);
			return -ENOMEM;
		}

		/*
		 * dma_map_sg() returns 0 on failure; any partially set up
		 * state is released by the caller's error path.
		 */
		count = dma_map_sg(dev->dev, config->sgls[i], nent, dir);
		if (!count)
			return -ENOMEM;

		config->sgls_nent_pages[i] = nent;
		config->sgls_nent[i] = count;
	}

	thc_copy_sgls_to_prd(dev, config);

	return 0;
}

static void thc_reset_dma_settings(struct thc_device *dev)
{
	/* Stop all DMA channels and reset DMA read pointers */
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_START, 0);
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_START, 0);
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_START, 0);
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_DMA_CNTRL_OFFSET,
			  THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START, 0);

	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_TPCPR,
			  THC_M_PRT_READ_DMA_CNTRL_TPCPR);
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_TPCPR,
			  THC_M_PRT_READ_DMA_CNTRL_TPCPR);
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_TPCPR,
			  THC_M_PRT_READ_DMA_CNTRL_TPCPR);
}

static void release_dma_buffers(struct thc_device *dev,
				struct thc_dma_configuration *config)
{
	size_t prd_tbls_size = array_size(PRD_TABLE_SIZE, config->prd_tbl_num);
	unsigned int i;

	if (!config->is_enabled)
		return;

	for (i = 0; i < config->prd_tbl_num; i++) {
		if (!config->sgls[i])
			continue;

		/* Skip the unmap step for lists that were never DMA-mapped */
		if (config->sgls_nent[i])
			dma_unmap_sg(dev->dev, config->sgls[i],
				     config->sgls_nent_pages[i],
				     config->dir);

		sgl_free(config->sgls[i]);
		config->sgls[i] = NULL;
	}

	if (config->prd_tbls) {
		memset(config->prd_tbls, 0, prd_tbls_size);
		dma_free_coherent(dev->dev, prd_tbls_size, config->prd_tbls,
				  config->prd_tbls_dma_handle);
		config->prd_tbls = NULL;
		config->prd_tbls_dma_handle = 0;
	}
}

struct thc_dma_context *thc_dma_init(struct thc_device *dev)
{
	struct thc_dma_context *dma_ctx;

	dma_ctx = devm_kzalloc(dev->dev, sizeof(*dma_ctx), GFP_KERNEL);
	if (!dma_ctx)
		return NULL;

	dev->dma_ctx = dma_ctx;

	dma_ctx->dma_config[THC_RXDMA1].dma_channel = THC_RXDMA1;
	dma_ctx->dma_config[THC_RXDMA2].dma_channel = THC_RXDMA2;
	dma_ctx->dma_config[THC_TXDMA].dma_channel = THC_TXDMA;
	dma_ctx->dma_config[THC_SWDMA].dma_channel = THC_SWDMA;

	dma_ctx->dma_config[THC_RXDMA1].dir = DMA_FROM_DEVICE;
	dma_ctx->dma_config[THC_RXDMA2].dir = DMA_FROM_DEVICE;
	dma_ctx->dma_config[THC_TXDMA].dir = DMA_TO_DEVICE;
	dma_ctx->dma_config[THC_SWDMA].dir = DMA_FROM_DEVICE;

	dma_ctx->dma_config[THC_RXDMA1].prd_tbl_num = PRD_TABLES_NUM;
	dma_ctx->dma_config[THC_RXDMA2].prd_tbl_num = PRD_TABLES_NUM;
	dma_ctx->dma_config[THC_TXDMA].prd_tbl_num = 1;
	dma_ctx->dma_config[THC_SWDMA].prd_tbl_num = 1;

	dma_ctx->dma_config[THC_RXDMA1].prd_base_addr_high = THC_M_PRT_RPRD_BA_HI_1_OFFSET;
	dma_ctx->dma_config[THC_RXDMA2].prd_base_addr_high = THC_M_PRT_RPRD_BA_HI_2_OFFSET;
	dma_ctx->dma_config[THC_TXDMA].prd_base_addr_high = THC_M_PRT_WPRD_BA_HI_OFFSET;
	dma_ctx->dma_config[THC_SWDMA].prd_base_addr_high = THC_M_PRT_RPRD_BA_HI_SW_OFFSET;

	dma_ctx->dma_config[THC_RXDMA1].prd_base_addr_low = THC_M_PRT_RPRD_BA_LOW_1_OFFSET;
	dma_ctx->dma_config[THC_RXDMA2].prd_base_addr_low = THC_M_PRT_RPRD_BA_LOW_2_OFFSET;
	dma_ctx->dma_config[THC_TXDMA].prd_base_addr_low = THC_M_PRT_WPRD_BA_LOW_OFFSET;
	dma_ctx->dma_config[THC_SWDMA].prd_base_addr_low = THC_M_PRT_RPRD_BA_LOW_SW_OFFSET;

	dma_ctx->dma_config[THC_RXDMA1].prd_cntrl = THC_M_PRT_RPRD_CNTRL_1_OFFSET;
	dma_ctx->dma_config[THC_RXDMA2].prd_cntrl = THC_M_PRT_RPRD_CNTRL_2_OFFSET;
	dma_ctx->dma_config[THC_TXDMA].prd_cntrl = THC_M_PRT_WRITE_DMA_CNTRL_OFFSET;
	dma_ctx->dma_config[THC_SWDMA].prd_cntrl = THC_M_PRT_RPRD_CNTRL_SW_OFFSET;

	dma_ctx->dma_config[THC_RXDMA1].dma_cntrl = THC_M_PRT_READ_DMA_CNTRL_1_OFFSET;
	dma_ctx->dma_config[THC_RXDMA2].dma_cntrl = THC_M_PRT_READ_DMA_CNTRL_2_OFFSET;
	dma_ctx->dma_config[THC_TXDMA].dma_cntrl = THC_M_PRT_WRITE_DMA_CNTRL_OFFSET;
	dma_ctx->dma_config[THC_SWDMA].dma_cntrl = THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET;

	/* Enable write DMA completion interrupt by default */
	dma_ctx->use_write_interrupts = 1;

	return dma_ctx;
}

/**
 * thc_dma_set_max_packet_sizes - Set max packet sizes for all DMA engines
 *
 * @dev: The pointer of THC private device context
 * @mps_read1: RxDMA1 max packet size
 * @mps_read2: RxDMA2 max packet size
 * @mps_write: TxDMA max packet size
 * @mps_swdma: Software DMA max packet size
 *
 * A non-zero mps means the corresponding DMA channel is used; in that case
 * the channel's enable flag is set.
 *
 * Return: 0 on success, other error codes on failure.
 */
int thc_dma_set_max_packet_sizes(struct thc_device *dev, size_t mps_read1,
				 size_t mps_read2, size_t mps_write,
				 size_t mps_swdma)
{
	if (!dev->dma_ctx) {
		dev_err_once(dev->dev,
			     "Cannot set max packet sizes because DMA context is NULL!\n");
		return -EINVAL;
	}

	dma_set_max_packet_size(dev, mps_read1, &dev->dma_ctx->dma_config[THC_RXDMA1]);
	dma_set_max_packet_size(dev, mps_read2, &dev->dma_ctx->dma_config[THC_RXDMA2]);
	dma_set_max_packet_size(dev, mps_write, &dev->dma_ctx->dma_config[THC_TXDMA]);
	dma_set_max_packet_size(dev, mps_swdma, &dev->dma_ctx->dma_config[THC_SWDMA]);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(thc_dma_set_max_packet_sizes, "INTEL_THC");

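/*
 * Worked example (illustrative, not compiled): dma_set_max_packet_size()
 * rounds each non-zero size up to a 4 KB multiple and enables the channel,
 * so a requested size of 0x1100 bytes yields max_packet_size == 0x2000,
 * while passing 0 leaves the corresponding channel disabled. The sizes
 * below are hypothetical:
 *
 *	ret = thc_dma_set_max_packet_sizes(dev, SZ_4K, SZ_4K, SZ_4K, 0);
 */
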
/**
 * thc_dma_allocate - Allocate DMA buffers for all DMA engines
 *
 * @dev: The pointer of THC private device context
 *
 * Return: 0 on success, other error codes on failure.
 */
int thc_dma_allocate(struct thc_device *dev)
{
	int ret, chan;

	for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++) {
		ret = setup_dma_buffers(dev, &dev->dma_ctx->dma_config[chan],
					dev->dma_ctx->dma_config[chan].dir);
		if (ret < 0) {
			dev_err_once(dev->dev, "DMA setup failed for DMA channel %d\n", chan);
			goto release_bufs;
		}
	}

	return 0;

release_bufs:
	/* Release the partially set up channel as well as all earlier ones */
	while (chan >= 0)
		release_dma_buffers(dev, &dev->dma_ctx->dma_config[chan--]);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(thc_dma_allocate, "INTEL_THC");

/**
 * thc_dma_release - Release DMA buffers for all DMA engines
 *
 * @dev: The pointer of THC private device context
 */
void thc_dma_release(struct thc_device *dev)
{
	int chan;

	for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++)
		release_dma_buffers(dev, &dev->dma_ctx->dma_config[chan]);
}
EXPORT_SYMBOL_NS_GPL(thc_dma_release, "INTEL_THC");

static int calc_prd_entries_num(struct thc_prd_table *prd_tbl,
				size_t mes_len, u8 *nent)
{
	*nent = DIV_ROUND_UP(mes_len, THC_MIN_BYTES_PER_SG_LIST_ENTRY);
	if (*nent > PRD_ENTRIES_NUM)
		return -EMSGSIZE;

	return 0;
}

static size_t calc_message_len(struct thc_prd_table *prd_tbl, u8 *nent)
{
	size_t mes_len = 0;
	unsigned int j;

	for (j = 0; j < PRD_ENTRIES_NUM; j++) {
		mes_len += prd_tbl->entries[j].len;
		if (prd_tbl->entries[j].end_of_prd)
			break;
	}

	*nent = j + 1;

	return mes_len;
}

/**
 * thc_dma_configure - Configure DMA settings for all DMA engines
 *
 * @dev: The pointer of THC private device context
 *
 * Return: 0 on success, other error codes on failure.
 */
int thc_dma_configure(struct thc_device *dev)
{
	struct thc_dma_context *dma_ctx = dev->dma_ctx;
	int chan;

	thc_reset_dma_settings(dev);

	if (!dma_ctx) {
		dev_err_once(dev->dev, "Cannot configure DMA because DMA context is NULL\n");
		return -EINVAL;
	}

	for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++) {
		dma_set_prd_base_addr(dev,
				      dma_ctx->dma_config[chan].prd_tbls_dma_handle,
				      &dma_ctx->dma_config[chan]);

		dma_set_prd_control(dev, PRD_ENTRIES_NUM - 1,
				    dma_ctx->dma_config[chan].prd_tbl_num - 1,
				    &dma_ctx->dma_config[chan]);
	}

	/* Start the RxDMA2 engine */
	dma_set_start_bit(dev, &dma_ctx->dma_config[THC_RXDMA2]);

	dev_dbg(dev->dev, "DMA configured successfully!\n");

	return 0;
}
EXPORT_SYMBOL_NS_GPL(thc_dma_configure, "INTEL_THC");

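/*
 * Typical bring-up order, as a minimal sketch (illustrative only; error
 * handling is elided and the packet sizes are hypothetical):
 *
 *	if (!thc_dma_init(dev))
 *		return -ENOMEM;
 *	thc_dma_set_max_packet_sizes(dev, SZ_4K, SZ_4K, SZ_4K, SZ_4K);
 *	thc_dma_allocate(dev);
 *	thc_dma_configure(dev);
 */
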
/**
 * thc_dma_unconfigure - Unconfigure DMA settings for all DMA engines
 *
 * @dev: The pointer of THC private device context
 */
void thc_dma_unconfigure(struct thc_device *dev)
{
	int chan;

	for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++) {
		dma_set_prd_base_addr(dev, 0, &dev->dma_ctx->dma_config[chan]);
		dma_clear_prd_control(dev, &dev->dma_ctx->dma_config[chan]);
	}

	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_START, 0);

	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_START, 0);
}
EXPORT_SYMBOL_NS_GPL(thc_dma_unconfigure, "INTEL_THC");

static int thc_wait_for_dma_pause(struct thc_device *dev, enum thc_dma_channel channel)
{
	u32 ctrl_reg, sts_reg, sts;
	int ret;

	ctrl_reg = (channel == THC_RXDMA1) ? THC_M_PRT_READ_DMA_CNTRL_1_OFFSET :
			((channel == THC_RXDMA2) ? THC_M_PRT_READ_DMA_CNTRL_2_OFFSET :
						   THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET);

	regmap_write_bits(dev->thc_regmap, ctrl_reg, THC_M_PRT_READ_DMA_CNTRL_START, 0);

	sts_reg = (channel == THC_RXDMA1) ? THC_M_PRT_READ_DMA_INT_STS_1_OFFSET :
			((channel == THC_RXDMA2) ? THC_M_PRT_READ_DMA_INT_STS_2_OFFSET :
						   THC_M_PRT_READ_DMA_INT_STS_SW_OFFSET);

	ret = regmap_read_poll_timeout(dev->thc_regmap, sts_reg, sts,
				       !(sts & THC_M_PRT_READ_DMA_INT_STS_ACTIVE),
				       THC_DEFAULT_RXDMA_POLLING_US_INTERVAL,
				       THC_DEFAULT_RXDMA_POLLING_US_TIMEOUT);

	if (ret) {
		dev_err_once(dev->dev,
			     "Timeout while waiting for DMA %d to stop\n", channel);
		return ret;
	}

	return 0;
}

static int read_dma_buffer(struct thc_device *dev,
			   struct thc_dma_configuration *read_config,
			   u8 prd_table_index, void *read_buff)
{
	struct thc_prd_table *prd_tbl;
	struct scatterlist *sg;
	size_t mes_len, ret;
	u8 nent;

	if (prd_table_index >= read_config->prd_tbl_num) {
		dev_err_once(dev->dev, "PRD table index %d too big\n", prd_table_index);
		return -EINVAL;
	}

	if (!read_config->prd_tbls || !read_config->sgls[prd_table_index]) {
		dev_err_once(dev->dev, "PRD tables are not ready yet\n");
		return -EINVAL;
	}

	prd_tbl = &read_config->prd_tbls[prd_table_index];
	mes_len = calc_message_len(prd_tbl, &nent);
	if (mes_len > read_config->max_packet_size) {
		dev_err(dev->dev,
			"Message length %zu is bigger than buffer length %zu\n",
			mes_len, read_config->max_packet_size);
		return -EMSGSIZE;
	}

	sg = read_config->sgls[prd_table_index];
	ret = sg_copy_to_buffer(sg, nent, read_buff, mes_len);
	if (ret != mes_len) {
		dev_err_once(dev->dev, "Copied %zu bytes instead of requested %zu\n",
			     ret, mes_len);
		return -EIO;
	}

	return mes_len;
}

static void update_write_pointer(struct thc_device *dev,
				 struct thc_dma_configuration *read_config)
{
	u8 write_ptr = dma_get_write_pointer(dev, read_config);

	if (write_ptr + 1 == THC_WRAPAROUND_VALUE_ODD)
		dma_set_write_pointer(dev, THC_POINTER_WRAPAROUND, read_config);
	else if (write_ptr + 1 == THC_WRAPAROUND_VALUE_EVEN)
		dma_set_write_pointer(dev, 0, read_config);
	else
		dma_set_write_pointer(dev, write_ptr + 1, read_config);
}

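/*
 * Worked example of the wraparound above, assuming (from the constant
 * names and the masking in is_dma_buf_empty(), not from a datasheet)
 * that the pointer carries a lap bit above the PRD table index: when the
 * incremented pointer reaches THC_WRAPAROUND_VALUE_ODD the index returns
 * to zero with the lap bit set (THC_POINTER_WRAPAROUND), and when it
 * reaches THC_WRAPAROUND_VALUE_EVEN it returns to plain zero.
 */
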
static bool is_dma_buf_empty(struct thc_device *dev,
			     struct thc_dma_configuration *read_config,
			     u8 *read_ptr, u8 *write_ptr)
{
	*read_ptr = dma_get_read_pointer(dev, read_config);
	*write_ptr = dma_get_write_pointer(dev, read_config);

	if ((*read_ptr & THC_POINTER_MASK) == (*write_ptr & THC_POINTER_MASK) &&
	    *read_ptr != *write_ptr)
		return true;

	return false;
}

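/*
 * Note on the check above (a reading of the code, not a datasheet
 * statement): THC_POINTER_MASK strips the extra wraparound bit that both
 * pointers carry, so equal masked values with unequal raw values means
 * the consuming write pointer has fully caught up with the hardware and
 * the buffer is empty; any other combination means at least one PRD
 * table still holds unread data.
 */
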
static int thc_dma_read(struct thc_device *dev,
			struct thc_dma_configuration *read_config,
			void *read_buff, size_t *read_len, int *read_finished)
{
	u8 read_ptr, write_ptr, prd_table_index;
	int status;

	if (!is_dma_buf_empty(dev, read_config, &read_ptr, &write_ptr)) {
		prd_table_index = write_ptr & THC_POINTER_MASK;

		status = read_dma_buffer(dev, read_config, prd_table_index, read_buff);
		if (status <= 0) {
			dev_err_once(dev->dev, "read DMA buffer failed %d\n", status);
			return -EIO;
		}

		*read_len = status;

		/* Re-initialize the consumed PRD table from its scatter list */
		thc_copy_one_sgl_to_prd(dev, read_config, prd_table_index);

		/* Increment the write pointer to let the HW know we have processed this PRD */
		update_write_pointer(dev, read_config);
	}

	/*
	 * This function reads at most one frame from the PRD tables per call,
	 * so it must check whether all DMAed data has been read out and report
	 * that flag to the caller. The caller should repeatedly call
	 * thc_dma_read() until all DMAed data is handled.
	 */
	if (read_finished)
		*read_finished = is_dma_buf_empty(dev, read_config, &read_ptr, &write_ptr) ? 1 : 0;

	return 0;
}

/**
 * thc_rxdma_read - Read data from RXDMA buffer
 *
 * @dev: The pointer of THC private device context
 * @dma_channel: The RXDMA engine of read data source
 * @read_buff: The pointer of the read data buffer
 * @read_len: The pointer of the read data length
 * @read_finished: The pointer of the flag indicating if all pending data has been read out
 *
 * Return: 0 on success, other error codes on failure.
 */
int thc_rxdma_read(struct thc_device *dev, enum thc_dma_channel dma_channel,
		   void *read_buff, size_t *read_len, int *read_finished)
{
	struct thc_dma_configuration *dma_config;

	/* Validate the channel range before indexing the config array */
	if (dma_channel >= THC_TXDMA) {
		dev_err(dev->dev, "Unsupported DMA channel for RxDMA read, %d\n", dma_channel);
		return -EINVAL;
	}

	dma_config = &dev->dma_ctx->dma_config[dma_channel];

	if (!dma_config->is_enabled) {
		dev_err_once(dev->dev, "The DMA channel %d is not enabled\n", dma_channel);
		return -EINVAL;
	}

	if (!read_buff || !read_len) {
		dev_err(dev->dev, "Invalid input parameters, read_buff %p, read_len %p\n",
			read_buff, read_len);
		return -EINVAL;
	}

	return thc_dma_read(dev, dma_config, read_buff, read_len, read_finished);
}
EXPORT_SYMBOL_NS_GPL(thc_rxdma_read, "INTEL_THC");

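/*
 * Consumption-loop sketch (illustrative; buffer management and error
 * handling elided), following the one-frame-per-call contract of
 * thc_dma_read() noted above:
 *
 *	int done = 0;
 *	size_t len;
 *
 *	do {
 *		if (thc_rxdma_read(dev, THC_RXDMA2, buf, &len, &done))
 *			break;
 *		// hand off len bytes from buf to the protocol layer
 *	} while (!done);
 */
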
static int thc_swdma_read_start(struct thc_device *dev, void *write_buff,
				size_t write_len, u32 *prd_tbl_len)
{
	u32 mask, val, data0 = 0, data1 = 0;
	int ret;

	ret = thc_interrupt_quiesce(dev, true);
	if (ret)
		return ret;

	if (thc_wait_for_dma_pause(dev, THC_RXDMA1) || thc_wait_for_dma_pause(dev, THC_RXDMA2))
		return -EIO;

	thc_reset_dma_settings(dev);

	/*
	 * The max input size control feature is only available for RxDMA; it
	 * must stay disabled during a SWDMA operation and be restored to its
	 * previous state once SWDMA is done. The max input size variables in
	 * the THC device context track the hardware state and change whenever
	 * the feature state changes, so they cannot be used to remember the
	 * state across the SWDMA operation. Instead, a temporary variable in
	 * the DMA context records the feature state before SWDMA starts.
	 */
	if (dev->i2c_max_rx_size_en) {
		thc_i2c_rx_max_size_enable(dev, false);
		dev->dma_ctx->rx_max_size_en = true;
	}

	/*
	 * The interrupt delay feature is in the same situation as the max
	 * input size control feature: its state must be recorded before SWDMA.
	 */
	if (dev->i2c_int_delay_en) {
		thc_i2c_rx_int_delay_enable(dev, false);
		dev->dma_ctx->rx_int_delay_en = true;
	}

	mask = THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_WBC |
	       THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_RX_DLEN_EN;
	val = FIELD_PREP(THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_WBC, write_len) |
	      ((!prd_tbl_len) ? THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_RX_DLEN_EN : 0);
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_RPRD_CNTRL_SW_OFFSET,
			  mask, val);

	if (prd_tbl_len) {
		mask = THC_M_PRT_SW_DMA_PRD_TABLE_LEN_THC_M_PRT_SW_DMA_PRD_TABLE_LEN;
		val = FIELD_PREP(THC_M_PRT_SW_DMA_PRD_TABLE_LEN_THC_M_PRT_SW_DMA_PRD_TABLE_LEN,
				 *prd_tbl_len);
		regmap_write_bits(dev->thc_regmap, THC_M_PRT_SW_DMA_PRD_TABLE_LEN_OFFSET,
				  mask, val);
	}

	if (write_len <= sizeof(u32)) {
		for (int i = 0; i < write_len; i++)
			data0 |= *(((u8 *)write_buff) + i) << (i * 8);

		regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA0_ADDR_OFFSET, data0);
	} else if (write_len <= 2 * sizeof(u32)) {
		data0 = *(u32 *)write_buff;
		regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA0_ADDR_OFFSET, data0);

		for (int i = 0; i < write_len - sizeof(u32); i++)
			data1 |= *(((u8 *)write_buff) + sizeof(u32) + i) << (i * 8);

		regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA1_OFFSET, data1);
	}

	dma_set_start_bit(dev, &dev->dma_ctx->dma_config[THC_SWDMA]);

	return 0;
}

static int thc_swdma_read_completion(struct thc_device *dev)
{
	int ret;

	ret = thc_wait_for_dma_pause(dev, THC_SWDMA);
	if (ret)
		return ret;

	/*
	 * Restore the max input size control feature to its previous state
	 * now that SWDMA is done, if it was enabled before, and reset the
	 * temporary rx_max_size_en variable for the next operation.
	 */
	if (dev->dma_ctx->rx_max_size_en) {
		thc_i2c_rx_max_size_enable(dev, true);
		dev->dma_ctx->rx_max_size_en = false;
	}

	/*
	 * Restore the input interrupt delay feature to its previous state
	 * now that SWDMA is done, if it was enabled before, and reset the
	 * temporary rx_int_delay_en variable for the next operation.
	 */
	if (dev->dma_ctx->rx_int_delay_en) {
		thc_i2c_rx_int_delay_enable(dev, true);
		dev->dma_ctx->rx_int_delay_en = false;
	}

	thc_reset_dma_settings(dev);

	dma_set_start_bit(dev, &dev->dma_ctx->dma_config[THC_RXDMA2]);

	return thc_interrupt_quiesce(dev, false);
}

/**
 * thc_swdma_read - Use software DMA to read data from touch device
 *
 * @dev: The pointer of THC private device context
 * @write_buff: The pointer of write buffer for SWDMA sequence
 * @write_len: The write data length for SWDMA sequence
 * @prd_tbl_len: The PRD table length for the SWDMA engine; may be NULL
 * @read_buff: The pointer of the read data buffer
 * @read_len: The pointer of the read data length
 *
 * Return: 0 on success, other error codes on failure.
 */
int thc_swdma_read(struct thc_device *dev, void *write_buff, size_t write_len,
		   u32 *prd_tbl_len, void *read_buff, size_t *read_len)
{
	int ret;

	if (!dev->dma_ctx->dma_config[THC_SWDMA].is_enabled) {
		dev_err_once(dev->dev, "The SWDMA channel is not enabled\n");
		return -EINVAL;
	}

	if (!read_buff || !read_len) {
		dev_err(dev->dev, "Invalid input parameters, read_buff %p, read_len %p\n",
			read_buff, read_len);
		return -EINVAL;
	}

	if (mutex_lock_interruptible(&dev->thc_bus_lock))
		return -EINTR;

	dev->swdma_done = false;

	ret = thc_swdma_read_start(dev, write_buff, write_len, prd_tbl_len);
	if (ret)
		goto end;

	ret = wait_event_interruptible_timeout(dev->swdma_complete_wait, dev->swdma_done, 1 * HZ);
	if (ret <= 0 || !dev->swdma_done) {
		dev_err_once(dev->dev, "timed out waiting for SWDMA completion\n");
		ret = -ETIMEDOUT;
		goto end;
	}

	ret = thc_dma_read(dev, &dev->dma_ctx->dma_config[THC_SWDMA], read_buff, read_len, NULL);
	if (ret)
		goto end;

	ret = thc_swdma_read_completion(dev);

end:
	mutex_unlock(&dev->thc_bus_lock);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(thc_swdma_read, "INTEL_THC");

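/*
 * Usage sketch (illustrative; the command bytes are hypothetical): the
 * caller supplies the bus write sequence in write_buff/write_len and may
 * pass NULL for prd_tbl_len, which selects the RX_DLEN_EN path in
 * thc_swdma_read_start() above; the response lands in read_buff:
 *
 *	u8 cmd[2] = { 0x05, 0x00 };
 *	size_t read_len;
 *
 *	ret = thc_swdma_read(dev, cmd, sizeof(cmd), NULL, read_buff, &read_len);
 */
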
static int write_dma_buffer(struct thc_device *dev,
			    void *buffer, size_t buf_len)
{
	struct thc_dma_configuration *write_config = &dev->dma_ctx->dma_config[THC_TXDMA];
	struct thc_prd_table *prd_tbl;
	struct scatterlist *sg;
	unsigned long len_left;
	size_t ret;
	u8 nent;
	int i;

	/* There is only one PRD table for write */
	prd_tbl = &write_config->prd_tbls[0];

	if (calc_prd_entries_num(prd_tbl, buf_len, &nent) < 0) {
		dev_err(dev->dev, "Tx message length too big (%zu)\n", buf_len);
		return -EOVERFLOW;
	}

	sg = write_config->sgls[0];
	ret = sg_copy_from_buffer(sg, nent, buffer, buf_len);
	if (ret != buf_len) {
		dev_err_once(dev->dev, "Copied %zu bytes instead of requested %zu\n",
			     ret, buf_len);
		return -EIO;
	}

	len_left = buf_len;

	for_each_sg(write_config->sgls[0], sg, write_config->sgls_nent[0], i) {
		if (sg_dma_address(sg) == 0 || sg_dma_len(sg) == 0) {
			dev_err_once(dev->dev, "SGList: zero address or length\n");
			return -EINVAL;
		}

		prd_tbl->entries[i].dest_addr =
				sg_dma_address(sg) >> THC_ADDRESS_SHIFT;

		if (len_left < sg_dma_len(sg)) {
			prd_tbl->entries[i].len = len_left;
			prd_tbl->entries[i].end_of_prd = 1;
			break;
		}

		prd_tbl->entries[i].len = sg_dma_len(sg);
		prd_tbl->entries[i].end_of_prd = 0;

		len_left -= sg_dma_len(sg);
	}

	dma_set_prd_control(dev, i, 0, write_config);

	return 0;
}

static void thc_ensure_performance_limitations(struct thc_device *dev)
{
	unsigned long delay_usec = 0;
	/*
	 * Minimum amount of delay the THC / QUICKSPI driver must wait
	 * between end of write operation and begin of read operation.
	 * This value shall be in 10us multiples.
	 */
	if (dev->perf_limit > 0) {
		delay_usec = dev->perf_limit * 10;
		udelay(delay_usec);
	}
}

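/*
 * For example, a perf_limit of 5 results in udelay(50), i.e. a 50 us
 * minimum gap between the end of a write and the begin of the next read.
 */
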
static void thc_dma_write_completion(struct thc_device *dev)
{
	thc_ensure_performance_limitations(dev);
}

/**
 * thc_dma_write - Use TXDMA to write data to touch device
 *
 * @dev: The pointer of THC private device context
 * @buffer: The pointer of write data buffer
 * @buf_len: The write data length
 *
 * Return: 0 on success, other error codes on failure.
 */
int thc_dma_write(struct thc_device *dev, void *buffer, size_t buf_len)
{
	bool restore_interrupts = false;
	u32 sts, ctrl;
	int ret;

	if (!dev->dma_ctx->dma_config[THC_TXDMA].is_enabled) {
		dev_err_once(dev->dev, "The TxDMA channel is not enabled\n");
		return -EINVAL;
	}

	if (!buffer || !buf_len) {
		dev_err(dev->dev, "Invalid input parameters, buffer %p, buf_len %zu\n",
			buffer, buf_len);
		return -EINVAL;
	}

	regmap_read(dev->thc_regmap, THC_M_PRT_WRITE_INT_STS_OFFSET, &sts);
	if (sts & THC_M_PRT_WRITE_INT_STS_THC_WRDMA_ACTIVE) {
		dev_err_once(dev->dev, "THC TxDMA is still active and can't start again\n");
		return -EBUSY;
	}

	if (mutex_lock_interruptible(&dev->thc_bus_lock))
		return -EINTR;

	regmap_read(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET, &ctrl);

	ret = write_dma_buffer(dev, buffer, buf_len);
	if (ret)
		goto end;

	if (dev->perf_limit && !(ctrl & THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_HW_STS)) {
		ret = thc_interrupt_quiesce(dev, true);
		if (ret)
			goto end;

		restore_interrupts = true;
	}

	dev->write_done = false;

	dma_set_start_bit(dev, &dev->dma_ctx->dma_config[THC_TXDMA]);

	ret = wait_event_interruptible_timeout(dev->write_complete_wait, dev->write_done, 1 * HZ);
	if (ret <= 0 || !dev->write_done) {
		dev_err_once(dev->dev, "timed out waiting for TxDMA completion\n");
		ret = -ETIMEDOUT;
		goto end;
	}

	thc_dma_write_completion(dev);
	ret = 0;

end:
	mutex_unlock(&dev->thc_bus_lock);

	if (restore_interrupts) {
		int err = thc_interrupt_quiesce(dev, false);

		/* Do not let a successful restore mask an earlier error */
		if (!ret)
			ret = err;
	}

	return ret;
}
EXPORT_SYMBOL_NS_GPL(thc_dma_write, "INTEL_THC");

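/*
 * Write-path sketch (illustrative; the report bytes are hypothetical):
 * thc_dma_write() copies the caller's buffer into the single Tx PRD
 * table, starts TXDMA, and sleeps until the completion interrupt sets
 * dev->write_done:
 *
 *	u8 report[] = { 0x01, 0x02, 0x03 };
 *
 *	ret = thc_dma_write(dev, report, sizeof(report));
 */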