/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2024 Intel Corporation */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/overflow.h>
#include <linux/regmap.h>
#include <linux/scatterlist.h>

#include "intel-thc-dev.h"
#include "intel-thc-dma.h"
#include "intel-thc-hw.h"

static void dma_set_prd_base_addr(struct thc_device *dev, u64 physical_addr,
				  struct thc_dma_configuration *dma_config)
{
	u32 addr_high, addr_low;

	if (!dma_config->is_enabled)
		return;

	addr_high = upper_32_bits(physical_addr);
	addr_low = lower_32_bits(physical_addr);

	regmap_write(dev->thc_regmap, dma_config->prd_base_addr_high, addr_high);
	regmap_write(dev->thc_regmap, dma_config->prd_base_addr_low, addr_low);
}
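/*
 * Kick off a DMA engine: program the channel-specific interrupt enable
 * bits and set the START bit in the matching control register. For the
 * read engines (RxDMA1/2 and SWDMA) the software write pointer (TPCWP)
 * is primed with the wraparound value at the same time.
 */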
static void dma_set_start_bit(struct thc_device *dev,
			      struct thc_dma_configuration *dma_config)
{
	u32 ctrl, mask, mbits, data, offset;

	if (!dma_config->is_enabled)
		return;

	switch (dma_config->dma_channel) {
	case THC_RXDMA1:
	case THC_RXDMA2:
		if (dma_config->dma_channel == THC_RXDMA2) {
			mbits = FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL,
					   THC_BITMASK_INTERRUPT_TYPE_DATA);
			mask = THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL;
			regmap_write_bits(dev->thc_regmap,
					  THC_M_PRT_DEVINT_CFG_1_OFFSET, mask, mbits);
		}

		mbits = THC_M_PRT_READ_DMA_CNTRL_IE_EOF |
			THC_M_PRT_READ_DMA_CNTRL_SOO |
			THC_M_PRT_READ_DMA_CNTRL_IE_STALL |
			THC_M_PRT_READ_DMA_CNTRL_IE_ERROR |
			THC_M_PRT_READ_DMA_CNTRL_START;

		mask = THC_M_PRT_READ_DMA_CNTRL_TPCWP | mbits;
		mask |= THC_M_PRT_READ_DMA_CNTRL_INT_SW_DMA_EN;
		ctrl = FIELD_PREP(THC_M_PRT_READ_DMA_CNTRL_TPCWP, THC_POINTER_WRAPAROUND) | mbits;
		offset = dma_config->dma_channel == THC_RXDMA1 ?
			 THC_M_PRT_READ_DMA_CNTRL_1_OFFSET : THC_M_PRT_READ_DMA_CNTRL_2_OFFSET;
		regmap_write_bits(dev->thc_regmap, offset, mask, ctrl);
		break;

	case THC_SWDMA:
		mbits = THC_M_PRT_READ_DMA_CNTRL_IE_DMACPL |
			THC_M_PRT_READ_DMA_CNTRL_IE_IOC |
			THC_M_PRT_READ_DMA_CNTRL_SOO |
			THC_M_PRT_READ_DMA_CNTRL_START;

		mask = THC_M_PRT_READ_DMA_CNTRL_TPCWP | mbits;
		ctrl = FIELD_PREP(THC_M_PRT_READ_DMA_CNTRL_TPCWP, THC_POINTER_WRAPAROUND) | mbits;
		regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET,
				  mask, ctrl);
		break;

	case THC_TXDMA:
		regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_INT_STS_OFFSET,
				  THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS,
				  THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS);

		/* Select interrupt or polling method upon write completion */
		if (dev->dma_ctx->use_write_interrupts)
			data = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL;
		else
			data = 0;

		data |= THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START;
		mask = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL |
		       THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START;
		regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_DMA_CNTRL_OFFSET,
				  mask, data);
		break;

	default:
		break;
	}
}

static void dma_set_prd_control(struct thc_device *dev, u8 entry_count, u8 cb_depth,
				struct thc_dma_configuration *dma_config)
{
	u32 ctrl, mask;

	if (!dma_config->is_enabled)
		return;

	if (dma_config->dma_channel == THC_TXDMA) {
		mask = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC;
		ctrl = FIELD_PREP(THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC, entry_count);
	} else {
		mask = THC_M_PRT_RPRD_CNTRL_PTEC | THC_M_PRT_RPRD_CNTRL_PCD;
		ctrl = FIELD_PREP(THC_M_PRT_RPRD_CNTRL_PTEC, entry_count) |
		       FIELD_PREP(THC_M_PRT_RPRD_CNTRL_PCD, cb_depth);
	}

	regmap_write_bits(dev->thc_regmap, dma_config->prd_cntrl, mask, ctrl);
}

static void dma_clear_prd_control(struct thc_device *dev,
				  struct thc_dma_configuration *dma_config)
{
	u32 mask;

	if (!dma_config->is_enabled)
		return;

	if (dma_config->dma_channel == THC_TXDMA)
		mask = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC;
	else
		mask = THC_M_PRT_RPRD_CNTRL_PTEC | THC_M_PRT_RPRD_CNTRL_PCD;

	regmap_write_bits(dev->thc_regmap, dma_config->prd_cntrl, mask, 0);
}
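/*
 * PRD table ring pointers: the read DMA control register carries a read
 * pointer (TPCRP) and a write pointer (TPCWP). Software only ever updates
 * TPCWP; TPCRP is read back from the controller. The bits covered by
 * THC_POINTER_MASK index a PRD table, while the bits above the mask act
 * as a wraparound tag so equal indexes with different tags can be told
 * apart (see is_dma_buf_empty() and update_write_pointer() below).
 */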
static u8 dma_get_read_pointer(struct thc_device *dev,
			       struct thc_dma_configuration *dma_config)
{
	u32 ctrl, read_pointer;

	regmap_read(dev->thc_regmap, dma_config->dma_cntrl, &ctrl);
	read_pointer = FIELD_GET(THC_M_PRT_READ_DMA_CNTRL_TPCRP, ctrl);

	dev_dbg(dev->dev, "THC_M_PRT_READ_DMA_CNTRL 0x%x offset 0x%x TPCRP 0x%x\n",
		ctrl, dma_config->dma_cntrl, read_pointer);

	return read_pointer;
}

static u8 dma_get_write_pointer(struct thc_device *dev,
				struct thc_dma_configuration *dma_config)
{
	u32 ctrl, write_pointer;

	regmap_read(dev->thc_regmap, dma_config->dma_cntrl, &ctrl);
	write_pointer = FIELD_GET(THC_M_PRT_READ_DMA_CNTRL_TPCWP, ctrl);

	dev_dbg(dev->dev, "THC_M_PRT_READ_DMA_CNTRL 0x%x offset 0x%x TPCWP 0x%x\n",
		ctrl, dma_config->dma_cntrl, write_pointer);

	return write_pointer;
}

static void dma_set_write_pointer(struct thc_device *dev, u8 value,
				  struct thc_dma_configuration *dma_config)
{
	u32 ctrl, mask;

	mask = THC_M_PRT_READ_DMA_CNTRL_TPCWP;
	ctrl = FIELD_PREP(THC_M_PRT_READ_DMA_CNTRL_TPCWP, value);
	regmap_write_bits(dev->thc_regmap, dma_config->dma_cntrl, mask, ctrl);
}

static size_t dma_get_max_packet_size(struct thc_device *dev,
				      struct thc_dma_configuration *dma_config)
{
	return dma_config->max_packet_size;
}

static void dma_set_max_packet_size(struct thc_device *dev, size_t size,
				    struct thc_dma_configuration *dma_config)
{
	if (size) {
		dma_config->max_packet_size = ALIGN(size, SZ_4K);
		dma_config->is_enabled = true;
	}
}

static void thc_copy_one_sgl_to_prd(struct thc_device *dev,
				    struct thc_dma_configuration *config,
				    unsigned int ind)
{
	struct thc_prd_table *prd_tbl;
	struct scatterlist *sg;
	int j;

	prd_tbl = &config->prd_tbls[ind];

	for_each_sg(config->sgls[ind], sg, config->sgls_nent[ind], j) {
		prd_tbl->entries[j].dest_addr =
			sg_dma_address(sg) >> THC_ADDRESS_SHIFT;
		prd_tbl->entries[j].len = sg_dma_len(sg);
		prd_tbl->entries[j].hw_status = 0;
		prd_tbl->entries[j].end_of_prd = 0;
	}

	/* Set the end_of_prd flag in the last filled entry */
	if (j > 0)
		prd_tbl->entries[j - 1].end_of_prd = 1;
}

static void thc_copy_sgls_to_prd(struct thc_device *dev,
				 struct thc_dma_configuration *config)
{
	unsigned int i;

	memset(config->prd_tbls, 0, array_size(PRD_TABLE_SIZE, config->prd_tbl_num));

	for (i = 0; i < config->prd_tbl_num; i++)
		thc_copy_one_sgl_to_prd(dev, config, i);
}
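/*
 * PRD entry packing, by example (the bus address is assumed purely for
 * illustration): a 4 KiB DMA segment at address 0x1234000 becomes
 *
 *	entry.dest_addr = 0x1234000 >> THC_ADDRESS_SHIFT;
 *	entry.len = SZ_4K;
 *
 * and the last populated entry of each table gets end_of_prd = 1 so the
 * controller knows where the table ends.
 */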
static int setup_dma_buffers(struct thc_device *dev,
			     struct thc_dma_configuration *config,
			     enum dma_data_direction dir)
{
	size_t prd_tbls_size = array_size(PRD_TABLE_SIZE, config->prd_tbl_num);
	unsigned int i, nent = PRD_ENTRIES_NUM;
	dma_addr_t dma_handle;
	void *cpu_addr;
	size_t buf_sz;
	int count;

	if (!config->is_enabled)
		return 0;

	memset(config->sgls, 0, sizeof(config->sgls));
	memset(config->sgls_nent, 0, sizeof(config->sgls_nent));

	cpu_addr = dma_alloc_coherent(dev->dev, prd_tbls_size,
				      &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	config->prd_tbls = cpu_addr;
	config->prd_tbls_dma_handle = dma_handle;

	buf_sz = dma_get_max_packet_size(dev, config);

	/* Allocate and map the scatter-gather lists, one for each PRD table */
	for (i = 0; i < config->prd_tbl_num; i++) {
		config->sgls[i] = sgl_alloc(buf_sz, GFP_KERNEL, &nent);
		if (!config->sgls[i] || nent > PRD_ENTRIES_NUM) {
			dev_err_once(dev->dev, "sgl_alloc (%uth) failed, nent %u\n",
				     i, nent);
			return -ENOMEM;
		}

		count = dma_map_sg(dev->dev, config->sgls[i], nent, dir);
		if (!count) {
			dev_err_once(dev->dev, "dma_map_sg (%uth) failed\n", i);
			return -ENOMEM;
		}

		config->sgls_nent[i] = count;
	}

	thc_copy_sgls_to_prd(dev, config);

	return 0;
}

static void thc_reset_dma_settings(struct thc_device *dev)
{
	/* Stop all DMA channels and reset DMA read pointers */
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_START, 0);
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_START, 0);
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_START, 0);
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_DMA_CNTRL_OFFSET,
			  THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START, 0);

	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_TPCPR,
			  THC_M_PRT_READ_DMA_CNTRL_TPCPR);
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_TPCPR,
			  THC_M_PRT_READ_DMA_CNTRL_TPCPR);
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_TPCPR,
			  THC_M_PRT_READ_DMA_CNTRL_TPCPR);
}

static void release_dma_buffers(struct thc_device *dev,
				struct thc_dma_configuration *config)
{
	size_t prd_tbls_size = array_size(PRD_TABLE_SIZE, config->prd_tbl_num);
	unsigned int i;

	if (!config->is_enabled)
		return;

	for (i = 0; i < config->prd_tbl_num; i++) {
		if (!config->sgls[i] || !config->sgls_nent[i])
			continue;

		dma_unmap_sg(dev->dev, config->sgls[i],
			     config->sgls_nent[i],
			     config->dir);

		sgl_free(config->sgls[i]);
		config->sgls[i] = NULL;
	}

	if (config->prd_tbls) {
		memset(config->prd_tbls, 0, prd_tbls_size);
		dma_free_coherent(dev->dev, prd_tbls_size, config->prd_tbls,
				  config->prd_tbls_dma_handle);
		config->prd_tbls = NULL;
		config->prd_tbls_dma_handle = 0;
	}
}

struct thc_dma_context *thc_dma_init(struct thc_device *dev)
{
	struct thc_dma_context *dma_ctx;

	dma_ctx = devm_kzalloc(dev->dev, sizeof(*dma_ctx), GFP_KERNEL);
	if (!dma_ctx)
		return NULL;

	dev->dma_ctx = dma_ctx;

	dma_ctx->dma_config[THC_RXDMA1].dma_channel = THC_RXDMA1;
	dma_ctx->dma_config[THC_RXDMA2].dma_channel = THC_RXDMA2;
	dma_ctx->dma_config[THC_TXDMA].dma_channel = THC_TXDMA;
	dma_ctx->dma_config[THC_SWDMA].dma_channel = THC_SWDMA;

	dma_ctx->dma_config[THC_RXDMA1].dir = DMA_FROM_DEVICE;
	dma_ctx->dma_config[THC_RXDMA2].dir = DMA_FROM_DEVICE;
	dma_ctx->dma_config[THC_TXDMA].dir = DMA_TO_DEVICE;
	dma_ctx->dma_config[THC_SWDMA].dir = DMA_FROM_DEVICE;

	dma_ctx->dma_config[THC_RXDMA1].prd_tbl_num = PRD_TABLES_NUM;
	dma_ctx->dma_config[THC_RXDMA2].prd_tbl_num = PRD_TABLES_NUM;
	dma_ctx->dma_config[THC_TXDMA].prd_tbl_num = 1;
	dma_ctx->dma_config[THC_SWDMA].prd_tbl_num = 1;

	dma_ctx->dma_config[THC_RXDMA1].prd_base_addr_high = THC_M_PRT_RPRD_BA_HI_1_OFFSET;
	dma_ctx->dma_config[THC_RXDMA2].prd_base_addr_high = THC_M_PRT_RPRD_BA_HI_2_OFFSET;
	dma_ctx->dma_config[THC_TXDMA].prd_base_addr_high = THC_M_PRT_WPRD_BA_HI_OFFSET;
	dma_ctx->dma_config[THC_SWDMA].prd_base_addr_high = THC_M_PRT_RPRD_BA_HI_SW_OFFSET;

	dma_ctx->dma_config[THC_RXDMA1].prd_base_addr_low = THC_M_PRT_RPRD_BA_LOW_1_OFFSET;
	dma_ctx->dma_config[THC_RXDMA2].prd_base_addr_low = THC_M_PRT_RPRD_BA_LOW_2_OFFSET;
	dma_ctx->dma_config[THC_TXDMA].prd_base_addr_low = THC_M_PRT_WPRD_BA_LOW_OFFSET;
	dma_ctx->dma_config[THC_SWDMA].prd_base_addr_low = THC_M_PRT_RPRD_BA_LOW_SW_OFFSET;

	dma_ctx->dma_config[THC_RXDMA1].prd_cntrl = THC_M_PRT_RPRD_CNTRL_1_OFFSET;
	dma_ctx->dma_config[THC_RXDMA2].prd_cntrl = THC_M_PRT_RPRD_CNTRL_2_OFFSET;
	dma_ctx->dma_config[THC_TXDMA].prd_cntrl = THC_M_PRT_WRITE_DMA_CNTRL_OFFSET;
	dma_ctx->dma_config[THC_SWDMA].prd_cntrl = THC_M_PRT_RPRD_CNTRL_SW_OFFSET;

	dma_ctx->dma_config[THC_RXDMA1].dma_cntrl = THC_M_PRT_READ_DMA_CNTRL_1_OFFSET;
	dma_ctx->dma_config[THC_RXDMA2].dma_cntrl = THC_M_PRT_READ_DMA_CNTRL_2_OFFSET;
	dma_ctx->dma_config[THC_TXDMA].dma_cntrl = THC_M_PRT_WRITE_DMA_CNTRL_OFFSET;
	dma_ctx->dma_config[THC_SWDMA].dma_cntrl = THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET;

	/* Enable write DMA completion interrupt by default */
	dma_ctx->use_write_interrupts = 1;

	return dma_ctx;
}
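/*
 * Illustrative bring-up order for the exported helpers below (hypothetical
 * caller, error handling elided, packet sizes are examples only):
 *
 *	if (!thc_dma_init(thc))
 *		return -ENOMEM;
 *	thc_dma_set_max_packet_sizes(thc, SZ_4K, SZ_4K, SZ_4K, SZ_4K);
 *	thc_dma_allocate(thc);
 *	thc_dma_configure(thc);
 */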
/**
 * thc_dma_set_max_packet_sizes - Set max packet sizes for all DMA engines
 *
 * @dev: Pointer to the THC private device context
 * @mps_read1: RxDMA1 max packet size
 * @mps_read2: RxDMA2 max packet size
 * @mps_write: TxDMA max packet size
 * @mps_swdma: Software DMA max packet size
 *
 * A non-zero max packet size means the corresponding DMA channel is used;
 * set the enable flag to turn the channel on.
 *
 * Return: 0 on success, other error codes on failure.
 */
int thc_dma_set_max_packet_sizes(struct thc_device *dev, size_t mps_read1,
				 size_t mps_read2, size_t mps_write,
				 size_t mps_swdma)
{
	if (!dev->dma_ctx) {
		dev_err_once(dev->dev,
			     "Cannot set max packet sizes because DMA context is NULL!\n");
		return -EINVAL;
	}

	dma_set_max_packet_size(dev, mps_read1, &dev->dma_ctx->dma_config[THC_RXDMA1]);
	dma_set_max_packet_size(dev, mps_read2, &dev->dma_ctx->dma_config[THC_RXDMA2]);
	dma_set_max_packet_size(dev, mps_write, &dev->dma_ctx->dma_config[THC_TXDMA]);
	dma_set_max_packet_size(dev, mps_swdma, &dev->dma_ctx->dma_config[THC_SWDMA]);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(thc_dma_set_max_packet_sizes, "INTEL_THC");

/**
 * thc_dma_allocate - Allocate DMA buffers for all DMA engines
 *
 * @dev: Pointer to the THC private device context
 *
 * Return: 0 on success, other error codes on failure.
 */
int thc_dma_allocate(struct thc_device *dev)
{
	int ret, chan;

	for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++) {
		ret = setup_dma_buffers(dev, &dev->dma_ctx->dma_config[chan],
					dev->dma_ctx->dma_config[chan].dir);
		if (ret < 0) {
			dev_err_once(dev->dev, "DMA setup failed for DMA channel %d\n", chan);
			goto release_bufs;
		}
	}

	return 0;

release_bufs:
	/* Also release the partially set up failing channel */
	while (chan >= 0)
		release_dma_buffers(dev, &dev->dma_ctx->dma_config[chan--]);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(thc_dma_allocate, "INTEL_THC");

/**
 * thc_dma_release - Release DMA buffers for all DMA engines
 *
 * @dev: Pointer to the THC private device context
 */
void thc_dma_release(struct thc_device *dev)
{
	int chan;

	for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++)
		release_dma_buffers(dev, &dev->dma_ctx->dma_config[chan]);
}
EXPORT_SYMBOL_NS_GPL(thc_dma_release, "INTEL_THC");

static int calc_prd_entries_num(struct thc_prd_table *prd_tbl,
				size_t mes_len, u8 *nent)
{
	*nent = DIV_ROUND_UP(mes_len, THC_MIN_BYTES_PER_SG_LIST_ENTRY);
	if (*nent > PRD_ENTRIES_NUM)
		return -EMSGSIZE;

	return 0;
}

static size_t calc_message_len(struct thc_prd_table *prd_tbl, u8 *nent)
{
	size_t mes_len = 0;
	unsigned int j;

	for (j = 0; j < PRD_ENTRIES_NUM; j++) {
		mes_len += prd_tbl->entries[j].len;
		if (prd_tbl->entries[j].end_of_prd)
			break;
	}

	*nent = j + 1;

	return mes_len;
}
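/*
 * Example walk of calc_message_len() (entry values assumed): for a PRD
 * table whose first three entries have len = 4096, 4096, 120 and
 * end_of_prd = 0, 0, 1, the loop stops at entry 2 and reports
 * mes_len = 8312 with *nent = 3.
 */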
/**
 * thc_dma_configure - Configure DMA settings for all DMA engines
 *
 * @dev: Pointer to the THC private device context
 *
 * Return: 0 on success, other error codes on failure.
 */
int thc_dma_configure(struct thc_device *dev)
{
	struct thc_dma_context *dma_ctx = dev->dma_ctx;
	int chan;

	if (!dma_ctx) {
		dev_err_once(dev->dev, "Cannot do DMA configure because DMA context is NULL\n");
		return -EINVAL;
	}

	thc_reset_dma_settings(dev);

	for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++) {
		dma_set_prd_base_addr(dev,
				      dma_ctx->dma_config[chan].prd_tbls_dma_handle,
				      &dma_ctx->dma_config[chan]);

		dma_set_prd_control(dev, PRD_ENTRIES_NUM - 1,
				    dma_ctx->dma_config[chan].prd_tbl_num - 1,
				    &dma_ctx->dma_config[chan]);
	}

	/* Start the read2 DMA engine */
	dma_set_start_bit(dev, &dma_ctx->dma_config[THC_RXDMA2]);

	dev_dbg(dev->dev, "DMA configured successfully!\n");

	return 0;
}
EXPORT_SYMBOL_NS_GPL(thc_dma_configure, "INTEL_THC");

/**
 * thc_dma_unconfigure - Unconfigure DMA settings for all DMA engines
 *
 * @dev: Pointer to the THC private device context
 */
void thc_dma_unconfigure(struct thc_device *dev)
{
	int chan;

	for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++) {
		dma_set_prd_base_addr(dev, 0, &dev->dma_ctx->dma_config[chan]);
		dma_clear_prd_control(dev, &dev->dma_ctx->dma_config[chan]);
	}

	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_START, 0);

	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_START, 0);
}
EXPORT_SYMBOL_NS_GPL(thc_dma_unconfigure, "INTEL_THC");

static int thc_wait_for_dma_pause(struct thc_device *dev, enum thc_dma_channel channel)
{
	u32 ctrl_reg, sts_reg, sts;
	int ret;

	ctrl_reg = (channel == THC_RXDMA1) ? THC_M_PRT_READ_DMA_CNTRL_1_OFFSET :
		   ((channel == THC_RXDMA2) ? THC_M_PRT_READ_DMA_CNTRL_2_OFFSET :
					      THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET);

	regmap_write_bits(dev->thc_regmap, ctrl_reg, THC_M_PRT_READ_DMA_CNTRL_START, 0);

	sts_reg = (channel == THC_RXDMA1) ? THC_M_PRT_READ_DMA_INT_STS_1_OFFSET :
		  ((channel == THC_RXDMA2) ? THC_M_PRT_READ_DMA_INT_STS_2_OFFSET :
					     THC_M_PRT_READ_DMA_INT_STS_SW_OFFSET);

	ret = regmap_read_poll_timeout(dev->thc_regmap, sts_reg, sts,
				       !(sts & THC_M_PRT_READ_DMA_INT_STS_ACTIVE),
				       THC_DEFAULT_RXDMA_POLLING_US_INTERVAL,
				       THC_DEFAULT_RXDMA_POLLING_US_TIMEOUT);
	if (ret) {
		dev_err_once(dev->dev,
			     "Timeout while waiting for DMA %d to stop\n", channel);
		return ret;
	}

	return 0;
}

static int read_dma_buffer(struct thc_device *dev,
			   struct thc_dma_configuration *read_config,
			   u8 prd_table_index, void *read_buff)
{
	struct thc_prd_table *prd_tbl;
	struct scatterlist *sg;
	size_t mes_len, ret;
	u8 nent;

	if (prd_table_index >= read_config->prd_tbl_num) {
		dev_err_once(dev->dev, "PRD table index %d too big\n", prd_table_index);
		return -EINVAL;
	}

	prd_tbl = &read_config->prd_tbls[prd_table_index];
	mes_len = calc_message_len(prd_tbl, &nent);
	if (mes_len > read_config->max_packet_size) {
		dev_err(dev->dev,
			"Message length %zu is bigger than buffer length %zu\n",
			mes_len, read_config->max_packet_size);
		return -EMSGSIZE;
	}

	sg = read_config->sgls[prd_table_index];
	ret = sg_copy_to_buffer(sg, nent, read_buff, mes_len);
	if (ret != mes_len) {
		dev_err_once(dev->dev, "Copied %zu bytes instead of requested %zu\n",
			     ret, mes_len);
		return -EIO;
	}

	return mes_len;
}

static void update_write_pointer(struct thc_device *dev,
				 struct thc_dma_configuration *read_config)
{
	u8 write_ptr = dma_get_write_pointer(dev, read_config);

	if (write_ptr + 1 == THC_WRAPAROUND_VALUE_ODD)
		dma_set_write_pointer(dev, THC_POINTER_WRAPAROUND, read_config);
	else if (write_ptr + 1 == THC_WRAPAROUND_VALUE_EVEN)
		dma_set_write_pointer(dev, 0, read_config);
	else
		dma_set_write_pointer(dev, write_ptr + 1, read_config);
}

static bool is_dma_buf_empty(struct thc_device *dev,
			     struct thc_dma_configuration *read_config,
			     u8 *read_ptr, u8 *write_ptr)
{
	*read_ptr = dma_get_read_pointer(dev, read_config);
	*write_ptr = dma_get_write_pointer(dev, read_config);

	if ((*read_ptr & THC_POINTER_MASK) == (*write_ptr & THC_POINTER_MASK))
		if (*read_ptr != *write_ptr)
			return true;

	return false;
}
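/*
 * Worked example for the check above (mask value assumed purely for
 * illustration): with THC_POINTER_MASK == 0x7f, read pointer 0x03 vs.
 * write pointer 0x83 share PRD index 3 but differ in the wraparound tag,
 * so the buffer is treated as empty; identical pointers (0x03 vs. 0x03)
 * fail the inner test and the buffer is treated as holding data.
 */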
static int thc_dma_read(struct thc_device *dev,
			struct thc_dma_configuration *read_config,
			void *read_buff, size_t *read_len, int *read_finished)
{
	u8 read_ptr, write_ptr, prd_table_index;
	int status;

	if (!is_dma_buf_empty(dev, read_config, &read_ptr, &write_ptr)) {
		prd_table_index = write_ptr & THC_POINTER_MASK;

		status = read_dma_buffer(dev, read_config, prd_table_index, read_buff);
		if (status <= 0) {
			dev_err_once(dev->dev, "read DMA buffer failed %d\n", status);
			return -EIO;
		}

		*read_len = status;

		/* Clear the relevant PRD table */
		thc_copy_one_sgl_to_prd(dev, read_config, prd_table_index);

		/* Increment the write pointer to let the HW know we have processed this PRD */
		update_write_pointer(dev, read_config);
	}

	/*
	 * This function reads at most one frame from the PRD tables per call,
	 * so report back whether all DMAed data has been read out. The caller
	 * should repeatedly call thc_dma_read() until all DMAed data is
	 * handled.
	 */
	if (read_finished)
		*read_finished = is_dma_buf_empty(dev, read_config, &read_ptr, &write_ptr);

	return 0;
}

/**
 * thc_rxdma_read - Read data from the RxDMA buffer
 *
 * @dev: Pointer to the THC private device context
 * @dma_channel: The RxDMA engine to read data from
 * @read_buff: Pointer to the read data buffer
 * @read_len: Pointer to the returned data length
 * @read_finished: Pointer to a flag indicating whether all pending data has been read out
 *
 * Return: 0 on success, other error codes on failure.
 */
int thc_rxdma_read(struct thc_device *dev, enum thc_dma_channel dma_channel,
		   void *read_buff, size_t *read_len, int *read_finished)
{
	struct thc_dma_configuration *dma_config;

	/* Validate the channel before using it as an array index */
	if (dma_channel >= THC_TXDMA) {
		dev_err(dev->dev, "Unsupported DMA channel for RxDMA read, %d\n", dma_channel);
		return -EINVAL;
	}

	if (!read_buff || !read_len) {
		dev_err(dev->dev, "Invalid input parameters, read_buff %p, read_len %p\n",
			read_buff, read_len);
		return -EINVAL;
	}

	dma_config = &dev->dma_ctx->dma_config[dma_channel];

	if (!dma_config->is_enabled) {
		dev_err_once(dev->dev, "The DMA channel %d is not enabled\n", dma_channel);
		return -EINVAL;
	}

	return thc_dma_read(dev, dma_config, read_buff, read_len, read_finished);
}
EXPORT_SYMBOL_NS_GPL(thc_rxdma_read, "INTEL_THC");
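/*
 * Illustrative consumer loop for thc_rxdma_read() (hypothetical caller;
 * "thc", "buf" and consume() are assumed to be provided elsewhere):
 *
 *	size_t len;
 *	int ret, done = 0;
 *
 *	do {
 *		ret = thc_rxdma_read(thc, THC_RXDMA2, buf, &len, &done);
 *		if (ret)
 *			break;
 *		consume(buf, len);	// hypothetical frame handler
 *	} while (!done);
 */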
static int thc_swdma_read_start(struct thc_device *dev, void *write_buff,
				size_t write_len, u32 *prd_tbl_len)
{
	u32 mask, val, data0 = 0, data1 = 0;
	int ret;

	ret = thc_interrupt_quiesce(dev, true);
	if (ret)
		return ret;

	if (thc_wait_for_dma_pause(dev, THC_RXDMA1) || thc_wait_for_dma_pause(dev, THC_RXDMA2))
		return -EIO;

	thc_reset_dma_settings(dev);

	/*
	 * The max input size control feature is only available for RxDMA; it
	 * must stay disabled during a SWDMA operation and be restored to its
	 * previous state once SWDMA is done. The max input size fields in the
	 * THC device context track the hardware state and change whenever the
	 * feature state changes, so they cannot record the pre-SWDMA state.
	 * Use a temporary flag in the DMA context instead.
	 */
	if (dev->i2c_max_rx_size_en) {
		thc_i2c_rx_max_size_enable(dev, false);
		dev->dma_ctx->rx_max_size_en = true;
	}

	/*
	 * The interrupt delay feature is in the same situation as the max
	 * input size control: record its state before starting SWDMA.
	 */
	if (dev->i2c_int_delay_en) {
		thc_i2c_rx_int_delay_enable(dev, false);
		dev->dma_ctx->rx_int_delay_en = true;
	}

	mask = THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_WBC |
	       THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_RX_DLEN_EN;
	val = FIELD_PREP(THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_WBC, write_len) |
	      (!prd_tbl_len ? THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_RX_DLEN_EN : 0);
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_RPRD_CNTRL_SW_OFFSET,
			  mask, val);

	if (prd_tbl_len) {
		mask = THC_M_PRT_SW_DMA_PRD_TABLE_LEN_THC_M_PRT_SW_DMA_PRD_TABLE_LEN;
		val = FIELD_PREP(THC_M_PRT_SW_DMA_PRD_TABLE_LEN_THC_M_PRT_SW_DMA_PRD_TABLE_LEN,
				 *prd_tbl_len);
		regmap_write_bits(dev->thc_regmap, THC_M_PRT_SW_DMA_PRD_TABLE_LEN_OFFSET,
				  mask, val);
	}

	if (write_len <= sizeof(u32)) {
		for (int i = 0; i < write_len; i++)
			data0 |= *(((u8 *)write_buff) + i) << (i * 8);

		regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA0_ADDR_OFFSET, data0);
	} else if (write_len <= 2 * sizeof(u32)) {
		data0 = *(u32 *)write_buff;
		regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA0_ADDR_OFFSET, data0);

		for (int i = 0; i < write_len - sizeof(u32); i++)
			data1 |= *(((u8 *)write_buff) + sizeof(u32) + i) << (i * 8);

		regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA1_OFFSET, data1);
	}

	dma_set_start_bit(dev, &dev->dma_ctx->dma_config[THC_SWDMA]);

	return 0;
}

static int thc_swdma_read_completion(struct thc_device *dev)
{
	int ret;

	ret = thc_wait_for_dma_pause(dev, THC_SWDMA);
	if (ret)
		return ret;

	/*
	 * Restore the max input size control feature to its previous state
	 * if it was enabled before SWDMA, and reset the temporary
	 * rx_max_size_en flag for next time.
	 */
	if (dev->dma_ctx->rx_max_size_en) {
		thc_i2c_rx_max_size_enable(dev, true);
		dev->dma_ctx->rx_max_size_en = false;
	}

	/*
	 * Restore the input interrupt delay feature to its previous state
	 * if it was enabled before SWDMA, and reset the temporary
	 * rx_int_delay_en flag for next time.
	 */
	if (dev->dma_ctx->rx_int_delay_en) {
		thc_i2c_rx_int_delay_enable(dev, true);
		dev->dma_ctx->rx_int_delay_en = false;
	}

	thc_reset_dma_settings(dev);

	dma_set_start_bit(dev, &dev->dma_ctx->dma_config[THC_RXDMA2]);

	return thc_interrupt_quiesce(dev, false);
}

/**
 * thc_swdma_read - Use software DMA to read data from the touch device
 *
 * @dev: Pointer to the THC private device context
 * @write_buff: Pointer to the write buffer for the SWDMA sequence
 * @write_len: The write data length for the SWDMA sequence
 * @prd_tbl_len: The PRD table length of the SWDMA engine; may be NULL
 * @read_buff: Pointer to the read data buffer
 * @read_len: Pointer to the returned data length
 *
 * Return: 0 on success, other error codes on failure.
 */
int thc_swdma_read(struct thc_device *dev, void *write_buff, size_t write_len,
		   u32 *prd_tbl_len, void *read_buff, size_t *read_len)
{
	int ret;

	if (!dev->dma_ctx->dma_config[THC_SWDMA].is_enabled) {
		dev_err_once(dev->dev, "The SWDMA channel is not enabled\n");
		return -EINVAL;
	}

	if (!read_buff || !read_len) {
		dev_err(dev->dev, "Invalid input parameters, read_buff %p, read_len %p\n",
			read_buff, read_len);
		return -EINVAL;
	}

	if (mutex_lock_interruptible(&dev->thc_bus_lock))
		return -EINTR;

	dev->swdma_done = false;

	ret = thc_swdma_read_start(dev, write_buff, write_len, prd_tbl_len);
	if (ret)
		goto end;

	ret = wait_event_interruptible_timeout(dev->swdma_complete_wait, dev->swdma_done, 1 * HZ);
	if (ret <= 0 || !dev->swdma_done) {
		dev_err_once(dev->dev, "Timed out waiting for SWDMA completion\n");
		ret = -ETIMEDOUT;
		goto end;
	}

	ret = thc_dma_read(dev, &dev->dma_ctx->dma_config[THC_SWDMA], read_buff, read_len, NULL);
	if (ret)
		goto end;

	ret = thc_swdma_read_completion(dev);

end:
	mutex_unlock(&dev->thc_bus_lock);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(thc_swdma_read, "INTEL_THC");
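/*
 * thc_swdma_read() is a one-shot, synchronous sequence: it quiesces device
 * interrupts, pauses the RxDMA engines, runs the software DMA transaction,
 * then restarts RxDMA2 and un-quiesces interrupts. Illustrative call
 * (hypothetical caller; the command bytes are placeholders):
 *
 *	u8 cmd[] = { 0x22, 0x00, 0x3f, 0x02, 0x23, 0x00 };
 *	size_t len;
 *	int ret;
 *
 *	ret = thc_swdma_read(thc, cmd, sizeof(cmd), NULL, buf, &len);
 */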
static int write_dma_buffer(struct thc_device *dev,
			    void *buffer, size_t buf_len)
{
	struct thc_dma_configuration *write_config = &dev->dma_ctx->dma_config[THC_TXDMA];
	struct thc_prd_table *prd_tbl;
	struct scatterlist *sg;
	unsigned long len_left;
	size_t ret;
	u8 nent;
	int i;

	/* There is only one PRD table for write */
	prd_tbl = &write_config->prd_tbls[0];

	if (calc_prd_entries_num(prd_tbl, buf_len, &nent) < 0) {
		dev_err(dev->dev, "Tx message length too big (%zu)\n", buf_len);
		return -EOVERFLOW;
	}

	sg = write_config->sgls[0];
	ret = sg_copy_from_buffer(sg, nent, buffer, buf_len);
	if (ret != buf_len) {
		dev_err_once(dev->dev, "Copied %zu bytes instead of requested %zu\n",
			     ret, buf_len);
		return -EIO;
	}

	len_left = buf_len;

	for_each_sg(write_config->sgls[0], sg, write_config->sgls_nent[0], i) {
		if (sg_dma_address(sg) == 0 || sg_dma_len(sg) == 0) {
			dev_err_once(dev->dev, "SGList: zero address or length\n");
			return -EINVAL;
		}

		prd_tbl->entries[i].dest_addr =
			sg_dma_address(sg) >> THC_ADDRESS_SHIFT;

		if (len_left < sg_dma_len(sg)) {
			prd_tbl->entries[i].len = len_left;
			prd_tbl->entries[i].end_of_prd = 1;
			break;
		}

		prd_tbl->entries[i].len = sg_dma_len(sg);
		prd_tbl->entries[i].end_of_prd = 0;

		len_left -= sg_dma_len(sg);
	}

	dma_set_prd_control(dev, i, 0, write_config);

	return 0;
}
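/*
 * Example of the tail truncation above (segment sizes assumed): writing a
 * 5000-byte message over 4096-byte SG segments fills entry 0 with
 * len = 4096 and entry 1 with len = 904 and end_of_prd = 1; the remaining
 * entries stay untouched and PTEC is programmed with the index of the last
 * entry used.
 */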
static void thc_ensure_performance_limitations(struct thc_device *dev)
{
	unsigned long delay_usec = 0;

	/*
	 * Minimum amount of delay the THC / QUICKSPI driver must wait
	 * between the end of a write operation and the beginning of a
	 * read operation. This value shall be in 10us multiples.
	 */
	if (dev->perf_limit > 0) {
		delay_usec = dev->perf_limit * 10;
		udelay(delay_usec);
	}
}

static void thc_dma_write_completion(struct thc_device *dev)
{
	thc_ensure_performance_limitations(dev);
}
/**
 * thc_dma_write - Use TxDMA to write data to the touch device
 *
 * @dev: Pointer to the THC private device context
 * @buffer: Pointer to the write data buffer
 * @buf_len: The write data length
 *
 * Return: 0 on success, other error codes on failure.
 */
int thc_dma_write(struct thc_device *dev, void *buffer, size_t buf_len)
{
	bool restore_interrupts = false;
	u32 sts, ctrl;
	int ret;

	if (!dev->dma_ctx->dma_config[THC_TXDMA].is_enabled) {
		dev_err_once(dev->dev, "The TxDMA channel is not enabled\n");
		return -EINVAL;
	}

	if (!buffer || !buf_len) {
		dev_err(dev->dev, "Invalid input parameters, buffer %p, buf_len %zu\n",
			buffer, buf_len);
		return -EINVAL;
	}

	regmap_read(dev->thc_regmap, THC_M_PRT_WRITE_INT_STS_OFFSET, &sts);
	if (sts & THC_M_PRT_WRITE_INT_STS_THC_WRDMA_ACTIVE) {
		dev_err_once(dev->dev, "THC TxDMA is still active and can't start again\n");
		return -EBUSY;
	}

	if (mutex_lock_interruptible(&dev->thc_bus_lock))
		return -EINTR;

	regmap_read(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET, &ctrl);

	ret = write_dma_buffer(dev, buffer, buf_len);
	if (ret)
		goto end;

	if (dev->perf_limit && !(ctrl & THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_HW_STS)) {
		ret = thc_interrupt_quiesce(dev, true);
		if (ret)
			goto end;

		restore_interrupts = true;
	}

	dev->write_done = false;

	dma_set_start_bit(dev, &dev->dma_ctx->dma_config[THC_TXDMA]);

	ret = wait_event_interruptible_timeout(dev->write_complete_wait, dev->write_done, 1 * HZ);
	if (ret <= 0 || !dev->write_done) {
		dev_err_once(dev->dev, "Timed out waiting for TxDMA completion\n");
		ret = -ETIMEDOUT;
		goto end;
	}

	thc_dma_write_completion(dev);
	ret = 0;

end:
	mutex_unlock(&dev->thc_bus_lock);

	/* Restore the interrupt state on both the success and error paths */
	if (restore_interrupts) {
		int err = thc_interrupt_quiesce(dev, false);

		if (!ret)
			ret = err;
	}

	return ret;
}
EXPORT_SYMBOL_NS_GPL(thc_dma_write, "INTEL_THC");
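/*
 * Illustrative TxDMA write (hypothetical caller; the report bytes are
 * placeholders):
 *
 *	u8 report[] = { 0x05, 0x00, 0x3f, 0x02 };
 *	int ret;
 *
 *	ret = thc_dma_write(thc, report, sizeof(report));
 */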