/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "dmaengine.h"
#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)	(2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan)	(0x3 << D40_CHAN_POS(chan))

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Milliseconds */
#define DMA40_AUTOSUSPEND_DELAY	100

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Max number of logical channels per physical channel */
#define D40_MAX_LOG_CHAN_PER_PHY 32

/* Attempts before giving up trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		BIT(31)
#define D40_ALLOC_PHY		BIT(30)
#define D40_ALLOC_LOG_FREE	0

#define D40_MEMCPY_MAX_CHANS	8

/* Reserved event lines for memcpy only. */
#define DB8500_DMA_MEMCPY_EV_0	51
#define DB8500_DMA_MEMCPY_EV_1	56
#define DB8500_DMA_MEMCPY_EV_2	57
#define DB8500_DMA_MEMCPY_EV_3	58
#define DB8500_DMA_MEMCPY_EV_4	59
#define DB8500_DMA_MEMCPY_EV_5	60

static int dma40_memcpy_channels[] = {
	DB8500_DMA_MEMCPY_EV_0,
	DB8500_DMA_MEMCPY_EV_1,
	DB8500_DMA_MEMCPY_EV_2,
	DB8500_DMA_MEMCPY_EV_3,
	DB8500_DMA_MEMCPY_EV_4,
	DB8500_DMA_MEMCPY_EV_5,
};

/* Default configuration for physical memcpy */
static struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
	.mode = STEDMA40_MODE_PHYSICAL,
	.dir = DMA_MEM_TO_MEM,

	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.src_info.psize = STEDMA40_PSIZE_PHY_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.dst_info.psize = STEDMA40_PSIZE_PHY_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/* Default configuration for logical memcpy */
static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
	.mode = STEDMA40_MODE_LOGICAL,
	.dir = DMA_MEM_TO_MEM,

	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.src_info.psize = STEDMA40_PSIZE_LOG_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};

/*
 * enum d40_events - The different Event Enables for the event lines.
 *
 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
 * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line.
 * @D40_ROUND_EVENTLINE: Status check for event line.
 */

enum d40_events {
	D40_DEACTIVATE_EVENTLINE	= 0,
	D40_ACTIVATE_EVENTLINE		= 1,
	D40_SUSPEND_REQ_EVENTLINE	= 2,
	D40_ROUND_EVENTLINE		= 3
};

/*
 * These are the registers that have to be saved and later restored
 * when the DMA hw is powered off.
 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
 */
static u32 d40_backup_regs[] = {
	D40_DREG_LCPA,
	D40_DREG_LCLA,
	D40_DREG_PRMSE,
	D40_DREG_PRMSO,
	D40_DREG_PRMOE,
	D40_DREG_PRMOO,
};

#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)

/*
 * Since 9540 and 8540 have the same HW revision,
 * use v4a for 9540 or earlier and
 * use v4b for 8540 or later.
 * HW revision:
 * DB8500ed has revision 0
 * DB8500v1 has revision 2
 * DB8500v2 has revision 3
 * AP9540v1 has revision 4
 * DB8540v1 has revision 4
 * TODO: Check if all these registers have to be saved/restored on dma40 v4a
 */
static u32 d40_backup_regs_v4a[] = {
	D40_DREG_PSEG1,
	D40_DREG_PSEG2,
	D40_DREG_PSEG3,
	D40_DREG_PSEG4,
	D40_DREG_PCEG1,
	D40_DREG_PCEG2,
	D40_DREG_PCEG3,
	D40_DREG_PCEG4,
	D40_DREG_RSEG1,
	D40_DREG_RSEG2,
	D40_DREG_RSEG3,
	D40_DREG_RSEG4,
	D40_DREG_RCEG1,
	D40_DREG_RCEG2,
	D40_DREG_RCEG3,
	D40_DREG_RCEG4,
};

#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)

static u32 d40_backup_regs_v4b[] = {
	D40_DREG_CPSEG1,
	D40_DREG_CPSEG2,
	D40_DREG_CPSEG3,
	D40_DREG_CPSEG4,
	D40_DREG_CPSEG5,
	D40_DREG_CPCEG1,
	D40_DREG_CPCEG2,
	D40_DREG_CPCEG3,
	D40_DREG_CPCEG4,
	D40_DREG_CPCEG5,
	D40_DREG_CRSEG1,
	D40_DREG_CRSEG2,
	D40_DREG_CRSEG3,
	D40_DREG_CRSEG4,
	D40_DREG_CRSEG5,
	D40_DREG_CRCEG1,
	D40_DREG_CRCEG2,
	D40_DREG_CRCEG3,
	D40_DREG_CRCEG4,
	D40_DREG_CRCEG5,
};

#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)

static u32 d40_backup_regs_chan[] = {
	D40_CHAN_REG_SSCFG,
	D40_CHAN_REG_SSELT,
	D40_CHAN_REG_SSPTR,
	D40_CHAN_REG_SSLNK,
	D40_CHAN_REG_SDCFG,
	D40_CHAN_REG_SDELT,
	D40_CHAN_REG_SDPTR,
	D40_CHAN_REG_SDLNK,
};

#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
			     BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If it equals
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};


static struct d40_interrupt_lookup il_v4a[] = {
	{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
	{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
	{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
	{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
	{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
	{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
	{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
	{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
	{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
	{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
};

static struct d40_interrupt_lookup il_v4b[] = {
	{D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false,  0},
	{D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
	{D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
	{D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
	{D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
	{D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true,   0},
	{D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true,  32},
	{D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true,  64},
	{D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true,  96},
	{D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true,  128},
	{D40_DREG_CPCTIS,  D40_DREG_CPCICR,  false, D40_PHY_CHAN},
	{D40_DREG_CPCEIS,  D40_DREG_CPCICR,  true,  D40_PHY_CHAN},
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
};
static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC,     .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	dma_addr_t	dma_addr;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used for among other things for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_current;
	int				 lcla_alloc;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	bool				 is_in_client_list;
	bool				 cyclic;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map over which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void		*base;
	dma_addr_t	 dma_addr;
	void		*base_unaligned;
	int		 pages;
	spinlock_t	 lock;
	struct d40_desc	**alloc_map;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @reserved: True if used by secure world or otherwise.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 * @use_soft_lli: To mark if the linked lists of the channel are managed by SW.
 */
struct d40_phy_res {
	spinlock_t lock;
	bool	   reserved;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
	bool	   use_soft_lli;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any of this channel.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending()
 * @active: Active descriptor.
 * @done: Completed jobs
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 pending_queue;
	struct list_head		 active;
	struct list_head		 done;
	struct list_head		 queue;
	struct list_head		 prepare_queue;
	struct stedma40_chan_cfg	 dma_cfg;
	bool				 configured;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_log_lli_full		*lcpa;
	/* Runtime reconfiguration */
	dma_addr_t			 runtime_addr;
	enum dma_transfer_direction	 runtime_direction;
};

/**
 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
 * controller
 *
 * @backup: the pointer to the registers address array for backup
 * @backup_size: the size of the registers address array for backup
 * @realtime_en: the realtime enable register
 * @realtime_clear: the realtime clear register
 * @high_prio_en: the high priority enable register
 * @high_prio_clear: the high priority clear register
 * @interrupt_en: the interrupt enable register
 * @interrupt_clear: the interrupt clear register
 * @il: the pointer to struct d40_interrupt_lookup
 * @il_size: the size of d40_interrupt_lookup array
 * @init_reg: the pointer to the struct d40_reg_val
 * @init_reg_size: the size of d40_reg_val array
 */
struct d40_gen_dmac {
	u32				*backup;
	u32				 backup_size;
	u32				 realtime_en;
	u32				 realtime_clear;
	u32				 high_prio_en;
	u32				 high_prio_clear;
	u32				 interrupt_en;
	u32				 interrupt_clear;
	struct d40_interrupt_lookup	*il;
	u32				 il_size;
	struct d40_reg_val		*init_reg;
	u32				 init_reg_size;
};

/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
 * transfers).
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 * @reg_val_backup: Here the values of some hardware registers are stored
 * before the DMA is powered off. They are restored when the power is back on.
 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
 * later
 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
559 * @gen_dmac: the struct for generic registers values to represent u8500/8540 560 * DMA controller 561 */ 562 struct d40_base { 563 spinlock_t interrupt_lock; 564 spinlock_t execmd_lock; 565 struct device *dev; 566 void __iomem *virtbase; 567 u8 rev:4; 568 struct clk *clk; 569 phys_addr_t phy_start; 570 resource_size_t phy_size; 571 int irq; 572 int num_memcpy_chans; 573 int num_phy_chans; 574 int num_log_chans; 575 struct device_dma_parameters dma_parms; 576 struct dma_device dma_both; 577 struct dma_device dma_slave; 578 struct dma_device dma_memcpy; 579 struct d40_chan *phy_chans; 580 struct d40_chan *log_chans; 581 struct d40_chan **lookup_log_chans; 582 struct d40_chan **lookup_phy_chans; 583 struct stedma40_platform_data *plat_data; 584 struct regulator *lcpa_regulator; 585 /* Physical half channels */ 586 struct d40_phy_res *phy_res; 587 struct d40_lcla_pool lcla_pool; 588 void *lcpa_base; 589 dma_addr_t phy_lcpa; 590 resource_size_t lcpa_size; 591 struct kmem_cache *desc_slab; 592 u32 reg_val_backup[BACKUP_REGS_SZ]; 593 u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX]; 594 u32 *reg_val_backup_chan; 595 u16 gcc_pwr_off_mask; 596 struct d40_gen_dmac gen_dmac; 597 }; 598 599 static struct device *chan2dev(struct d40_chan *d40c) 600 { 601 return &d40c->chan.dev->device; 602 } 603 604 static bool chan_is_physical(struct d40_chan *chan) 605 { 606 return chan->log_num == D40_PHY_CHAN; 607 } 608 609 static bool chan_is_logical(struct d40_chan *chan) 610 { 611 return !chan_is_physical(chan); 612 } 613 614 static void __iomem *chan_base(struct d40_chan *chan) 615 { 616 return chan->base->virtbase + D40_DREG_PCBASE + 617 chan->phy_chan->num * D40_DREG_PCDELTA; 618 } 619 620 #define d40_err(dev, format, arg...) \ 621 dev_err(dev, "[%s] " format, __func__, ## arg) 622 623 #define chan_err(d40c, format, arg...) 
\ 624 d40_err(chan2dev(d40c), format, ## arg) 625 626 static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, 627 int lli_len) 628 { 629 bool is_log = chan_is_logical(d40c); 630 u32 align; 631 void *base; 632 633 if (is_log) 634 align = sizeof(struct d40_log_lli); 635 else 636 align = sizeof(struct d40_phy_lli); 637 638 if (lli_len == 1) { 639 base = d40d->lli_pool.pre_alloc_lli; 640 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); 641 d40d->lli_pool.base = NULL; 642 } else { 643 d40d->lli_pool.size = lli_len * 2 * align; 644 645 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); 646 d40d->lli_pool.base = base; 647 648 if (d40d->lli_pool.base == NULL) 649 return -ENOMEM; 650 } 651 652 if (is_log) { 653 d40d->lli_log.src = PTR_ALIGN(base, align); 654 d40d->lli_log.dst = d40d->lli_log.src + lli_len; 655 656 d40d->lli_pool.dma_addr = 0; 657 } else { 658 d40d->lli_phy.src = PTR_ALIGN(base, align); 659 d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; 660 661 d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev, 662 d40d->lli_phy.src, 663 d40d->lli_pool.size, 664 DMA_TO_DEVICE); 665 666 if (dma_mapping_error(d40c->base->dev, 667 d40d->lli_pool.dma_addr)) { 668 kfree(d40d->lli_pool.base); 669 d40d->lli_pool.base = NULL; 670 d40d->lli_pool.dma_addr = 0; 671 return -ENOMEM; 672 } 673 } 674 675 return 0; 676 } 677 678 static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d) 679 { 680 if (d40d->lli_pool.dma_addr) 681 dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr, 682 d40d->lli_pool.size, DMA_TO_DEVICE); 683 684 kfree(d40d->lli_pool.base); 685 d40d->lli_pool.base = NULL; 686 d40d->lli_pool.size = 0; 687 d40d->lli_log.src = NULL; 688 d40d->lli_log.dst = NULL; 689 d40d->lli_phy.src = NULL; 690 d40d->lli_phy.dst = NULL; 691 } 692 693 static int d40_lcla_alloc_one(struct d40_chan *d40c, 694 struct d40_desc *d40d) 695 { 696 unsigned long flags; 697 int i; 698 int ret = -EINVAL; 699 700 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); 701 702 /* 703 * Allocate both src and dst at the same time, therefore the half 704 * start on 1 since 0 can't be used since zero is used as end marker. 
705 */ 706 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { 707 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i; 708 709 if (!d40c->base->lcla_pool.alloc_map[idx]) { 710 d40c->base->lcla_pool.alloc_map[idx] = d40d; 711 d40d->lcla_alloc++; 712 ret = i; 713 break; 714 } 715 } 716 717 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); 718 719 return ret; 720 } 721 722 static int d40_lcla_free_all(struct d40_chan *d40c, 723 struct d40_desc *d40d) 724 { 725 unsigned long flags; 726 int i; 727 int ret = -EINVAL; 728 729 if (chan_is_physical(d40c)) 730 return 0; 731 732 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); 733 734 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { 735 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i; 736 737 if (d40c->base->lcla_pool.alloc_map[idx] == d40d) { 738 d40c->base->lcla_pool.alloc_map[idx] = NULL; 739 d40d->lcla_alloc--; 740 if (d40d->lcla_alloc == 0) { 741 ret = 0; 742 break; 743 } 744 } 745 } 746 747 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); 748 749 return ret; 750 751 } 752 753 static void d40_desc_remove(struct d40_desc *d40d) 754 { 755 list_del(&d40d->node); 756 } 757 758 static struct d40_desc *d40_desc_get(struct d40_chan *d40c) 759 { 760 struct d40_desc *desc = NULL; 761 762 if (!list_empty(&d40c->client)) { 763 struct d40_desc *d; 764 struct d40_desc *_d; 765 766 list_for_each_entry_safe(d, _d, &d40c->client, node) { 767 if (async_tx_test_ack(&d->txd)) { 768 d40_desc_remove(d); 769 desc = d; 770 memset(desc, 0, sizeof(*desc)); 771 break; 772 } 773 } 774 } 775 776 if (!desc) 777 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT); 778 779 if (desc) 780 INIT_LIST_HEAD(&desc->node); 781 782 return desc; 783 } 784 785 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) 786 { 787 788 d40_pool_lli_free(d40c, d40d); 789 d40_lcla_free_all(d40c, d40d); 790 kmem_cache_free(d40c->base->desc_slab, d40d); 791 } 792 793 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) 794 { 795 list_add_tail(&desc->node, &d40c->active); 796 } 797 798 static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc) 799 { 800 struct d40_phy_lli *lli_dst = desc->lli_phy.dst; 801 struct d40_phy_lli *lli_src = desc->lli_phy.src; 802 void __iomem *base = chan_base(chan); 803 804 writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG); 805 writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT); 806 writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR); 807 writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK); 808 809 writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG); 810 writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT); 811 writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR); 812 writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); 813 } 814 815 static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc) 816 { 817 list_add_tail(&desc->node, &d40c->done); 818 } 819 820 static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) 821 { 822 struct d40_lcla_pool *pool = &chan->base->lcla_pool; 823 struct d40_log_lli_bidir *lli = &desc->lli_log; 824 int lli_current = desc->lli_current; 825 int lli_len = desc->lli_len; 826 bool cyclic = desc->cyclic; 827 int curr_lcla = -EINVAL; 828 int first_lcla = 0; 829 bool use_esram_lcla = chan->base->plat_data->use_esram_lcla; 830 bool linkback; 831 832 /* 833 * We may have partially running cyclic transfers, in case we did't get 834 * enough LCLA entries. 
	 */
	linkback = cyclic && lli_current == 0;

	/*
	 * For linkback, we need one LCLA even with only one link, because we
	 * can't link back to the one in LCPA space
	 */
	if (linkback || (lli_len - lli_current > 1)) {
		/*
		 * If the channel is expected to use only soft_lli don't
		 * allocate an LCLA. This is to avoid a HW issue that exists
		 * in some controllers during a peripheral to memory transfer
		 * that uses linked lists.
		 */
		if (!(chan->phy_chan->use_soft_lli &&
		      chan->dma_cfg.dir == DMA_DEV_TO_MEM))
			curr_lcla = d40_lcla_alloc_one(chan, desc);

		first_lcla = curr_lcla;
	}

	/*
	 * For linkback, we normally load the LCPA in the loop since we need to
	 * link it to the second LCLA and not the first. However, if we
	 * couldn't even get a first LCLA, then we have to run in LCPA and
	 * reload manually.
	 */
	if (!linkback || curr_lcla == -EINVAL) {
		unsigned int flags = 0;

		if (curr_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		d40_log_lli_lcpa_write(chan->lcpa,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       curr_lcla,
				       flags);
		lli_current++;
	}

	if (curr_lcla < 0)
		goto out;

	for (; lli_current < lli_len; lli_current++) {
		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
					   8 * curr_lcla * 2;
		struct d40_log_lli *lcla = pool->base + lcla_offset;
		unsigned int flags = 0;
		int next_lcla;

		if (lli_current + 1 < lli_len)
			next_lcla = d40_lcla_alloc_one(chan, desc);
		else
			next_lcla = linkback ? first_lcla : -EINVAL;

		if (cyclic || next_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		if (linkback && curr_lcla == first_lcla) {
			/* First link goes in both LCPA and LCLA */
			d40_log_lli_lcpa_write(chan->lcpa,
					       &lli->dst[lli_current],
					       &lli->src[lli_current],
					       next_lcla, flags);
		}

		/*
		 * One unused LCLA in the cyclic case if the very first
		 * next_lcla fails...
905 */ 906 d40_log_lli_lcla_write(lcla, 907 &lli->dst[lli_current], 908 &lli->src[lli_current], 909 next_lcla, flags); 910 911 /* 912 * Cache maintenance is not needed if lcla is 913 * mapped in esram 914 */ 915 if (!use_esram_lcla) { 916 dma_sync_single_range_for_device(chan->base->dev, 917 pool->dma_addr, lcla_offset, 918 2 * sizeof(struct d40_log_lli), 919 DMA_TO_DEVICE); 920 } 921 curr_lcla = next_lcla; 922 923 if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { 924 lli_current++; 925 break; 926 } 927 } 928 929 out: 930 desc->lli_current = lli_current; 931 } 932 933 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) 934 { 935 if (chan_is_physical(d40c)) { 936 d40_phy_lli_load(d40c, d40d); 937 d40d->lli_current = d40d->lli_len; 938 } else 939 d40_log_lli_to_lcxa(d40c, d40d); 940 } 941 942 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) 943 { 944 struct d40_desc *d; 945 946 if (list_empty(&d40c->active)) 947 return NULL; 948 949 d = list_first_entry(&d40c->active, 950 struct d40_desc, 951 node); 952 return d; 953 } 954 955 /* remove desc from current queue and add it to the pending_queue */ 956 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) 957 { 958 d40_desc_remove(desc); 959 desc->is_in_client_list = false; 960 list_add_tail(&desc->node, &d40c->pending_queue); 961 } 962 963 static struct d40_desc *d40_first_pending(struct d40_chan *d40c) 964 { 965 struct d40_desc *d; 966 967 if (list_empty(&d40c->pending_queue)) 968 return NULL; 969 970 d = list_first_entry(&d40c->pending_queue, 971 struct d40_desc, 972 node); 973 return d; 974 } 975 976 static struct d40_desc *d40_first_queued(struct d40_chan *d40c) 977 { 978 struct d40_desc *d; 979 980 if (list_empty(&d40c->queue)) 981 return NULL; 982 983 d = list_first_entry(&d40c->queue, 984 struct d40_desc, 985 node); 986 return d; 987 } 988 989 static struct d40_desc *d40_first_done(struct d40_chan *d40c) 990 { 991 if (list_empty(&d40c->done)) 992 return NULL; 993 994 return list_first_entry(&d40c->done, struct d40_desc, node); 995 } 996 997 static int d40_psize_2_burst_size(bool is_log, int psize) 998 { 999 if (is_log) { 1000 if (psize == STEDMA40_PSIZE_LOG_1) 1001 return 1; 1002 } else { 1003 if (psize == STEDMA40_PSIZE_PHY_1) 1004 return 1; 1005 } 1006 1007 return 2 << psize; 1008 } 1009 1010 /* 1011 * The dma only supports transmitting packages up to 1012 * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in Bytes. 1013 * 1014 * Calculate the total number of dma elements required to send the entire sg list. 
1015 */ 1016 static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2) 1017 { 1018 int dmalen; 1019 u32 max_w = max(data_width1, data_width2); 1020 u32 min_w = min(data_width1, data_width2); 1021 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w); 1022 1023 if (seg_max > STEDMA40_MAX_SEG_SIZE) 1024 seg_max -= max_w; 1025 1026 if (!IS_ALIGNED(size, max_w)) 1027 return -EINVAL; 1028 1029 if (size <= seg_max) 1030 dmalen = 1; 1031 else { 1032 dmalen = size / seg_max; 1033 if (dmalen * seg_max < size) 1034 dmalen++; 1035 } 1036 return dmalen; 1037 } 1038 1039 static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len, 1040 u32 data_width1, u32 data_width2) 1041 { 1042 struct scatterlist *sg; 1043 int i; 1044 int len = 0; 1045 int ret; 1046 1047 for_each_sg(sgl, sg, sg_len, i) { 1048 ret = d40_size_2_dmalen(sg_dma_len(sg), 1049 data_width1, data_width2); 1050 if (ret < 0) 1051 return ret; 1052 len += ret; 1053 } 1054 return len; 1055 } 1056 1057 static int __d40_execute_command_phy(struct d40_chan *d40c, 1058 enum d40_command command) 1059 { 1060 u32 status; 1061 int i; 1062 void __iomem *active_reg; 1063 int ret = 0; 1064 unsigned long flags; 1065 u32 wmask; 1066 1067 if (command == D40_DMA_STOP) { 1068 ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ); 1069 if (ret) 1070 return ret; 1071 } 1072 1073 spin_lock_irqsave(&d40c->base->execmd_lock, flags); 1074 1075 if (d40c->phy_chan->num % 2 == 0) 1076 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; 1077 else 1078 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; 1079 1080 if (command == D40_DMA_SUSPEND_REQ) { 1081 status = (readl(active_reg) & 1082 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 1083 D40_CHAN_POS(d40c->phy_chan->num); 1084 1085 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) 1086 goto done; 1087 } 1088 1089 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num)); 1090 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)), 1091 active_reg); 1092 1093 if (command == D40_DMA_SUSPEND_REQ) { 1094 1095 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) { 1096 status = (readl(active_reg) & 1097 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 1098 D40_CHAN_POS(d40c->phy_chan->num); 1099 1100 cpu_relax(); 1101 /* 1102 * Reduce the number of bus accesses while 1103 * waiting for the DMA to suspend. 
1104 */ 1105 udelay(3); 1106 1107 if (status == D40_DMA_STOP || 1108 status == D40_DMA_SUSPENDED) 1109 break; 1110 } 1111 1112 if (i == D40_SUSPEND_MAX_IT) { 1113 chan_err(d40c, 1114 "unable to suspend the chl %d (log: %d) status %x\n", 1115 d40c->phy_chan->num, d40c->log_num, 1116 status); 1117 dump_stack(); 1118 ret = -EBUSY; 1119 } 1120 1121 } 1122 done: 1123 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags); 1124 return ret; 1125 } 1126 1127 static void d40_term_all(struct d40_chan *d40c) 1128 { 1129 struct d40_desc *d40d; 1130 struct d40_desc *_d; 1131 1132 /* Release completed descriptors */ 1133 while ((d40d = d40_first_done(d40c))) { 1134 d40_desc_remove(d40d); 1135 d40_desc_free(d40c, d40d); 1136 } 1137 1138 /* Release active descriptors */ 1139 while ((d40d = d40_first_active_get(d40c))) { 1140 d40_desc_remove(d40d); 1141 d40_desc_free(d40c, d40d); 1142 } 1143 1144 /* Release queued descriptors waiting for transfer */ 1145 while ((d40d = d40_first_queued(d40c))) { 1146 d40_desc_remove(d40d); 1147 d40_desc_free(d40c, d40d); 1148 } 1149 1150 /* Release pending descriptors */ 1151 while ((d40d = d40_first_pending(d40c))) { 1152 d40_desc_remove(d40d); 1153 d40_desc_free(d40c, d40d); 1154 } 1155 1156 /* Release client owned descriptors */ 1157 if (!list_empty(&d40c->client)) 1158 list_for_each_entry_safe(d40d, _d, &d40c->client, node) { 1159 d40_desc_remove(d40d); 1160 d40_desc_free(d40c, d40d); 1161 } 1162 1163 /* Release descriptors in prepare queue */ 1164 if (!list_empty(&d40c->prepare_queue)) 1165 list_for_each_entry_safe(d40d, _d, 1166 &d40c->prepare_queue, node) { 1167 d40_desc_remove(d40d); 1168 d40_desc_free(d40c, d40d); 1169 } 1170 1171 d40c->pending_tx = 0; 1172 } 1173 1174 static void __d40_config_set_event(struct d40_chan *d40c, 1175 enum d40_events event_type, u32 event, 1176 int reg) 1177 { 1178 void __iomem *addr = chan_base(d40c) + reg; 1179 int tries; 1180 u32 status; 1181 1182 switch (event_type) { 1183 1184 case D40_DEACTIVATE_EVENTLINE: 1185 1186 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) 1187 | ~D40_EVENTLINE_MASK(event), addr); 1188 break; 1189 1190 case D40_SUSPEND_REQ_EVENTLINE: 1191 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >> 1192 D40_EVENTLINE_POS(event); 1193 1194 if (status == D40_DEACTIVATE_EVENTLINE || 1195 status == D40_SUSPEND_REQ_EVENTLINE) 1196 break; 1197 1198 writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event)) 1199 | ~D40_EVENTLINE_MASK(event), addr); 1200 1201 for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) { 1202 1203 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >> 1204 D40_EVENTLINE_POS(event); 1205 1206 cpu_relax(); 1207 /* 1208 * Reduce the number of bus accesses while 1209 * waiting for the DMA to suspend. 1210 */ 1211 udelay(3); 1212 1213 if (status == D40_DEACTIVATE_EVENTLINE) 1214 break; 1215 } 1216 1217 if (tries == D40_SUSPEND_MAX_IT) { 1218 chan_err(d40c, 1219 "unable to stop the event_line chl %d (log: %d)" 1220 "status %x\n", d40c->phy_chan->num, 1221 d40c->log_num, status); 1222 } 1223 break; 1224 1225 case D40_ACTIVATE_EVENTLINE: 1226 /* 1227 * The hardware sometimes doesn't register the enable when src and dst 1228 * event lines are active on the same logical channel. Retry to ensure 1229 * it does. Usually only one retry is sufficient. 
1230 */ 1231 tries = 100; 1232 while (--tries) { 1233 writel((D40_ACTIVATE_EVENTLINE << 1234 D40_EVENTLINE_POS(event)) | 1235 ~D40_EVENTLINE_MASK(event), addr); 1236 1237 if (readl(addr) & D40_EVENTLINE_MASK(event)) 1238 break; 1239 } 1240 1241 if (tries != 99) 1242 dev_dbg(chan2dev(d40c), 1243 "[%s] workaround enable S%cLNK (%d tries)\n", 1244 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D', 1245 100 - tries); 1246 1247 WARN_ON(!tries); 1248 break; 1249 1250 case D40_ROUND_EVENTLINE: 1251 BUG(); 1252 break; 1253 1254 } 1255 } 1256 1257 static void d40_config_set_event(struct d40_chan *d40c, 1258 enum d40_events event_type) 1259 { 1260 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); 1261 1262 /* Enable event line connected to device (or memcpy) */ 1263 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) || 1264 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) 1265 __d40_config_set_event(d40c, event_type, event, 1266 D40_CHAN_REG_SSLNK); 1267 1268 if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM) 1269 __d40_config_set_event(d40c, event_type, event, 1270 D40_CHAN_REG_SDLNK); 1271 } 1272 1273 static u32 d40_chan_has_events(struct d40_chan *d40c) 1274 { 1275 void __iomem *chanbase = chan_base(d40c); 1276 u32 val; 1277 1278 val = readl(chanbase + D40_CHAN_REG_SSLNK); 1279 val |= readl(chanbase + D40_CHAN_REG_SDLNK); 1280 1281 return val; 1282 } 1283 1284 static int 1285 __d40_execute_command_log(struct d40_chan *d40c, enum d40_command command) 1286 { 1287 unsigned long flags; 1288 int ret = 0; 1289 u32 active_status; 1290 void __iomem *active_reg; 1291 1292 if (d40c->phy_chan->num % 2 == 0) 1293 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; 1294 else 1295 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; 1296 1297 1298 spin_lock_irqsave(&d40c->phy_chan->lock, flags); 1299 1300 switch (command) { 1301 case D40_DMA_STOP: 1302 case D40_DMA_SUSPEND_REQ: 1303 1304 active_status = (readl(active_reg) & 1305 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 1306 D40_CHAN_POS(d40c->phy_chan->num); 1307 1308 if (active_status == D40_DMA_RUN) 1309 d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE); 1310 else 1311 d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE); 1312 1313 if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP)) 1314 ret = __d40_execute_command_phy(d40c, command); 1315 1316 break; 1317 1318 case D40_DMA_RUN: 1319 1320 d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE); 1321 ret = __d40_execute_command_phy(d40c, command); 1322 break; 1323 1324 case D40_DMA_SUSPENDED: 1325 BUG(); 1326 break; 1327 } 1328 1329 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); 1330 return ret; 1331 } 1332 1333 static int d40_channel_execute_command(struct d40_chan *d40c, 1334 enum d40_command command) 1335 { 1336 if (chan_is_logical(d40c)) 1337 return __d40_execute_command_log(d40c, command); 1338 else 1339 return __d40_execute_command_phy(d40c, command); 1340 } 1341 1342 static u32 d40_get_prmo(struct d40_chan *d40c) 1343 { 1344 static const unsigned int phy_map[] = { 1345 [STEDMA40_PCHAN_BASIC_MODE] 1346 = D40_DREG_PRMO_PCHAN_BASIC, 1347 [STEDMA40_PCHAN_MODULO_MODE] 1348 = D40_DREG_PRMO_PCHAN_MODULO, 1349 [STEDMA40_PCHAN_DOUBLE_DST_MODE] 1350 = D40_DREG_PRMO_PCHAN_DOUBLE_DST, 1351 }; 1352 static const unsigned int log_map[] = { 1353 [STEDMA40_LCHAN_SRC_PHY_DST_LOG] 1354 = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG, 1355 [STEDMA40_LCHAN_SRC_LOG_DST_PHY] 1356 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY, 1357 [STEDMA40_LCHAN_SRC_LOG_DST_LOG] 1358 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG, 1359 }; 1360 1361 if 
(chan_is_physical(d40c)) 1362 return phy_map[d40c->dma_cfg.mode_opt]; 1363 else 1364 return log_map[d40c->dma_cfg.mode_opt]; 1365 } 1366 1367 static void d40_config_write(struct d40_chan *d40c) 1368 { 1369 u32 addr_base; 1370 u32 var; 1371 1372 /* Odd addresses are even addresses + 4 */ 1373 addr_base = (d40c->phy_chan->num % 2) * 4; 1374 /* Setup channel mode to logical or physical */ 1375 var = ((u32)(chan_is_logical(d40c)) + 1) << 1376 D40_CHAN_POS(d40c->phy_chan->num); 1377 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); 1378 1379 /* Setup operational mode option register */ 1380 var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num); 1381 1382 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); 1383 1384 if (chan_is_logical(d40c)) { 1385 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) 1386 & D40_SREG_ELEM_LOG_LIDX_MASK; 1387 void __iomem *chanbase = chan_base(d40c); 1388 1389 /* Set default config for CFG reg */ 1390 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG); 1391 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG); 1392 1393 /* Set LIDX for lcla */ 1394 writel(lidx, chanbase + D40_CHAN_REG_SSELT); 1395 writel(lidx, chanbase + D40_CHAN_REG_SDELT); 1396 1397 /* Clear LNK which will be used by d40_chan_has_events() */ 1398 writel(0, chanbase + D40_CHAN_REG_SSLNK); 1399 writel(0, chanbase + D40_CHAN_REG_SDLNK); 1400 } 1401 } 1402 1403 static u32 d40_residue(struct d40_chan *d40c) 1404 { 1405 u32 num_elt; 1406 1407 if (chan_is_logical(d40c)) 1408 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) 1409 >> D40_MEM_LCSP2_ECNT_POS; 1410 else { 1411 u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT); 1412 num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK) 1413 >> D40_SREG_ELEM_PHY_ECNT_POS; 1414 } 1415 1416 return num_elt * d40c->dma_cfg.dst_info.data_width; 1417 } 1418 1419 static bool d40_tx_is_linked(struct d40_chan *d40c) 1420 { 1421 bool is_link; 1422 1423 if (chan_is_logical(d40c)) 1424 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; 1425 else 1426 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK) 1427 & D40_SREG_LNK_PHYS_LNK_MASK; 1428 1429 return is_link; 1430 } 1431 1432 static int d40_pause(struct dma_chan *chan) 1433 { 1434 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 1435 int res = 0; 1436 unsigned long flags; 1437 1438 if (d40c->phy_chan == NULL) { 1439 chan_err(d40c, "Channel is not allocated!\n"); 1440 return -EINVAL; 1441 } 1442 1443 if (!d40c->busy) 1444 return 0; 1445 1446 spin_lock_irqsave(&d40c->lock, flags); 1447 pm_runtime_get_sync(d40c->base->dev); 1448 1449 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1450 1451 pm_runtime_mark_last_busy(d40c->base->dev); 1452 pm_runtime_put_autosuspend(d40c->base->dev); 1453 spin_unlock_irqrestore(&d40c->lock, flags); 1454 return res; 1455 } 1456 1457 static int d40_resume(struct dma_chan *chan) 1458 { 1459 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 1460 int res = 0; 1461 unsigned long flags; 1462 1463 if (d40c->phy_chan == NULL) { 1464 chan_err(d40c, "Channel is not allocated!\n"); 1465 return -EINVAL; 1466 } 1467 1468 if (!d40c->busy) 1469 return 0; 1470 1471 spin_lock_irqsave(&d40c->lock, flags); 1472 pm_runtime_get_sync(d40c->base->dev); 1473 1474 /* If bytes left to transfer or linked tx resume job */ 1475 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) 1476 res = d40_channel_execute_command(d40c, D40_DMA_RUN); 1477 1478 pm_runtime_mark_last_busy(d40c->base->dev); 1479 
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&d40c->lock, flags);
	cookie = dma_cookie_assign(tx);
	d40_desc_queue(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		if (!d40c->busy) {
			d40c->busy = true;
			pm_runtime_get_sync(d40c->base->dev);
		}

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}

/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->cyclic) {
		/*
		 * If this was a partially loaded list, we need to reload
		 * it, and only when the list is completed. We need to check
		 * for done because the interrupt will hit for every link, and
		 * not just the last one.
		 */
		if (d40d->lli_current < d40d->lli_len
		    && !d40_tx_is_linked(d40c)
		    && !d40_residue(d40c)) {
			d40_lcla_free_all(d40c, d40d);
			d40_desc_load(d40c, d40d);
			(void) d40_start(d40c);

			if (d40d->lli_current == d40d->lli_len)
				d40d->lli_current = 0;
		}
	} else {
		d40_lcla_free_all(d40c, d40d);

		if (d40d->lli_current < d40d->lli_len) {
			d40_desc_load(d40c, d40d);
			/* Start dma job */
			(void) d40_start(d40c);
			return;
		}

		if (d40_queue_start(d40c) == NULL) {
			d40c->busy = false;

			pm_runtime_mark_last_busy(d40c->base->dev);
			pm_runtime_put_autosuspend(d40c->base->dev);
		}

		d40_desc_remove(d40d);
		d40_desc_done(d40c, d40d);
	}

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);

}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	bool callback_active;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first entry from the done list */
	d40d = d40_first_done(d40c);
	if (d40d == NULL) {
		/* Check if we have reached here for cyclic job */
		d40d = d40_first_active_get(d40c);
		if (d40d == NULL || !d40d->cyclic)
			goto err;
	}

	if (!d40d->cyclic)
		dma_cookie_complete(&d40d->txd);

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs from returning to the client.
1619 */ 1620 if (d40c->pending_tx == 0) { 1621 spin_unlock_irqrestore(&d40c->lock, flags); 1622 return; 1623 } 1624 1625 /* Callback to client */ 1626 callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT); 1627 callback = d40d->txd.callback; 1628 callback_param = d40d->txd.callback_param; 1629 1630 if (!d40d->cyclic) { 1631 if (async_tx_test_ack(&d40d->txd)) { 1632 d40_desc_remove(d40d); 1633 d40_desc_free(d40c, d40d); 1634 } else if (!d40d->is_in_client_list) { 1635 d40_desc_remove(d40d); 1636 d40_lcla_free_all(d40c, d40d); 1637 list_add_tail(&d40d->node, &d40c->client); 1638 d40d->is_in_client_list = true; 1639 } 1640 } 1641 1642 d40c->pending_tx--; 1643 1644 if (d40c->pending_tx) 1645 tasklet_schedule(&d40c->tasklet); 1646 1647 spin_unlock_irqrestore(&d40c->lock, flags); 1648 1649 if (callback_active && callback) 1650 callback(callback_param); 1651 1652 return; 1653 1654 err: 1655 /* Rescue manouver if receiving double interrupts */ 1656 if (d40c->pending_tx > 0) 1657 d40c->pending_tx--; 1658 spin_unlock_irqrestore(&d40c->lock, flags); 1659 } 1660 1661 static irqreturn_t d40_handle_interrupt(int irq, void *data) 1662 { 1663 int i; 1664 u32 idx; 1665 u32 row; 1666 long chan = -1; 1667 struct d40_chan *d40c; 1668 unsigned long flags; 1669 struct d40_base *base = data; 1670 u32 regs[base->gen_dmac.il_size]; 1671 struct d40_interrupt_lookup *il = base->gen_dmac.il; 1672 u32 il_size = base->gen_dmac.il_size; 1673 1674 spin_lock_irqsave(&base->interrupt_lock, flags); 1675 1676 /* Read interrupt status of both logical and physical channels */ 1677 for (i = 0; i < il_size; i++) 1678 regs[i] = readl(base->virtbase + il[i].src); 1679 1680 for (;;) { 1681 1682 chan = find_next_bit((unsigned long *)regs, 1683 BITS_PER_LONG * il_size, chan + 1); 1684 1685 /* No more set bits found? */ 1686 if (chan == BITS_PER_LONG * il_size) 1687 break; 1688 1689 row = chan / BITS_PER_LONG; 1690 idx = chan & (BITS_PER_LONG - 1); 1691 1692 if (il[row].offset == D40_PHY_CHAN) 1693 d40c = base->lookup_phy_chans[idx]; 1694 else 1695 d40c = base->lookup_log_chans[il[row].offset + idx]; 1696 1697 if (!d40c) { 1698 /* 1699 * No error because this can happen if something else 1700 * in the system is using the channel. 1701 */ 1702 continue; 1703 } 1704 1705 /* ACK interrupt */ 1706 writel(BIT(idx), base->virtbase + il[row].clr); 1707 1708 spin_lock(&d40c->lock); 1709 1710 if (!il[row].is_error) 1711 dma_tc_handle(d40c); 1712 else 1713 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n", 1714 chan, il[row].offset, idx); 1715 1716 spin_unlock(&d40c->lock); 1717 } 1718 1719 spin_unlock_irqrestore(&base->interrupt_lock, flags); 1720 1721 return IRQ_HANDLED; 1722 } 1723 1724 static int d40_validate_conf(struct d40_chan *d40c, 1725 struct stedma40_chan_cfg *conf) 1726 { 1727 int res = 0; 1728 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; 1729 1730 if (!conf->dir) { 1731 chan_err(d40c, "Invalid direction.\n"); 1732 res = -EINVAL; 1733 } 1734 1735 if ((is_log && conf->dev_type > d40c->base->num_log_chans) || 1736 (!is_log && conf->dev_type > d40c->base->num_phy_chans) || 1737 (conf->dev_type < 0)) { 1738 chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type); 1739 res = -EINVAL; 1740 } 1741 1742 if (conf->dir == DMA_DEV_TO_DEV) { 1743 /* 1744 * DMAC HW supports it. Will be added to this driver, 1745 * in case any dma client requires it. 
1746 */ 1747 chan_err(d40c, "periph to periph not supported\n"); 1748 res = -EINVAL; 1749 } 1750 1751 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) * 1752 conf->src_info.data_width != 1753 d40_psize_2_burst_size(is_log, conf->dst_info.psize) * 1754 conf->dst_info.data_width) { 1755 /* 1756 * The DMAC hardware only supports 1757 * src (burst x width) == dst (burst x width) 1758 */ 1759 1760 chan_err(d40c, "src (burst x width) != dst (burst x width)\n"); 1761 res = -EINVAL; 1762 } 1763 1764 return res; 1765 } 1766 1767 static bool d40_alloc_mask_set(struct d40_phy_res *phy, 1768 bool is_src, int log_event_line, bool is_log, 1769 bool *first_user) 1770 { 1771 unsigned long flags; 1772 spin_lock_irqsave(&phy->lock, flags); 1773 1774 *first_user = ((phy->allocated_src | phy->allocated_dst) 1775 == D40_ALLOC_FREE); 1776 1777 if (!is_log) { 1778 /* Physical interrupts are masked per physical full channel */ 1779 if (phy->allocated_src == D40_ALLOC_FREE && 1780 phy->allocated_dst == D40_ALLOC_FREE) { 1781 phy->allocated_dst = D40_ALLOC_PHY; 1782 phy->allocated_src = D40_ALLOC_PHY; 1783 goto found; 1784 } else 1785 goto not_found; 1786 } 1787 1788 /* Logical channel */ 1789 if (is_src) { 1790 if (phy->allocated_src == D40_ALLOC_PHY) 1791 goto not_found; 1792 1793 if (phy->allocated_src == D40_ALLOC_FREE) 1794 phy->allocated_src = D40_ALLOC_LOG_FREE; 1795 1796 if (!(phy->allocated_src & BIT(log_event_line))) { 1797 phy->allocated_src |= BIT(log_event_line); 1798 goto found; 1799 } else 1800 goto not_found; 1801 } else { 1802 if (phy->allocated_dst == D40_ALLOC_PHY) 1803 goto not_found; 1804 1805 if (phy->allocated_dst == D40_ALLOC_FREE) 1806 phy->allocated_dst = D40_ALLOC_LOG_FREE; 1807 1808 if (!(phy->allocated_dst & BIT(log_event_line))) { 1809 phy->allocated_dst |= BIT(log_event_line); 1810 goto found; 1811 } else 1812 goto not_found; 1813 } 1814 1815 not_found: 1816 spin_unlock_irqrestore(&phy->lock, flags); 1817 return false; 1818 found: 1819 spin_unlock_irqrestore(&phy->lock, flags); 1820 return true; 1821 } 1822 1823 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, 1824 int log_event_line) 1825 { 1826 unsigned long flags; 1827 bool is_free = false; 1828 1829 spin_lock_irqsave(&phy->lock, flags); 1830 if (!log_event_line) { 1831 phy->allocated_dst = D40_ALLOC_FREE; 1832 phy->allocated_src = D40_ALLOC_FREE; 1833 is_free = true; 1834 goto out; 1835 } 1836 1837 /* Logical channel */ 1838 if (is_src) { 1839 phy->allocated_src &= ~BIT(log_event_line); 1840 if (phy->allocated_src == D40_ALLOC_LOG_FREE) 1841 phy->allocated_src = D40_ALLOC_FREE; 1842 } else { 1843 phy->allocated_dst &= ~BIT(log_event_line); 1844 if (phy->allocated_dst == D40_ALLOC_LOG_FREE) 1845 phy->allocated_dst = D40_ALLOC_FREE; 1846 } 1847 1848 is_free = ((phy->allocated_src | phy->allocated_dst) == 1849 D40_ALLOC_FREE); 1850 1851 out: 1852 spin_unlock_irqrestore(&phy->lock, flags); 1853 1854 return is_free; 1855 } 1856 1857 static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) 1858 { 1859 int dev_type = d40c->dma_cfg.dev_type; 1860 int event_group; 1861 int event_line; 1862 struct d40_phy_res *phys; 1863 int i; 1864 int j; 1865 int log_num; 1866 int num_phy_chans; 1867 bool is_src; 1868 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL; 1869 1870 phys = d40c->base->phy_res; 1871 num_phy_chans = d40c->base->num_phy_chans; 1872 1873 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) { 1874 log_num = 2 * dev_type; 1875 is_src = true; 1876 } else if (d40c->dma_cfg.dir == 
DMA_MEM_TO_DEV || 1877 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { 1878 /* dst event lines are used for logical memcpy */ 1879 log_num = 2 * dev_type + 1; 1880 is_src = false; 1881 } else 1882 return -EINVAL; 1883 1884 event_group = D40_TYPE_TO_GROUP(dev_type); 1885 event_line = D40_TYPE_TO_EVENT(dev_type); 1886 1887 if (!is_log) { 1888 if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { 1889 /* Find physical half channel */ 1890 if (d40c->dma_cfg.use_fixed_channel) { 1891 i = d40c->dma_cfg.phy_channel; 1892 if (d40_alloc_mask_set(&phys[i], is_src, 1893 0, is_log, 1894 first_phy_user)) 1895 goto found_phy; 1896 } else { 1897 for (i = 0; i < num_phy_chans; i++) { 1898 if (d40_alloc_mask_set(&phys[i], is_src, 1899 0, is_log, 1900 first_phy_user)) 1901 goto found_phy; 1902 } 1903 } 1904 } else 1905 for (j = 0; j < d40c->base->num_phy_chans; j += 8) { 1906 int phy_num = j + event_group * 2; 1907 for (i = phy_num; i < phy_num + 2; i++) { 1908 if (d40_alloc_mask_set(&phys[i], 1909 is_src, 1910 0, 1911 is_log, 1912 first_phy_user)) 1913 goto found_phy; 1914 } 1915 } 1916 return -EINVAL; 1917 found_phy: 1918 d40c->phy_chan = &phys[i]; 1919 d40c->log_num = D40_PHY_CHAN; 1920 goto out; 1921 } 1922 if (dev_type == -1) 1923 return -EINVAL; 1924 1925 /* Find logical channel */ 1926 for (j = 0; j < d40c->base->num_phy_chans; j += 8) { 1927 int phy_num = j + event_group * 2; 1928 1929 if (d40c->dma_cfg.use_fixed_channel) { 1930 i = d40c->dma_cfg.phy_channel; 1931 1932 if ((i != phy_num) && (i != phy_num + 1)) { 1933 dev_err(chan2dev(d40c), 1934 "invalid fixed phy channel %d\n", i); 1935 return -EINVAL; 1936 } 1937 1938 if (d40_alloc_mask_set(&phys[i], is_src, event_line, 1939 is_log, first_phy_user)) 1940 goto found_log; 1941 1942 dev_err(chan2dev(d40c), 1943 "could not allocate fixed phy channel %d\n", i); 1944 return -EINVAL; 1945 } 1946 1947 /* 1948 * Spread logical channels across all available physical rather 1949 * than pack every logical channel at the first available phy 1950 * channels. 1951 */ 1952 if (is_src) { 1953 for (i = phy_num; i < phy_num + 2; i++) { 1954 if (d40_alloc_mask_set(&phys[i], is_src, 1955 event_line, is_log, 1956 first_phy_user)) 1957 goto found_log; 1958 } 1959 } else { 1960 for (i = phy_num + 1; i >= phy_num; i--) { 1961 if (d40_alloc_mask_set(&phys[i], is_src, 1962 event_line, is_log, 1963 first_phy_user)) 1964 goto found_log; 1965 } 1966 } 1967 } 1968 return -EINVAL; 1969 1970 found_log: 1971 d40c->phy_chan = &phys[i]; 1972 d40c->log_num = log_num; 1973 out: 1974 1975 if (is_log) 1976 d40c->base->lookup_log_chans[d40c->log_num] = d40c; 1977 else 1978 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c; 1979 1980 return 0; 1981 1982 } 1983 1984 static int d40_config_memcpy(struct d40_chan *d40c) 1985 { 1986 dma_cap_mask_t cap = d40c->chan.device->cap_mask; 1987 1988 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { 1989 d40c->dma_cfg = dma40_memcpy_conf_log; 1990 d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id]; 1991 1992 d40_log_cfg(&d40c->dma_cfg, 1993 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 1994 1995 } else if (dma_has_cap(DMA_MEMCPY, cap) && 1996 dma_has_cap(DMA_SLAVE, cap)) { 1997 d40c->dma_cfg = dma40_memcpy_conf_phy; 1998 1999 /* Generate interrrupt at end of transfer or relink. */ 2000 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS); 2001 2002 /* Generate interrupt on error. 
*/ 2003 d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); 2004 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); 2005 2006 } else { 2007 chan_err(d40c, "No memcpy\n"); 2008 return -EINVAL; 2009 } 2010 2011 return 0; 2012 } 2013 2014 static int d40_free_dma(struct d40_chan *d40c) 2015 { 2016 2017 int res = 0; 2018 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); 2019 struct d40_phy_res *phy = d40c->phy_chan; 2020 bool is_src; 2021 2022 /* Terminate all queued and active transfers */ 2023 d40_term_all(d40c); 2024 2025 if (phy == NULL) { 2026 chan_err(d40c, "phy == null\n"); 2027 return -EINVAL; 2028 } 2029 2030 if (phy->allocated_src == D40_ALLOC_FREE && 2031 phy->allocated_dst == D40_ALLOC_FREE) { 2032 chan_err(d40c, "channel already free\n"); 2033 return -EINVAL; 2034 } 2035 2036 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || 2037 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) 2038 is_src = false; 2039 else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) 2040 is_src = true; 2041 else { 2042 chan_err(d40c, "Unknown direction\n"); 2043 return -EINVAL; 2044 } 2045 2046 pm_runtime_get_sync(d40c->base->dev); 2047 res = d40_channel_execute_command(d40c, D40_DMA_STOP); 2048 if (res) { 2049 chan_err(d40c, "stop failed\n"); 2050 goto out; 2051 } 2052 2053 d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0); 2054 2055 if (chan_is_logical(d40c)) 2056 d40c->base->lookup_log_chans[d40c->log_num] = NULL; 2057 else 2058 d40c->base->lookup_phy_chans[phy->num] = NULL; 2059 2060 if (d40c->busy) { 2061 pm_runtime_mark_last_busy(d40c->base->dev); 2062 pm_runtime_put_autosuspend(d40c->base->dev); 2063 } 2064 2065 d40c->busy = false; 2066 d40c->phy_chan = NULL; 2067 d40c->configured = false; 2068 out: 2069 2070 pm_runtime_mark_last_busy(d40c->base->dev); 2071 pm_runtime_put_autosuspend(d40c->base->dev); 2072 return res; 2073 } 2074 2075 static bool d40_is_paused(struct d40_chan *d40c) 2076 { 2077 void __iomem *chanbase = chan_base(d40c); 2078 bool is_paused = false; 2079 unsigned long flags; 2080 void __iomem *active_reg; 2081 u32 status; 2082 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); 2083 2084 spin_lock_irqsave(&d40c->lock, flags); 2085 2086 if (chan_is_physical(d40c)) { 2087 if (d40c->phy_chan->num % 2 == 0) 2088 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; 2089 else 2090 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; 2091 2092 status = (readl(active_reg) & 2093 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 2094 D40_CHAN_POS(d40c->phy_chan->num); 2095 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) 2096 is_paused = true; 2097 2098 goto _exit; 2099 } 2100 2101 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || 2102 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { 2103 status = readl(chanbase + D40_CHAN_REG_SDLNK); 2104 } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) { 2105 status = readl(chanbase + D40_CHAN_REG_SSLNK); 2106 } else { 2107 chan_err(d40c, "Unknown direction\n"); 2108 goto _exit; 2109 } 2110 2111 status = (status & D40_EVENTLINE_MASK(event)) >> 2112 D40_EVENTLINE_POS(event); 2113 2114 if (status != D40_DMA_RUN) 2115 is_paused = true; 2116 _exit: 2117 spin_unlock_irqrestore(&d40c->lock, flags); 2118 return is_paused; 2119 2120 } 2121 2122 static u32 stedma40_residue(struct dma_chan *chan) 2123 { 2124 struct d40_chan *d40c = 2125 container_of(chan, struct d40_chan, chan); 2126 u32 bytes_left; 2127 unsigned long flags; 2128 2129 spin_lock_irqsave(&d40c->lock, flags); 2130 bytes_left = d40_residue(d40c); 2131 spin_unlock_irqrestore(&d40c->lock, flags); 2132 2133 return bytes_left; 2134 } 
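/* Descriptor preparation helpers: d40_prep_sg_log() converts a scatterlist into logical-channel LLIs using the channel's default LCSP1/LCSP3 words, while d40_prep_sg_phy() builds physical-channel LLIs in the descriptor's LLI pool and syncs that memory for device access. */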
2135 2136 static int 2137 d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, 2138 struct scatterlist *sg_src, struct scatterlist *sg_dst, 2139 unsigned int sg_len, dma_addr_t src_dev_addr, 2140 dma_addr_t dst_dev_addr) 2141 { 2142 struct stedma40_chan_cfg *cfg = &chan->dma_cfg; 2143 struct stedma40_half_channel_info *src_info = &cfg->src_info; 2144 struct stedma40_half_channel_info *dst_info = &cfg->dst_info; 2145 int ret; 2146 2147 ret = d40_log_sg_to_lli(sg_src, sg_len, 2148 src_dev_addr, 2149 desc->lli_log.src, 2150 chan->log_def.lcsp1, 2151 src_info->data_width, 2152 dst_info->data_width); 2153 2154 ret = d40_log_sg_to_lli(sg_dst, sg_len, 2155 dst_dev_addr, 2156 desc->lli_log.dst, 2157 chan->log_def.lcsp3, 2158 dst_info->data_width, 2159 src_info->data_width); 2160 2161 return ret < 0 ? ret : 0; 2162 } 2163 2164 static int 2165 d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, 2166 struct scatterlist *sg_src, struct scatterlist *sg_dst, 2167 unsigned int sg_len, dma_addr_t src_dev_addr, 2168 dma_addr_t dst_dev_addr) 2169 { 2170 struct stedma40_chan_cfg *cfg = &chan->dma_cfg; 2171 struct stedma40_half_channel_info *src_info = &cfg->src_info; 2172 struct stedma40_half_channel_info *dst_info = &cfg->dst_info; 2173 unsigned long flags = 0; 2174 int ret; 2175 2176 if (desc->cyclic) 2177 flags |= LLI_CYCLIC | LLI_TERM_INT; 2178 2179 ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr, 2180 desc->lli_phy.src, 2181 virt_to_phys(desc->lli_phy.src), 2182 chan->src_def_cfg, 2183 src_info, dst_info, flags); 2184 2185 ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr, 2186 desc->lli_phy.dst, 2187 virt_to_phys(desc->lli_phy.dst), 2188 chan->dst_def_cfg, 2189 dst_info, src_info, flags); 2190 2191 dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr, 2192 desc->lli_pool.size, DMA_TO_DEVICE); 2193 2194 return ret < 0 ? 
ret : 0; 2195 } 2196 2197 static struct d40_desc * 2198 d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, 2199 unsigned int sg_len, unsigned long dma_flags) 2200 { 2201 struct stedma40_chan_cfg *cfg = &chan->dma_cfg; 2202 struct d40_desc *desc; 2203 int ret; 2204 2205 desc = d40_desc_get(chan); 2206 if (!desc) 2207 return NULL; 2208 2209 desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width, 2210 cfg->dst_info.data_width); 2211 if (desc->lli_len < 0) { 2212 chan_err(chan, "Unaligned size\n"); 2213 goto err; 2214 } 2215 2216 ret = d40_pool_lli_alloc(chan, desc, desc->lli_len); 2217 if (ret < 0) { 2218 chan_err(chan, "Could not allocate lli\n"); 2219 goto err; 2220 } 2221 2222 desc->lli_current = 0; 2223 desc->txd.flags = dma_flags; 2224 desc->txd.tx_submit = d40_tx_submit; 2225 2226 dma_async_tx_descriptor_init(&desc->txd, &chan->chan); 2227 2228 return desc; 2229 2230 err: 2231 d40_desc_free(chan, desc); 2232 return NULL; 2233 } 2234 2235 static struct dma_async_tx_descriptor * 2236 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, 2237 struct scatterlist *sg_dst, unsigned int sg_len, 2238 enum dma_transfer_direction direction, unsigned long dma_flags) 2239 { 2240 struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); 2241 dma_addr_t src_dev_addr = 0; 2242 dma_addr_t dst_dev_addr = 0; 2243 struct d40_desc *desc; 2244 unsigned long flags; 2245 int ret; 2246 2247 if (!chan->phy_chan) { 2248 chan_err(chan, "Cannot prepare unallocated channel\n"); 2249 return NULL; 2250 } 2251 2252 spin_lock_irqsave(&chan->lock, flags); 2253 2254 desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); 2255 if (desc == NULL) 2256 goto err; 2257 2258 if (sg_next(&sg_src[sg_len - 1]) == sg_src) 2259 desc->cyclic = true; 2260 2261 if (direction == DMA_DEV_TO_MEM) 2262 src_dev_addr = chan->runtime_addr; 2263 else if (direction == DMA_MEM_TO_DEV) 2264 dst_dev_addr = chan->runtime_addr; 2265 2266 if (chan_is_logical(chan)) 2267 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, 2268 sg_len, src_dev_addr, dst_dev_addr); 2269 else 2270 ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst, 2271 sg_len, src_dev_addr, dst_dev_addr); 2272 2273 if (ret) { 2274 chan_err(chan, "Failed to prepare %s sg job: %d\n", 2275 chan_is_logical(chan) ? 
"log" : "phy", ret); 2276 goto err; 2277 } 2278 2279 /* 2280 * add descriptor to the prepare queue in order to be able 2281 * to free them later in terminate_all 2282 */ 2283 list_add_tail(&desc->node, &chan->prepare_queue); 2284 2285 spin_unlock_irqrestore(&chan->lock, flags); 2286 2287 return &desc->txd; 2288 2289 err: 2290 if (desc) 2291 d40_desc_free(chan, desc); 2292 spin_unlock_irqrestore(&chan->lock, flags); 2293 return NULL; 2294 } 2295 2296 bool stedma40_filter(struct dma_chan *chan, void *data) 2297 { 2298 struct stedma40_chan_cfg *info = data; 2299 struct d40_chan *d40c = 2300 container_of(chan, struct d40_chan, chan); 2301 int err; 2302 2303 if (data) { 2304 err = d40_validate_conf(d40c, info); 2305 if (!err) 2306 d40c->dma_cfg = *info; 2307 } else 2308 err = d40_config_memcpy(d40c); 2309 2310 if (!err) 2311 d40c->configured = true; 2312 2313 return err == 0; 2314 } 2315 EXPORT_SYMBOL(stedma40_filter); 2316 2317 static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) 2318 { 2319 bool realtime = d40c->dma_cfg.realtime; 2320 bool highprio = d40c->dma_cfg.high_priority; 2321 u32 rtreg; 2322 u32 event = D40_TYPE_TO_EVENT(dev_type); 2323 u32 group = D40_TYPE_TO_GROUP(dev_type); 2324 u32 bit = BIT(event); 2325 u32 prioreg; 2326 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac; 2327 2328 rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear; 2329 /* 2330 * Due to a hardware bug, in some cases a logical channel triggered by 2331 * a high priority destination event line can generate extra packet 2332 * transactions. 2333 * 2334 * The workaround is to not set the high priority level for the 2335 * destination event lines that trigger logical channels. 2336 */ 2337 if (!src && chan_is_logical(d40c)) 2338 highprio = false; 2339 2340 prioreg = highprio ? 
dmac->high_prio_en : dmac->high_prio_clear; 2341 2342 /* Destination event lines are stored in the upper halfword */ 2343 if (!src) 2344 bit <<= 16; 2345 2346 writel(bit, d40c->base->virtbase + prioreg + group * 4); 2347 writel(bit, d40c->base->virtbase + rtreg + group * 4); 2348 } 2349 2350 static void d40_set_prio_realtime(struct d40_chan *d40c) 2351 { 2352 if (d40c->base->rev < 3) 2353 return; 2354 2355 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) || 2356 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) 2357 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true); 2358 2359 if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) || 2360 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) 2361 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false); 2362 } 2363 2364 #define D40_DT_FLAGS_MODE(flags) ((flags >> 0) & 0x1) 2365 #define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1) 2366 #define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1) 2367 #define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1) 2368 #define D40_DT_FLAGS_HIGH_PRIO(flags) ((flags >> 4) & 0x1) 2369 2370 static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec, 2371 struct of_dma *ofdma) 2372 { 2373 struct stedma40_chan_cfg cfg; 2374 dma_cap_mask_t cap; 2375 u32 flags; 2376 2377 memset(&cfg, 0, sizeof(struct stedma40_chan_cfg)); 2378 2379 dma_cap_zero(cap); 2380 dma_cap_set(DMA_SLAVE, cap); 2381 2382 cfg.dev_type = dma_spec->args[0]; 2383 flags = dma_spec->args[2]; 2384 2385 switch (D40_DT_FLAGS_MODE(flags)) { 2386 case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break; 2387 case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break; 2388 } 2389 2390 switch (D40_DT_FLAGS_DIR(flags)) { 2391 case 0: 2392 cfg.dir = DMA_MEM_TO_DEV; 2393 cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags); 2394 break; 2395 case 1: 2396 cfg.dir = DMA_DEV_TO_MEM; 2397 cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags); 2398 break; 2399 } 2400 2401 if (D40_DT_FLAGS_FIXED_CHAN(flags)) { 2402 cfg.phy_channel = dma_spec->args[1]; 2403 cfg.use_fixed_channel = true; 2404 } 2405 2406 if (D40_DT_FLAGS_HIGH_PRIO(flags)) 2407 cfg.high_priority = true; 2408 2409 return dma_request_channel(cap, stedma40_filter, &cfg); 2410 } 2411 2412 /* DMA ENGINE functions */ 2413 static int d40_alloc_chan_resources(struct dma_chan *chan) 2414 { 2415 int err; 2416 unsigned long flags; 2417 struct d40_chan *d40c = 2418 container_of(chan, struct d40_chan, chan); 2419 bool is_free_phy; 2420 spin_lock_irqsave(&d40c->lock, flags); 2421 2422 dma_cookie_init(chan); 2423 2424 /* If no dma configuration is set use default configuration (memcpy) */ 2425 if (!d40c->configured) { 2426 err = d40_config_memcpy(d40c); 2427 if (err) { 2428 chan_err(d40c, "Failed to configure memcpy channel\n"); 2429 goto fail; 2430 } 2431 } 2432 2433 err = d40_allocate_channel(d40c, &is_free_phy); 2434 if (err) { 2435 chan_err(d40c, "Failed to allocate channel\n"); 2436 d40c->configured = false; 2437 goto fail; 2438 } 2439 2440 pm_runtime_get_sync(d40c->base->dev); 2441 2442 d40_set_prio_realtime(d40c); 2443 2444 if (chan_is_logical(d40c)) { 2445 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) 2446 d40c->lcpa = d40c->base->lcpa_base + 2447 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE; 2448 else 2449 d40c->lcpa = d40c->base->lcpa_base + 2450 d40c->dma_cfg.dev_type * 2451 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; 2452 2453 /* Unmask the Global Interrupt Mask. 
*/ 2454 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); 2455 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); 2456 } 2457 2458 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", 2459 chan_is_logical(d40c) ? "logical" : "physical", 2460 d40c->phy_chan->num, 2461 d40c->dma_cfg.use_fixed_channel ? ", fixed" : ""); 2462 2463 2464 /* 2465 * Only write channel configuration to the DMA if the physical 2466 * resource is free. In case of multiple logical channels 2467 * on the same physical resource, only the first write is necessary. 2468 */ 2469 if (is_free_phy) 2470 d40_config_write(d40c); 2471 fail: 2472 pm_runtime_mark_last_busy(d40c->base->dev); 2473 pm_runtime_put_autosuspend(d40c->base->dev); 2474 spin_unlock_irqrestore(&d40c->lock, flags); 2475 return err; 2476 } 2477 2478 static void d40_free_chan_resources(struct dma_chan *chan) 2479 { 2480 struct d40_chan *d40c = 2481 container_of(chan, struct d40_chan, chan); 2482 int err; 2483 unsigned long flags; 2484 2485 if (d40c->phy_chan == NULL) { 2486 chan_err(d40c, "Cannot free unallocated channel\n"); 2487 return; 2488 } 2489 2490 spin_lock_irqsave(&d40c->lock, flags); 2491 2492 err = d40_free_dma(d40c); 2493 2494 if (err) 2495 chan_err(d40c, "Failed to free channel\n"); 2496 spin_unlock_irqrestore(&d40c->lock, flags); 2497 } 2498 2499 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, 2500 dma_addr_t dst, 2501 dma_addr_t src, 2502 size_t size, 2503 unsigned long dma_flags) 2504 { 2505 struct scatterlist dst_sg; 2506 struct scatterlist src_sg; 2507 2508 sg_init_table(&dst_sg, 1); 2509 sg_init_table(&src_sg, 1); 2510 2511 sg_dma_address(&dst_sg) = dst; 2512 sg_dma_address(&src_sg) = src; 2513 2514 sg_dma_len(&dst_sg) = size; 2515 sg_dma_len(&src_sg) = size; 2516 2517 return d40_prep_sg(chan, &src_sg, &dst_sg, 1, 2518 DMA_MEM_TO_MEM, dma_flags); 2519 } 2520 2521 static struct dma_async_tx_descriptor * 2522 d40_prep_memcpy_sg(struct dma_chan *chan, 2523 struct scatterlist *dst_sg, unsigned int dst_nents, 2524 struct scatterlist *src_sg, unsigned int src_nents, 2525 unsigned long dma_flags) 2526 { 2527 if (dst_nents != src_nents) 2528 return NULL; 2529 2530 return d40_prep_sg(chan, src_sg, dst_sg, src_nents, 2531 DMA_MEM_TO_MEM, dma_flags); 2532 } 2533 2534 static struct dma_async_tx_descriptor * 2535 d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 2536 unsigned int sg_len, enum dma_transfer_direction direction, 2537 unsigned long dma_flags, void *context) 2538 { 2539 if (!is_slave_direction(direction)) 2540 return NULL; 2541 2542 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); 2543 } 2544 2545 static struct dma_async_tx_descriptor * 2546 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, 2547 size_t buf_len, size_t period_len, 2548 enum dma_transfer_direction direction, unsigned long flags) 2549 { 2550 unsigned int periods = buf_len / period_len; 2551 struct dma_async_tx_descriptor *txd; 2552 struct scatterlist *sg; 2553 int i; 2554 2555 sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT); 2556 if (!sg) 2557 return NULL; 2558 2559 for (i = 0; i < periods; i++) { 2560 sg_dma_address(&sg[i]) = dma_addr; 2561 sg_dma_len(&sg[i]) = period_len; 2562 dma_addr += period_len; 2563 } 2564 2565 sg[periods].offset = 0; 2566 sg_dma_len(&sg[periods]) = 0; 2567 sg[periods].page_link = 2568 ((unsigned long)sg | 0x01) & ~0x02; 2569 2570 txd = d40_prep_sg(chan, sg, sg, periods, direction, 2571 DMA_PREP_INTERRUPT); 2572 2573 kfree(sg); 2574 2575 return 
txd; 2576 } 2577 2578 static enum dma_status d40_tx_status(struct dma_chan *chan, 2579 dma_cookie_t cookie, 2580 struct dma_tx_state *txstate) 2581 { 2582 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2583 enum dma_status ret; 2584 2585 if (d40c->phy_chan == NULL) { 2586 chan_err(d40c, "Cannot read status of unallocated channel\n"); 2587 return -EINVAL; 2588 } 2589 2590 ret = dma_cookie_status(chan, cookie, txstate); 2591 if (ret != DMA_COMPLETE) 2592 dma_set_residue(txstate, stedma40_residue(chan)); 2593 2594 if (d40_is_paused(d40c)) 2595 ret = DMA_PAUSED; 2596 2597 return ret; 2598 } 2599 2600 static void d40_issue_pending(struct dma_chan *chan) 2601 { 2602 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2603 unsigned long flags; 2604 2605 if (d40c->phy_chan == NULL) { 2606 chan_err(d40c, "Channel is not allocated!\n"); 2607 return; 2608 } 2609 2610 spin_lock_irqsave(&d40c->lock, flags); 2611 2612 list_splice_tail_init(&d40c->pending_queue, &d40c->queue); 2613 2614 /* Busy means that queued jobs are already being processed */ 2615 if (!d40c->busy) 2616 (void) d40_queue_start(d40c); 2617 2618 spin_unlock_irqrestore(&d40c->lock, flags); 2619 } 2620 2621 static int d40_terminate_all(struct dma_chan *chan) 2622 { 2623 unsigned long flags; 2624 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2625 int ret; 2626 2627 if (d40c->phy_chan == NULL) { 2628 chan_err(d40c, "Channel is not allocated!\n"); 2629 return -EINVAL; 2630 } 2631 2632 spin_lock_irqsave(&d40c->lock, flags); 2633 2634 pm_runtime_get_sync(d40c->base->dev); 2635 ret = d40_channel_execute_command(d40c, D40_DMA_STOP); 2636 if (ret) 2637 chan_err(d40c, "Failed to stop channel\n"); 2638 2639 d40_term_all(d40c); 2640 pm_runtime_mark_last_busy(d40c->base->dev); 2641 pm_runtime_put_autosuspend(d40c->base->dev); 2642 if (d40c->busy) { 2643 pm_runtime_mark_last_busy(d40c->base->dev); 2644 pm_runtime_put_autosuspend(d40c->base->dev); 2645 } 2646 d40c->busy = false; 2647 2648 spin_unlock_irqrestore(&d40c->lock, flags); 2649 return 0; 2650 } 2651 2652 static int 2653 dma40_config_to_halfchannel(struct d40_chan *d40c, 2654 struct stedma40_half_channel_info *info, 2655 u32 maxburst) 2656 { 2657 int psize; 2658 2659 if (chan_is_logical(d40c)) { 2660 if (maxburst >= 16) 2661 psize = STEDMA40_PSIZE_LOG_16; 2662 else if (maxburst >= 8) 2663 psize = STEDMA40_PSIZE_LOG_8; 2664 else if (maxburst >= 4) 2665 psize = STEDMA40_PSIZE_LOG_4; 2666 else 2667 psize = STEDMA40_PSIZE_LOG_1; 2668 } else { 2669 if (maxburst >= 16) 2670 psize = STEDMA40_PSIZE_PHY_16; 2671 else if (maxburst >= 8) 2672 psize = STEDMA40_PSIZE_PHY_8; 2673 else if (maxburst >= 4) 2674 psize = STEDMA40_PSIZE_PHY_4; 2675 else 2676 psize = STEDMA40_PSIZE_PHY_1; 2677 } 2678 2679 info->psize = psize; 2680 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; 2681 2682 return 0; 2683 } 2684 2685 /* Runtime reconfiguration extension */ 2686 static int d40_set_runtime_config(struct dma_chan *chan, 2687 struct dma_slave_config *config) 2688 { 2689 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2690 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; 2691 enum dma_slave_buswidth src_addr_width, dst_addr_width; 2692 dma_addr_t config_addr; 2693 u32 src_maxburst, dst_maxburst; 2694 int ret; 2695 2696 if (d40c->phy_chan == NULL) { 2697 chan_err(d40c, "Channel is not allocated!\n"); 2698 return -EINVAL; 2699 } 2700 2701 src_addr_width = config->src_addr_width; 2702 src_maxburst = config->src_maxburst; 2703 dst_addr_width = 
config->dst_addr_width; 2704 dst_maxburst = config->dst_maxburst; 2705 2706 if (config->direction == DMA_DEV_TO_MEM) { 2707 config_addr = config->src_addr; 2708 2709 if (cfg->dir != DMA_DEV_TO_MEM) 2710 dev_dbg(d40c->base->dev, 2711 "channel was not configured for peripheral " 2712 "to memory transfer (%d) overriding\n", 2713 cfg->dir); 2714 cfg->dir = DMA_DEV_TO_MEM; 2715 2716 /* Configure the memory side */ 2717 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) 2718 dst_addr_width = src_addr_width; 2719 if (dst_maxburst == 0) 2720 dst_maxburst = src_maxburst; 2721 2722 } else if (config->direction == DMA_MEM_TO_DEV) { 2723 config_addr = config->dst_addr; 2724 2725 if (cfg->dir != DMA_MEM_TO_DEV) 2726 dev_dbg(d40c->base->dev, 2727 "channel was not configured for memory " 2728 "to peripheral transfer (%d) overriding\n", 2729 cfg->dir); 2730 cfg->dir = DMA_MEM_TO_DEV; 2731 2732 /* Configure the memory side */ 2733 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) 2734 src_addr_width = dst_addr_width; 2735 if (src_maxburst == 0) 2736 src_maxburst = dst_maxburst; 2737 } else { 2738 dev_err(d40c->base->dev, 2739 "unrecognized channel direction %d\n", 2740 config->direction); 2741 return -EINVAL; 2742 } 2743 2744 if (config_addr <= 0) { 2745 dev_err(d40c->base->dev, "no address supplied\n"); 2746 return -EINVAL; 2747 } 2748 2749 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) { 2750 dev_err(d40c->base->dev, 2751 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n", 2752 src_maxburst, 2753 src_addr_width, 2754 dst_maxburst, 2755 dst_addr_width); 2756 return -EINVAL; 2757 } 2758 2759 if (src_maxburst > 16) { 2760 src_maxburst = 16; 2761 dst_maxburst = src_maxburst * src_addr_width / dst_addr_width; 2762 } else if (dst_maxburst > 16) { 2763 dst_maxburst = 16; 2764 src_maxburst = dst_maxburst * dst_addr_width / src_addr_width; 2765 } 2766 2767 /* Only valid widths are; 1, 2, 4 and 8. */ 2768 if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || 2769 src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || 2770 dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || 2771 dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || 2772 !is_power_of_2(src_addr_width) || 2773 !is_power_of_2(dst_addr_width)) 2774 return -EINVAL; 2775 2776 cfg->src_info.data_width = src_addr_width; 2777 cfg->dst_info.data_width = dst_addr_width; 2778 2779 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, 2780 src_maxburst); 2781 if (ret) 2782 return ret; 2783 2784 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info, 2785 dst_maxburst); 2786 if (ret) 2787 return ret; 2788 2789 /* Fill in register values */ 2790 if (chan_is_logical(d40c)) 2791 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 2792 else 2793 d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg); 2794 2795 /* These settings will take precedence later */ 2796 d40c->runtime_addr = config_addr; 2797 d40c->runtime_direction = config->direction; 2798 dev_dbg(d40c->base->dev, 2799 "configured channel %s for %s, data width %d/%d, " 2800 "maxburst %d/%d elements, LE, no flow control\n", 2801 dma_chan_name(chan), 2802 (config->direction == DMA_DEV_TO_MEM) ? 
"RX" : "TX", 2803 src_addr_width, dst_addr_width, 2804 src_maxburst, dst_maxburst); 2805 2806 return 0; 2807 } 2808 2809 /* Initialization functions */ 2810 2811 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, 2812 struct d40_chan *chans, int offset, 2813 int num_chans) 2814 { 2815 int i = 0; 2816 struct d40_chan *d40c; 2817 2818 INIT_LIST_HEAD(&dma->channels); 2819 2820 for (i = offset; i < offset + num_chans; i++) { 2821 d40c = &chans[i]; 2822 d40c->base = base; 2823 d40c->chan.device = dma; 2824 2825 spin_lock_init(&d40c->lock); 2826 2827 d40c->log_num = D40_PHY_CHAN; 2828 2829 INIT_LIST_HEAD(&d40c->done); 2830 INIT_LIST_HEAD(&d40c->active); 2831 INIT_LIST_HEAD(&d40c->queue); 2832 INIT_LIST_HEAD(&d40c->pending_queue); 2833 INIT_LIST_HEAD(&d40c->client); 2834 INIT_LIST_HEAD(&d40c->prepare_queue); 2835 2836 tasklet_init(&d40c->tasklet, dma_tasklet, 2837 (unsigned long) d40c); 2838 2839 list_add_tail(&d40c->chan.device_node, 2840 &dma->channels); 2841 } 2842 } 2843 2844 static void d40_ops_init(struct d40_base *base, struct dma_device *dev) 2845 { 2846 if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) 2847 dev->device_prep_slave_sg = d40_prep_slave_sg; 2848 2849 if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) { 2850 dev->device_prep_dma_memcpy = d40_prep_memcpy; 2851 2852 /* 2853 * This controller can only access address at even 2854 * 32bit boundaries, i.e. 2^2 2855 */ 2856 dev->copy_align = DMAENGINE_ALIGN_4_BYTES; 2857 } 2858 2859 if (dma_has_cap(DMA_SG, dev->cap_mask)) 2860 dev->device_prep_dma_sg = d40_prep_memcpy_sg; 2861 2862 if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) 2863 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; 2864 2865 dev->device_alloc_chan_resources = d40_alloc_chan_resources; 2866 dev->device_free_chan_resources = d40_free_chan_resources; 2867 dev->device_issue_pending = d40_issue_pending; 2868 dev->device_tx_status = d40_tx_status; 2869 dev->device_config = d40_set_runtime_config; 2870 dev->device_pause = d40_pause; 2871 dev->device_resume = d40_resume; 2872 dev->device_terminate_all = d40_terminate_all; 2873 dev->dev = base->dev; 2874 } 2875 2876 static int __init d40_dmaengine_init(struct d40_base *base, 2877 int num_reserved_chans) 2878 { 2879 int err ; 2880 2881 d40_chan_init(base, &base->dma_slave, base->log_chans, 2882 0, base->num_log_chans); 2883 2884 dma_cap_zero(base->dma_slave.cap_mask); 2885 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); 2886 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); 2887 2888 d40_ops_init(base, &base->dma_slave); 2889 2890 err = dma_async_device_register(&base->dma_slave); 2891 2892 if (err) { 2893 d40_err(base->dev, "Failed to register slave channels\n"); 2894 goto failure1; 2895 } 2896 2897 d40_chan_init(base, &base->dma_memcpy, base->log_chans, 2898 base->num_log_chans, base->num_memcpy_chans); 2899 2900 dma_cap_zero(base->dma_memcpy.cap_mask); 2901 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); 2902 dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask); 2903 2904 d40_ops_init(base, &base->dma_memcpy); 2905 2906 err = dma_async_device_register(&base->dma_memcpy); 2907 2908 if (err) { 2909 d40_err(base->dev, 2910 "Failed to regsiter memcpy only channels\n"); 2911 goto failure2; 2912 } 2913 2914 d40_chan_init(base, &base->dma_both, base->phy_chans, 2915 0, num_reserved_chans); 2916 2917 dma_cap_zero(base->dma_both.cap_mask); 2918 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); 2919 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); 2920 dma_cap_set(DMA_SG, base->dma_both.cap_mask); 2921 
dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); 2922 2923 d40_ops_init(base, &base->dma_both); 2924 err = dma_async_device_register(&base->dma_both); 2925 2926 if (err) { 2927 d40_err(base->dev, 2928 "Failed to register logical and physical capable channels\n"); 2929 goto failure3; 2930 } 2931 return 0; 2932 failure3: 2933 dma_async_device_unregister(&base->dma_memcpy); 2934 failure2: 2935 dma_async_device_unregister(&base->dma_slave); 2936 failure1: 2937 return err; 2938 } 2939 2940 /* Suspend resume functionality */ 2941 #ifdef CONFIG_PM_SLEEP 2942 static int dma40_suspend(struct device *dev) 2943 { 2944 struct platform_device *pdev = to_platform_device(dev); 2945 struct d40_base *base = platform_get_drvdata(pdev); 2946 int ret; 2947 2948 ret = pm_runtime_force_suspend(dev); 2949 if (ret) 2950 return ret; 2951 2952 if (base->lcpa_regulator) 2953 ret = regulator_disable(base->lcpa_regulator); 2954 return ret; 2955 } 2956 2957 static int dma40_resume(struct device *dev) 2958 { 2959 struct platform_device *pdev = to_platform_device(dev); 2960 struct d40_base *base = platform_get_drvdata(pdev); 2961 int ret = 0; 2962 2963 if (base->lcpa_regulator) { 2964 ret = regulator_enable(base->lcpa_regulator); 2965 if (ret) 2966 return ret; 2967 } 2968 2969 return pm_runtime_force_resume(dev); 2970 } 2971 #endif 2972 2973 #ifdef CONFIG_PM 2974 static void dma40_backup(void __iomem *baseaddr, u32 *backup, 2975 u32 *regaddr, int num, bool save) 2976 { 2977 int i; 2978 2979 for (i = 0; i < num; i++) { 2980 void __iomem *addr = baseaddr + regaddr[i]; 2981 2982 if (save) 2983 backup[i] = readl_relaxed(addr); 2984 else 2985 writel_relaxed(backup[i], addr); 2986 } 2987 } 2988 2989 static void d40_save_restore_registers(struct d40_base *base, bool save) 2990 { 2991 int i; 2992 2993 /* Save/Restore channel specific registers */ 2994 for (i = 0; i < base->num_phy_chans; i++) { 2995 void __iomem *addr; 2996 int idx; 2997 2998 if (base->phy_res[i].reserved) 2999 continue; 3000 3001 addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA; 3002 idx = i * ARRAY_SIZE(d40_backup_regs_chan); 3003 3004 dma40_backup(addr, &base->reg_val_backup_chan[idx], 3005 d40_backup_regs_chan, 3006 ARRAY_SIZE(d40_backup_regs_chan), 3007 save); 3008 } 3009 3010 /* Save/Restore global registers */ 3011 dma40_backup(base->virtbase, base->reg_val_backup, 3012 d40_backup_regs, ARRAY_SIZE(d40_backup_regs), 3013 save); 3014 3015 /* Save/Restore registers only existing on dma40 v3 and later */ 3016 if (base->gen_dmac.backup) 3017 dma40_backup(base->virtbase, base->reg_val_backup_v4, 3018 base->gen_dmac.backup, 3019 base->gen_dmac.backup_size, 3020 save); 3021 } 3022 3023 static int dma40_runtime_suspend(struct device *dev) 3024 { 3025 struct platform_device *pdev = to_platform_device(dev); 3026 struct d40_base *base = platform_get_drvdata(pdev); 3027 3028 d40_save_restore_registers(base, true); 3029 3030 /* Don't disable/enable clocks for v1 due to HW bugs */ 3031 if (base->rev != 1) 3032 writel_relaxed(base->gcc_pwr_off_mask, 3033 base->virtbase + D40_DREG_GCC); 3034 3035 return 0; 3036 } 3037 3038 static int dma40_runtime_resume(struct device *dev) 3039 { 3040 struct platform_device *pdev = to_platform_device(dev); 3041 struct d40_base *base = platform_get_drvdata(pdev); 3042 3043 d40_save_restore_registers(base, false); 3044 3045 writel_relaxed(D40_DREG_GCC_ENABLE_ALL, 3046 base->virtbase + D40_DREG_GCC); 3047 return 0; 3048 } 3049 #endif 3050 3051 static const struct dev_pm_ops dma40_pm_ops = { 3052 
SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume) 3053 SET_RUNTIME_PM_OPS(dma40_runtime_suspend, 3054 dma40_runtime_resume, 3055 NULL) 3056 }; 3057 3058 /* Initialization functions. */ 3059 3060 static int __init d40_phy_res_init(struct d40_base *base) 3061 { 3062 int i; 3063 int num_phy_chans_avail = 0; 3064 u32 val[2]; 3065 int odd_even_bit = -2; 3066 int gcc = D40_DREG_GCC_ENA; 3067 3068 val[0] = readl(base->virtbase + D40_DREG_PRSME); 3069 val[1] = readl(base->virtbase + D40_DREG_PRSMO); 3070 3071 for (i = 0; i < base->num_phy_chans; i++) { 3072 base->phy_res[i].num = i; 3073 odd_even_bit += 2 * ((i % 2) == 0); 3074 if (((val[i % 2] >> odd_even_bit) & 3) == 1) { 3075 /* Mark security only channels as occupied */ 3076 base->phy_res[i].allocated_src = D40_ALLOC_PHY; 3077 base->phy_res[i].allocated_dst = D40_ALLOC_PHY; 3078 base->phy_res[i].reserved = true; 3079 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), 3080 D40_DREG_GCC_SRC); 3081 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), 3082 D40_DREG_GCC_DST); 3083 3084 3085 } else { 3086 base->phy_res[i].allocated_src = D40_ALLOC_FREE; 3087 base->phy_res[i].allocated_dst = D40_ALLOC_FREE; 3088 base->phy_res[i].reserved = false; 3089 num_phy_chans_avail++; 3090 } 3091 spin_lock_init(&base->phy_res[i].lock); 3092 } 3093 3094 /* Mark disabled channels as occupied */ 3095 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { 3096 int chan = base->plat_data->disabled_channels[i]; 3097 3098 base->phy_res[chan].allocated_src = D40_ALLOC_PHY; 3099 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; 3100 base->phy_res[chan].reserved = true; 3101 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), 3102 D40_DREG_GCC_SRC); 3103 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), 3104 D40_DREG_GCC_DST); 3105 num_phy_chans_avail--; 3106 } 3107 3108 /* Mark soft_lli channels */ 3109 for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) { 3110 int chan = base->plat_data->soft_lli_chans[i]; 3111 3112 base->phy_res[chan].use_soft_lli = true; 3113 } 3114 3115 dev_info(base->dev, "%d of %d physical DMA channels available\n", 3116 num_phy_chans_avail, base->num_phy_chans); 3117 3118 /* Verify settings extended vs standard */ 3119 val[0] = readl(base->virtbase + D40_DREG_PRTYP); 3120 3121 for (i = 0; i < base->num_phy_chans; i++) { 3122 3123 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE && 3124 (val[0] & 0x3) != 1) 3125 dev_info(base->dev, 3126 "[%s] INFO: channel %d is misconfigured (%d)\n", 3127 __func__, i, val[0] & 0x3); 3128 3129 val[0] = val[0] >> 2; 3130 } 3131 3132 /* 3133 * To keep things simple, Enable all clocks initially. 3134 * The clocks will get managed later post channel allocation. 3135 * The clocks for the event lines on which reserved channels exists 3136 * are not managed here. 
3137 */ 3138 writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); 3139 base->gcc_pwr_off_mask = gcc; 3140 3141 return num_phy_chans_avail; 3142 } 3143 3144 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) 3145 { 3146 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev); 3147 struct clk *clk = NULL; 3148 void __iomem *virtbase = NULL; 3149 struct resource *res = NULL; 3150 struct d40_base *base = NULL; 3151 int num_log_chans = 0; 3152 int num_phy_chans; 3153 int num_memcpy_chans; 3154 int clk_ret = -EINVAL; 3155 int i; 3156 u32 pid; 3157 u32 cid; 3158 u8 rev; 3159 3160 clk = clk_get(&pdev->dev, NULL); 3161 if (IS_ERR(clk)) { 3162 d40_err(&pdev->dev, "No matching clock found\n"); 3163 goto failure; 3164 } 3165 3166 clk_ret = clk_prepare_enable(clk); 3167 if (clk_ret) { 3168 d40_err(&pdev->dev, "Failed to prepare/enable clock\n"); 3169 goto failure; 3170 } 3171 3172 /* Get IO for DMAC base address */ 3173 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); 3174 if (!res) 3175 goto failure; 3176 3177 if (request_mem_region(res->start, resource_size(res), 3178 D40_NAME " I/O base") == NULL) 3179 goto failure; 3180 3181 virtbase = ioremap(res->start, resource_size(res)); 3182 if (!virtbase) 3183 goto failure; 3184 3185 /* This is just a regular AMBA PrimeCell ID actually */ 3186 for (pid = 0, i = 0; i < 4; i++) 3187 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i) 3188 & 255) << (i * 8); 3189 for (cid = 0, i = 0; i < 4; i++) 3190 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i) 3191 & 255) << (i * 8); 3192 3193 if (cid != AMBA_CID) { 3194 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n"); 3195 goto failure; 3196 } 3197 if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) { 3198 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", 3199 AMBA_MANF_BITS(pid), 3200 AMBA_VENDOR_ST); 3201 goto failure; 3202 } 3203 /* 3204 * HW revision: 3205 * DB8500ed has revision 0 3206 * ? 
has revision 1 3207 * DB8500v1 has revision 2 3208 * DB8500v2 has revision 3 3209 * AP9540v1 has revision 4 3210 * DB8540v1 has revision 4 3211 */ 3212 rev = AMBA_REV_BITS(pid); 3213 if (rev < 2) { 3214 d40_err(&pdev->dev, "hardware revision: %d is not supported", rev); 3215 goto failure; 3216 } 3217 3218 /* The number of physical channels on this HW */ 3219 if (plat_data->num_of_phy_chans) 3220 num_phy_chans = plat_data->num_of_phy_chans; 3221 else 3222 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; 3223 3224 /* The number of channels used for memcpy */ 3225 if (plat_data->num_of_memcpy_chans) 3226 num_memcpy_chans = plat_data->num_of_memcpy_chans; 3227 else 3228 num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels); 3229 3230 num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY; 3231 3232 dev_info(&pdev->dev, 3233 "hardware rev: %d @ %pa with %d physical and %d logical channels\n", 3234 rev, &res->start, num_phy_chans, num_log_chans); 3235 3236 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + 3237 (num_phy_chans + num_log_chans + num_memcpy_chans) * 3238 sizeof(struct d40_chan), GFP_KERNEL); 3239 3240 if (base == NULL) { 3241 d40_err(&pdev->dev, "Out of memory\n"); 3242 goto failure; 3243 } 3244 3245 base->rev = rev; 3246 base->clk = clk; 3247 base->num_memcpy_chans = num_memcpy_chans; 3248 base->num_phy_chans = num_phy_chans; 3249 base->num_log_chans = num_log_chans; 3250 base->phy_start = res->start; 3251 base->phy_size = resource_size(res); 3252 base->virtbase = virtbase; 3253 base->plat_data = plat_data; 3254 base->dev = &pdev->dev; 3255 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); 3256 base->log_chans = &base->phy_chans[num_phy_chans]; 3257 3258 if (base->plat_data->num_of_phy_chans == 14) { 3259 base->gen_dmac.backup = d40_backup_regs_v4b; 3260 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B; 3261 base->gen_dmac.interrupt_en = D40_DREG_CPCMIS; 3262 base->gen_dmac.interrupt_clear = D40_DREG_CPCICR; 3263 base->gen_dmac.realtime_en = D40_DREG_CRSEG1; 3264 base->gen_dmac.realtime_clear = D40_DREG_CRCEG1; 3265 base->gen_dmac.high_prio_en = D40_DREG_CPSEG1; 3266 base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1; 3267 base->gen_dmac.il = il_v4b; 3268 base->gen_dmac.il_size = ARRAY_SIZE(il_v4b); 3269 base->gen_dmac.init_reg = dma_init_reg_v4b; 3270 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b); 3271 } else { 3272 if (base->rev >= 3) { 3273 base->gen_dmac.backup = d40_backup_regs_v4a; 3274 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A; 3275 } 3276 base->gen_dmac.interrupt_en = D40_DREG_PCMIS; 3277 base->gen_dmac.interrupt_clear = D40_DREG_PCICR; 3278 base->gen_dmac.realtime_en = D40_DREG_RSEG1; 3279 base->gen_dmac.realtime_clear = D40_DREG_RCEG1; 3280 base->gen_dmac.high_prio_en = D40_DREG_PSEG1; 3281 base->gen_dmac.high_prio_clear = D40_DREG_PCEG1; 3282 base->gen_dmac.il = il_v4a; 3283 base->gen_dmac.il_size = ARRAY_SIZE(il_v4a); 3284 base->gen_dmac.init_reg = dma_init_reg_v4a; 3285 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a); 3286 } 3287 3288 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), 3289 GFP_KERNEL); 3290 if (!base->phy_res) 3291 goto failure; 3292 3293 base->lookup_phy_chans = kzalloc(num_phy_chans * 3294 sizeof(struct d40_chan *), 3295 GFP_KERNEL); 3296 if (!base->lookup_phy_chans) 3297 goto failure; 3298 3299 base->lookup_log_chans = kzalloc(num_log_chans * 3300 sizeof(struct d40_chan *), 3301 GFP_KERNEL); 3302 if (!base->lookup_log_chans) 3303 goto failure; 3304 3305 
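/* One copy of the per-channel backup registers for each physical channel (used by d40_save_restore_registers()). */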
base->reg_val_backup_chan = kmalloc(base->num_phy_chans * 3306 sizeof(d40_backup_regs_chan), 3307 GFP_KERNEL); 3308 if (!base->reg_val_backup_chan) 3309 goto failure; 3310 3311 base->lcla_pool.alloc_map = 3312 kzalloc(num_phy_chans * sizeof(struct d40_desc *) 3313 * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL); 3314 if (!base->lcla_pool.alloc_map) 3315 goto failure; 3316 3317 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), 3318 0, SLAB_HWCACHE_ALIGN, 3319 NULL); 3320 if (base->desc_slab == NULL) 3321 goto failure; 3322 3323 return base; 3324 3325 failure: 3326 if (!clk_ret) 3327 clk_disable_unprepare(clk); 3328 if (!IS_ERR(clk)) 3329 clk_put(clk); 3330 if (virtbase) 3331 iounmap(virtbase); 3332 if (res) 3333 release_mem_region(res->start, 3334 resource_size(res)); 3337 3338 if (base) { 3339 kfree(base->lcla_pool.alloc_map); 3340 kfree(base->reg_val_backup_chan); 3341 kfree(base->lookup_log_chans); 3342 kfree(base->lookup_phy_chans); 3343 kfree(base->phy_res); 3344 kfree(base); 3345 } 3346 3347 return NULL; 3348 } 3349 3350 static void __init d40_hw_init(struct d40_base *base) 3351 { 3352 3353 int i; 3354 u32 prmseo[2] = {0, 0}; 3355 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; 3356 u32 pcmis = 0; 3357 u32 pcicr = 0; 3358 struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg; 3359 u32 reg_size = base->gen_dmac.init_reg_size; 3360 3361 for (i = 0; i < reg_size; i++) 3362 writel(dma_init_reg[i].val, 3363 base->virtbase + dma_init_reg[i].reg); 3364 3365 /* Configure all our dma channels to default settings */ 3366 for (i = 0; i < base->num_phy_chans; i++) { 3367 3368 activeo[i % 2] = activeo[i % 2] << 2; 3369 3370 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src 3371 == D40_ALLOC_PHY) { 3372 activeo[i % 2] |= 3; 3373 continue; 3374 } 3375 3376 /* Enable interrupt # */ 3377 pcmis = (pcmis << 1) | 1; 3378 3379 /* Clear interrupt # */ 3380 pcicr = (pcicr << 1) | 1; 3381 3382 /* Set channel to physical mode */ 3383 prmseo[i % 2] = prmseo[i % 2] << 2; 3384 prmseo[i % 2] |= 1; 3385 3386 } 3387 3388 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE); 3389 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO); 3390 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE); 3391 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); 3392 3393 /* Write which interrupt to enable */ 3394 writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en); 3395 3396 /* Write which interrupt to clear */ 3397 writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear); 3398 3399 /* These are __initdata and cannot be accessed after init */ 3400 base->gen_dmac.init_reg = NULL; 3401 base->gen_dmac.init_reg_size = 0; 3402 } 3403 3404 static int __init d40_lcla_allocate(struct d40_base *base) 3405 { 3406 struct d40_lcla_pool *pool = &base->lcla_pool; 3407 unsigned long *page_list; 3408 int i, j; 3409 int ret = 0; 3410 3411 /* 3412 * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned. 3413 * To fulfil this hardware requirement without wasting 256 kB, 3414 * we allocate pages until we get an aligned one.
3415 */ 3416 page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS, 3417 GFP_KERNEL); 3418 3419 if (!page_list) { 3420 ret = -ENOMEM; 3421 goto failure; 3422 } 3423 3424 /* Calculate how many pages are required */ 3425 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE; 3426 3427 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) { 3428 page_list[i] = __get_free_pages(GFP_KERNEL, 3429 base->lcla_pool.pages); 3430 if (!page_list[i]) { 3431 3432 d40_err(base->dev, "Failed to allocate %d pages.\n", 3433 base->lcla_pool.pages); 3434 ret = -ENOMEM; 3435 3436 for (j = 0; j < i; j++) 3437 free_pages(page_list[j], base->lcla_pool.pages); 3438 goto failure; 3439 } 3440 3441 if ((virt_to_phys((void *)page_list[i]) & 3442 (LCLA_ALIGNMENT - 1)) == 0) 3443 break; 3444 } 3445 3446 for (j = 0; j < i; j++) 3447 free_pages(page_list[j], base->lcla_pool.pages); 3448 3449 if (i < MAX_LCLA_ALLOC_ATTEMPTS) { 3450 base->lcla_pool.base = (void *)page_list[i]; 3451 } else { 3452 /* 3453 * After many attempts with no success in finding the correct 3454 * alignment, try with allocating a big buffer. 3455 */ 3456 dev_warn(base->dev, 3457 "[%s] Failed to get %d pages @ 18 bit align.\n", 3458 __func__, base->lcla_pool.pages); 3459 base->lcla_pool.base_unaligned = kmalloc(SZ_1K * 3460 base->num_phy_chans + 3461 LCLA_ALIGNMENT, 3462 GFP_KERNEL); 3463 if (!base->lcla_pool.base_unaligned) { 3464 ret = -ENOMEM; 3465 goto failure; 3466 } 3467 3468 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned, 3469 LCLA_ALIGNMENT); 3470 } 3471 3472 pool->dma_addr = dma_map_single(base->dev, pool->base, 3473 SZ_1K * base->num_phy_chans, 3474 DMA_TO_DEVICE); 3475 if (dma_mapping_error(base->dev, pool->dma_addr)) { 3476 pool->dma_addr = 0; 3477 ret = -ENOMEM; 3478 goto failure; 3479 } 3480 3481 writel(virt_to_phys(base->lcla_pool.base), 3482 base->virtbase + D40_DREG_LCLA); 3483 failure: 3484 kfree(page_list); 3485 return ret; 3486 } 3487 3488 static int __init d40_of_probe(struct platform_device *pdev, 3489 struct device_node *np) 3490 { 3491 struct stedma40_platform_data *pdata; 3492 int num_phy = 0, num_memcpy = 0, num_disabled = 0; 3493 const __be32 *list; 3494 3495 pdata = devm_kzalloc(&pdev->dev, 3496 sizeof(struct stedma40_platform_data), 3497 GFP_KERNEL); 3498 if (!pdata) 3499 return -ENOMEM; 3500 3501 /* If absent this value will be obtained from h/w.
*/ 3502 of_property_read_u32(np, "dma-channels", &num_phy); 3503 if (num_phy > 0) 3504 pdata->num_of_phy_chans = num_phy; 3505 3506 list = of_get_property(np, "memcpy-channels", &num_memcpy); 3507 num_memcpy /= sizeof(*list); 3508 3509 if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) { 3510 d40_err(&pdev->dev, 3511 "Invalid number of memcpy channels specified (%d)\n", 3512 num_memcpy); 3513 return -EINVAL; 3514 } 3515 pdata->num_of_memcpy_chans = num_memcpy; 3516 3517 of_property_read_u32_array(np, "memcpy-channels", 3518 dma40_memcpy_channels, 3519 num_memcpy); 3520 3521 list = of_get_property(np, "disabled-channels", &num_disabled); 3522 num_disabled /= sizeof(*list); 3523 3524 if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) { 3525 d40_err(&pdev->dev, 3526 "Invalid number of disabled channels specified (%d)\n", 3527 num_disabled); 3528 return -EINVAL; 3529 } 3530 3531 of_property_read_u32_array(np, "disabled-channels", 3532 pdata->disabled_channels, 3533 num_disabled); 3534 pdata->disabled_channels[num_disabled] = -1; 3535 3536 pdev->dev.platform_data = pdata; 3537 3538 return 0; 3539 } 3540 3541 static int __init d40_probe(struct platform_device *pdev) 3542 { 3543 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev); 3544 struct device_node *np = pdev->dev.of_node; 3545 int ret = -ENOENT; 3546 struct d40_base *base = NULL; 3547 struct resource *res = NULL; 3548 int num_reserved_chans; 3549 u32 val; 3550 3551 if (!plat_data) { 3552 if (np) { 3553 if (d40_of_probe(pdev, np)) { 3554 ret = -ENOMEM; 3555 goto failure; 3556 } 3557 } else { 3558 d40_err(&pdev->dev, "No pdata or Device Tree provided\n"); 3559 goto failure; 3560 } 3561 } 3562 3563 base = d40_hw_detect_init(pdev); 3564 if (!base) 3565 goto failure; 3566 3567 num_reserved_chans = d40_phy_res_init(base); 3568 3569 platform_set_drvdata(pdev, base); 3570 3571 spin_lock_init(&base->interrupt_lock); 3572 spin_lock_init(&base->execmd_lock); 3573 3574 /* Get IO for logical channel parameter address */ 3575 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); 3576 if (!res) { 3577 ret = -ENOENT; 3578 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n"); 3579 goto failure; 3580 } 3581 base->lcpa_size = resource_size(res); 3582 base->phy_lcpa = res->start; 3583 3584 if (request_mem_region(res->start, resource_size(res), 3585 D40_NAME " I/O lcpa") == NULL) { 3586 ret = -EBUSY; 3587 d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res); 3588 goto failure; 3589 } 3590 3591 /* We make use of ESRAM memory for this. 
*/ 3592 val = readl(base->virtbase + D40_DREG_LCPA); 3593 if (res->start != val && val != 0) { 3594 dev_warn(&pdev->dev, 3595 "[%s] Mismatch LCPA dma 0x%x, def %pa\n", 3596 __func__, val, &res->start); 3597 } else 3598 writel(res->start, base->virtbase + D40_DREG_LCPA); 3599 3600 base->lcpa_base = ioremap(res->start, resource_size(res)); 3601 if (!base->lcpa_base) { 3602 ret = -ENOMEM; 3603 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); 3604 goto failure; 3605 } 3606 /* If lcla has to be located in ESRAM we don't need to allocate */ 3607 if (base->plat_data->use_esram_lcla) { 3608 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 3609 "lcla_esram"); 3610 if (!res) { 3611 ret = -ENOENT; 3612 d40_err(&pdev->dev, 3613 "No \"lcla_esram\" memory resource\n"); 3614 goto failure; 3615 } 3616 base->lcla_pool.base = ioremap(res->start, 3617 resource_size(res)); 3618 if (!base->lcla_pool.base) { 3619 ret = -ENOMEM; 3620 d40_err(&pdev->dev, "Failed to ioremap LCLA region\n"); 3621 goto failure; 3622 } 3623 writel(res->start, base->virtbase + D40_DREG_LCLA); 3624 3625 } else { 3626 ret = d40_lcla_allocate(base); 3627 if (ret) { 3628 d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); 3629 goto failure; 3630 } 3631 } 3632 3633 spin_lock_init(&base->lcla_pool.lock); 3634 3635 base->irq = platform_get_irq(pdev, 0); 3636 3637 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); 3638 if (ret) { 3639 d40_err(&pdev->dev, "No IRQ defined\n"); 3640 goto failure; 3641 } 3642 3643 if (base->plat_data->use_esram_lcla) { 3644 3645 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); 3646 if (IS_ERR(base->lcpa_regulator)) { 3647 d40_err(&pdev->dev, "Failed to get lcpa_regulator\n"); 3648 ret = PTR_ERR(base->lcpa_regulator); 3649 base->lcpa_regulator = NULL; 3650 goto failure; 3651 } 3652 3653 ret = regulator_enable(base->lcpa_regulator); 3654 if (ret) { 3655 d40_err(&pdev->dev, 3656 "Failed to enable lcpa_regulator\n"); 3657 regulator_put(base->lcpa_regulator); 3658 base->lcpa_regulator = NULL; 3659 goto failure; 3660 } 3661 } 3662 3663 writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); 3664 3665 pm_runtime_irq_safe(base->dev); 3666 pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY); 3667 pm_runtime_use_autosuspend(base->dev); 3668 pm_runtime_mark_last_busy(base->dev); 3669 pm_runtime_set_active(base->dev); 3670 pm_runtime_enable(base->dev); 3671 3672 ret = d40_dmaengine_init(base, num_reserved_chans); 3673 if (ret) 3674 goto failure; 3675 3676 base->dev->dma_parms = &base->dma_parms; 3677 ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE); 3678 if (ret) { 3679 d40_err(&pdev->dev, "Failed to set dma max seg size\n"); 3680 goto failure; 3681 } 3682 3683 d40_hw_init(base); 3684 3685 if (np) { 3686 ret = of_dma_controller_register(np, d40_xlate, NULL); 3687 if (ret) 3688 dev_err(&pdev->dev, 3689 "could not register of_dma_controller\n"); 3690 } 3691 3692 dev_info(base->dev, "initialized\n"); 3693 return 0; 3694 3695 failure: 3696 if (base) { 3697 if (base->desc_slab) 3698 kmem_cache_destroy(base->desc_slab); 3699 if (base->virtbase) 3700 iounmap(base->virtbase); 3701 3702 if (base->lcla_pool.base && base->plat_data->use_esram_lcla) { 3703 iounmap(base->lcla_pool.base); 3704 base->lcla_pool.base = NULL; 3705 } 3706 3707 if (base->lcla_pool.dma_addr) 3708 dma_unmap_single(base->dev, base->lcla_pool.dma_addr, 3709 SZ_1K * base->num_phy_chans, 3710 DMA_TO_DEVICE); 3711 3712 if (!base->lcla_pool.base_unaligned && 
base->lcla_pool.base) 3713 free_pages((unsigned long)base->lcla_pool.base, 3714 base->lcla_pool.pages); 3715 3716 kfree(base->lcla_pool.base_unaligned); 3717 3718 if (base->phy_lcpa) 3719 release_mem_region(base->phy_lcpa, 3720 base->lcpa_size); 3721 if (base->phy_start) 3722 release_mem_region(base->phy_start, 3723 base->phy_size); 3724 if (base->clk) { 3725 clk_disable_unprepare(base->clk); 3726 clk_put(base->clk); 3727 } 3728 3729 if (base->lcpa_regulator) { 3730 regulator_disable(base->lcpa_regulator); 3731 regulator_put(base->lcpa_regulator); 3732 } 3733 3734 kfree(base->lcla_pool.alloc_map); 3735 kfree(base->lookup_log_chans); 3736 kfree(base->lookup_phy_chans); 3737 kfree(base->phy_res); 3738 kfree(base); 3739 } 3740 3741 d40_err(&pdev->dev, "probe failed\n"); 3742 return ret; 3743 } 3744 3745 static const struct of_device_id d40_match[] = { 3746 { .compatible = "stericsson,dma40", }, 3747 {} 3748 }; 3749 3750 static struct platform_driver d40_driver = { 3751 .driver = { 3752 .name = D40_NAME, 3753 .pm = &dma40_pm_ops, 3754 .of_match_table = d40_match, 3755 }, 3756 }; 3757 3758 static int __init stedma40_init(void) 3759 { 3760 return platform_driver_probe(&d40_driver, d40_probe); 3761 } 3762 subsys_initcall(stedma40_init); 3763
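/*
 * Usage sketch (illustrative only, not part of the driver): one way a
 * client driver might claim a DMA40 channel through stedma40_filter()
 * and then apply a runtime configuration. The dev_type value and the
 * FIFO address below are hypothetical placeholders.
 */
#if 0
static struct dma_chan *example_request_dma40_channel(void)
{
	struct stedma40_chan_cfg cfg = {
		.mode = STEDMA40_MODE_LOGICAL,
		.dir = DMA_DEV_TO_MEM,
		.dev_type = 8,			/* hypothetical event line */
	};
	struct dma_slave_config slave_cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = 0x80120000,		/* hypothetical device FIFO */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 8,
	};
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* stedma40_filter() validates cfg and stores it in the channel */
	chan = dma_request_channel(mask, stedma40_filter, &cfg);
	if (!chan)
		return NULL;

	/* Reaches d40_set_runtime_config() via the device_config hook */
	if (dmaengine_slave_config(chan, &slave_cfg)) {
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}
#endif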