// SPDX-License-Identifier: GPL-2.0+
/*
 * vsp1_dl.c -- R-Car VSP1 Display List
 *
 * Copyright (C) 2015 Renesas Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/lockdep.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "vsp1.h"
#include "vsp1_dl.h"

#define VSP1_DL_NUM_ENTRIES		256

#define VSP1_DLH_INT_ENABLE		(1 << 1)
#define VSP1_DLH_AUTO_START		(1 << 0)

#define VSP1_DLH_EXT_PRE_CMD_EXEC	(1 << 9)
#define VSP1_DLH_EXT_POST_CMD_EXEC	(1 << 8)

struct vsp1_dl_header_list {
	u32 num_bytes;
	u32 addr;
} __packed;

struct vsp1_dl_header {
	u32 num_lists;
	struct vsp1_dl_header_list lists[8];
	u32 next_header;
	u32 flags;
} __packed;

/**
 * struct vsp1_dl_ext_header - Extended display list header
 * @padding: padding zero bytes for alignment
 * @pre_ext_dl_num_cmd: number of pre-extended command bodies to parse
 * @flags: enables or disables execution of the pre and post commands
 * @pre_ext_dl_plist: start address of pre-extended display list bodies
 * @post_ext_dl_num_cmd: number of post-extended command bodies to parse
 * @post_ext_dl_plist: start address of post-extended display list bodies
 */
struct vsp1_dl_ext_header {
	u32 padding;

	/*
	 * The datasheet represents the flags as stored before
	 * pre_ext_dl_num_cmd, expecting 32-bit accesses. The flags apply to
	 * the whole header, not just the pre_ext command, and thus warrant
	 * being separated out. Due to byte ordering, and representing them
	 * as 16-bit values here, the flags must be positioned after
	 * pre_ext_dl_num_cmd.
	 */
	u16 pre_ext_dl_num_cmd;
	u16 flags;
	u32 pre_ext_dl_plist;

	u32 post_ext_dl_num_cmd;
	u32 post_ext_dl_plist;
} __packed;
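/*
 * For reference, on a little-endian CPU the two 16-bit fields above pack
 * into the single 32-bit word described by the datasheet, with
 * pre_ext_dl_num_cmd occupying the low half-word:
 *
 *	word = (flags << 16) | pre_ext_dl_num_cmd;
 *
 * which is why the fields are declared in the reverse of the documented
 * order.
 */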
struct vsp1_dl_header_extended {
	struct vsp1_dl_header header;
	struct vsp1_dl_ext_header ext;
} __packed;

struct vsp1_dl_entry {
	u32 addr;
	u32 data;
} __packed;

/**
 * struct vsp1_pre_ext_dl_body - Pre Extended Display List Body
 * @opcode: Extended display list command operation code
 * @flags: Pre-extended command flags. These are specific to each command
 * @address_set: Source address set pointer. Must have 16-byte alignment
 * @reserved: Zero bits for alignment
 */
struct vsp1_pre_ext_dl_body {
	u32 opcode;
	u32 flags;
	u32 address_set;
	u32 reserved;
} __packed;

/**
 * struct vsp1_dl_body - Display list body
 * @list: entry in the display list's list of bodies
 * @free: entry in the pool free body list
 * @refcnt: reference tracking for the body
 * @pool: pool to which this body belongs
 * @entries: array of entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes
 * @num_entries: number of stored entries
 * @max_entries: number of entries available
 */
struct vsp1_dl_body {
	struct list_head list;
	struct list_head free;

	refcount_t refcnt;

	struct vsp1_dl_body_pool *pool;

	struct vsp1_dl_entry *entries;
	dma_addr_t dma;
	size_t size;

	unsigned int num_entries;
	unsigned int max_entries;
};

/**
 * struct vsp1_dl_body_pool - display list body pool
 * @dma: DMA address of the entries
 * @size: size of the full DMA memory pool in bytes
 * @mem: CPU memory pointer for the pool
 * @bodies: Array of DLB structures for the pool
 * @free: List of free DLB entries
 * @lock: Protects the free list
 * @vsp1: the VSP1 device
 */
struct vsp1_dl_body_pool {
	/* DMA allocation */
	dma_addr_t dma;
	size_t size;
	void *mem;

	/* Body management */
	struct vsp1_dl_body *bodies;
	struct list_head free;
	spinlock_t lock;

	struct vsp1_device *vsp1;
};
/**
 * struct vsp1_dl_cmd_pool - Display List commands pool
 * @dma: DMA address of the entries
 * @size: size of the full DMA memory pool in bytes
 * @mem: CPU memory pointer for the pool
 * @cmds: Array of command structures for the pool
 * @free: Free pool entries
 * @lock: Protects the free list
 * @vsp1: the VSP1 device
 */
struct vsp1_dl_cmd_pool {
	/* DMA allocation */
	dma_addr_t dma;
	size_t size;
	void *mem;

	struct vsp1_dl_ext_cmd *cmds;
	struct list_head free;

	spinlock_t lock;

	struct vsp1_device *vsp1;
};

/**
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @header: display list header
 * @extension: extended display list header. NULL for normal lists
 * @dma: DMA address for the header
 * @body0: first display list body
 * @bodies: list of extra display list bodies
 * @pre_cmd: pre command to be issued through extended dl header
 * @post_cmd: post command to be issued through extended dl header
 * @allocated: flag to detect double list release
 * @has_chain: if true, indicates that there's a partition chain
 * @chain: entry in the display list partition chain
 * @flags: display list flags, a combination of VSP1_DL_FRAME_END_*
 */
struct vsp1_dl_list {
	struct list_head list;
	struct vsp1_dl_manager *dlm;

	struct vsp1_dl_header *header;
	struct vsp1_dl_ext_header *extension;
	dma_addr_t dma;

	struct vsp1_dl_body *body0;
	struct list_head bodies;

	struct vsp1_dl_ext_cmd *pre_cmd;
	struct vsp1_dl_ext_cmd *post_cmd;

	bool allocated;

	bool has_chain;
	struct list_head chain;

	unsigned int flags;
};

/**
 * struct vsp1_dl_manager - Display List manager
 * @index: index of the related WPF
 * @singleshot: execute the display list in single-shot mode
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued, and pending lists
 * @free: array of all free display lists
 * @active: list currently being processed (loaded) by hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @pool: body pool for the display list bodies
 * @cmdpool: commands pool for extended display list
 * @list_count: number of allocated display lists
 */
struct vsp1_dl_manager {
	unsigned int index;
	bool singleshot;
	struct vsp1_device *vsp1;

	spinlock_t lock;
	struct list_head free;
	struct vsp1_dl_list *active;
	struct vsp1_dl_list *queued;
	struct vsp1_dl_list *pending;

	struct vsp1_dl_body_pool *pool;
	struct vsp1_dl_cmd_pool *cmdpool;

	size_t list_count;
};

/* -----------------------------------------------------------------------------
 * Display List Body Management
 */
/**
 * vsp1_dl_body_pool_create - Create a pool of bodies from a single allocation
 * @vsp1: The VSP1 device
 * @num_bodies: The number of bodies to allocate
 * @num_entries: The maximum number of entries that a body can contain
 * @extra_size: Extra allocation provided for the bodies
 *
 * Allocate a pool of display list bodies each with enough memory to contain
 * the requested number of entries plus the @extra_size.
 *
 * Return a pointer to a pool on success or NULL if memory can't be allocated.
 */
struct vsp1_dl_body_pool *
vsp1_dl_body_pool_create(struct vsp1_device *vsp1, unsigned int num_bodies,
			 unsigned int num_entries, size_t extra_size)
{
	struct vsp1_dl_body_pool *pool;
	size_t dlb_size;
	unsigned int i;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->vsp1 = vsp1;

	/*
	 * TODO: 'extra_size' is only used by vsp1_dlm_create(), to allocate
	 * extra memory for the display list header. We need only one header
	 * per display list, not per display list body, thus this allocation
	 * is extraneous and should be reworked in the future.
	 */
	dlb_size = num_entries * sizeof(struct vsp1_dl_entry) + extra_size;
	pool->size = dlb_size * num_bodies;

	pool->bodies = kcalloc(num_bodies, sizeof(*pool->bodies), GFP_KERNEL);
	if (!pool->bodies) {
		kfree(pool);
		return NULL;
	}

	pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
				 GFP_KERNEL);
	if (!pool->mem) {
		kfree(pool->bodies);
		kfree(pool);
		return NULL;
	}

	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free);

	for (i = 0; i < num_bodies; ++i) {
		struct vsp1_dl_body *dlb = &pool->bodies[i];

		dlb->pool = pool;
		dlb->max_entries = num_entries;

		dlb->dma = pool->dma + i * dlb_size;
		dlb->entries = pool->mem + i * dlb_size;

		list_add_tail(&dlb->free, &pool->free);
	}

	return pool;
}

/**
 * vsp1_dl_body_pool_destroy - Release a body pool
 * @pool: The body pool
 *
 * Release all components of a pool allocation.
 */
void vsp1_dl_body_pool_destroy(struct vsp1_dl_body_pool *pool)
{
	if (!pool)
		return;

	if (pool->mem)
		dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
			    pool->dma);

	kfree(pool->bodies);
	kfree(pool);
}
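/*
 * Illustrative sketch (not driver code): a hypothetical caller would create
 * and destroy a pool as follows, with each of the 8 bodies able to hold 128
 * register writes and no extra header space:
 *
 *	struct vsp1_dl_body_pool *pool;
 *
 *	pool = vsp1_dl_body_pool_create(vsp1, 8, 128, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	...
 *	vsp1_dl_body_pool_destroy(pool);
 *
 * The pool is backed by a single write-combined DMA allocation carved into
 * num_bodies slices of dlb_size bytes each.
 */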
/**
 * vsp1_dl_body_get - Obtain a body from a pool
 * @pool: The body pool
 *
 * Obtain a body from the pool without blocking.
 *
 * Returns a display list body or NULL if there are none available.
 */
struct vsp1_dl_body *vsp1_dl_body_get(struct vsp1_dl_body_pool *pool)
{
	struct vsp1_dl_body *dlb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);

	if (!list_empty(&pool->free)) {
		dlb = list_first_entry(&pool->free, struct vsp1_dl_body, free);
		list_del(&dlb->free);
		refcount_set(&dlb->refcnt, 1);
	}

	spin_unlock_irqrestore(&pool->lock, flags);

	return dlb;
}

/**
 * vsp1_dl_body_put - Return a body back to its pool
 * @dlb: The display list body
 *
 * Return a body back to the pool, and reset the num_entries to clear the
 * list.
 */
void vsp1_dl_body_put(struct vsp1_dl_body *dlb)
{
	unsigned long flags;

	if (!dlb)
		return;

	if (!refcount_dec_and_test(&dlb->refcnt))
		return;

	dlb->num_entries = 0;

	spin_lock_irqsave(&dlb->pool->lock, flags);
	list_add_tail(&dlb->free, &dlb->pool->free);
	spin_unlock_irqrestore(&dlb->pool->lock, flags);
}

/**
 * vsp1_dl_body_write - Write a register to a display list body
 * @dlb: The body
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list body. The maximum
 * number of entries that can be written in a body is specified when the body
 * pool is created by vsp1_dl_body_pool_create().
 */
void vsp1_dl_body_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	if (WARN_ONCE(dlb->num_entries >= dlb->max_entries,
		      "DLB size exceeded (max %u)", dlb->max_entries))
		return;

	dlb->entries[dlb->num_entries].addr = reg;
	dlb->entries[dlb->num_entries].data = data;
	dlb->num_entries++;
}
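/*
 * Illustrative sketch (not driver code): a body is reference counted, so a
 * hypothetical user pairs every vsp1_dl_body_get() with a
 * vsp1_dl_body_put():
 *
 *	struct vsp1_dl_body *dlb = vsp1_dl_body_get(pool);
 *
 *	if (!dlb)
 *		return -ENOMEM;
 *
 *	vsp1_dl_body_write(dlb, VI6_WPF_SRCRPF, 0);
 *	...
 *	vsp1_dl_body_put(dlb);
 *
 * VI6_WPF_SRCRPF is just an example register address taken from the driver's
 * register set; any register/value pair could be written here.
 */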
/* -----------------------------------------------------------------------------
 * Display List Extended Command Management
 */

enum vsp1_extcmd_type {
	VSP1_EXTCMD_AUTODISP,
	VSP1_EXTCMD_AUTOFLD,
};

struct vsp1_extended_command_info {
	u16 opcode;
	size_t body_size;
};

static const struct vsp1_extended_command_info vsp1_extended_commands[] = {
	[VSP1_EXTCMD_AUTODISP] = { 0x02, 96 },
	[VSP1_EXTCMD_AUTOFLD]  = { 0x03, 160 },
};

/**
 * vsp1_dl_cmd_pool_create - Create a pool of commands from a single allocation
 * @vsp1: The VSP1 device
 * @type: The command pool type
 * @num_cmds: The number of commands to allocate
 *
 * Allocate a pool of commands each with enough memory to contain the private
 * data of each command. The allocation sizes are dependent upon the command
 * type.
 *
 * Return a pointer to the pool on success or NULL if memory can't be
 * allocated.
 */
static struct vsp1_dl_cmd_pool *
vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type,
			unsigned int num_cmds)
{
	struct vsp1_dl_cmd_pool *pool;
	unsigned int i;
	size_t cmd_size;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->vsp1 = vsp1;

	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free);

	pool->cmds = kcalloc(num_cmds, sizeof(*pool->cmds), GFP_KERNEL);
	if (!pool->cmds) {
		kfree(pool);
		return NULL;
	}

	cmd_size = sizeof(struct vsp1_pre_ext_dl_body) +
		   vsp1_extended_commands[type].body_size;
	cmd_size = ALIGN(cmd_size, 16);

	pool->size = cmd_size * num_cmds;
	pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
				 GFP_KERNEL);
	if (!pool->mem) {
		kfree(pool->cmds);
		kfree(pool);
		return NULL;
	}

	for (i = 0; i < num_cmds; ++i) {
		struct vsp1_dl_ext_cmd *cmd = &pool->cmds[i];
		size_t cmd_offset = i * cmd_size;
		/* data_offset must be 16-byte aligned for DMA. */
		size_t data_offset = sizeof(struct vsp1_pre_ext_dl_body) +
				     cmd_offset;

		cmd->pool = pool;
		cmd->opcode = vsp1_extended_commands[type].opcode;

		/*
		 * TODO: Auto-disp can utilise more than one extended body
		 * command per cmd.
		 */
		cmd->num_cmds = 1;
		cmd->cmds = pool->mem + cmd_offset;
		cmd->cmd_dma = pool->dma + cmd_offset;

		cmd->data = pool->mem + data_offset;
		cmd->data_dma = pool->dma + data_offset;

		list_add_tail(&cmd->free, &pool->free);
	}

	return pool;
}

static
struct vsp1_dl_ext_cmd *vsp1_dl_ext_cmd_get(struct vsp1_dl_cmd_pool *pool)
{
	struct vsp1_dl_ext_cmd *cmd = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);

	if (!list_empty(&pool->free)) {
		cmd = list_first_entry(&pool->free, struct vsp1_dl_ext_cmd,
				       free);
		list_del(&cmd->free);
	}

	spin_unlock_irqrestore(&pool->lock, flags);

	return cmd;
}

static void vsp1_dl_ext_cmd_put(struct vsp1_dl_ext_cmd *cmd)
{
	unsigned long flags;

	if (!cmd)
		return;

	/* Reset the flags, as they mark data usage. */
	cmd->flags = 0;

	spin_lock_irqsave(&cmd->pool->lock, flags);
	list_add_tail(&cmd->free, &cmd->pool->free);
	spin_unlock_irqrestore(&cmd->pool->lock, flags);
}

static void vsp1_dl_ext_cmd_pool_destroy(struct vsp1_dl_cmd_pool *pool)
{
	if (!pool)
		return;

	if (pool->mem)
		dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
			    pool->dma);

	kfree(pool->cmds);
	kfree(pool);
}

struct vsp1_dl_ext_cmd *vsp1_dl_get_pre_cmd(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	if (dl->pre_cmd)
		return dl->pre_cmd;

	dl->pre_cmd = vsp1_dl_ext_cmd_get(dlm->cmdpool);

	return dl->pre_cmd;
}
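/*
 * Illustrative sketch (not driver code): the auto-fielding user of the
 * extended display list obtains the pre command once per display list and
 * fills its data area with the hardware-defined body, along these lines:
 *
 *	struct vsp1_dl_ext_cmd *cmd = vsp1_dl_get_pre_cmd(dl);
 *
 *	if (cmd) {
 *		cmd->flags = VI6_DL_EXT_AUTOFLD_INT;
 *		fill_autofld_body(cmd->data);
 *	}
 *
 * fill_autofld_body() is a hypothetical helper standing in for the RPF code
 * that populates cmd->data; the command header itself is written later by
 * vsp1_dl_ext_cmd_fill_header() when the list is committed.
 */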
/* -----------------------------------------------------------------------------
 * Display List Transaction Management
 */

static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl;
	size_t header_offset;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	INIT_LIST_HEAD(&dl->bodies);
	dl->dlm = dlm;

	/* Get a default body for our list. */
	dl->body0 = vsp1_dl_body_get(dlm->pool);
	if (!dl->body0) {
		kfree(dl);
		return NULL;
	}

	header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);

	dl->header = ((void *)dl->body0->entries) + header_offset;
	dl->dma = dl->body0->dma + header_offset;

	memset(dl->header, 0, sizeof(*dl->header));
	dl->header->lists[0].addr = dl->body0->dma;

	return dl;
}

static void vsp1_dl_list_bodies_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_body *dlb, *tmp;

	list_for_each_entry_safe(dlb, tmp, &dl->bodies, list) {
		list_del(&dlb->list);
		vsp1_dl_body_put(dlb);
	}
}

static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
	vsp1_dl_body_put(dl->body0);
	vsp1_dl_list_bodies_put(dl);

	kfree(dl);
}

/**
 * vsp1_dl_list_get - Get a free display list
 * @dlm: The display list manager
 *
 * Get a display list from the pool of free lists and return it.
 *
 * This function must be called without the display list manager lock held.
 */
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl = NULL;
	unsigned long flags;

	lockdep_assert_not_held(&dlm->lock);

	spin_lock_irqsave(&dlm->lock, flags);

	if (!list_empty(&dlm->free)) {
		dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
		list_del(&dl->list);

		/*
		 * The display list chain must be initialised to ensure every
		 * display list can assert list_empty() if it is not in a
		 * chain.
		 */
		INIT_LIST_HEAD(&dl->chain);
		dl->allocated = true;
	}

	spin_unlock_irqrestore(&dlm->lock, flags);

	return dl;
}

/* This function must be called with the display list manager lock held. */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_list *dl_next;

	if (!dl)
		return;

	lockdep_assert_held(&dl->dlm->lock);

	/*
	 * Release any linked display lists which were chained for a single
	 * hardware operation.
	 */
	if (dl->has_chain) {
		list_for_each_entry(dl_next, &dl->chain, chain)
			__vsp1_dl_list_put(dl_next);
	}

	dl->has_chain = false;

	vsp1_dl_list_bodies_put(dl);

	vsp1_dl_ext_cmd_put(dl->pre_cmd);
	vsp1_dl_ext_cmd_put(dl->post_cmd);

	dl->pre_cmd = NULL;
	dl->post_cmd = NULL;

	/*
	 * body0 is reused as an optimisation, as presently every display list
	 * has at least one body, thus we reinitialise the entries list.
	 */
	dl->body0->num_entries = 0;

	/*
	 * Return the display list to the 'free' pool. If the list had already
	 * been returned, be loud about it.
	 */
	WARN_ON_ONCE(!dl->allocated);
	dl->allocated = false;

	list_add_tail(&dl->list, &dl->dlm->free);
}

/**
 * vsp1_dl_list_put - Release a display list
 * @dl: The display list
 *
 * Release the display list and return it to the pool of free lists.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	unsigned long flags;

	if (!dl)
		return;

	spin_lock_irqsave(&dl->dlm->lock, flags);
	__vsp1_dl_list_put(dl);
	spin_unlock_irqrestore(&dl->dlm->lock, flags);
}

/**
 * vsp1_dl_list_get_body0 - Obtain the default body for the display list
 * @dl: The display list
 *
 * Obtain a pointer to the internal display list body allowing this to be
 * passed directly to configure operations.
 */
struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl)
{
	return dl->body0;
}
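/*
 * Illustrative sketch (not driver code): a hypothetical per-frame setup
 * writes its registers through body0 and either commits the list or, on
 * error, returns it to the pool:
 *
 *	struct vsp1_dl_list *dl = vsp1_dl_list_get(dlm);
 *	struct vsp1_dl_body *dlb;
 *
 *	if (!dl)
 *		return -ENOMEM;
 *
 *	dlb = vsp1_dl_list_get_body0(dl);
 *	vsp1_dl_body_write(dlb, reg, value);
 *	...
 *	vsp1_dl_list_commit(dl, 0);
 *
 * Once committed, the list is owned by the manager and returns to the free
 * pool automatically when the hardware has finished with it; an uncommitted
 * list must be released with vsp1_dl_list_put().
 */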
/**
 * vsp1_dl_list_add_body - Add a body to the display list
 * @dl: The display list
 * @dlb: The body
 *
 * Add a display list body to a display list. Registers contained in bodies
 * are processed after registers contained in the main display list, in the
 * order in which bodies are added.
 *
 * Adding a body to a display list passes ownership of the body to the list.
 * The caller retains its reference to the body when adding it to the display
 * list, but is not allowed to add new entries to the body.
 *
 * The reference must be explicitly released by a call to vsp1_dl_body_put()
 * when the body isn't needed anymore.
 */
int vsp1_dl_list_add_body(struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb)
{
	refcount_inc(&dlb->refcnt);

	list_add_tail(&dlb->list, &dl->bodies);

	return 0;
}

/**
 * vsp1_dl_list_add_chain - Add a display list to a chain
 * @head: The head display list
 * @dl: The new display list
 *
 * Add a display list to an existing display list chain. The chained lists
 * will be automatically processed by the hardware without intervention from
 * the CPU. A display list end interrupt will only complete after the last
 * display list in the chain has completed processing.
 *
 * Adding a display list to a chain passes ownership of the display list to
 * the head display list item. The chain is released when the head dl item is
 * put back with __vsp1_dl_list_put().
 */
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head, struct vsp1_dl_list *dl)
{
	head->has_chain = true;
	list_add_tail(&dl->chain, &head->chain);
	return 0;
}
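/*
 * Illustrative sketch (not driver code): chaining is how single-shot
 * pipelines split one frame into partitions that the hardware processes back
 * to back. A hypothetical caller would build and commit a chain as follows,
 * with error handling omitted and num_partitions a hypothetical count:
 *
 *	struct vsp1_dl_list *head = vsp1_dl_list_get(dlm);
 *	unsigned int i;
 *
 *	for (i = 1; i < num_partitions; ++i) {
 *		struct vsp1_dl_list *dl = vsp1_dl_list_get(dlm);
 *
 *		vsp1_dl_list_add_chain(head, dl);
 *	}
 *
 *	vsp1_dl_list_commit(head, 0);
 *
 * Releasing the head with vsp1_dl_list_put() releases the whole chain.
 */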
static void vsp1_dl_ext_cmd_fill_header(struct vsp1_dl_ext_cmd *cmd)
{
	cmd->cmds[0].opcode = cmd->opcode;
	cmd->cmds[0].flags = cmd->flags;
	cmd->cmds[0].address_set = cmd->data_dma;
	cmd->cmds[0].reserved = 0;
}

static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_header_list *hdr = dl->header->lists;
	struct vsp1_dl_body *dlb;
	unsigned int num_lists = 0;

	/*
	 * Fill the header with the display list bodies' addresses and sizes.
	 * The address of the first body has already been filled when the
	 * display list was allocated.
	 */

	hdr->num_bytes = dl->body0->num_entries
		       * sizeof(*dl->body0->entries);

	list_for_each_entry(dlb, &dl->bodies, list) {
		num_lists++;
		hdr++;

		hdr->addr = dlb->dma;
		hdr->num_bytes = dlb->num_entries
			       * sizeof(*dlb->entries);
	}

	dl->header->num_lists = num_lists;
	dl->header->flags = 0;

	/*
	 * Enable the interrupt for the end of each frame. In continuous mode
	 * chained lists are used with one list per frame, so enable the
	 * interrupt for each list. In singleshot mode chained lists are used
	 * to partition a single frame, so enable the interrupt for the last
	 * list only.
	 */
	if (!dlm->singleshot || is_last)
		dl->header->flags |= VSP1_DLH_INT_ENABLE;

	/*
	 * In continuous mode enable auto-start for all lists, as the VSP must
	 * loop on the same list until a new one is queued. In singleshot mode
	 * enable auto-start for all lists but the last to chain processing of
	 * partitions without software intervention.
	 */
	if (!dlm->singleshot || !is_last)
		dl->header->flags |= VSP1_DLH_AUTO_START;

	if (!is_last) {
		/*
		 * If this is not the last display list in the chain, queue the
		 * next item for automatic processing by the hardware.
		 */
		struct vsp1_dl_list *next = list_next_entry(dl, chain);

		dl->header->next_header = next->dma;
	} else if (!dlm->singleshot) {
		/*
		 * If the display list manager works in continuous mode, the
		 * VSP should loop over the display list continuously until
		 * instructed to do otherwise.
		 */
		dl->header->next_header = dl->dma;
	}

	if (!dl->extension)
		return;

	dl->extension->flags = 0;

	if (dl->pre_cmd) {
		dl->extension->pre_ext_dl_plist = dl->pre_cmd->cmd_dma;
		dl->extension->pre_ext_dl_num_cmd = dl->pre_cmd->num_cmds;
		dl->extension->flags |= VSP1_DLH_EXT_PRE_CMD_EXEC;

		vsp1_dl_ext_cmd_fill_header(dl->pre_cmd);
	}

	if (dl->post_cmd) {
		dl->extension->post_ext_dl_plist = dl->post_cmd->cmd_dma;
		dl->extension->post_ext_dl_num_cmd = dl->post_cmd->num_cmds;
		dl->extension->flags |= VSP1_DLH_EXT_POST_CMD_EXEC;

		vsp1_dl_ext_cmd_fill_header(dl->post_cmd);
	}
}

static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;

	if (!dlm->queued)
		return false;

	/*
	 * Check whether the VSP1 has taken the update. The hardware indicates
	 * this by clearing the UPDHDR bit in the CMD register.
	 */
	return !!(vsp1_read(vsp1, VI6_CMD(dlm->index)) & VI6_CMD_UPDHDR);
}

static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;

	/*
	 * Program the display list header address. If the hardware is idle
	 * (single-shot mode or first frame in continuous mode) it will then be
	 * started independently. If the hardware is operating, the
	 * VI6_DL_HDR_REF_ADDR register will be updated with the display list
	 * address.
	 */
	vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
}

static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * If a previous display list has been queued to the hardware but not
	 * processed yet, the VSP can start processing it at any time. In that
	 * case we can't replace the queued list by the new one, as we could
	 * race with the hardware. We thus mark the update as pending, it will
	 * then be queued to the hardware by the frame end interrupt handler.
	 *
	 * If a display list is already pending we simply drop it as the new
	 * display list is assumed to contain a more recent configuration. It
	 * is an error if the already pending list has the
	 * VSP1_DL_FRAME_END_INTERNAL flag set, as there is then a process
	 * waiting for that list to complete. This shouldn't happen as the
	 * waiting process should perform proper locking, but warn just in
	 * case.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm)) {
		WARN_ON(dlm->pending &&
			(dlm->pending->flags & VSP1_DL_FRAME_END_INTERNAL));
		__vsp1_dl_list_put(dlm->pending);
		dlm->pending = dl;
		return;
	}

	/*
	 * Pass the new display list to the hardware and mark it as queued. It
	 * will become active when the hardware starts processing it.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	__vsp1_dl_list_put(dlm->queued);
	dlm->queued = dl;
}

static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * When working in single-shot mode, the caller guarantees that the
	 * hardware is idle at this point. Just commit the head display list
	 * to hardware. Chained lists will be started automatically.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	dlm->active = dl;
}
void vsp1_dl_list_commit(struct vsp1_dl_list *dl, unsigned int dl_flags)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_list *dl_next;
	unsigned long flags;

	/* Fill the header for the head and chained display lists. */
	vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

	list_for_each_entry(dl_next, &dl->chain, chain) {
		bool last = list_is_last(&dl_next->chain, &dl->chain);

		vsp1_dl_list_fill_header(dl_next, last);
	}

	dl->flags = dl_flags & ~VSP1_DL_FRAME_END_COMPLETED;

	spin_lock_irqsave(&dlm->lock, flags);

	if (dlm->singleshot)
		vsp1_dl_list_commit_singleshot(dl);
	else
		vsp1_dl_list_commit_continuous(dl);

	spin_unlock_irqrestore(&dlm->lock, flags);
}

/* -----------------------------------------------------------------------------
 * Display List Manager
 */
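/*
 * Summary of the continuous-mode list states handled below, as an
 * illustrative aid derived from the code in this file:
 *
 *	commit:     list -> queued  (previous update already taken by the HW)
 *	            list -> pending (previous update still pending in the HW)
 *	frame end:  queued -> active  (the HW starts processing the list)
 *	            pending -> queued (the pending list is handed to the HW)
 *
 * Promoting a list into the queued or active slot releases the list that
 * previously occupied that slot back to the free pool.
 */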
/**
 * vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt
 * @dlm: the display list manager
 *
 * Return a set of flags that indicates display list completion status.
 *
 * The VSP1_DL_FRAME_END_COMPLETED flag indicates that the previous display
 * list has completed at frame end. If the flag is not returned, display list
 * completion has been delayed by one frame because the display list commit
 * raced with the frame end interrupt. The function always returns with the
 * flag set in single-shot mode as display list processing is then not
 * continuous and races never occur.
 *
 * The following flags are only supported for continuous mode.
 *
 * The VSP1_DL_FRAME_END_INTERNAL flag indicates that the display list that
 * just became active had been queued with the internal notification flag.
 *
 * The VSP1_DL_FRAME_END_WRITEBACK flag indicates that the previously active
 * display list had been queued with the writeback flag.
 */
unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;
	u32 status = vsp1_read(vsp1, VI6_STATUS);
	unsigned int flags = 0;

	spin_lock(&dlm->lock);

	/*
	 * The mem-to-mem pipelines work in single-shot mode. No new display
	 * list can be queued, we don't have to do anything.
	 */
	if (dlm->singleshot) {
		__vsp1_dl_list_put(dlm->active);
		dlm->active = NULL;
		flags |= VSP1_DL_FRAME_END_COMPLETED;
		goto done;
	}

	/*
	 * If the commit operation raced with the interrupt and occurred after
	 * the frame end event but before interrupt processing, the hardware
	 * hasn't taken the update into account yet. We have to skip one frame
	 * and retry.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm))
		goto done;

	/*
	 * Progressive streams report only TOP fields. If we have a BOTTOM
	 * field, we are interlaced, and expect the frame to complete on the
	 * next frame end interrupt.
	 */
	if (status & VI6_STATUS_FLD_STD(dlm->index))
		goto done;

	/*
	 * If the active display list has the writeback flag set, the frame
	 * completion marks the end of the writeback capture. Return the
	 * VSP1_DL_FRAME_END_WRITEBACK flag and reset the display list's
	 * writeback flag.
	 */
	if (dlm->active && (dlm->active->flags & VSP1_DL_FRAME_END_WRITEBACK)) {
		flags |= VSP1_DL_FRAME_END_WRITEBACK;
		dlm->active->flags &= ~VSP1_DL_FRAME_END_WRITEBACK;
	}

	/*
	 * The device starts processing the queued display list right after the
	 * frame end interrupt. The display list thus becomes active.
	 */
	if (dlm->queued) {
		if (dlm->queued->flags & VSP1_DL_FRAME_END_INTERNAL)
			flags |= VSP1_DL_FRAME_END_INTERNAL;
		dlm->queued->flags &= ~VSP1_DL_FRAME_END_INTERNAL;

		__vsp1_dl_list_put(dlm->active);
		dlm->active = dlm->queued;
		dlm->queued = NULL;
		flags |= VSP1_DL_FRAME_END_COMPLETED;
	}

	/*
	 * Now that the VSP has started processing the queued display list, we
	 * can queue the pending display list to the hardware if one has been
	 * prepared.
	 */
	if (dlm->pending) {
		vsp1_dl_list_hw_enqueue(dlm->pending);
		dlm->queued = dlm->pending;
		dlm->pending = NULL;
	}

done:
	spin_unlock(&dlm->lock);

	return flags;
}

/* Hardware Setup */
void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
	unsigned int i;
	u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
		 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
		 | VI6_DL_CTRL_DLE;
	u32 ext_dl = (0x02 << VI6_DL_EXT_CTRL_POLINT_SHIFT)
		   | VI6_DL_EXT_CTRL_DLPRI | VI6_DL_EXT_CTRL_EXT;

	if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
		for (i = 0; i < vsp1->info->wpf_count; ++i)
			vsp1_write(vsp1, VI6_DL_EXT_CTRL(i), ext_dl);
	}

	vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
	vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}

void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;
	size_t list_count;

	spin_lock_irqsave(&dlm->lock, flags);

	__vsp1_dl_list_put(dlm->active);
	__vsp1_dl_list_put(dlm->queued);
	__vsp1_dl_list_put(dlm->pending);

	list_count = list_count_nodes(&dlm->free);
	spin_unlock_irqrestore(&dlm->lock, flags);

	WARN_ON_ONCE(list_count != dlm->list_count);

	dlm->active = NULL;
	dlm->queued = NULL;
	dlm->pending = NULL;
}

struct vsp1_dl_body *vsp1_dlm_dl_body_get(struct vsp1_dl_manager *dlm)
{
	return vsp1_dl_body_get(dlm->pool);
}

struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
					unsigned int index,
					unsigned int prealloc)
{
	struct vsp1_dl_manager *dlm;
	size_t header_size;
	unsigned int i;

	dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
	if (!dlm)
		return NULL;

	dlm->index = index;
	/*
	 * uapi = single-shot mode;
	 * DRM = continuous mode;
	 * VSPX = single-shot mode;
	 */
	dlm->singleshot = vsp1->info->uapi || vsp1->iif;
	dlm->vsp1 = vsp1;

	spin_lock_init(&dlm->lock);
	INIT_LIST_HEAD(&dlm->free);

	/*
	 * Initialize the display list body and allocate DMA memory for the
	 * body and the header. Both are allocated together to avoid memory
	 * fragmentation, with the header located right after the body in
	 * memory. An extra body is allocated on top of the prealloc to
	 * account for the cached body used by the vsp1_pipeline object.
	 */
	header_size = vsp1_feature(vsp1, VSP1_HAS_EXT_DL) ?
			sizeof(struct vsp1_dl_header_extended) :
			sizeof(struct vsp1_dl_header);

	header_size = ALIGN(header_size, 8);

	dlm->pool = vsp1_dl_body_pool_create(vsp1, prealloc + 1,
					     VSP1_DL_NUM_ENTRIES, header_size);
	if (!dlm->pool)
		return NULL;

	for (i = 0; i < prealloc; ++i) {
		struct vsp1_dl_list *dl;

		dl = vsp1_dl_list_alloc(dlm);
		if (!dl) {
			vsp1_dlm_destroy(dlm);
			return NULL;
		}

		/* The extended header immediately follows the header. */
		if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL))
			dl->extension = (void *)dl->header
				      + sizeof(*dl->header);

		list_add_tail(&dl->list, &dlm->free);
	}

	dlm->list_count = prealloc;

	if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
		dlm->cmdpool = vsp1_dl_cmd_pool_create(vsp1,
					VSP1_EXTCMD_AUTOFLD, prealloc);
		if (!dlm->cmdpool) {
			vsp1_dlm_destroy(dlm);
			return NULL;
		}
	}

	return dlm;
}

void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl, *next;

	if (!dlm)
		return;

	list_for_each_entry_safe(dl, next, &dlm->free, list) {
		list_del(&dl->list);
		vsp1_dl_list_free(dl);
	}

	vsp1_dl_body_pool_destroy(dlm->pool);
	vsp1_dl_ext_cmd_pool_destroy(dlm->cmdpool);
}
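/*
 * Illustrative sketch (not driver code): the manager-level API above ties
 * together as follows in a hypothetical user, with error handling omitted:
 *
 *	At initialisation, once per WPF:
 *	dlm = vsp1_dlm_create(vsp1, wpf_index, 8);
 *	vsp1_dlm_setup(vsp1);
 *
 *	Per frame: get a list, fill it, commit it:
 *	dl = vsp1_dl_list_get(dlm);
 *	vsp1_dl_body_write(vsp1_dl_list_get_body0(dl), reg, value);
 *	vsp1_dl_list_commit(dl, 0);
 *
 *	From the frame end interrupt handler:
 *	flags = vsp1_dlm_irq_frame_end(dlm);
 *
 *	At teardown:
 *	vsp1_dlm_destroy(dlm);
 *
 * wpf_index, reg and value are hypothetical placeholders.
 */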