Lines matching "omap-sdma"
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMM IOMMU driver support functions for TI OMAP processors.
 *
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 */

#include <linux/dma-mapping.h>
/* GEOM() format-geometry macro */
		.slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)),	\
		.slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)),	\
/* geom[] per-format descriptor */
	u32 x_shft;	/* unused X-bits (as part of bpp) */
	u32 y_shft;	/* unused Y-bits (as part of bpp) */
/* lookup table for registers w/ per-engine instances */
/* dmm_dma_copy() */
	tx = dmaengine_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
	if (!tx) {
		dev_err(dmm->dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dmm->dev, "Failed to do DMA tx_submit\n");
		return -EIO;
	}

	status = dma_sync_wait(dmm->wa_dma_chan, cookie);
	if (status != DMA_COMPLETE)
		dev_err(dmm->dev, "i878 wa DMA copy failure\n");

	dmaengine_terminate_all(dmm->wa_dma_chan);
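
The function above follows the stock dmaengine memcpy flow: build a descriptor, submit it, then poll for completion. Below is a condensed, self-contained restatement of that flow as a sketch; the channel and DMA addresses are assumed to come from elsewhere, and dmaengine_submit() is the standard wrapper around tx->tx_submit():

	#include <linux/dmaengine.h>

	/* sketch: one synchronous 4-byte copy through a memcpy-capable channel */
	static int sketch_dma_copy_word(struct dma_chan *chan,
					dma_addr_t dst, dma_addr_t src)
	{
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie;

		tx = dmaengine_prep_dma_memcpy(chan, dst, src, 4, 0);
		if (!tx)
			return -EIO;			/* no descriptor available */

		cookie = dmaengine_submit(tx);		/* wraps tx->tx_submit(tx) */
		if (dma_submit_error(cookie))
			return -EIO;

		/* spin until the engine reports completion or times out */
		if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
			return -EIO;

		return 0;
	}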
/* dmm_read_wa() */
	src = dmm->phys_base + reg;
	dst = dmm->wa_dma_handle;

	r = dmm_dma_copy(dmm, src, dst);
	if (r) {
		dev_err(dmm->dev, "sDMA read transfer timeout\n");
		return readl(dmm->base + reg);
	}

	/* make sure the DMA finished writing before reading the bounce buffer */
	rmb();
	return readl((__iomem void *)dmm->wa_dma_data);
/* dmm_write_wa() */
	writel(val, (__iomem void *)dmm->wa_dma_data);
	wmb();	/* data must be in memory before the DMA reads it */

	src = dmm->wa_dma_handle;
	dst = dmm->phys_base + reg;

	r = dmm_dma_copy(dmm, src, dst);
	if (r) {
		dev_err(dmm->dev, "sDMA write transfer timeout\n");
		writel(val, dmm->base + reg);
	}
/* dmm_read() */
	if (dmm->dmm_workaround) {
		u32 v;
		unsigned long flags;

		spin_lock_irqsave(&dmm->wa_lock, flags);
		v = dmm_read_wa(dmm, reg);
		spin_unlock_irqrestore(&dmm->wa_lock, flags);

		return v;
	}

	return readl(dmm->base + reg);
/* dmm_write() */
	if (dmm->dmm_workaround) {
		unsigned long flags;

		spin_lock_irqsave(&dmm->wa_lock, flags);
		dmm_write_wa(dmm, val, reg);
		spin_unlock_irqrestore(&dmm->wa_lock, flags);
	} else {
		writel(val, dmm->base + reg);
	}
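
Callers of dmm_read()/dmm_write() stay oblivious to whether the i878 workaround is active; a register update looks like ordinary MMIO. A minimal usage sketch (the helper and its register-offset parameter are illustrative, not names from this driver):

	/* sketch: read-modify-write through the workaround-aware accessors */
	static void sketch_set_bits(struct dmm *dmm, u32 reg_ofs, u32 bits)
	{
		u32 v = dmm_read(dmm, reg_ofs);	/* may go via sDMA internally */

		dmm_write(dmm, v | bits, reg_ofs);
	}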
/* dmm_workaround_init() */
	spin_lock_init(&dmm->wa_lock);

	dmm->wa_dma_data = dma_alloc_coherent(dmm->dev, sizeof(u32),
					      &dmm->wa_dma_handle, GFP_KERNEL);
	if (!dmm->wa_dma_data)
		return -ENOMEM;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dmm->wa_dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!dmm->wa_dma_chan) {
		dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
		return -ENODEV;
	}
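
The NULL filter above means "take the first memcpy-capable channel". dma_request_channel() also accepts a filter callback when a specific engine is wanted; a hedged sketch of that variant (the device-name match is purely illustrative):

	/* sketch: pick a channel whose DMA device matches a wanted name */
	static bool sketch_filter(struct dma_chan *chan, void *param)
	{
		return strcmp(dev_name(&chan->device->dev), param) == 0;
	}

	static struct dma_chan *sketch_get_named_chan(const char *name)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);

		return dma_request_channel(mask, sketch_filter, (void *)name);
	}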
/* dmm_workaround_uninit() */
	dma_release_channel(dmm->wa_dma_chan);

	dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
/* alloc_dma() */
static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
{
	void *ptr;
	struct refill_engine *engine = txn->engine_handle;

	/* dmm programming requires 16 byte aligned addresses */
	txn->current_pa = round_up(txn->current_pa, 16);
	txn->current_va = (void *)round_up((long)txn->current_va, 16);

	ptr = txn->current_va;
	*pa = txn->current_pa;

	txn->current_pa += sz;
	txn->current_va += sz;

	BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);

	return ptr;
}
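
alloc_dma() is a bump allocator that advances a virtual and a physical cursor in lockstep, rounding both up to the 16-byte alignment the DMM requires. A worked instance of the rounding, with a value chosen for illustration:

	/* sketch: a cursor at 0x1234 is bumped to the next 16-byte boundary */
	static_assert(round_up(0x1234, 16) == 0x1240);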
/* wait_status() */
	struct dmm *dmm = engine->dmm;
	u32 r;
	int i = DMM_FIXED_RETRY_COUNT;

	while (true) {
		r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
		if (r & DMM_PATSTATUS_ERR) {
			dev_err(dmm->dev,
				"%s: error (engine%d). PAT_STATUS: 0x%08x\n",
				__func__, engine->id, r);
			return -EFAULT;
		}

		if ((r & wait_mask) == wait_mask)
			break;

		if (--i == 0) {
			dev_err(dmm->dev,
				"%s: timeout (engine%d). PAT_STATUS: 0x%08x\n",
				__func__, engine->id, r);
			return -ETIMEDOUT;
		}

		udelay(1);
	}
/* release_engine(): put an engine back on the idle list and wake waiters */
	list_add(&engine->idle_node, &omap_dmm->idle_head);

	atomic_inc(&omap_dmm->engine_counter);
	wake_up_interruptible(&omap_dmm->engine_queue);
/* omap_dmm_irq_handler() */
	for (i = 0; i < dmm->num_engines; i++) {
		if (status & DMM_IRQSTAT_ERR_MASK)
			dev_err(dmm->dev,
				"irq error(engine%d): IRQSTAT 0x%02x\n",
				i, status & 0xff);

		if (status & DMM_IRQSTAT_LST) {
			if (dmm->engines[i].async)
				release_engine(&dmm->engines[i]);

			complete(&dmm->engines[i].compl);
		}

		status >>= 8;	/* each engine owns one byte of IRQSTATUS */
	}
/* dmm_txn_init() */
	/* wait until an engine is available */
	ret = wait_event_interruptible(omap_dmm->engine_queue,
		atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
	if (ret)
		return ERR_PTR(ret);

	/* grab an idle engine */
	spin_lock_irqsave(&list_lock, flags);
	if (!list_empty(&dmm->idle_head)) {
		engine = list_entry(dmm->idle_head.next, struct refill_engine,
				    idle_node);
		list_del(&engine->idle_node);
	}
	spin_unlock_irqrestore(&list_lock, flags);

	txn = &engine->txn;
	engine->tcm = tcm;
	txn->engine_handle = engine;
	txn->last_pat = NULL;
	txn->current_va = engine->refill_va;
	txn->current_pa = engine->refill_pa;
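
dmm_txn_init() gates engine allocation with a counting semaphore built from an atomic: atomic_add_unless(&counter, -1, 0) only succeeds while the count is positive, and wait_event_interruptible() sleeps until it does. A self-contained sketch of the acquire half, assuming the driver's struct dmm fields shown above; release_engine() earlier is the matching release, and the file-scope lock here stands in for the driver's list lock:

	static DEFINE_SPINLOCK(sketch_list_lock);	/* stand-in for the driver's list lock */

	/* sketch: claim one refill engine from an idle pool */
	static struct refill_engine *sketch_engine_get(struct dmm *dmm)
	{
		struct refill_engine *engine = NULL;
		unsigned long flags;

		/* sleep until the counter can be decremented, i.e. is > 0 */
		if (wait_event_interruptible(dmm->engine_queue,
				atomic_add_unless(&dmm->engine_counter, -1, 0)))
			return NULL;	/* interrupted by a signal */

		spin_lock_irqsave(&sketch_list_lock, flags);
		if (!list_empty(&dmm->idle_head)) {
			engine = list_first_entry(&dmm->idle_head,
						  struct refill_engine, idle_node);
			list_del(&engine->idle_node);
		}
		spin_unlock_irqrestore(&sketch_list_lock, flags);

		return engine;
	}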
/* dmm_txn_append() */
	struct refill_engine *engine = txn->engine_handle;
	int columns = (1 + area->x1 - area->x0);
	int rows = (1 + area->y1 - area->y0);
	int i = columns * rows;

	pat = alloc_dma(txn, sizeof(*pat), &pat_pa);

	if (txn->last_pat)
		txn->last_pat->next_pa = (u32)pat_pa;

	pat->area = *area;

	/* adjust Y coordinates based off of container parameters */
	pat->area.y0 += engine->tcm->y_offset;
	pat->area.y1 += engine->tcm->y_offset;

	pat->ctrl = (struct pat_ctrl){
			.start = 1,
			.lut_id = engine->tcm->lut_id,
		};

	data = alloc_dma(txn, 4 * i, &data_pa);
	/* FIXME: what if data_pa is more than 32-bit ? */
	pat->data_pa = data_pa;

	while (i--) {
		int n = i + roll;
		if (n >= npages)
			n -= npages;
		data[i] = (pages && pages[n]) ?
			page_to_phys(pages[n]) : engine->dmm->dummy_pa;
	}

	txn->last_pat = pat;
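
Each append links the new PAT descriptor to its predecessor through a physical address (next_pa), so the refill engine can walk the chain without CPU involvement; dummy_pa backs any slot with no real page. A sketch of just the linking step, with struct names following the fragments above:

	/* sketch: append one descriptor to a txn's physically-linked chain */
	static void sketch_chain_append(struct dmm_txn *txn, struct pat *pat,
					dma_addr_t pat_pa)
	{
		if (txn->last_pat)
			txn->last_pat->next_pa = (u32)pat_pa;	/* old tail -> new */

		txn->last_pat = pat;	/* commit later writes next_pa = 0 to terminate */
	}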
/* dmm_txn_commit() */
	struct refill_engine *engine = txn->engine_handle;
	struct dmm *dmm = engine->dmm;

	if (!txn->last_pat) {
		dev_err(engine->dmm->dev, "need at least one txn\n");
		ret = -EINVAL;
		goto cleanup;
	}

	txn->last_pat->next_pa = 0;	/* terminate the descriptor chain */
	wmb();

	/*
	 * NOTE: the wmb() should be enough, but there seems to be a bug
	 * in OMAP's memory barrier implementation, which in some rare cases may
	 * cause the writes not to be observable after wmb(); read back to
	 * ensure the descriptor hit RAM.
	 */
	readl((__iomem void *)&txn->last_pat->next_pa);

	/* clear out any pending transaction, then wait for engine ready */
	dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);
	if (wait_status(engine, DMM_PATSTATUS_READY)) {
		ret = -EFAULT;
		goto cleanup;
	}

	/* async txns are released by the IRQ handler, sync ones by us */
	engine->async = wait ? false : true;
	reinit_completion(&engine->compl);

	/* kick reload */
	dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);

	if (wait &&
	    !wait_for_completion_timeout(&engine->compl,
					 msecs_to_jiffies(100))) {
		dev_err(dmm->dev, "timed out waiting for done\n");
		ret = -ETIMEDOUT;
	}
/* fill() */
	txn = dmm_txn_init(omap_dmm, area->tcm);
	if (IS_ERR_OR_NULL(txn))
		return -ENOMEM;
/* tiler_pin() */
	ret = fill(&block->area, pages, npages, roll, wait);
/* tiler_unpin() */
	return fill(&block->area, NULL, 0, 0, false);
/* tiler_reserve_2d() */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return ERR_PTR(-ENOMEM);

	block->fmt = fmt;

	ret = tcm_reserve_2d(containers[fmt], w, h, align, -1, slot_bytes,
			     &block->area);
	if (ret) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	/* add to allocation list */
	list_add(&block->alloc_node, &omap_dmm->alloc_head);
/* tiler_reserve_1d() */
	int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (!block)
		return ERR_PTR(-ENOMEM);

	block->fmt = TILFMT_PAGE;

	if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
			   &block->area)) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	list_add(&block->alloc_node, &omap_dmm->alloc_head);
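
The 1D path sizes its reservation in whole pages; the shift expression is the open-coded form of DIV_ROUND_UP(size, PAGE_SIZE). A one-line check of the equivalence for a size just over one page:

	/* sketch: (size + PAGE_SIZE - 1) >> PAGE_SHIFT rounds up to full pages */
	static_assert((((PAGE_SIZE + 1) + PAGE_SIZE - 1) >> PAGE_SHIFT) == 2);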
/* tiler_release() */
	int ret = tcm_free(&block->area);

	if (block->area.tcm)
		dev_err(omap_dmm->dev, "failed to release block\n");

	list_del(&block->alloc_node);
/*
 * [28:27] = 0x0 for 8-bit tiled
 *	     0x1 for 16-bit tiled
 *	     0x2 for 32-bit tiled
 *	     0x3 for page mode
 * [31:29] = 0x0 for 0-degree view
 *	     0x1 for 180-degree view + mirroring
 *	     0x2 for 0-degree view + mirroring
 *	     0x3 for 180-degree view
 *	     0x4 for 270-degree view + mirroring
 *	     0x5 for 270-degree view
 *	     0x6 for 90-degree view
 *	     0x7 for 90-degree view + mirroring
 */
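
A worked instance of the layout documented above, assembling the top bits of a system-space address for a 16-bit tiled buffer ([28:27] = 0x1) seen through the 90-degree view ([31:29] = 0x6); the values are taken straight from the table:

	/* sketch: view/format bits for a 16-bit, 90-degree-rotated mapping */
	static u32 sketch_view_top_bits(void)
	{
		u32 view   = 0x6;	/* 90-degree view */
		u32 format = 0x1;	/* 16-bit tiled */

		return (view << 29) | (format << 27);	/* == 0xc8000000 */
	}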
/* tiler_get_address() */
	x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
	y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
/* tiler_ssptr() */
	BUG_ON(!validfmt(block->fmt));

	return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
			block->area.p0.x * geom[block->fmt].slot_w,
			block->area.p0.y * geom[block->fmt].slot_h);
/* tiler_tsptr() */
	struct tcm_pt *p = &block->area.p0;

	BUG_ON(!validfmt(block->fmt));

	return tiler_get_address(block->fmt, orient,
			(p->x * geom[block->fmt].slot_w) + x,
			(p->y * geom[block->fmt].slot_h) + y);
/* tiler_get_cpu_cache_flags() */
	return omap_dmm->plat_data->cpu_cache_flags;
/* omap_dmm_remove() */
	free_irq(omap_dmm->irq, omap_dmm);

	/* free all area regions */
	list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
				 alloc_node) {
		list_del(&block->alloc_node);
		kfree(block);
	}

	for (i = 0; i < omap_dmm->num_lut; i++)
		if (omap_dmm->tcm && omap_dmm->tcm[i])
			omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
	kfree(omap_dmm->tcm);

	kfree(omap_dmm->engines);
	if (omap_dmm->refill_va)
		dma_free_wc(omap_dmm->dev,
			    REFILL_BUFFER_SIZE * omap_dmm->num_engines,
			    omap_dmm->refill_va, omap_dmm->refill_pa);
	if (omap_dmm->dummy_page)
		__free_page(omap_dmm->dummy_page);

	if (omap_dmm->dmm_workaround)
		dmm_workaround_uninit(omap_dmm);

	iounmap(omap_dmm->base);
/* omap_dmm_probe() */
	int ret = -EFAULT, i;

	INIT_LIST_HEAD(&omap_dmm->alloc_head);
	INIT_LIST_HEAD(&omap_dmm->idle_head);

	init_waitqueue_head(&omap_dmm->engine_queue);

	if (dev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_node(dmm_of_match, dev->dev.of_node);
		if (!match) {
			dev_err(&dev->dev, "failed to find matching device node\n");
			ret = -ENODEV;
			goto fail;
		}

		omap_dmm->plat_data = match->data;
	}

	/* lookup hwmod data - base address and irq */
	mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&dev->dev, "failed to get base address resource\n");
		goto fail;
	}

	omap_dmm->phys_base = mem->start;
	omap_dmm->base = ioremap(mem->start, SZ_2K);

	if (!omap_dmm->base) {
		dev_err(&dev->dev, "failed to get dmm base address\n");
		goto fail;
	}

	omap_dmm->irq = platform_get_irq(dev, 0);
	if (omap_dmm->irq < 0)
		goto fail;

	omap_dmm->dev = &dev->dev;

	if (!dmm_workaround_init(omap_dmm)) {
		omap_dmm->dmm_workaround = true;
		dev_info(&dev->dev,
			 "workaround for errata i878 in use\n");
	} else {
		dev_warn(&dev->dev,
			 "failed to initialize work-around for i878\n");
	}

	hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
	omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
	omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
	omap_dmm->container_width = 256;
	omap_dmm->container_height = 128;

	atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);

	/* read out actual LUT width and height */
	pat_geom = dmm_read(omap_dmm, DMM_PAT_GEOMETRY);
	omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
	omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;
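
A worked decode of the geometry register per the shifts above: each 4-bit field counts sections of 32 slots, so a field value of 8 decodes to a 256-slot LUT width, matching the 256x128 container defaults set earlier. The register value here is hypothetical:

	/* sketch: decoding a hypothetical PAT geometry value of 0x04080000 */
	static void sketch_decode_geom(void)
	{
		u32 pat_geom   = 0x04080000;			/* assumed value */
		u32 lut_width  = ((pat_geom >> 16) & 0xF) << 5;	/* 8 * 32 = 256 */
		u32 lut_height = ((pat_geom >> 24) & 0xF) << 5;	/* 4 * 32 = 128 */

		(void)lut_width;
		(void)lut_height;
	}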
	/* increment LUT by one if on OMAP5 */
	/* LUT has twice the height, and is split into a separate container */
	if (omap_dmm->lut_height != omap_dmm->container_height)
		omap_dmm->num_lut++;

	omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!omap_dmm->dummy_page) {
		dev_err(&dev->dev, "could not allocate dummy page\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* set dma mask for device */
	ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto fail;

	omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);

	/* alloc refill memory */
	omap_dmm->refill_va = dma_alloc_wc(&dev->dev,
					   REFILL_BUFFER_SIZE * omap_dmm->num_engines,
					   &omap_dmm->refill_pa, GFP_KERNEL);
	if (!omap_dmm->refill_va) {
		dev_err(&dev->dev, "could not allocate refill memory\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* alloc engines */
	omap_dmm->engines = kcalloc(omap_dmm->num_engines,
				    sizeof(*omap_dmm->engines), GFP_KERNEL);
	if (!omap_dmm->engines) {
		ret = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < omap_dmm->num_engines; i++) {
		omap_dmm->engines[i].id = i;
		omap_dmm->engines[i].dmm = omap_dmm;
		omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
						(REFILL_BUFFER_SIZE * i);
		omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
						(REFILL_BUFFER_SIZE * i);
		init_completion(&omap_dmm->engines[i].compl);

		list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
	}

	omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm),
				GFP_KERNEL);
	if (!omap_dmm->tcm) {
		ret = -ENOMEM;
		goto fail;
	}

	/* init containers */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
					     omap_dmm->container_height);

		if (!omap_dmm->tcm[i]) {
			dev_err(&dev->dev, "failed to allocate container\n");
			ret = -ENOMEM;
			goto fail;
		}

		omap_dmm->tcm[i]->lut_id = i;
	}

	/* assign access mode containers to applicable tcm container */
	/* OMAP 4 has 1 container for all 4 views */
	/* OMAP 5 has 2 containers, 1 for 2D and 1 for 1D */
	containers[TILFMT_8BIT] = omap_dmm->tcm[0];
	containers[TILFMT_16BIT] = omap_dmm->tcm[0];
	containers[TILFMT_32BIT] = omap_dmm->tcm[0];

	if (omap_dmm->container_height != omap_dmm->lut_height) {
		/* second LUT is used for PAGE mode. Programming must use
		   y offset that is added to all y coordinates */
		containers[TILFMT_PAGE] = omap_dmm->tcm[1];
		omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
		omap_dmm->tcm[1]->lut_id = 0;
	} else {
		containers[TILFMT_PAGE] = omap_dmm->tcm[0];
	}

	area = (struct tcm_area) {
		.tcm = NULL,
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};

	ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
			  "omap_dmm_irq_handler", omap_dmm);
	if (ret) {
		dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
			omap_dmm->irq, ret);
		omap_dmm->irq = -1;
		goto fail;
	}

	/* initialize all LUTs to dummy page entries */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(omap_dmm->dev, "refill failed");
	}

	dev_info(omap_dmm->dev, "initialized all PAT entries\n");
/* debugfs map dump helpers */
static const char *special = ".,:;'\"`~!^-+";
/* fill_map() */
	for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
		for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
			if (map[y][x] == ' ' || ovw)
				map[y][x] = c;
/* fill_map_pt() */
	map[p->y / ydiv][p->x / xdiv] = c;
/* read_map_pt() */
	return map[p->y / ydiv][p->x / xdiv];
/* map_width() */
	return (x1 / xdiv) - (x0 / xdiv) + 1;
/* text_map() */
	int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
/* map_1d_info() */
	if (a->p0.y + 1 < a->p1.y) {
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
			 256 - 1);
	} else if (a->p0.y < a->p1.y) {
		if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
			text_map(map, xdiv, nice, a->p0.y / ydiv,
				 a->p0.x + xdiv, 256 - 1);
		else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
			text_map(map, xdiv, nice, a->p1.y / ydiv,
				 0, a->p1.y - xdiv);
	} else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
		text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
	}
/* map_2d_info() */
	if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
			 a->p0.x, a->p1.x);
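
text_map() centers a label inside a span of the ASCII map: it converts the x-range to map cells, subtracts the label length, and starts writing at half the slack. A self-contained sketch of that centering, assuming the row layout used by the fragments (one NUL-terminated char array per row):

	/* sketch: write a centered label into row y between x0 and x1 */
	static void sketch_text_map(char **map, int xdiv, const char *label,
				    int y, int x0, int x1)
	{
		char *p = map[y] + (x0 / xdiv);
		int w = (map_width(xdiv, x0, x1) - strlen(label)) / 2;

		if (w >= 0) {
			p += w;				/* half the slack on the left */
			while (*label)
				*p++ = *label++;	/* overwrite map cells */
		}
	}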
/* tiler_map_show() */
	h_adj = omap_dmm->container_height / ydiv;
	w_adj = omap_dmm->container_width / xdiv;

	for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
		for (i = 0; i < omap_dmm->container_height; i++) {
			map[i] = global_map + i * (w_adj + 1);
			map[i][w_adj] = 0;
		}

		list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
			if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
				if (block->fmt != TILFMT_PAGE) {
					fill_map(map, xdiv, ydiv, &block->area,
						 *m2dp, true);
					map_2d_info(map, xdiv, ydiv, nice,
						    &block->area);
				} else {
					bool start = read_map_pt(map, xdiv,
						ydiv, &block->area.p0) == ' ';
					bool end = read_map_pt(map, xdiv, ydiv,
						&block->area.p1) == ' ';

					tcm_for_each_slice(a, block->area, p)
						fill_map(map, xdiv, ydiv, &a,
							 '=', true);
					fill_map_pt(map, xdiv, ydiv,
						    &block->area.p0,
						    start ? '<' : 'X');
					fill_map_pt(map, xdiv, ydiv,
						    &block->area.p1,
						    end ? '>' : 'X');
					map_1d_info(map, xdiv, ydiv, nice,
						    &block->area);
				}
			}
		}

		dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n", lut_idx);
		for (i = 0; i < 128; i++)
			dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
		dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n", lut_idx);
	}
/* omap_dmm_resume() */
	if (!omap_dmm)
		return -ENODEV;

	area = (struct tcm_area) {
		.tcm = NULL,
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};

	/* initialize all LUTs to dummy page entries */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(dev, "refill failed");
	}
/* dmm_of_match[] */
	{
		.compatible = "ti,omap4-dmm",
		.data = &dmm_omap4_platform_data,
	},
	{
		.compatible = "ti,omap5-dmm",
		.data = &dmm_omap5_platform_data,
	},
MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");