xref: /linux/drivers/gpu/drm/lima/lima_mmu.c (revision cfee1b50775869de9076d021ea11a8438854dcba)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
3 
4 #include <linux/interrupt.h>
5 #include <linux/iopoll.h>
6 #include <linux/device.h>
7 
8 #include "lima_device.h"
9 #include "lima_mmu.h"
10 #include "lima_vm.h"
11 #include "lima_regs.h"
12 
/*
 * MMIO accessors for one MMU instance.  Both expect a local variable
 * named "ip" (struct lima_ip *) to be in scope at the call site.
 */
#define mmu_write(reg, data) writel(data, ip->iomem + reg)
#define mmu_read(reg) readl(ip->iomem + reg)
15 
/*
 * Write @cmd to the MMU COMMAND register, then poll register @addr
 * (reading into @val) until @cond is true, with a 100us timeout and no
 * delay between reads.  On timeout an error naming the IP and command
 * is logged.  Evaluates to the poll result: 0 on success, negative
 * errno on timeout.  Expects locals "ip" and "dev" in scope.
 */
#define lima_mmu_send_command(cmd, addr, val, cond)	     \
({							     \
	int __ret;					     \
							     \
	mmu_write(LIMA_MMU_COMMAND, cmd);		     \
	__ret = readl_poll_timeout(ip->iomem + (addr), val,  \
				  cond, 0, 100);	     \
	if (__ret)					     \
		dev_err(dev->dev,			     \
			"%s command %x timeout\n",           \
			lima_ip_name(ip), cmd);              \
	__ret;						     \
})
29 
/*
 * MMU interrupt handler: report page faults / bus errors, then hand the
 * affected scheduler pipe over to MMU-error recovery.
 */
static irqreturn_t lima_mmu_irq_handler(int irq, void *data)
{
	struct lima_ip *ip = data;
	struct lima_device *dev = ip->dev;
	u32 status = mmu_read(LIMA_MMU_INT_STATUS);
	struct lima_sched_pipe *pipe;

	/* for shared irq case */
	if (!status)
		return IRQ_NONE;

	if (status & LIMA_MMU_INT_PAGE_FAULT) {
		/* faulting virtual address, as latched by the hardware */
		u32 fault = mmu_read(LIMA_MMU_PAGE_FAULT_ADDR);

		dev_err(dev->dev, "%s page fault at 0x%x from bus id %d of type %s\n",
			lima_ip_name(ip), fault, LIMA_MMU_STATUS_BUS_ID(status),
			status & LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE ? "write" : "read");
	}

	if (status & LIMA_MMU_INT_READ_BUS_ERROR)
		dev_err(dev->dev, "%s irq bus error\n", lima_ip_name(ip));

	/* mask all interrupts before resume */
	mmu_write(LIMA_MMU_INT_MASK, 0);
	mmu_write(LIMA_MMU_INT_CLEAR, status);

	/* GP MMU faults go to the GP pipe, all PP MMU faults to the PP pipe */
	pipe = dev->pipe + (ip->id == lima_ip_gpmmu ? lima_pipe_gp : lima_pipe_pp);
	lima_sched_pipe_mmu_error(pipe);

	return IRQ_HANDLED;
}
61 
/*
 * Bring the MMU hardware to a known working state: hard reset, unmask
 * fault/bus-error interrupts, point the page directory at the device's
 * empty VM and enable paging.  Returns 0 on success or a negative errno
 * if a polled command times out.
 */
static int lima_mmu_hw_init(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int err;
	u32 v;

	/*
	 * HARD_RESET is written directly, then once more by the polled
	 * helper, which waits for DTE_ADDR to read back as 0 — the value
	 * the reset is expected to leave there.
	 */
	mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_HARD_RESET);
	err = lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
				    LIMA_MMU_DTE_ADDR, v, v == 0);
	if (err)
		return err;

	mmu_write(LIMA_MMU_INT_MASK,
		  LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
	/* start from the empty VM; lima_mmu_switch_vm() installs real ones */
	mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
	return lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
				     LIMA_MMU_STATUS, v,
				     v & LIMA_MMU_STATUS_PAGING_ENABLED);
}
81 
82 int lima_mmu_resume(struct lima_ip *ip)
83 {
84 	if (ip->id == lima_ip_ppmmu_bcast)
85 		return 0;
86 
87 	return lima_mmu_hw_init(ip);
88 }
89 
void lima_mmu_suspend(struct lima_ip *ip)
{
	/*
	 * Intentionally empty: no MMU state is worth saving here, as
	 * lima_mmu_resume() reprograms the hardware from scratch.
	 */
}
94 
/*
 * One-time MMU setup: sanity-check the DTE_ADDR register, request the
 * (possibly shared) interrupt line and program the hardware.  Returns 0
 * on success, -EIO if the register write test fails, or the error from
 * the irq request / hardware init.
 */
int lima_mmu_init(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int err;

	/* the PP broadcast unit is not backed by its own MMU instance */
	if (ip->id == lima_ip_ppmmu_bcast)
		return 0;

	/*
	 * Write test: a scratch value is written to DTE_ADDR and must
	 * read back with the low 12 bits masked off (0xCAFEBABE ->
	 * 0xCAFEB000), since the register only holds a page-aligned
	 * directory address.
	 */
	mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE);
	if (mmu_read(LIMA_MMU_DTE_ADDR) != 0xCAFEB000) {
		dev_err(dev->dev, "%s dte write test fail\n", lima_ip_name(ip));
		return -EIO;
	}

	/* devm-managed: released automatically, or early via lima_mmu_fini() */
	err = devm_request_irq(dev->dev, ip->irq, lima_mmu_irq_handler,
			       IRQF_SHARED, lima_ip_name(ip), ip);
	if (err) {
		dev_err(dev->dev, "%s fail to request irq\n", lima_ip_name(ip));
		return err;
	}

	return lima_mmu_hw_init(ip);
}
118 
119 void lima_mmu_fini(struct lima_ip *ip)
120 {
121 	struct lima_device *dev = ip->dev;
122 
123 	if (ip->id == lima_ip_ppmmu_bcast)
124 		return;
125 
126 	devm_free_irq(dev->dev, ip->irq, ip);
127 }
128 
/* Invalidate all cached translations by issuing a ZAP_CACHE command. */
void lima_mmu_flush_tlb(struct lima_ip *ip)
{
	mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE);
}
133 
/*
 * Point the MMU at a different VM's page directory.  The MMU is stalled
 * around the switch so no translation uses a half-updated state, and
 * the TLB is zapped so stale entries from the old VM cannot survive.
 * Command timeouts are only logged (inside lima_mmu_send_command), not
 * propagated — callers have no error path here.
 */
void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm)
{
	struct lima_device *dev = ip->dev;
	u32 v;

	lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_STALL,
			      LIMA_MMU_STATUS, v,
			      v & LIMA_MMU_STATUS_STALL_ACTIVE);

	/* install the new page directory base (dma address of the PD) */
	mmu_write(LIMA_MMU_DTE_ADDR, vm->pd.dma);

	/* flush the TLB */
	mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE);

	lima_mmu_send_command(LIMA_MMU_COMMAND_DISABLE_STALL,
			      LIMA_MMU_STATUS, v,
			      !(v & LIMA_MMU_STATUS_STALL_ACTIVE));
}
152 
/*
 * Recover an MMU stuck in page-fault state (called from scheduler error
 * handling after lima_mmu_irq_handler() masked its interrupts).  The
 * unit is hard-reset, reprogrammed with the empty VM and paging is
 * re-enabled, mirroring lima_mmu_hw_init().
 */
void lima_mmu_page_fault_resume(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	u32 status = mmu_read(LIMA_MMU_STATUS);
	u32 v;

	if (status & LIMA_MMU_STATUS_PAGE_FAULT_ACTIVE) {
		dev_info(dev->dev, "%s resume\n", lima_ip_name(ip));

		mmu_write(LIMA_MMU_INT_MASK, 0);
		/*
		 * Scratch value in DTE_ADDR before the reset; the reset
		 * is expected to clear it, which the poll below checks
		 * (v == 0).
		 */
		mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE);
		lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
				      LIMA_MMU_DTE_ADDR, v, v == 0);
		mmu_write(LIMA_MMU_INT_MASK, LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
		mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
		lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
				      LIMA_MMU_STATUS, v,
				      v & LIMA_MMU_STATUS_PAGING_ENABLED);
	}
}
173