// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/slab.h>

#include <drm/lima_drm.h>

#include "lima_device.h"
#include "lima_pp.h"
#include "lima_dlbu.h"
#include "lima_bcast.h"
#include "lima_vm.h"
#include "lima_regs.h"

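/*
 * Register accessors for a single PP core; both expect a local
 * "struct lima_ip *ip" to be in scope and address registers relative to
 * that core's iomem mapping.
 */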
#define pp_write(reg, data) writel(data, ip->iomem + reg)
#define pp_read(reg) readl(ip->iomem + reg)

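/*
 * Interrupt state handling shared by the per-core and broadcast IRQ
 * handlers: on an error interrupt, flag the pipe as failed and mask the
 * core's interrupts ahead of the hard reset done in the error path, then
 * acknowledge whatever state was raised.
 */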
static void lima_pp_handle_irq(struct lima_ip *ip, u32 state)
{
	struct lima_device *dev = ip->dev;
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;

	if (state & LIMA_PP_IRQ_MASK_ERROR) {
		u32 status = pp_read(LIMA_PP_STATUS);

		dev_err(dev->dev, "%s error irq state=%x status=%x\n",
			lima_ip_name(ip), state, status);

		pipe->error = true;

		/* mask all interrupts before hard reset */
		pp_write(LIMA_PP_INT_MASK, 0);
	}

	pp_write(LIMA_PP_INT_CLEAR, state);
}

static irqreturn_t lima_pp_irq_handler(int irq, void *data)
{
	struct lima_ip *ip = data;
	struct lima_device *dev = ip->dev;
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
	u32 state = pp_read(LIMA_PP_INT_STATUS);

	/* for shared irq case */
	if (!state)
		return IRQ_NONE;

	lima_pp_handle_irq(ip, state);

	if (atomic_dec_and_test(&pipe->task))
		lima_sched_pipe_task_done(pipe);

	return IRQ_HANDLED;
}

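/*
 * Broadcast IRQ handler used with the Mali-450 PP broadcast unit: one
 * interrupt covers all PP cores working on the current frame, so every
 * core that is not yet marked done has to be checked individually.
 */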
static irqreturn_t lima_pp_bcast_irq_handler(int irq, void *data)
{
	int i;
	irqreturn_t ret = IRQ_NONE;
	struct lima_ip *pp_bcast = data;
	struct lima_device *dev = pp_bcast->dev;
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
	struct drm_lima_m450_pp_frame *frame;

	/* for shared irq case */
	if (!pipe->current_task)
		return IRQ_NONE;

	frame = pipe->current_task->frame;

	for (i = 0; i < frame->num_pp; i++) {
		struct lima_ip *ip = pipe->processor[i];
		u32 status, state;

		if (pipe->done & (1 << i))
			continue;

		/* read status before the interrupt state, otherwise a state
		 * change in between could make us miss handling the interrupt
		 */
		status = pp_read(LIMA_PP_STATUS);
		state = pp_read(LIMA_PP_INT_STATUS);

		if (state) {
			lima_pp_handle_irq(ip, state);
			ret = IRQ_HANDLED;
		} else {
			if (status & LIMA_PP_STATUS_RENDERING_ACTIVE)
				continue;
		}

		pipe->done |= (1 << i);
		if (atomic_dec_and_test(&pipe->task))
			lima_sched_pipe_task_done(pipe);
	}

	return ret;
}

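/*
 * Soft reset is split in two so it can run while the CPU prepares the next
 * job: lima_pp_soft_reset_async() only kicks the reset, and
 * lima_pp_soft_reset_async_wait() polls for completion right before the
 * core is used again.
 */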
static void lima_pp_soft_reset_async(struct lima_ip *ip)
{
	if (ip->data.async_reset)
		return;

	pp_write(LIMA_PP_INT_MASK, 0);
	pp_write(LIMA_PP_INT_RAWSTAT, LIMA_PP_IRQ_MASK_ALL);
	pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_SOFT_RESET);
	ip->data.async_reset = true;
}

static int lima_pp_soft_reset_poll(struct lima_ip *ip)
{
	return !(pp_read(LIMA_PP_STATUS) & LIMA_PP_STATUS_RENDERING_ACTIVE) &&
		pp_read(LIMA_PP_INT_RAWSTAT) == LIMA_PP_IRQ_RESET_COMPLETED;
}

static int lima_pp_soft_reset_async_wait_one(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int ret;

	ret = lima_poll_timeout(ip, lima_pp_soft_reset_poll, 0, 100);
	if (ret) {
		dev_err(dev->dev, "%s reset timeout\n", lima_ip_name(ip));
		return ret;
	}

	pp_write(LIMA_PP_INT_CLEAR, LIMA_PP_IRQ_MASK_ALL);
	pp_write(LIMA_PP_INT_MASK, LIMA_PP_IRQ_MASK_USED);
	return 0;
}

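/*
 * For the broadcast core the reset was broadcast to every PP used by the
 * current frame, so wait for each of those cores instead of the broadcast
 * core itself.
 */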
static int lima_pp_soft_reset_async_wait(struct lima_ip *ip)
{
	int i, err = 0;

	if (!ip->data.async_reset)
		return 0;

	if (ip->id == lima_ip_pp_bcast) {
		struct lima_device *dev = ip->dev;
		struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
		struct drm_lima_m450_pp_frame *frame = pipe->current_task->frame;

		for (i = 0; i < frame->num_pp; i++)
			err |= lima_pp_soft_reset_async_wait_one(pipe->processor[i]);
	} else
		err = lima_pp_soft_reset_async_wait_one(ip);

	ip->data.async_reset = false;
	return err;
}

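/*
 * Copy a frame's register values into one core: the frame registers first,
 * then the register sets of the three write-back units.
 */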
static void lima_pp_write_frame(struct lima_ip *ip, u32 *frame, u32 *wb)
{
	int i, j, n = 0;

	for (i = 0; i < LIMA_PP_FRAME_REG_NUM; i++)
		writel(frame[i], ip->iomem + LIMA_PP_FRAME + i * 4);

	for (i = 0; i < 3; i++) {
		for (j = 0; j < LIMA_PP_WB_REG_NUM; j++)
			writel(wb[n++], ip->iomem + LIMA_PP_WB(i) + j * 4);
	}
}

static int lima_pp_bus_stop_poll(struct lima_ip *ip)
{
	return !!(pp_read(LIMA_PP_STATUS) & LIMA_PP_STATUS_BUS_STOPPED);
}

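/*
 * LIMA_PP_PERF_CNT_0_LIMIT doubles as a scratch register during hard reset:
 * lima_pp_hard_reset() parks a marker value in it before asserting the
 * reset, and this poll keeps writing a different marker until it reads
 * back, which is taken as the core accepting register writes again.
 */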
static int lima_pp_hard_reset_poll(struct lima_ip *ip)
{
	pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0xC01A0000);
	return pp_read(LIMA_PP_PERF_CNT_0_LIMIT) == 0xC01A0000;
}

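/*
 * Full recovery path used from the task error handler: stop the bus, force
 * a reset, then restore the interrupt mask for normal operation.
 */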
static int lima_pp_hard_reset(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int ret;

	pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0xC0FFE000);
	pp_write(LIMA_PP_INT_MASK, 0);

	pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_STOP_BUS);
	ret = lima_poll_timeout(ip, lima_pp_bus_stop_poll, 10, 100);
	if (ret) {
		dev_err(dev->dev, "%s bus stop timeout\n", lima_ip_name(ip));
		return ret;
	}

	pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_FORCE_RESET);
	ret = lima_poll_timeout(ip, lima_pp_hard_reset_poll, 10, 100);
	if (ret) {
		dev_err(dev->dev, "%s hard reset timeout\n", lima_ip_name(ip));
		return ret;
	}

	pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0);
	pp_write(LIMA_PP_INT_CLEAR, LIMA_PP_IRQ_MASK_ALL);
	pp_write(LIMA_PP_INT_MASK, LIMA_PP_IRQ_MASK_USED);

	/*
	 * if there was an async soft reset queued,
	 * don't wait for it in the next job
	 */
	ip->data.async_reset = false;

	return 0;
}

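/*
 * LIMA_PP_VERSION holds a product ID in its upper 16 bits and the core
 * revision below it (major in bits 15:8, minor in bits 7:0).
 */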
static void lima_pp_print_version(struct lima_ip *ip)
{
	u32 version, major, minor;
	char *name;

	version = pp_read(LIMA_PP_VERSION);
	major = (version >> 8) & 0xFF;
	minor = version & 0xFF;
	switch (version >> 16) {
	case 0xC807:
		name = "mali200";
		break;
	case 0xCE07:
		name = "mali300";
		break;
	case 0xCD07:
		name = "mali400";
		break;
	case 0xCF07:
		name = "mali450";
		break;
	default:
		name = "unknown";
		break;
	}
	dev_info(ip->dev->dev, "%s - %s version major %d minor %d\n",
		 lima_ip_name(ip), name, major, minor);
}

static int lima_pp_hw_init(struct lima_ip *ip)
{
	ip->data.async_reset = false;
	lima_pp_soft_reset_async(ip);
	return lima_pp_soft_reset_async_wait(ip);
}

int lima_pp_resume(struct lima_ip *ip)
{
	return lima_pp_hw_init(ip);
}

void lima_pp_suspend(struct lima_ip *ip)
{

}

int lima_pp_init(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int err;

	lima_pp_print_version(ip);

	err = lima_pp_hw_init(ip);
	if (err)
		return err;

	err = devm_request_irq(dev->dev, ip->irq, lima_pp_irq_handler,
			       IRQF_SHARED, lima_ip_name(ip), ip);
	if (err) {
		dev_err(dev->dev, "%s failed to request irq\n",
			lima_ip_name(ip));
		return err;
	}

	dev->pp_version = pp_read(LIMA_PP_VERSION);

	return 0;
}

void lima_pp_fini(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;

	devm_free_irq(dev->dev, ip->irq, ip);
}

int lima_pp_bcast_resume(struct lima_ip *ip)
{
	/* PP has been reset by individual PP resume */
	ip->data.async_reset = false;
	return 0;
}

void lima_pp_bcast_suspend(struct lima_ip *ip)
{

}

int lima_pp_bcast_init(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int err;

	err = devm_request_irq(dev->dev, ip->irq, lima_pp_bcast_irq_handler,
			       IRQF_SHARED, lima_ip_name(ip), ip);
	if (err) {
		dev_err(dev->dev, "%s failed to request irq\n",
			lima_ip_name(ip));
		return err;
	}

	return 0;
}

void lima_pp_bcast_fini(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;

	devm_free_irq(dev->dev, ip->irq, ip);
}

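/*
 * Frames come from userspace via the submit ioctl, so reject PP counts
 * that don't match the hardware and frames with non-zero reserved padding.
 */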
static int lima_pp_task_validate(struct lima_sched_pipe *pipe,
				 struct lima_sched_task *task)
{
	u32 num_pp;

	if (pipe->bcast_processor) {
		struct drm_lima_m450_pp_frame *f = task->frame;

		num_pp = f->num_pp;

		if (f->_pad)
			return -EINVAL;
	} else {
		struct drm_lima_m400_pp_frame *f = task->frame;

		num_pp = f->num_pp;
	}

	if (num_pp == 0 || num_pp > pipe->num_processor)
		return -EINVAL;

	return 0;
}

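/*
 * Two submission paths: with a broadcast processor (Mali-450) the frame is
 * written once through the broadcast unit, optionally load-balanced by the
 * DLBU, and only the per-core stack/PLBU array addresses are programmed
 * individually; without it (Mali-400) each core gets its own copy of the
 * frame registers.
 */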
static void lima_pp_task_run(struct lima_sched_pipe *pipe,
			     struct lima_sched_task *task)
{
	if (pipe->bcast_processor) {
		struct drm_lima_m450_pp_frame *frame = task->frame;
		struct lima_device *dev = pipe->bcast_processor->dev;
		struct lima_ip *ip = pipe->bcast_processor;
		int i;

		pipe->done = 0;
		atomic_set(&pipe->task, frame->num_pp);

		if (frame->use_dlbu) {
			lima_dlbu_enable(dev, frame->num_pp);

			frame->frame[LIMA_PP_FRAME >> 2] = LIMA_VA_RESERVE_DLBU;
			lima_dlbu_set_reg(dev->ip + lima_ip_dlbu, frame->dlbu_regs);
		} else
			lima_dlbu_disable(dev);

		lima_bcast_enable(dev, frame->num_pp);

		lima_pp_soft_reset_async_wait(ip);

		lima_pp_write_frame(ip, frame->frame, frame->wb);

		for (i = 0; i < frame->num_pp; i++) {
			struct lima_ip *ip = pipe->processor[i];

			pp_write(LIMA_PP_STACK, frame->fragment_stack_address[i]);
			if (!frame->use_dlbu)
				pp_write(LIMA_PP_FRAME, frame->plbu_array_address[i]);
		}

		pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_START_RENDERING);
	} else {
		struct drm_lima_m400_pp_frame *frame = task->frame;
		int i;

		atomic_set(&pipe->task, frame->num_pp);

		for (i = 0; i < frame->num_pp; i++) {
			struct lima_ip *ip = pipe->processor[i];

			frame->frame[LIMA_PP_FRAME >> 2] =
				frame->plbu_array_address[i];
			frame->frame[LIMA_PP_STACK >> 2] =
				frame->fragment_stack_address[i];

			lima_pp_soft_reset_async_wait(ip);

			lima_pp_write_frame(ip, frame->frame, frame->wb);

			pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_START_RENDERING);
		}
	}
}

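/*
 * Kick the asynchronous soft reset at the end of a job so the cores are
 * already resetting while the next job is being set up.
 */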
static void lima_pp_task_fini(struct lima_sched_pipe *pipe)
{
	if (pipe->bcast_processor)
		lima_pp_soft_reset_async(pipe->bcast_processor);
	else {
		int i;

		for (i = 0; i < pipe->num_processor; i++)
			lima_pp_soft_reset_async(pipe->processor[i]);
	}
}

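/*
 * Error/timeout recovery: log each core's state, hard reset all of them
 * and reset the broadcast unit when present.
 */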
static void lima_pp_task_error(struct lima_sched_pipe *pipe)
{
	int i;

	for (i = 0; i < pipe->num_processor; i++) {
		struct lima_ip *ip = pipe->processor[i];

		dev_err(ip->dev->dev, "%s task error %d int_state=%x status=%x\n",
			lima_ip_name(ip), i, pp_read(LIMA_PP_INT_STATUS),
			pp_read(LIMA_PP_STATUS));

		lima_pp_hard_reset(ip);
	}

	if (pipe->bcast_processor)
		lima_bcast_reset(pipe->bcast_processor);
}

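/*
 * Called from the MMU fault path: the faulting core won't signal
 * completion on its own, so account for it here to let the scheduler
 * finish the job.
 */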
static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe)
{
	if (atomic_dec_and_test(&pipe->task))
		lima_sched_pipe_task_done(pipe);
}

static void lima_pp_task_mask_irq(struct lima_sched_pipe *pipe)
{
	int i;

	for (i = 0; i < pipe->num_processor; i++) {
		struct lima_ip *ip = pipe->processor[i];

		pp_write(LIMA_PP_INT_MASK, 0);
	}

	if (pipe->bcast_processor)
		lima_bcast_mask_irq(pipe->bcast_processor);
}

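/*
 * The task slab is shared by every PP pipe and refcounted across
 * init/fini; the frame portion of each task is whitelisted for usercopy
 * since it is filled straight from the submit ioctl.
 */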
static struct kmem_cache *lima_pp_task_slab;
static int lima_pp_task_slab_refcnt;

int lima_pp_pipe_init(struct lima_device *dev)
{
	int frame_size;
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;

	if (dev->id == lima_gpu_mali400)
		frame_size = sizeof(struct drm_lima_m400_pp_frame);
	else
		frame_size = sizeof(struct drm_lima_m450_pp_frame);

	if (!lima_pp_task_slab) {
		lima_pp_task_slab = kmem_cache_create_usercopy(
			"lima_pp_task", sizeof(struct lima_sched_task) + frame_size,
			0, SLAB_HWCACHE_ALIGN, sizeof(struct lima_sched_task),
			frame_size, NULL);
		if (!lima_pp_task_slab)
			return -ENOMEM;
	}
	lima_pp_task_slab_refcnt++;

	pipe->frame_size = frame_size;
	pipe->task_slab = lima_pp_task_slab;

	pipe->task_validate = lima_pp_task_validate;
	pipe->task_run = lima_pp_task_run;
	pipe->task_fini = lima_pp_task_fini;
	pipe->task_error = lima_pp_task_error;
	pipe->task_mmu_error = lima_pp_task_mmu_error;
	pipe->task_mask_irq = lima_pp_task_mask_irq;

	return 0;
}

void lima_pp_pipe_fini(struct lima_device *dev)
{
	if (!--lima_pp_task_slab_refcnt) {
		kmem_cache_destroy(lima_pp_task_slab);
		lima_pp_task_slab = NULL;
	}
}
504