/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/memory.h>
#include <subdev/mc.h>

#include <nvif/class.h>

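/* Enable/disable delivery of fault buffer interrupts by (un)masking the
 * FAULT subdev's interrupt line at the master controller (MC).
 */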
void
gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, 0, enable);
}

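/* Disable the fault buffer by clearing the enable bit in its control
 * register (the same bit set by gp100_fault_buffer_init() below).
 */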
void
gp100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	nvkm_mask(device, 0x002a70, 0x00000001, 0x00000000);
}

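/* Point the hardware at the fault buffer (upper then lower 32 bits of its
 * address) and set the enable bit in the control register.
 */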
void
gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	nvkm_wr32(device, 0x002a74, upper_32_bits(buffer->addr));
	nvkm_wr32(device, 0x002a70, lower_32_bits(buffer->addr));
	nvkm_mask(device, 0x002a70, 0x00000001, 0x00000001);
}

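/* Return the BAR2 address of the fault buffer's backing memory. */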
u64 gp100_fault_buffer_pin(struct nvkm_fault_buffer *buffer)
{
	return nvkm_memory_bar2(buffer->mem);
}

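/* Read the fault buffer's entry count from the hardware, and record the
 * register offsets of its GET and PUT pointers.
 */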
void
gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
{
	buffer->entries = nvkm_rd32(buffer->fault->subdev.device, 0x002a78);
	buffer->get = 0x002a7c;
	buffer->put = 0x002a80;
}

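/* Top-level fault interrupt handler: signal to anyone listening for fault
 * buffer events that buffer 0 has pending entries.
 */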
void
gp100_fault_intr(struct nvkm_fault *fault)
{
	nvkm_event_ntfy(&fault->event, 0, NVKM_FAULT_BUFFER_EVENT_PENDING);
}

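/* GP100 fault handling: a single fault buffer with 32-byte entries, exposed
 * to userspace via the MAXWELL_FAULT_BUFFER_A class.
 */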
static const struct nvkm_fault_func
gp100_fault = {
	.intr = gp100_fault_intr,
	.buffer.nr = 1,
	.buffer.entry_size = 32,
	.buffer.info = gp100_fault_buffer_info,
	.buffer.pin = gp100_fault_buffer_pin,
	.buffer.init = gp100_fault_buffer_init,
	.buffer.fini = gp100_fault_buffer_fini,
	.buffer.intr = gp100_fault_buffer_intr,
	.user = { { 0, 0, MAXWELL_FAULT_BUFFER_A }, 0 },
};

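/* Create the GP100 FAULT subdev, using the function table above. */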
int
gp100_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
		struct nvkm_fault **pfault)
{
	return nvkm_fault_new_(&gp100_fault, device, type, inst, pfault);
}
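
/* Usage sketch (illustrative, not part of this file): the constructor above
 * is expected to be referenced from a per-chipset nvkm_device_chip table in
 * engine/device/base.c, roughly as:
 *
 *	.fault = { 0x00000001, gp100_fault_new },
 *
 * The exact table layout varies between kernel versions.
 */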