xref: /linux/drivers/gpu/drm/xe/xe_reg_sr.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_reg_sr.h"
7 
8 #include <kunit/visibility.h>
9 #include <linux/align.h>
10 #include <linux/string_helpers.h>
11 #include <linux/xarray.h>
12 
13 #include <drm/drm_managed.h>
14 #include <drm/drm_print.h>
15 
16 #include "xe_device.h"
17 #include "xe_device_types.h"
18 #include "xe_force_wake.h"
19 #include "xe_gt_mcr.h"
20 #include "xe_gt_printk.h"
21 #include "xe_gt_types.h"
22 #include "xe_hw_engine_types.h"
23 #include "xe_mmio.h"
24 #include "xe_rtp_types.h"
25 
reg_sr_fini(struct drm_device * drm,void * arg)26 static void reg_sr_fini(struct drm_device *drm, void *arg)
27 {
28 	struct xe_reg_sr *sr = arg;
29 	struct xe_reg_sr_entry *entry;
30 	unsigned long reg;
31 
32 	xa_for_each(&sr->xa, reg, entry)
33 		kfree(entry);
34 
35 	xa_destroy(&sr->xa);
36 }
37 
xe_reg_sr_init(struct xe_reg_sr * sr,const char * name,struct xe_device * xe)38 int xe_reg_sr_init(struct xe_reg_sr *sr, const char *name, struct xe_device *xe)
39 {
40 	xa_init(&sr->xa);
41 	sr->name = name;
42 
43 	return drmm_add_action_or_reset(&xe->drm, reg_sr_fini, sr);
44 }
45 EXPORT_SYMBOL_IF_KUNIT(xe_reg_sr_init);
46 
compatible_entries(const struct xe_reg_sr_entry * e1,const struct xe_reg_sr_entry * e2)47 static bool compatible_entries(const struct xe_reg_sr_entry *e1,
48 			       const struct xe_reg_sr_entry *e2)
49 {
50 	/*
51 	 * Don't allow overwriting values: clr_bits/set_bits should be disjoint
52 	 * when operating in the same register
53 	 */
54 	if (e1->clr_bits & e2->clr_bits || e1->set_bits & e2->set_bits ||
55 	    e1->clr_bits & e2->set_bits || e1->set_bits & e2->clr_bits)
56 		return false;
57 
58 	if (e1->reg.raw != e2->reg.raw)
59 		return false;
60 
61 	return true;
62 }
63 
/*
 * Account one discarded entry. The counter is only compiled in for KUnit
 * builds (the preprocessor guard presumably matches a conditionally-declared
 * errors field in struct xe_reg_sr — defined elsewhere); production builds
 * compile this to a no-op.
 */
static void reg_sr_inc_error(struct xe_reg_sr *sr)
{
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
	sr->errors++;
#endif
}
70 
xe_reg_sr_add(struct xe_reg_sr * sr,const struct xe_reg_sr_entry * e,struct xe_gt * gt)71 int xe_reg_sr_add(struct xe_reg_sr *sr,
72 		  const struct xe_reg_sr_entry *e,
73 		  struct xe_gt *gt)
74 {
75 	unsigned long idx = e->reg.addr;
76 	struct xe_reg_sr_entry *pentry = xa_load(&sr->xa, idx);
77 	int ret;
78 
79 	if (pentry) {
80 		if (!compatible_entries(pentry, e)) {
81 			ret = -EINVAL;
82 			goto fail;
83 		}
84 
85 		pentry->clr_bits |= e->clr_bits;
86 		pentry->set_bits |= e->set_bits;
87 		pentry->read_mask |= e->read_mask;
88 
89 		return 0;
90 	}
91 
92 	pentry = kmalloc_obj(*pentry);
93 	if (!pentry) {
94 		ret = -ENOMEM;
95 		goto fail;
96 	}
97 
98 	*pentry = *e;
99 	ret = xa_err(xa_store(&sr->xa, idx, pentry, GFP_KERNEL));
100 	if (ret)
101 		goto fail;
102 
103 	return 0;
104 
105 fail:
106 	xe_gt_err(gt,
107 		  "discarding save-restore reg %04lx (clear: %08x, set: %08x, masked: %s, mcr: %s): ret=%d\n",
108 		  idx, e->clr_bits, e->set_bits,
109 		  str_yes_no(e->reg.masked),
110 		  str_yes_no(e->reg.mcr),
111 		  ret);
112 	reg_sr_inc_error(sr);
113 
114 	return ret;
115 }
116 
117 /*
118  * Convert back from encoded value to type-safe, only to be used when reg.mcr
119  * is true
120  */
to_xe_reg_mcr(const struct xe_reg reg)121 static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg)
122 {
123 	return (const struct xe_reg_mcr){.__reg.raw = reg.raw };
124 }
125 
apply_one_mmio(struct xe_gt * gt,struct xe_reg_sr_entry * entry)126 static void apply_one_mmio(struct xe_gt *gt, struct xe_reg_sr_entry *entry)
127 {
128 	struct xe_reg reg = entry->reg;
129 	struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg);
130 	u32 val;
131 
132 	/*
133 	 * If this is a masked register, need to set the upper 16 bits.
134 	 * Set them to clr_bits since that is always a superset of the bits
135 	 * being modified.
136 	 *
137 	 * When it's not masked, we have to read it from hardware, unless we are
138 	 * supposed to set all bits.
139 	 */
140 	if (reg.masked)
141 		val = entry->clr_bits << 16;
142 	else if (entry->clr_bits + 1)
143 		val = (reg.mcr ?
144 		       xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
145 		       xe_mmio_read32(&gt->mmio, reg)) & (~entry->clr_bits);
146 	else
147 		val = 0;
148 
149 	/*
150 	 * TODO: add selftest to validate all tables, regardless of platform:
151 	 *   - Masked registers can't have set_bits with upper bits set
152 	 *   - set_bits must be contained in clr_bits
153 	 */
154 	val |= entry->set_bits;
155 
156 	xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
157 
158 	if (entry->reg.mcr)
159 		xe_gt_mcr_multicast_write(gt, reg_mcr, val);
160 	else
161 		xe_mmio_write32(&gt->mmio, reg, val);
162 }
163 
xe_reg_sr_apply_mmio(struct xe_reg_sr * sr,struct xe_gt * gt)164 void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt)
165 {
166 	struct xe_reg_sr_entry *entry;
167 	unsigned long reg;
168 
169 	if (xa_empty(&sr->xa))
170 		return;
171 
172 	if (IS_SRIOV_VF(gt_to_xe(gt)))
173 		return;
174 
175 	xe_gt_dbg(gt, "Applying %s save-restore MMIOs\n", sr->name);
176 
177 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
178 	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL)) {
179 		xe_gt_err(gt, "Failed to apply, err=-ETIMEDOUT\n");
180 		return;
181 	}
182 
183 	xa_for_each(&sr->xa, reg, entry)
184 		apply_one_mmio(gt, entry);
185 }
186 
187 /**
188  * xe_reg_sr_dump - print all save/restore entries
189  * @sr: Save/restore entries
190  * @p: DRM printer
191  */
xe_reg_sr_dump(struct xe_reg_sr * sr,struct drm_printer * p)192 void xe_reg_sr_dump(struct xe_reg_sr *sr, struct drm_printer *p)
193 {
194 	struct xe_reg_sr_entry *entry;
195 	unsigned long reg;
196 
197 	if (!sr->name || xa_empty(&sr->xa))
198 		return;
199 
200 	drm_printf(p, "%s\n", sr->name);
201 	xa_for_each(&sr->xa, reg, entry)
202 		drm_printf(p, "\tREG[0x%lx] clr=0x%08x set=0x%08x masked=%s mcr=%s\n",
203 			   reg, entry->clr_bits, entry->set_bits,
204 			   str_yes_no(entry->reg.masked),
205 			   str_yes_no(entry->reg.mcr));
206 }
207