// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/kernel.h>
#include <linux/io.h>

#include "hci.h"
#include "ext_caps.h"
#include "xfer_mode_rate.h"


/* Extended Capability Header */
#define CAP_HEADER_LENGTH		GENMASK(23, 8)
#define CAP_HEADER_ID			GENMASK(7, 0)
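
/*
 * Every Extended Capability structure starts with a 32-bit header whose
 * low byte is the capability ID and whose bits 23:8 give the structure
 * length in DWORDs, header included.  For instance, a header value of
 * 0x00000401 would describe capability 0x01 (hardware ID) spanning
 * 4 DWORDs: the header plus the three registers at offsets 0x04-0x0c.
 * i3c_hci_parse_ext_caps() below walks the list by advancing the
 * pointer by length * 4 until a zero-length header terminates it.
 */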

static int hci_extcap_hardware_id(struct i3c_hci *hci, void __iomem *base)
{
	hci->vendor_mipi_id	= readl(base + 0x04);
	hci->vendor_version_id	= readl(base + 0x08);
	hci->vendor_product_id	= readl(base + 0x0c);

	dev_info(&hci->master.dev, "vendor MIPI ID: %#x\n", hci->vendor_mipi_id);
	dev_info(&hci->master.dev, "vendor version ID: %#x\n", hci->vendor_version_id);
	dev_info(&hci->master.dev, "vendor product ID: %#x\n", hci->vendor_product_id);

	/* ought to go in a table if this grows too much */
	switch (hci->vendor_mipi_id) {
	case MIPI_VENDOR_NXP:
		hci->quirks |= HCI_QUIRK_RAW_CCC;
		dev_dbg(&hci->master.dev, "raw CCC quirks set");
		break;
	}

	return 0;
}

static int hci_extcap_master_config(struct i3c_hci *hci, void __iomem *base)
{
	u32 master_config = readl(base + 0x04);
	unsigned int operation_mode = FIELD_GET(GENMASK(5, 4), master_config);
	static const char * const functionality[] = {
		"(unknown)", "master only", "target only",
		"primary/secondary master" };

	dev_info(&hci->master.dev, "operation mode: %s\n", functionality[operation_mode]);
	if (operation_mode & 0x1)
		return 0;
	dev_err(&hci->master.dev, "only master mode is currently supported\n");
	return -EOPNOTSUPP;
}

static int hci_extcap_multi_bus(struct i3c_hci *hci, void __iomem *base)
{
	u32 bus_instance = readl(base + 0x04);
	unsigned int count = FIELD_GET(GENMASK(3, 0), bus_instance);

	dev_info(&hci->master.dev, "%d bus instances\n", count);
	return 0;
}

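/*
 * The transfer mode and data rate tables (capability IDs 0x04 and 0x08)
 * are plain arrays of one-DWORD entries following the capability header,
 * so each holds (header length - 1) entries.
 */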
static int hci_extcap_xfer_modes(struct i3c_hci *hci, void __iomem *base)
{
	u32 header = readl(base);
	u32 entries = FIELD_GET(CAP_HEADER_LENGTH, header) - 1;
	unsigned int index;

	dev_info(&hci->master.dev, "transfer mode table has %d entries\n",
		 entries);
	base += 4;  /* skip header */
	for (index = 0; index < entries; index++) {
		u32 mode_entry = readl(base);

		dev_dbg(&hci->master.dev, "mode %d: 0x%08x",
			index, mode_entry);
		/* TODO: will be needed when I3C core does more than SDR */
		base += 4;
	}

	return 0;
}

static int hci_extcap_xfer_rates(struct i3c_hci *hci, void __iomem *base)
{
	u32 header = readl(base);
	u32 entries = FIELD_GET(CAP_HEADER_LENGTH, header) - 1;
	u32 rate_entry;
	unsigned int index, rate, rate_id, mode_id;

	base += 4;  /* skip header */

	dev_info(&hci->master.dev, "available data rates:\n");
	for (index = 0; index < entries; index++) {
		rate_entry = readl(base);
		dev_dbg(&hci->master.dev, "entry %d: 0x%08x",
			index, rate_entry);
		rate = FIELD_GET(XFERRATE_ACTUAL_RATE_KHZ, rate_entry);
		rate_id = FIELD_GET(XFERRATE_RATE_ID, rate_entry);
		mode_id = FIELD_GET(XFERRATE_MODE_ID, rate_entry);
		dev_info(&hci->master.dev, "rate %d for %s = %d kHz\n",
			 rate_id,
			 mode_id == XFERRATE_MODE_I3C ? "I3C" :
			 mode_id == XFERRATE_MODE_I2C ? "I2C" :
			 "unknown mode",
			 rate);
		base += 4;
	}

	return 0;
}

static int hci_extcap_auto_command(struct i3c_hci *hci, void __iomem *base)
{
	u32 autocmd_ext_caps = readl(base + 0x04);
	unsigned int max_count = FIELD_GET(GENMASK(3, 0), autocmd_ext_caps);
	u32 autocmd_ext_config = readl(base + 0x08);
	unsigned int count = FIELD_GET(GENMASK(3, 0), autocmd_ext_config);

	dev_info(&hci->master.dev, "%d/%d active auto-command entries\n",
		 count, max_count);
	/* remember auto-command register location for later use */
	hci->AUTOCMD_regs = base;
	return 0;
}

static int hci_extcap_debug(struct i3c_hci *hci, void __iomem *base)
{
	dev_info(&hci->master.dev, "debug registers present\n");
	hci->DEBUG_regs = base;
	return 0;
}

static int hci_extcap_scheduled_cmd(struct i3c_hci *hci, void __iomem *base)
{
	dev_info(&hci->master.dev, "scheduled commands available\n");
	/* hci->schedcmd_regs = base; */
	return 0;
}

static int hci_extcap_non_curr_master(struct i3c_hci *hci, void __iomem *base)
{
	dev_info(&hci->master.dev, "Non-Current Master support available\n");
	/* hci->NCM_regs = base; */
	return 0;
}

static int hci_extcap_ccc_resp_conf(struct i3c_hci *hci, void __iomem *base)
{
	dev_info(&hci->master.dev, "CCC Response Configuration available\n");
	return 0;
}

static int hci_extcap_global_DAT(struct i3c_hci *hci, void __iomem *base)
{
	dev_info(&hci->master.dev, "Global DAT available\n");
	return 0;
}

static int hci_extcap_multilane(struct i3c_hci *hci, void __iomem *base)
{
	dev_info(&hci->master.dev, "Master Multi-Lane support available\n");
	return 0;
}

static int hci_extcap_ncm_multilane(struct i3c_hci *hci, void __iomem *base)
{
	dev_info(&hci->master.dev, "NCM Multi-Lane support available\n");
	return 0;
}

struct hci_ext_caps {
	u8  id;
	u16 min_length;
	int (*parser)(struct i3c_hci *hci, void __iomem *base);
};

#define EXT_CAP(_id, _highest_mandatory_reg_offset, _parser) \
	{ .id = (_id), .parser = (_parser), \
	  .min_length = (_highest_mandatory_reg_offset)/4 + 1 }

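/*
 * Standard Extended Capability structures handled by this driver, keyed
 * by capability ID.  min_length is expressed in DWORDs and derived from
 * the highest mandatory register offset of each structure.
 */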
static const struct hci_ext_caps ext_capabilities[] = {
	EXT_CAP(0x01, 0x0c, hci_extcap_hardware_id),
	EXT_CAP(0x02, 0x04, hci_extcap_master_config),
	EXT_CAP(0x03, 0x04, hci_extcap_multi_bus),
	EXT_CAP(0x04, 0x24, hci_extcap_xfer_modes),
	EXT_CAP(0x05, 0x08, hci_extcap_auto_command),
	EXT_CAP(0x08, 0x40, hci_extcap_xfer_rates),
	EXT_CAP(0x0c, 0x10, hci_extcap_debug),
	EXT_CAP(0x0d, 0x0c, hci_extcap_scheduled_cmd),
	EXT_CAP(0x0e, 0x80, hci_extcap_non_curr_master), /* TODO confirm size */
	EXT_CAP(0x0f, 0x04, hci_extcap_ccc_resp_conf),
	EXT_CAP(0x10, 0x08, hci_extcap_global_DAT),
	EXT_CAP(0x9d, 0x04, hci_extcap_multilane),
	EXT_CAP(0x9e, 0x04, hci_extcap_ncm_multilane),
};

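/*
 * Capability IDs 0xc0 to 0xcf are treated as vendor specific and are
 * dispatched on the (MIPI vendor ID, capability ID) pair listed in
 * vendor_ext_caps below.
 */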
static int hci_extcap_vendor_NXP(struct i3c_hci *hci, void __iomem *base)
{
	hci->vendor_data = (__force void *)base;
	dev_info(&hci->master.dev, "Build Date Info = %#x\n", readl(base + 1*4));
	/* reset the FPGA */
	writel(0xdeadbeef, base + 1*4);
	return 0;
}

struct hci_ext_cap_vendor_specific {
	u32 vendor;
	u8  cap;
	u16 min_length;
	int (*parser)(struct i3c_hci *hci, void __iomem *base);
};

#define EXT_CAP_VENDOR(_vendor, _cap, _highest_mandatory_reg_offset) \
	{ .vendor = (MIPI_VENDOR_##_vendor), .cap = (_cap), \
	  .parser = (hci_extcap_vendor_##_vendor), \
	  .min_length = (_highest_mandatory_reg_offset)/4 + 1 }

static const struct hci_ext_cap_vendor_specific vendor_ext_caps[] = {
	EXT_CAP_VENDOR(NXP, 0xc0, 0x20),
};

static int hci_extcap_vendor_specific(struct i3c_hci *hci, void __iomem *base,
				      u32 cap_id, u32 cap_length)
{
	const struct hci_ext_cap_vendor_specific *vendor_cap_entry;
	int i;

	vendor_cap_entry = NULL;
	for (i = 0; i < ARRAY_SIZE(vendor_ext_caps); i++) {
		if (vendor_ext_caps[i].vendor == hci->vendor_mipi_id &&
		    vendor_ext_caps[i].cap == cap_id) {
			vendor_cap_entry = &vendor_ext_caps[i];
			break;
		}
	}

	if (!vendor_cap_entry) {
		dev_notice(&hci->master.dev,
			   "unknown ext_cap 0x%02x for vendor 0x%02x\n",
			   cap_id, hci->vendor_mipi_id);
		return 0;
	}
	if (cap_length < vendor_cap_entry->min_length) {
		dev_err(&hci->master.dev,
			"ext_cap 0x%02x has size %d (expecting >= %d)\n",
			cap_id, cap_length, vendor_cap_entry->min_length);
		return -EINVAL;
	}
	return vendor_cap_entry->parser(hci, base);
}

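/*
 * Walk the Extended Capability list pointed at by EXTCAPS_regs: decode
 * each header, sanity check the advertised length, then hand the
 * structure to its parser.  Parsing stops at a zero-length header or on
 * the first error.
 */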
int i3c_hci_parse_ext_caps(struct i3c_hci *hci)
{
	void __iomem *curr_cap = hci->EXTCAPS_regs;
	void __iomem *end = curr_cap + 0x1000; /* some arbitrary limit */
	u32 cap_header, cap_id, cap_length;
	const struct hci_ext_caps *cap_entry;
	int i, err = 0;

	if (!curr_cap)
		return 0;

	for (; !err && curr_cap < end; curr_cap += cap_length * 4) {
		cap_header = readl(curr_cap);
		cap_id = FIELD_GET(CAP_HEADER_ID, cap_header);
		cap_length = FIELD_GET(CAP_HEADER_LENGTH, cap_header);
		dev_dbg(&hci->master.dev, "id=0x%02x length=%d",
			cap_id, cap_length);
		if (!cap_length)
			break;
		if (curr_cap + cap_length * 4 >= end) {
			dev_err(&hci->master.dev,
				"ext_cap 0x%02x has size %d (too big)\n",
				cap_id, cap_length);
			err = -EINVAL;
			break;
		}

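		/* IDs 0xc0 to 0xcf are handled as vendor specific */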
		if (cap_id >= 0xc0 && cap_id <= 0xcf) {
			err = hci_extcap_vendor_specific(hci, curr_cap,
							 cap_id, cap_length);
			continue;
		}

		cap_entry = NULL;
		for (i = 0; i < ARRAY_SIZE(ext_capabilities); i++) {
			if (ext_capabilities[i].id == cap_id) {
				cap_entry = &ext_capabilities[i];
				break;
			}
		}
		if (!cap_entry) {
			dev_notice(&hci->master.dev,
				   "unknown ext_cap 0x%02x\n", cap_id);
		} else if (cap_length < cap_entry->min_length) {
			dev_err(&hci->master.dev,
				"ext_cap 0x%02x has size %d (expecting >= %d)\n",
				cap_id, cap_length, cap_entry->min_length);
			err = -EINVAL;
		} else {
			err = cap_entry->parser(hci, curr_cap);
		}
	}
	return err;
}