// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/kernel.h>
#include <linux/io.h>

#include "hci.h"
#include "ext_caps.h"
#include "xfer_mode_rate.h"


/* Extended Capability Header */
#define CAP_HEADER_LENGTH		GENMASK(23, 8)
#define CAP_HEADER_ID			GENMASK(7, 0)

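/*
 * Cache the vendor identification registers and apply any vendor
 * specific quirk keyed on the MIPI manufacturer ID.
 */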
static int hci_extcap_hardware_id(struct i3c_hci *hci, void __iomem *base)
{
	hci->vendor_mipi_id	= readl(base + 0x04);
	hci->vendor_version_id	= readl(base + 0x08);
	hci->vendor_product_id	= readl(base + 0x0c);

	dev_info(&hci->master.dev, "vendor MIPI ID: %#x\n", hci->vendor_mipi_id);
	dev_info(&hci->master.dev, "vendor version ID: %#x\n", hci->vendor_version_id);
	dev_info(&hci->master.dev, "vendor product ID: %#x\n", hci->vendor_product_id);

	/* ought to go in a table if this grows too much */
	switch (hci->vendor_mipi_id) {
	case MIPI_VENDOR_NXP:
		hci->quirks |= HCI_QUIRK_RAW_CCC;
		DBG("raw CCC quirks set");
		break;
	}

	return 0;
}

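/*
 * The operation mode field (bits 5:4) tells whether the controller
 * can act as a master, a target, or both; configurations without
 * master capability are rejected as only master mode is supported.
 */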
static int hci_extcap_master_config(struct i3c_hci *hci, void __iomem *base)
{
	u32 master_config = readl(base + 0x04);
	unsigned int operation_mode = FIELD_GET(GENMASK(5, 4), master_config);
	static const char * const functionality[] = {
		"(unknown)", "master only", "target only",
		"primary/secondary master" };

	dev_info(&hci->master.dev, "operation mode: %s\n", functionality[operation_mode]);
	if (operation_mode & 0x1)
		return 0;
	dev_err(&hci->master.dev, "only master mode is currently supported\n");
	return -EOPNOTSUPP;
}

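/* Report how many bus instances the controller implements. */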
static int hci_extcap_multi_bus(struct i3c_hci *hci, void __iomem *base)
{
	u32 bus_instance = readl(base + 0x04);
	unsigned int count = FIELD_GET(GENMASK(3, 0), bus_instance);

	dev_info(&hci->master.dev, "%d bus instances\n", count);
	return 0;
}

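/*
 * The transfer mode table lists the modes this controller implements.
 * Entries are only logged for now; they will matter once the I3C core
 * supports more than SDR.
 */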
static int hci_extcap_xfer_modes(struct i3c_hci *hci, void __iomem *base)
{
	u32 header = readl(base);
	u32 entries = FIELD_GET(CAP_HEADER_LENGTH, header) - 1;
	unsigned int index;

	dev_info(&hci->master.dev, "transfer mode table has %d entries\n",
		 entries);
	base += 4;  /* skip header */
	for (index = 0; index < entries; index++) {
		u32 mode_entry = readl(base);

		DBG("mode %d: 0x%08x", index, mode_entry);
		/* TODO: will be needed when I3C core does more than SDR */
		base += 4;
	}

	return 0;
}

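/* Log the data rates advertised for each transfer mode. */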
static int hci_extcap_xfer_rates(struct i3c_hci *hci, void __iomem *base)
{
	u32 header = readl(base);
	u32 entries = FIELD_GET(CAP_HEADER_LENGTH, header) - 1;
	u32 rate_entry;
	unsigned int index, rate, rate_id, mode_id;

	base += 4;  /* skip header */

	dev_info(&hci->master.dev, "available data rates:\n");
	for (index = 0; index < entries; index++) {
		rate_entry = readl(base);
		DBG("entry %d: 0x%08x", index, rate_entry);
		rate = FIELD_GET(XFERRATE_ACTUAL_RATE_KHZ, rate_entry);
		rate_id = FIELD_GET(XFERRATE_RATE_ID, rate_entry);
		mode_id = FIELD_GET(XFERRATE_MODE_ID, rate_entry);
		dev_info(&hci->master.dev, "rate %d for %s = %d kHz\n",
			 rate_id,
			 mode_id == XFERRATE_MODE_I3C ? "I3C" :
			 mode_id == XFERRATE_MODE_I2C ? "I2C" :
			 "unknown mode",
			 rate);
		base += 4;
	}

	return 0;
}

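/*
 * Report how many auto-command entries are active out of the
 * supported maximum and remember where the auto-command registers
 * live for later use.
 */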
static int hci_extcap_auto_command(struct i3c_hci *hci, void __iomem *base)
{
	u32 autocmd_ext_caps = readl(base + 0x04);
	unsigned int max_count = FIELD_GET(GENMASK(3, 0), autocmd_ext_caps);
	u32 autocmd_ext_config = readl(base + 0x08);
	unsigned int count = FIELD_GET(GENMASK(3, 0), autocmd_ext_config);

	dev_info(&hci->master.dev, "%d/%d active auto-command entries\n",
		 count, max_count);
	/* remember auto-command register location for later use */
	hci->AUTOCMD_regs = base;
	return 0;
}

static int hci_extcap_debug(struct i3c_hci *hci, void __iomem *base)
{
	dev_info(&hci->master.dev, "debug registers present\n");
	hci->DEBUG_regs = base;
	return 0;
}

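/*
 * The remaining capabilities are only reported for now; the driver
 * does not make use of them yet.
 */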
static int hci_extcap_scheduled_cmd(struct i3c_hci *hci, void __iomem *base)
{
	dev_info(&hci->master.dev, "scheduled commands available\n");
	/* hci->schedcmd_regs = base; */
	return 0;
}

static int hci_extcap_non_curr_master(struct i3c_hci *hci, void __iomem *base)
{
	dev_info(&hci->master.dev, "Non-Current Master support available\n");
	/* hci->NCM_regs = base; */
	return 0;
}

static int hci_extcap_ccc_resp_conf(struct i3c_hci *hci, void __iomem *base)
{
	dev_info(&hci->master.dev, "CCC Response Configuration available\n");
	return 0;
}

static int hci_extcap_global_DAT(struct i3c_hci *hci, void __iomem *base)
{
	dev_info(&hci->master.dev, "Global DAT available\n");
	return 0;
}

static int hci_extcap_multilane(struct i3c_hci *hci, void __iomem *base)
{
	dev_info(&hci->master.dev, "Master Multi-Lane support available\n");
	return 0;
}

static int hci_extcap_ncm_multilane(struct i3c_hci *hci, void __iomem *base)
{
	dev_info(&hci->master.dev, "NCM Multi-Lane support available\n");
	return 0;
}

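/*
 * Standard extended capabilities: each entry gives the capability ID,
 * the minimum expected length in DWORDs (derived from the highest
 * mandatory register offset) and the parser to invoke.
 */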
struct hci_ext_caps {
	u8  id;
	u16 min_length;
	int (*parser)(struct i3c_hci *hci, void __iomem *base);
};

#define EXT_CAP(_id, _highest_mandatory_reg_offset, _parser) \
	{ .id = (_id), .parser = (_parser), \
	  .min_length = (_highest_mandatory_reg_offset)/4 + 1 }

static const struct hci_ext_caps ext_capabilities[] = {
	EXT_CAP(0x01, 0x0c, hci_extcap_hardware_id),
	EXT_CAP(0x02, 0x04, hci_extcap_master_config),
	EXT_CAP(0x03, 0x04, hci_extcap_multi_bus),
	EXT_CAP(0x04, 0x24, hci_extcap_xfer_modes),
	EXT_CAP(0x05, 0x08, hci_extcap_auto_command),
	EXT_CAP(0x08, 0x40, hci_extcap_xfer_rates),
	EXT_CAP(0x0c, 0x10, hci_extcap_debug),
	EXT_CAP(0x0d, 0x0c, hci_extcap_scheduled_cmd),
	EXT_CAP(0x0e, 0x80, hci_extcap_non_curr_master), /* TODO confirm size */
	EXT_CAP(0x0f, 0x04, hci_extcap_ccc_resp_conf),
	EXT_CAP(0x10, 0x08, hci_extcap_global_DAT),
	EXT_CAP(0x9d, 0x04, hci_extcap_multilane),
	EXT_CAP(0x9e, 0x04, hci_extcap_ncm_multilane),
};

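/*
 * NXP vendor extension: log the FPGA build date register, then write
 * to it to reset the FPGA.
 */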
static int hci_extcap_vendor_NXP(struct i3c_hci *hci, void __iomem *base)
{
	hci->vendor_data = (__force void *)base;
	dev_info(&hci->master.dev, "Build Date Info = %#x\n", readl(base + 1*4));
	/* reset the FPGA */
	writel(0xdeadbeef, base + 1*4);
	return 0;
}

struct hci_ext_cap_vendor_specific {
	u32 vendor;
	u8  cap;
	u16 min_length;
	int (*parser)(struct i3c_hci *hci, void __iomem *base);
};

#define EXT_CAP_VENDOR(_vendor, _cap, _highest_mandatory_reg_offset) \
	{ .vendor = (MIPI_VENDOR_##_vendor), .cap = (_cap), \
	  .parser = (hci_extcap_vendor_##_vendor), \
	  .min_length = (_highest_mandatory_reg_offset)/4 + 1 }

static const struct hci_ext_cap_vendor_specific vendor_ext_caps[] = {
	EXT_CAP_VENDOR(NXP, 0xc0, 0x20),
};

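/*
 * Vendor specific capabilities (IDs 0xc0-0xcf) are matched on both the
 * vendor MIPI ID and the capability ID before being dispatched to the
 * corresponding vendor parser; unknown combinations are only reported.
 */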
static int hci_extcap_vendor_specific(struct i3c_hci *hci, void __iomem *base,
				      u32 cap_id, u32 cap_length)
{
	const struct hci_ext_cap_vendor_specific *vendor_cap_entry;
	int i;

	vendor_cap_entry = NULL;
	for (i = 0; i < ARRAY_SIZE(vendor_ext_caps); i++) {
		if (vendor_ext_caps[i].vendor == hci->vendor_mipi_id &&
		    vendor_ext_caps[i].cap == cap_id) {
			vendor_cap_entry = &vendor_ext_caps[i];
			break;
		}
	}

	if (!vendor_cap_entry) {
		dev_notice(&hci->master.dev,
			   "unknown ext_cap 0x%02x for vendor 0x%02x\n",
			   cap_id, hci->vendor_mipi_id);
		return 0;
	}
	if (cap_length < vendor_cap_entry->min_length) {
		dev_err(&hci->master.dev,
			"ext_cap 0x%02x has size %d (expecting >= %d)\n",
			cap_id, cap_length, vendor_cap_entry->min_length);
		return -EINVAL;
	}
	return vendor_cap_entry->parser(hci, base);
}

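/*
 * Walk the extended capability list at EXTCAPS_regs. Each entry starts
 * with a 32-bit header giving its ID (bits 7:0) and its length in
 * DWORDs (bits 23:8, header included), with the next entry following
 * immediately after. A zero length terminates the list and an
 * arbitrary 4KB window bounds the walk.
 */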
int i3c_hci_parse_ext_caps(struct i3c_hci *hci)
{
	void __iomem *curr_cap = hci->EXTCAPS_regs;
	void __iomem *end = curr_cap + 0x1000; /* some arbitrary limit */
	u32 cap_header, cap_id, cap_length;
	const struct hci_ext_caps *cap_entry;
	int i, err = 0;

	if (!curr_cap)
		return 0;

	for (; !err && curr_cap < end; curr_cap += cap_length * 4) {
		cap_header = readl(curr_cap);
		cap_id = FIELD_GET(CAP_HEADER_ID, cap_header);
		cap_length = FIELD_GET(CAP_HEADER_LENGTH, cap_header);
		DBG("id=0x%02x length=%d", cap_id, cap_length);
		if (!cap_length)
			break;
		if (curr_cap + cap_length * 4 >= end) {
			dev_err(&hci->master.dev,
				"ext_cap 0x%02x has size %d (too big)\n",
				cap_id, cap_length);
			err = -EINVAL;
			break;
		}

		if (cap_id >= 0xc0 && cap_id <= 0xcf) {
			err = hci_extcap_vendor_specific(hci, curr_cap,
							 cap_id, cap_length);
			continue;
		}

		cap_entry = NULL;
		for (i = 0; i < ARRAY_SIZE(ext_capabilities); i++) {
			if (ext_capabilities[i].id == cap_id) {
				cap_entry = &ext_capabilities[i];
				break;
			}
		}
		if (!cap_entry) {
			dev_notice(&hci->master.dev,
				   "unknown ext_cap 0x%02x\n", cap_id);
		} else if (cap_length < cap_entry->min_length) {
			dev_err(&hci->master.dev,
				"ext_cap 0x%02x has size %d (expecting >= %d)\n",
				cap_id, cap_length, cap_entry->min_length);
			err = -EINVAL;
		} else {
			err = cap_entry->parser(hci, curr_cap);
		}
	}
	return err;
}