// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023, Intel Corporation.
 * Intel Visual Sensing Controller Transport Layer Linux driver
 */

#include <linux/acpi.h>
#include <linux/cleanup.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "vsc-tp.h"

#define VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS	20
#define VSC_TP_ROM_BOOTUP_DELAY_MS		10
#define VSC_TP_ROM_XFER_POLL_TIMEOUT_US		(500 * USEC_PER_MSEC)
#define VSC_TP_ROM_XFER_POLL_DELAY_US		(20 * USEC_PER_MSEC)
#define VSC_TP_WAIT_FW_POLL_TIMEOUT		(2 * HZ)
#define VSC_TP_WAIT_FW_POLL_DELAY_US		(20 * USEC_PER_MSEC)
#define VSC_TP_MAX_XFER_COUNT			5

#define VSC_TP_PACKET_SYNC			0x31
#define VSC_TP_CRC_SIZE				sizeof(u32)
#define VSC_TP_MAX_MSG_SIZE			2048
/* spare bytes clocked in each SPI transfer to absorb device response latency */
#define VSC_TP_XFER_TIMEOUT_BYTES		700
#define VSC_TP_PACKET_PADDING_SIZE		1
#define VSC_TP_PACKET_SIZE(pkt) \
	(sizeof(struct vsc_tp_packet) + le16_to_cpu((pkt)->len) + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_PACKET_SIZE \
	(sizeof(struct vsc_tp_packet) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_XFER_SIZE \
	(VSC_TP_MAX_PACKET_SIZE + VSC_TP_XFER_TIMEOUT_BYTES)
#define VSC_TP_NEXT_XFER_LEN(len, offset) \
	(len + sizeof(struct vsc_tp_packet) + VSC_TP_CRC_SIZE - offset + VSC_TP_PACKET_PADDING_SIZE)

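/*
 * On-wire framing: a fixed header (sync byte, command byte, little-endian
 * payload length and sequence number), followed by the payload and a
 * trailing CRC32 computed over header plus payload.
 */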
struct vsc_tp_packet {
	__u8 sync;
	__u8 cmd;
	__le16 len;
	__le32 seq;
	__u8 buf[] __counted_by(len);
};

struct vsc_tp {
	/* do the actual data transfer */
	struct spi_device *spi;

	/* platform device that binds to the mei framework */
	struct platform_device *pdev;

	struct gpio_desc *wakeuphost;
	struct gpio_desc *resetfw;
	struct gpio_desc *wakeupfw;

	/* command sequence number */
	u32 seq;

	/* command buffer */
	void *tx_buf;
	void *rx_buf;

	atomic_t assert_cnt;
	wait_queue_head_t xfer_wait;

	vsc_tp_event_cb_t event_notify;
	void *event_notify_context;

	/* used to protect command download */
	struct mutex mutex;
};

/* GPIO resources */
static const struct acpi_gpio_params wakeuphost_gpio = { 0, 0, false };
static const struct acpi_gpio_params wakeuphostint_gpio = { 1, 0, false };
static const struct acpi_gpio_params resetfw_gpio = { 2, 0, false };
static const struct acpi_gpio_params wakeupfw = { 3, 0, false };

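/*
 * Map the GPIOs by their ACPI _CRS entry index to the connection IDs
 * looked up via devm_gpiod_get() in vsc_tp_probe() below.
 */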
static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
	{ "wakeuphost-gpios", &wakeuphost_gpio, 1 },
	{ "wakeuphostint-gpios", &wakeuphostint_gpio, 1 },
	{ "resetfw-gpios", &resetfw_gpio, 1 },
	{ "wakeupfw-gpios", &wakeupfw, 1 },
	{}
};

/*
 * Wake the firmware by driving wakeupfw low, wait for the wakeuphost
 * IRQ to fire (assert_cnt becomes non-zero), then poll until the
 * wakeuphost line itself reads asserted.
 */
static int vsc_tp_wakeup_request(struct vsc_tp *tp)
{
	int ret;

	gpiod_set_value_cansleep(tp->wakeupfw, 0);

	ret = wait_event_timeout(tp->xfer_wait,
				 atomic_read(&tp->assert_cnt),
				 VSC_TP_WAIT_FW_POLL_TIMEOUT);
	if (!ret)
		return -ETIMEDOUT;

	return read_poll_timeout(gpiod_get_value_cansleep, ret, ret,
				 VSC_TP_WAIT_FW_POLL_DELAY_US,
				 VSC_TP_WAIT_FW_POLL_TIMEOUT, false,
				 tp->wakeuphost);
}

static void vsc_tp_wakeup_release(struct vsc_tp *tp)
{
	atomic_dec_if_positive(&tp->assert_cnt);

	gpiod_set_value_cansleep(tp->wakeupfw, 1);
}

static int vsc_tp_dev_xfer(struct vsc_tp *tp, void *obuf, void *ibuf, size_t len)
{
	struct spi_message msg = { 0 };
	struct spi_transfer xfer = {
		.tx_buf = obuf,
		.rx_buf = ibuf,
		.len = len,
	};

	spi_message_init_with_transfers(&msg, &xfer, 1);

	return spi_sync_locked(tp->spi, &msg);
}

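/*
 * Clock the packet out while scanning the bytes simultaneously clocked in
 * for the sync byte, then reassemble the device's reply in three phases
 * (header, payload, trailing CRC), growing the next transfer as needed and
 * retrying up to VSC_TP_MAX_XFER_COUNT times until the reply is complete.
 */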
static int vsc_tp_xfer_helper(struct vsc_tp *tp, struct vsc_tp_packet *pkt,
			      void *ibuf, u16 ilen)
{
	int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet);
	int next_xfer_len = VSC_TP_PACKET_SIZE(pkt) + VSC_TP_XFER_TIMEOUT_BYTES;
	u8 *src, *crc_src, *rx_buf = tp->rx_buf;
	int count_down = VSC_TP_MAX_XFER_COUNT;
	u32 recv_crc = 0, crc = ~0;
	struct vsc_tp_packet ack;
	u8 *dst = (u8 *)&ack;
	bool synced = false;

	do {
		ret = vsc_tp_dev_xfer(tp, pkt, rx_buf, next_xfer_len);
		if (ret)
			return ret;
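		/* follow-up transfers in this exchange clock out zeros */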
		memset(pkt, 0, VSC_TP_MAX_XFER_SIZE);

		if (synced) {
			src = rx_buf;
			src_len = next_xfer_len;
		} else {
			src = memchr(rx_buf, VSC_TP_PACKET_SYNC, next_xfer_len);
			if (!src)
				continue;
			synced = true;
			src_len = next_xfer_len - (src - rx_buf);
		}

		/* traverse received data */
		while (src_len > 0) {
			cpy_len = min(src_len, dst_len);
			memcpy(dst, src, cpy_len);
			crc_src = src;
			src += cpy_len;
			src_len -= cpy_len;
			dst += cpy_len;
			dst_len -= cpy_len;

			if (offset < sizeof(ack)) {
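				/* phase 1: still filling the fixed header in &ack */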
				offset += cpy_len;
				crc = crc32(crc, crc_src, cpy_len);

				if (!src_len)
					continue;

				if (le16_to_cpu(ack.len)) {
					dst = ibuf;
					dst_len = min(ilen, le16_to_cpu(ack.len));
				} else {
					dst = (u8 *)&recv_crc;
					dst_len = sizeof(recv_crc);
				}
			} else if (offset < sizeof(ack) + le16_to_cpu(ack.len)) {
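				/* phase 2: header complete, filling the payload */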
				offset += cpy_len;
				crc = crc32(crc, crc_src, cpy_len);

				if (src_len) {
					int remain = sizeof(ack) + le16_to_cpu(ack.len) - offset;

					cpy_len = min(src_len, remain);
					offset += cpy_len;
					crc = crc32(crc, src, cpy_len);
					src += cpy_len;
					src_len -= cpy_len;
					if (src_len) {
						dst = (u8 *)&recv_crc;
						dst_len = sizeof(recv_crc);
						continue;
					}
				}
				next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
			} else if (offset < sizeof(ack) + le16_to_cpu(ack.len) + VSC_TP_CRC_SIZE) {
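				/* phase 3: payload complete, filling the trailing CRC */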
				offset += cpy_len;

				if (src_len) {
					/* terminate the traversal */
					next_xfer_len = 0;
					break;
				}
				next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
			}
		}
	} while (next_xfer_len > 0 && --count_down);

	if (next_xfer_len > 0)
		return -EAGAIN;

	if (~recv_crc != crc || le32_to_cpu(ack.seq) != tp->seq) {
		dev_err(&tp->spi->dev, "recv crc or seq error\n");
		return -EINVAL;
	}

	if (ack.cmd == VSC_TP_CMD_ACK || ack.cmd == VSC_TP_CMD_NACK ||
	    ack.cmd == VSC_TP_CMD_BUSY) {
		dev_err(&tp->spi->dev, "recv cmd ack error\n");
		return -EAGAIN;
	}

	return min(le16_to_cpu(ack.len), ilen);
}

/**
 * vsc_tp_xfer - transfer data to firmware
 * @tp: vsc_tp device handle
 * @cmd: the command to be sent to the device
 * @obuf: the tx buffer to be sent to the device
 * @olen: the length of the tx buffer
 * @ibuf: the rx buffer to receive from the device
 * @ilen: the length of the rx buffer
 * Return: the length of received data in case of success,
 *	otherwise a negative error code
 */
int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
		void *ibuf, size_t ilen)
{
	struct vsc_tp_packet *pkt = tp->tx_buf;
	u32 crc;
	int ret;

	if (!obuf || !ibuf || olen > VSC_TP_MAX_MSG_SIZE)
		return -EINVAL;

	guard(mutex)(&tp->mutex);

	pkt->sync = VSC_TP_PACKET_SYNC;
	pkt->cmd = cmd;
	pkt->len = cpu_to_le16(olen);
	pkt->seq = cpu_to_le32(++tp->seq);
	memcpy(pkt->buf, obuf, olen);

	/*
	 * CRC32 over header plus payload; sizeof(*pkt) is the header size
	 * only, since buf[] is a flexible array member.
	 */
	crc = ~crc32(~0, (u8 *)pkt, sizeof(*pkt) + olen);
	memcpy(pkt->buf + olen, &crc, sizeof(crc));

	ret = vsc_tp_wakeup_request(tp);
	if (unlikely(ret))
		dev_err(&tp->spi->dev, "wakeup firmware failed ret: %d\n", ret);
	else
		ret = vsc_tp_xfer_helper(tp, pkt, ibuf, ilen);

	vsc_tp_wakeup_release(tp);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_xfer, VSC_TP);
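
/*
 * A minimal usage sketch (hypothetical caller; command codes and payload
 * layouts are defined by the firmware protocol, not by this file):
 *
 *	u8 reply[VSC_TP_MAX_MSG_SIZE];
 *	int n = vsc_tp_xfer(tp, cmd, req, req_len, reply, sizeof(reply));
 *
 * On success @n is the number of valid bytes copied into @reply; a
 * negative value indicates a wakeup, transfer, CRC or sequence error.
 */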

/**
 * vsc_tp_rom_xfer - transfer data to rom code
 * @tp: vsc_tp device handle
 * @obuf: the data buffer to be sent to the device
 * @ibuf: the buffer to receive data from the device
 * @len: the length of tx buffer and rx buffer
 * Return: 0 in case of success, negative error code in case of error
 */
int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
{
	size_t words = len / sizeof(__be32);
	int ret;

	if (len % sizeof(__be32) || len > VSC_TP_MAX_MSG_SIZE)
		return -EINVAL;

	guard(mutex)(&tp->mutex);

	/* rom xfer is big endian */
	cpu_to_be32_array(tp->tx_buf, obuf, words);

	ret = read_poll_timeout(gpiod_get_value_cansleep, ret,
				!ret, VSC_TP_ROM_XFER_POLL_DELAY_US,
				VSC_TP_ROM_XFER_POLL_TIMEOUT_US, false,
				tp->wakeuphost);
	if (ret) {
		dev_err(&tp->spi->dev, "wait rom failed ret: %d\n", ret);
		return ret;
	}

	ret = vsc_tp_dev_xfer(tp, tp->tx_buf, tp->rx_buf, len);
	if (ret)
		return ret;

	/* convert the big-endian reply back to CPU byte order */
	if (ibuf)
		be32_to_cpu_array(ibuf, tp->rx_buf, words);

	return ret;
}

/**
 * vsc_tp_reset - reset vsc transport layer
 * @tp: vsc_tp device handle
 */
void vsc_tp_reset(struct vsc_tp *tp)
{
	disable_irq(tp->spi->irq);

	/* toggle reset pin */
	gpiod_set_value_cansleep(tp->resetfw, 0);
	msleep(VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS);
	gpiod_set_value_cansleep(tp->resetfw, 1);

	/* wait for ROM */
	msleep(VSC_TP_ROM_BOOTUP_DELAY_MS);

	/*
	 * Default the firmware wakeup pin to its inactive state to avoid
	 * spurious host interrupts.
	 */
	gpiod_set_value_cansleep(tp->wakeupfw, 1);

	atomic_set(&tp->assert_cnt, 0);

	enable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_reset, VSC_TP);

/**
 * vsc_tp_need_read - check if the device has data to send
 * @tp: vsc_tp device handle
 * Return: true if the device has data to send, otherwise false
 */
bool vsc_tp_need_read(struct vsc_tp *tp)
{
	if (!atomic_read(&tp->assert_cnt))
		return false;
	if (!gpiod_get_value_cansleep(tp->wakeuphost))
		return false;
	if (!gpiod_get_value_cansleep(tp->wakeupfw))
		return false;

	return true;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_need_read, VSC_TP);

/**
 * vsc_tp_register_event_cb - register a callback function to receive events
 * @tp: vsc_tp device handle
 * @event_cb: callback function
 * @context: execution context of event callback
 * Return: 0 in case of success, negative error code in case of error
 */
int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
			     void *context)
{
	tp->event_notify = event_cb;
	tp->event_notify_context = context;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP);

/**
 * vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_synchronize(struct vsc_tp *tp)
{
	synchronize_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_synchronize, VSC_TP);

/**
 * vsc_tp_intr_enable - enable vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_enable(struct vsc_tp *tp)
{
	enable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_enable, VSC_TP);

/**
 * vsc_tp_intr_disable - disable vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_disable(struct vsc_tp *tp)
{
	disable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP);

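/*
 * Hard IRQ half: count the wakeuphost assertion. The threaded half wakes
 * any transfer blocked in vsc_tp_wakeup_request() and forwards the event
 * to the registered callback.
 */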
static irqreturn_t vsc_tp_isr(int irq, void *data)
{
	struct vsc_tp *tp = data;

	atomic_inc(&tp->assert_cnt);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
{
	struct vsc_tp *tp = data;

	wake_up(&tp->xfer_wait);

	if (tp->event_notify)
		tp->event_notify(tp->event_notify_context);

	return IRQ_HANDLED;
}

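/* helper for acpi_dev_for_each_child(): capture the first child found */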
static int vsc_tp_match_any(struct acpi_device *adev, void *data)
{
	struct acpi_device **__adev = data;

	*__adev = adev;

	return 1;
}

static int vsc_tp_probe(struct spi_device *spi)
{
	struct vsc_tp *tp;
	struct platform_device_info pinfo = {
		.name = "intel_vsc",
		.data = &tp,
		.size_data = sizeof(tp),
		.id = PLATFORM_DEVID_NONE,
	};
	struct device *dev = &spi->dev;
	struct platform_device *pdev;
	struct acpi_device *adev;
	int ret;

	tp = devm_kzalloc(dev, sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return -ENOMEM;

	tp->tx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
	if (!tp->tx_buf)
		return -ENOMEM;

	tp->rx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
	if (!tp->rx_buf)
		return -ENOMEM;

	ret = devm_acpi_dev_add_driver_gpios(dev, vsc_tp_acpi_gpios);
	if (ret)
		return ret;

	tp->wakeuphost = devm_gpiod_get(dev, "wakeuphost", GPIOD_IN);
	if (IS_ERR(tp->wakeuphost))
		return PTR_ERR(tp->wakeuphost);

	tp->resetfw = devm_gpiod_get(dev, "resetfw", GPIOD_OUT_HIGH);
	if (IS_ERR(tp->resetfw))
		return PTR_ERR(tp->resetfw);

	tp->wakeupfw = devm_gpiod_get(dev, "wakeupfw", GPIOD_OUT_HIGH);
	if (IS_ERR(tp->wakeupfw))
		return PTR_ERR(tp->wakeupfw);

	atomic_set(&tp->assert_cnt, 0);
	init_waitqueue_head(&tp->xfer_wait);
	tp->spi = spi;

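	/*
	 * IRQ_DISABLE_UNLAZY makes disable_irq() mask the line immediately,
	 * presumably so vsc_tp_reset() can toggle the reset pin without a
	 * stale interrupt firing.
	 */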
	irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
	ret = devm_request_threaded_irq(dev, spi->irq, vsc_tp_isr,
					vsc_tp_thread_isr,
					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					dev_name(dev), tp);
	if (ret)
		return ret;

	mutex_init(&tp->mutex);

	/* there is only one ACPI child device */
	ret = acpi_dev_for_each_child(ACPI_COMPANION(dev),
				      vsc_tp_match_any, &adev);
	if (!ret) {
		ret = -ENODEV;
		goto err_destroy_lock;
	}

	pinfo.fwnode = acpi_fwnode_handle(adev);
	pdev = platform_device_register_full(&pinfo);
	if (IS_ERR(pdev)) {
		ret = PTR_ERR(pdev);
		goto err_destroy_lock;
	}

	tp->pdev = pdev;
	spi_set_drvdata(spi, tp);

	return 0;

err_destroy_lock:
	mutex_destroy(&tp->mutex);

	return ret;
}

static void vsc_tp_remove(struct spi_device *spi)
{
	struct vsc_tp *tp = spi_get_drvdata(spi);

	platform_device_unregister(tp->pdev);

	mutex_destroy(&tp->mutex);
}

static const struct acpi_device_id vsc_tp_acpi_ids[] = {
	{ "INTC1009" }, /* Raptor Lake */
	{ "INTC1058" }, /* Tiger Lake */
	{ "INTC1094" }, /* Alder Lake */
	{ "INTC10D0" }, /* Meteor Lake */
	{}
};
MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids);

static struct spi_driver vsc_tp_driver = {
	.probe = vsc_tp_probe,
	.remove = vsc_tp_remove,
	.driver = {
		.name = "vsc-tp",
		.acpi_match_table = vsc_tp_acpi_ids,
	},
};
module_spi_driver(vsc_tp_driver);

MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
MODULE_DESCRIPTION("Intel Visual Sensing Controller Transport Layer");
MODULE_LICENSE("GPL");