xref: /linux/drivers/misc/mei/vsc-tp.c (revision c26f4fbd58375bd6ef74f95eb73d61762ad97c59)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023, Intel Corporation.
 * Intel Visual Sensing Controller Transport Layer Linux driver
 */

#include <linux/acpi.h>
#include <linux/cleanup.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "vsc-tp.h"

#define VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS	20
#define VSC_TP_ROM_BOOTUP_DELAY_MS		10
#define VSC_TP_ROM_XFER_POLL_TIMEOUT_US		(500 * USEC_PER_MSEC)
#define VSC_TP_ROM_XFER_POLL_DELAY_US		(20 * USEC_PER_MSEC)
#define VSC_TP_WAIT_FW_POLL_TIMEOUT		(2 * HZ)
#define VSC_TP_WAIT_FW_POLL_DELAY_US		(20 * USEC_PER_MSEC)
#define VSC_TP_MAX_XFER_COUNT			5

#define VSC_TP_PACKET_SYNC			0x31
#define VSC_TP_CRC_SIZE				sizeof(u32)
#define VSC_TP_MAX_MSG_SIZE			2048
/* SPI xfer timeout size */
#define VSC_TP_XFER_TIMEOUT_BYTES		700
#define VSC_TP_PACKET_PADDING_SIZE		1
#define VSC_TP_PACKET_SIZE(pkt) \
	(sizeof(struct vsc_tp_packet_hdr) + le16_to_cpu((pkt)->hdr.len) + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_PACKET_SIZE \
	(sizeof(struct vsc_tp_packet_hdr) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_XFER_SIZE \
	(VSC_TP_MAX_PACKET_SIZE + VSC_TP_XFER_TIMEOUT_BYTES)
#define VSC_TP_NEXT_XFER_LEN(len, offset) \
	((len) + sizeof(struct vsc_tp_packet_hdr) + VSC_TP_CRC_SIZE - (offset) + VSC_TP_PACKET_PADDING_SIZE)

struct vsc_tp_packet_hdr {
	__u8 sync;
	__u8 cmd;
	__le16 len;
	__le32 seq;
};

struct vsc_tp_packet {
	struct vsc_tp_packet_hdr hdr;
	__u8 buf[VSC_TP_MAX_XFER_SIZE - sizeof(struct vsc_tp_packet_hdr)];
};
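
/*
 * On-wire framing, derived from the macros and structs above and shown
 * here only for illustration: every packet is a little-endian header
 * followed by an optional payload and a CRC32 over header + payload:
 *
 *	+------+-----+---------+---------+- - - - - -+-------+
 *	| sync | cmd | len(LE) | seq(LE) |  payload  | crc32 |
 *	|  1B  | 1B  |   2B    |   4B    | len bytes |  4B   |
 *	+------+-----+---------+---------+- - - - - -+-------+
 *
 * e.g. a packet with a 4-byte payload occupies VSC_TP_PACKET_SIZE() =
 * 8 + 4 + 4 = 16 bytes on the wire, and sync is always
 * VSC_TP_PACKET_SYNC (0x31).
 */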

struct vsc_tp {
	/* do the actual data transfer */
	struct spi_device *spi;

	/* bind with mei framework */
	struct platform_device *pdev;

	struct gpio_desc *wakeuphost;
	struct gpio_desc *resetfw;
	struct gpio_desc *wakeupfw;

	/* command sequence number */
	u32 seq;

	/* command buffer */
	struct vsc_tp_packet *tx_buf;
	struct vsc_tp_packet *rx_buf;

	atomic_t assert_cnt;
	wait_queue_head_t xfer_wait;

	vsc_tp_event_cb_t event_notify;
	void *event_notify_context;

	/* used to protect command download */
	struct mutex mutex;
};

/* GPIO resources */
static const struct acpi_gpio_params wakeuphost_gpio = { 0, 0, false };
static const struct acpi_gpio_params wakeuphostint_gpio = { 1, 0, false };
static const struct acpi_gpio_params resetfw_gpio = { 2, 0, false };
static const struct acpi_gpio_params wakeupfw = { 3, 0, false };

static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
	{ "wakeuphost-gpios", &wakeuphost_gpio, 1 },
	{ "wakeuphostint-gpios", &wakeuphostint_gpio, 1 },
	{ "resetfw-gpios", &resetfw_gpio, 1 },
	{ "wakeupfw-gpios", &wakeupfw, 1 },
	{}
};
static irqreturn_t vsc_tp_isr(int irq, void *data)
{
	struct vsc_tp *tp = data;

	atomic_inc(&tp->assert_cnt);

	wake_up(&tp->xfer_wait);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
{
	struct vsc_tp *tp = data;

	if (tp->event_notify)
		tp->event_notify(tp->event_notify_context);

	return IRQ_HANDLED;
}

/*
 * Wake up the firmware and wait for its response.
 *
 * Driving wakeupfw low requests a wakeup; the firmware first signals a
 * falling edge on the interrupt line (counted in assert_cnt by the ISR)
 * and then raises wakeuphost once it is ready for the transfer.
 */
static int vsc_tp_wakeup_request(struct vsc_tp *tp)
{
	int ret;

	gpiod_set_value_cansleep(tp->wakeupfw, 0);

	ret = wait_event_timeout(tp->xfer_wait,
				 atomic_read(&tp->assert_cnt),
				 VSC_TP_WAIT_FW_POLL_TIMEOUT);
	if (!ret)
		return -ETIMEDOUT;

	return read_poll_timeout(gpiod_get_value_cansleep, ret, ret,
				 VSC_TP_WAIT_FW_POLL_DELAY_US,
				 VSC_TP_WAIT_FW_POLL_TIMEOUT, false,
				 tp->wakeuphost);
}

static void vsc_tp_wakeup_release(struct vsc_tp *tp)
{
	atomic_dec_if_positive(&tp->assert_cnt);

	gpiod_set_value_cansleep(tp->wakeupfw, 1);
}

static int vsc_tp_dev_xfer(struct vsc_tp *tp, void *obuf, void *ibuf, size_t len)
{
	struct spi_message msg = { 0 };
	struct spi_transfer xfer = {
		.tx_buf = obuf,
		.rx_buf = ibuf,
		.len = len,
	};

	spi_message_init_with_transfers(&msg, &xfer, 1);

	return spi_sync_locked(tp->spi, &msg);
}

static int vsc_tp_xfer_helper(struct vsc_tp *tp, struct vsc_tp_packet *pkt,
			      void *ibuf, u16 ilen)
{
	int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet_hdr);
	int next_xfer_len = VSC_TP_PACKET_SIZE(pkt) + VSC_TP_XFER_TIMEOUT_BYTES;
	u8 *src, *crc_src, *rx_buf = (u8 *)tp->rx_buf;
	int count_down = VSC_TP_MAX_XFER_COUNT;
	u32 recv_crc = 0, crc = ~0;
	struct vsc_tp_packet_hdr ack;
	u8 *dst = (u8 *)&ack;
	bool synced = false;

	do {
		ret = vsc_tp_dev_xfer(tp, pkt, rx_buf, next_xfer_len);
		if (ret)
			return ret;
		memset(pkt, 0, VSC_TP_MAX_XFER_SIZE);

		if (synced) {
			src = rx_buf;
			src_len = next_xfer_len;
		} else {
			/* the response starts at the first sync byte */
			src = memchr(rx_buf, VSC_TP_PACKET_SYNC, next_xfer_len);
			if (!src)
				continue;
			synced = true;
			src_len = next_xfer_len - (src - rx_buf);
		}

		/* traverse received data */
		while (src_len > 0) {
			cpy_len = min(src_len, dst_len);
			memcpy(dst, src, cpy_len);
			crc_src = src;
			src += cpy_len;
			src_len -= cpy_len;
			dst += cpy_len;
			dst_len -= cpy_len;

			if (offset < sizeof(ack)) {
				/* still collecting the ack header */
				offset += cpy_len;
				crc = crc32(crc, crc_src, cpy_len);

				if (!src_len)
					continue;

				if (le16_to_cpu(ack.len)) {
					dst = ibuf;
					dst_len = min(ilen, le16_to_cpu(ack.len));
				} else {
					dst = (u8 *)&recv_crc;
					dst_len = sizeof(recv_crc);
				}
			} else if (offset < sizeof(ack) + le16_to_cpu(ack.len)) {
				/* collecting the payload */
				offset += cpy_len;
				crc = crc32(crc, crc_src, cpy_len);

				if (src_len) {
					int remain = sizeof(ack) + le16_to_cpu(ack.len) - offset;

					cpy_len = min(src_len, remain);
					offset += cpy_len;
					crc = crc32(crc, src, cpy_len);
					src += cpy_len;
					src_len -= cpy_len;
					if (src_len) {
						dst = (u8 *)&recv_crc;
						dst_len = sizeof(recv_crc);
						continue;
					}
				}
				next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
			} else if (offset < sizeof(ack) + le16_to_cpu(ack.len) + VSC_TP_CRC_SIZE) {
				/* collecting the trailing CRC */
				offset += cpy_len;

				if (src_len) {
					/* terminate the traverse */
					next_xfer_len = 0;
					break;
				}
				next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
			}
		}
	} while (next_xfer_len > 0 && --count_down);

	if (next_xfer_len > 0)
		return -EAGAIN;

	if (~recv_crc != crc || le32_to_cpu(ack.seq) != tp->seq) {
		dev_err(&tp->spi->dev, "recv crc or seq error\n");
		return -EINVAL;
	}

	if (ack.cmd == VSC_TP_CMD_ACK || ack.cmd == VSC_TP_CMD_NACK ||
	    ack.cmd == VSC_TP_CMD_BUSY) {
		dev_err(&tp->spi->dev, "recv cmd ack error\n");
		return -EAGAIN;
	}

	return min(le16_to_cpu(ack.len), ilen);
}
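
/*
 * Worked example of the incremental read above (numbers only, derived
 * from the macros): once the 8-byte header has arrived (offset == 8) and
 * it announces a 32-byte payload, the next transfer is sized as
 *
 *	VSC_TP_NEXT_XFER_LEN(32, 8) = 32 + 8 + 4 - 8 + 1 = 37 bytes,
 *
 * i.e. the remaining payload plus the trailing CRC plus one padding byte.
 */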

/**
 * vsc_tp_xfer - transfer data to firmware
 * @tp: vsc_tp device handle
 * @cmd: the command to be sent to the device
 * @obuf: the tx buffer to be sent to the device
 * @olen: the length of tx buffer
 * @ibuf: the rx buffer to receive from the device
 * @ilen: the length of rx buffer
 * Return: the length of received data in case of success,
 *	otherwise negative error code
 */
int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
		void *ibuf, size_t ilen)
{
	struct vsc_tp_packet *pkt = tp->tx_buf;
	u32 crc;
	int ret;

	if (!obuf || !ibuf || olen > VSC_TP_MAX_MSG_SIZE)
		return -EINVAL;

	guard(mutex)(&tp->mutex);

	pkt->hdr.sync = VSC_TP_PACKET_SYNC;
	pkt->hdr.cmd = cmd;
	pkt->hdr.len = cpu_to_le16(olen);
	pkt->hdr.seq = cpu_to_le32(++tp->seq);
	memcpy(pkt->buf, obuf, olen);

	/* the CRC covers the packet header plus the payload */
	crc = ~crc32(~0, (u8 *)pkt, sizeof(pkt->hdr) + olen);
	memcpy(pkt->buf + olen, &crc, sizeof(crc));

	ret = vsc_tp_wakeup_request(tp);
	if (unlikely(ret))
		dev_err(&tp->spi->dev, "wakeup firmware failed ret: %d\n", ret);
	else
		ret = vsc_tp_xfer_helper(tp, pkt, ibuf, ilen);

	vsc_tp_wakeup_release(tp);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_xfer, "VSC_TP");
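
/*
 * A minimal usage sketch (illustrative only): the command opcode and the
 * request/response layouts below are hypothetical, not part of this file;
 * real callers such as the mei-vsc glue define their own.
 *
 *	u8 cmd = 0x42;			// hypothetical opcode
 *	__le32 req = cpu_to_le32(1);	// hypothetical request payload
 *	u8 resp[16];
 *	int ret;
 *
 *	ret = vsc_tp_xfer(tp, cmd, &req, sizeof(req), resp, sizeof(resp));
 *	if (ret < 0)
 *		return ret;		// xfer or protocol error
 *	// on success, ret is the number of response bytes placed in resp
 */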

/**
 * vsc_tp_rom_xfer - transfer data to ROM code
 * @tp: vsc_tp device handle
 * @obuf: the data buffer to be sent to the device
 * @ibuf: the buffer to receive data from the device, may be NULL
 * @len: the length of tx buffer and rx buffer
 * Return: 0 in case of success, negative value in case of error
 */
int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
{
	size_t words = len / sizeof(__be32);
	int ret;

	if (len % sizeof(__be32) || len > VSC_TP_MAX_MSG_SIZE)
		return -EINVAL;

	guard(mutex)(&tp->mutex);

	/* rom xfer is big endian */
	cpu_to_be32_array((__be32 *)tp->tx_buf, obuf, words);

	ret = read_poll_timeout(gpiod_get_value_cansleep, ret,
				!ret, VSC_TP_ROM_XFER_POLL_DELAY_US,
				VSC_TP_ROM_XFER_POLL_TIMEOUT_US, false,
				tp->wakeuphost);
	if (ret) {
		dev_err(&tp->spi->dev, "wait rom failed ret: %d\n", ret);
		return ret;
	}

	ret = vsc_tp_dev_xfer(tp, tp->tx_buf, ibuf ? tp->rx_buf : NULL, len);
	if (ret)
		return ret;

	if (ibuf)
		be32_to_cpu_array(ibuf, (__be32 *)tp->rx_buf, words);

	return ret;
}
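
/*
 * A minimal sketch of a ROM exchange (illustrative only; the command
 * words are made up, and the exact reset/download sequence is owned by
 * the firmware loader). Buffers must be a whole number of 32-bit words:
 *
 *	u32 rom_cmd[4] = { 0 };	// hypothetical ROM command block
 *	u32 rom_resp[4];
 *	int ret;
 *
 *	vsc_tp_reset(tp);	// assumed: device back in ROM first
 *	ret = vsc_tp_rom_xfer(tp, rom_cmd, rom_resp, sizeof(rom_cmd));
 *	if (ret)
 *		return ret;
 */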

/**
 * vsc_tp_reset - reset vsc transport layer
 * @tp: vsc_tp device handle
 */
void vsc_tp_reset(struct vsc_tp *tp)
{
	disable_irq(tp->spi->irq);

	/* toggle reset pin */
	gpiod_set_value_cansleep(tp->resetfw, 0);
	msleep(VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS);
	gpiod_set_value_cansleep(tp->resetfw, 1);

	/* wait for ROM */
	msleep(VSC_TP_ROM_BOOTUP_DELAY_MS);

	/*
	 * Set the host wakeup pin to its default non-active state
	 * to avoid an unexpected host interrupt.
	 */
	gpiod_set_value_cansleep(tp->wakeupfw, 1);

	atomic_set(&tp->assert_cnt, 0);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_reset, "VSC_TP");

/**
 * vsc_tp_need_read - check if device has data to send
 * @tp: vsc_tp device handle
 * Return: true if device has data to send, otherwise false
 */
bool vsc_tp_need_read(struct vsc_tp *tp)
{
	if (!atomic_read(&tp->assert_cnt))
		return false;
	if (!gpiod_get_value_cansleep(tp->wakeuphost))
		return false;
	if (!gpiod_get_value_cansleep(tp->wakeupfw))
		return false;

	return true;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_need_read, "VSC_TP");

/**
 * vsc_tp_register_event_cb - register a callback function to receive event
 * @tp: vsc_tp device handle
 * @event_cb: callback function
 * @context: execution context of event callback
 * Return: 0 in case of success, negative value in case of error
 */
int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
			     void *context)
{
	tp->event_notify = event_cb;
	tp->event_notify_context = context;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, "VSC_TP");
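
/*
 * A sketch of how a consumer might hook the event path (illustrative;
 * `my_event_cb`, `struct my_dev` and the deferred-work handoff are
 * hypothetical, not part of this driver):
 *
 *	static void my_event_cb(void *context)
 *	{
 *		struct my_dev *dev = context;	// hypothetical type
 *
 *		// invoked from the threaded ISR on every firmware interrupt
 *		if (vsc_tp_need_read(dev->tp))
 *			schedule_work(&dev->rx_work);
 *	}
 *
 *	...
 *	vsc_tp_register_event_cb(tp, my_event_cb, dev);
 */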

/**
 * vsc_tp_request_irq - request irq for vsc_tp device
 * @tp: vsc_tp device handle
 * Return: 0 in case of success, negative value in case of error
 */
int vsc_tp_request_irq(struct vsc_tp *tp)
{
	struct spi_device *spi = tp->spi;
	struct device *dev = &spi->dev;

	irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
	return request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
				    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				    dev_name(dev), tp);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_request_irq, "VSC_TP");

/**
 * vsc_tp_free_irq - free irq for vsc_tp device
 * @tp: vsc_tp device handle
 */
void vsc_tp_free_irq(struct vsc_tp *tp)
{
	free_irq(tp->spi->irq, tp);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_free_irq, "VSC_TP");
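
/*
 * The free/request pair lets a caller drop the IRQ entirely and re-arm it
 * later (e.g. across suspend or a firmware reload). A hedged sketch; the
 * exact sequence is up to the consumer:
 *
 *	vsc_tp_free_irq(tp);
 *	// ... reset / reload firmware, interrupt line quiescent ...
 *	ret = vsc_tp_request_irq(tp);
 *	if (ret)
 *		return ret;
 */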

/**
 * vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_synchronize(struct vsc_tp *tp)
{
	synchronize_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_synchronize, "VSC_TP");

/**
 * vsc_tp_intr_enable - enable vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_enable(struct vsc_tp *tp)
{
	enable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_enable, "VSC_TP");

/**
 * vsc_tp_intr_disable - disable vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_disable(struct vsc_tp *tp)
{
	disable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, "VSC_TP");
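
/*
 * A common quiesce pattern built from these helpers (illustrative only;
 * when it is needed is the caller's decision):
 *
 *	vsc_tp_intr_disable(tp);	// mask the interrupt line
 *	vsc_tp_intr_synchronize(tp);	// wait out any running handler
 *	// ... update state shared with the ISR path ...
 *	vsc_tp_intr_enable(tp);
 */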

/* helper for acpi_dev_for_each_child(): grab the first child and stop */
static int vsc_tp_match_any(struct acpi_device *adev, void *data)
{
	struct acpi_device **__adev = data;

	*__adev = adev;

	return 1;
}

static int vsc_tp_probe(struct spi_device *spi)
{
	struct vsc_tp *tp;
	struct platform_device_info pinfo = {
		.name = "intel_vsc",
		.data = &tp,
		.size_data = sizeof(tp),
		.id = PLATFORM_DEVID_NONE,
	};
	struct device *dev = &spi->dev;
	struct platform_device *pdev;
	struct acpi_device *adev;
	int ret;

	tp = devm_kzalloc(dev, sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return -ENOMEM;

	tp->tx_buf = devm_kzalloc(dev, sizeof(*tp->tx_buf), GFP_KERNEL);
	if (!tp->tx_buf)
		return -ENOMEM;

	tp->rx_buf = devm_kzalloc(dev, sizeof(*tp->rx_buf), GFP_KERNEL);
	if (!tp->rx_buf)
		return -ENOMEM;

	ret = devm_acpi_dev_add_driver_gpios(dev, vsc_tp_acpi_gpios);
	if (ret)
		return ret;

	tp->wakeuphost = devm_gpiod_get(dev, "wakeuphostint", GPIOD_IN);
	if (IS_ERR(tp->wakeuphost))
		return PTR_ERR(tp->wakeuphost);

	tp->resetfw = devm_gpiod_get(dev, "resetfw", GPIOD_OUT_HIGH);
	if (IS_ERR(tp->resetfw))
		return PTR_ERR(tp->resetfw);

	tp->wakeupfw = devm_gpiod_get(dev, "wakeupfw", GPIOD_OUT_HIGH);
	if (IS_ERR(tp->wakeupfw))
		return PTR_ERR(tp->wakeupfw);

	atomic_set(&tp->assert_cnt, 0);
	init_waitqueue_head(&tp->xfer_wait);
	tp->spi = spi;

	irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
	ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				   dev_name(dev), tp);
	if (ret)
		return ret;

	mutex_init(&tp->mutex);

	/* only one child acpi device */
	ret = acpi_dev_for_each_child(ACPI_COMPANION(dev),
				      vsc_tp_match_any, &adev);
	if (!ret) {
		ret = -ENODEV;
		goto err_destroy_lock;
	}

	pinfo.fwnode = acpi_fwnode_handle(adev);
	pdev = platform_device_register_full(&pinfo);
	if (IS_ERR(pdev)) {
		ret = PTR_ERR(pdev);
		goto err_destroy_lock;
	}

	tp->pdev = pdev;
	spi_set_drvdata(spi, tp);

	return 0;

err_destroy_lock:
	mutex_destroy(&tp->mutex);

	free_irq(spi->irq, tp);

	return ret;
}

static void vsc_tp_remove(struct spi_device *spi)
{
	struct vsc_tp *tp = spi_get_drvdata(spi);

	platform_device_unregister(tp->pdev);

	mutex_destroy(&tp->mutex);

	free_irq(spi->irq, tp);
}

static void vsc_tp_shutdown(struct spi_device *spi)
{
	struct vsc_tp *tp = spi_get_drvdata(spi);

	platform_device_unregister(tp->pdev);

	mutex_destroy(&tp->mutex);

	vsc_tp_reset(tp);

	free_irq(spi->irq, tp);
}

static const struct acpi_device_id vsc_tp_acpi_ids[] = {
	{ "INTC1009" }, /* Raptor Lake */
	{ "INTC1058" }, /* Tiger Lake */
	{ "INTC1094" }, /* Alder Lake */
	{ "INTC10D0" }, /* Meteor Lake */
	{}
};
MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids);

static struct spi_driver vsc_tp_driver = {
	.probe = vsc_tp_probe,
	.remove = vsc_tp_remove,
	.shutdown = vsc_tp_shutdown,
	.driver = {
		.name = "vsc-tp",
		.acpi_match_table = vsc_tp_acpi_ids,
	},
};
module_spi_driver(vsc_tp_driver);

MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
MODULE_DESCRIPTION("Intel Visual Sensing Controller Transport Layer");
MODULE_LICENSE("GPL");