xref: /linux/drivers/misc/mei/vsc-tp.c (revision 0d5ec7919f3747193f051036b2301734a4b5e1d6)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023, Intel Corporation.
 * Intel Visual Sensing Controller Transport Layer Linux driver
 */

#include <linux/acpi.h>
#include <linux/cleanup.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "vsc-tp.h"

#define VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS	20
#define VSC_TP_ROM_BOOTUP_DELAY_MS		10
#define VSC_TP_ROM_XFER_POLL_TIMEOUT_US		(500 * USEC_PER_MSEC)
#define VSC_TP_ROM_XFER_POLL_DELAY_US		(20 * USEC_PER_MSEC)
#define VSC_TP_WAIT_FW_POLL_TIMEOUT		(2 * HZ)
#define VSC_TP_WAIT_FW_POLL_DELAY_US		(20 * USEC_PER_MSEC)
#define VSC_TP_MAX_XFER_COUNT			5

#define VSC_TP_PACKET_SYNC			0x31
#define VSC_TP_CRC_SIZE				sizeof(u32)
#define VSC_TP_MAX_MSG_SIZE			2048
/* SPI xfer timeout size */
#define VSC_TP_XFER_TIMEOUT_BYTES		700
#define VSC_TP_PACKET_PADDING_SIZE		1
#define VSC_TP_PACKET_SIZE(pkt) \
	(sizeof(struct vsc_tp_packet_hdr) + le16_to_cpu((pkt)->hdr.len) + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_PACKET_SIZE \
	(sizeof(struct vsc_tp_packet_hdr) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_XFER_SIZE \
	(VSC_TP_MAX_PACKET_SIZE + VSC_TP_XFER_TIMEOUT_BYTES)
#define VSC_TP_NEXT_XFER_LEN(len, offset) \
	(len + sizeof(struct vsc_tp_packet_hdr) + VSC_TP_CRC_SIZE - offset + VSC_TP_PACKET_PADDING_SIZE)

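/*
 * On-wire frame layout, as implied by the macros above: an 8-byte header
 * (sync byte, command, 16-bit payload length, 32-bit sequence number),
 * up to VSC_TP_MAX_MSG_SIZE bytes of payload, and a trailing CRC32 over
 * header plus payload.  Receive transfers additionally clock out up to
 * VSC_TP_XFER_TIMEOUT_BYTES of fill before the sync byte is found, plus
 * one padding byte at the end.
 */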
struct vsc_tp_packet_hdr {
	__u8 sync;
	__u8 cmd;
	__le16 len;
	__le32 seq;
};

struct vsc_tp_packet {
	struct vsc_tp_packet_hdr hdr;
	__u8 buf[VSC_TP_MAX_XFER_SIZE - sizeof(struct vsc_tp_packet_hdr)];
};

struct vsc_tp {
	/* do the actual data transfer */
	struct spi_device *spi;

	/* bind with mei framework */
	struct platform_device *pdev;

	struct gpio_desc *wakeuphost;
	struct gpio_desc *resetfw;
	struct gpio_desc *wakeupfw;

	/* command sequence number */
	u32 seq;

	/* command buffer */
	struct vsc_tp_packet *tx_buf;
	struct vsc_tp_packet *rx_buf;

	atomic_t assert_cnt;
	wait_queue_head_t xfer_wait;
	struct work_struct event_work;

	vsc_tp_event_cb_t event_notify;
	void *event_notify_context;
	struct mutex event_notify_mutex;	/* protects event_notify + context */
	struct mutex mutex;			/* protects command download */
};

/* GPIO resources */
static const struct acpi_gpio_params wakeuphost_gpio = { 0, 0, false };
static const struct acpi_gpio_params wakeuphostint_gpio = { 1, 0, false };
static const struct acpi_gpio_params resetfw_gpio = { 2, 0, false };
static const struct acpi_gpio_params wakeupfw = { 3, 0, false };

static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
	{ "wakeuphost-gpios", &wakeuphost_gpio, 1 },
	{ "wakeuphostint-gpios", &wakeuphostint_gpio, 1 },
	{ "resetfw-gpios", &resetfw_gpio, 1 },
	{ "wakeupfw-gpios", &wakeupfw, 1 },
	{}
};

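/*
 * Interrupt handler for the wakeuphostint GPIO: count the assertion and
 * wake both the synchronous transfer waiters and the event work.
 */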
static irqreturn_t vsc_tp_isr(int irq, void *data)
{
	struct vsc_tp *tp = data;

	atomic_inc(&tp->assert_cnt);

	wake_up(&tp->xfer_wait);

	schedule_work(&tp->event_work);

	return IRQ_HANDLED;
}

static void vsc_tp_event_work(struct work_struct *work)
{
	struct vsc_tp *tp = container_of(work, struct vsc_tp, event_work);

	guard(mutex)(&tp->event_notify_mutex);

	if (tp->event_notify)
		tp->event_notify(tp->event_notify_context);
}

/* wake up the firmware and wait for its response */
static int vsc_tp_wakeup_request(struct vsc_tp *tp)
{
	int ret;

	gpiod_set_value_cansleep(tp->wakeupfw, 0);

	ret = wait_event_timeout(tp->xfer_wait,
				 atomic_read(&tp->assert_cnt),
				 VSC_TP_WAIT_FW_POLL_TIMEOUT);
	if (!ret)
		return -ETIMEDOUT;

	return read_poll_timeout(gpiod_get_value_cansleep, ret, ret,
				 VSC_TP_WAIT_FW_POLL_DELAY_US,
				 VSC_TP_WAIT_FW_POLL_TIMEOUT, false,
				 tp->wakeuphost);
}

static void vsc_tp_wakeup_release(struct vsc_tp *tp)
{
	atomic_dec_if_positive(&tp->assert_cnt);

	gpiod_set_value_cansleep(tp->wakeupfw, 1);
}

static int vsc_tp_dev_xfer(struct vsc_tp *tp, void *obuf, void *ibuf, size_t len)
{
	struct spi_message msg = { 0 };
	struct spi_transfer xfer = {
		.tx_buf = obuf,
		.rx_buf = ibuf,
		.len = len,
	};

	spi_message_init_with_transfers(&msg, &xfer, 1);

	return spi_sync_locked(tp->spi, &msg);
}

static int vsc_tp_xfer_helper(struct vsc_tp *tp, struct vsc_tp_packet *pkt,
			      void *ibuf, u16 ilen)
{
	int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet_hdr);
	int next_xfer_len = VSC_TP_PACKET_SIZE(pkt) + VSC_TP_XFER_TIMEOUT_BYTES;
	u8 *src, *crc_src, *rx_buf = (u8 *)tp->rx_buf;
	int count_down = VSC_TP_MAX_XFER_COUNT;
	u32 recv_crc = 0, crc = ~0;
	struct vsc_tp_packet_hdr ack;
	u8 *dst = (u8 *)&ack;
	bool synced = false;
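
	/*
	 * Receive loop: keep clocking the bus until a complete response has
	 * been parsed.  Data is consumed in three phases keyed off @offset:
	 * first the packet header into @ack, then the payload into @ibuf,
	 * finally the trailing CRC into @recv_crc.  @next_xfer_len is
	 * recomputed from the header length so each follow-up SPI transfer
	 * fetches only what is still missing.
	 */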
	do {
		ret = vsc_tp_dev_xfer(tp, pkt, rx_buf, next_xfer_len);
		if (ret)
			return ret;
		memset(pkt, 0, VSC_TP_MAX_XFER_SIZE);

		if (synced) {
			src = rx_buf;
			src_len = next_xfer_len;
		} else {
			src = memchr(rx_buf, VSC_TP_PACKET_SYNC, next_xfer_len);
			if (!src)
				continue;
			synced = true;
			src_len = next_xfer_len - (src - rx_buf);
		}

		/* traverse received data */
		while (src_len > 0) {
			cpy_len = min(src_len, dst_len);
			memcpy(dst, src, cpy_len);
			crc_src = src;
			src += cpy_len;
			src_len -= cpy_len;
			dst += cpy_len;
			dst_len -= cpy_len;

			if (offset < sizeof(ack)) {
				offset += cpy_len;
				crc = crc32(crc, crc_src, cpy_len);

				if (!src_len)
					continue;

				if (le16_to_cpu(ack.len)) {
					dst = ibuf;
					dst_len = min(ilen, le16_to_cpu(ack.len));
				} else {
					dst = (u8 *)&recv_crc;
					dst_len = sizeof(recv_crc);
				}
			} else if (offset < sizeof(ack) + le16_to_cpu(ack.len)) {
				offset += cpy_len;
				crc = crc32(crc, crc_src, cpy_len);

				if (src_len) {
					int remain = sizeof(ack) + le16_to_cpu(ack.len) - offset;

					cpy_len = min(src_len, remain);
					offset += cpy_len;
					crc = crc32(crc, src, cpy_len);
					src += cpy_len;
					src_len -= cpy_len;
					if (src_len) {
						dst = (u8 *)&recv_crc;
						dst_len = sizeof(recv_crc);
						continue;
					}
				}
				next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
			} else if (offset < sizeof(ack) + le16_to_cpu(ack.len) + VSC_TP_CRC_SIZE) {
				offset += cpy_len;

				if (src_len) {
					/* terminate the traverse */
					next_xfer_len = 0;
					break;
				}
				next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
			}
		}
	} while (next_xfer_len > 0 && --count_down);

	if (next_xfer_len > 0)
		return -EAGAIN;

	if (~recv_crc != crc || le32_to_cpu(ack.seq) != tp->seq) {
		dev_err(&tp->spi->dev, "recv crc or seq error\n");
		return -EINVAL;
	}

	if (ack.cmd == VSC_TP_CMD_ACK || ack.cmd == VSC_TP_CMD_NACK ||
	    ack.cmd == VSC_TP_CMD_BUSY) {
		dev_err(&tp->spi->dev, "recv cmd ack error\n");
		return -EAGAIN;
	}

	return min(le16_to_cpu(ack.len), ilen);
}

/**
 * vsc_tp_xfer - transfer data to firmware
 * @tp: vsc_tp device handle
 * @cmd: the command to be sent to the device
 * @obuf: the tx buffer to be sent to the device
 * @olen: the length of the tx buffer
 * @ibuf: the rx buffer to receive from the device
 * @ilen: the length of the rx buffer
 * Return: the length of the received data on success,
 *	otherwise a negative error code
 */
int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
		void *ibuf, size_t ilen)
{
	struct vsc_tp_packet *pkt = tp->tx_buf;
	u32 crc;
	int ret;

	if (!obuf || !ibuf || olen > VSC_TP_MAX_MSG_SIZE)
		return -EINVAL;

	guard(mutex)(&tp->mutex);

	pkt->hdr.sync = VSC_TP_PACKET_SYNC;
	pkt->hdr.cmd = cmd;
	pkt->hdr.len = cpu_to_le16(olen);
	pkt->hdr.seq = cpu_to_le32(++tp->seq);
	memcpy(pkt->buf, obuf, olen);

	crc = ~crc32(~0, (u8 *)pkt, sizeof(pkt->hdr) + olen);
	memcpy(pkt->buf + olen, &crc, sizeof(crc));

	ret = vsc_tp_wakeup_request(tp);
	if (unlikely(ret))
		dev_err(&tp->spi->dev, "wakeup firmware failed ret: %d\n", ret);
	else
		ret = vsc_tp_xfer_helper(tp, pkt, ibuf, ilen);

	vsc_tp_wakeup_release(tp);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_xfer, "VSC_TP");

/**
 * vsc_tp_rom_xfer - transfer data to the ROM code
 * @tp: vsc_tp device handle
 * @obuf: the data buffer to be sent to the device
 * @ibuf: the buffer to receive data from the device
 * @len: the length of the tx buffer and rx buffer
 * Return: 0 on success, a negative error code otherwise
 */
int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
{
	size_t words = len / sizeof(__be32);
	int ret;

	if (len % sizeof(__be32) || len > VSC_TP_MAX_MSG_SIZE)
		return -EINVAL;

	guard(mutex)(&tp->mutex);

	/* rom xfer is big endian */
	cpu_to_be32_array((__be32 *)tp->tx_buf, obuf, words);

	ret = read_poll_timeout(gpiod_get_value_cansleep, ret,
				!ret, VSC_TP_ROM_XFER_POLL_DELAY_US,
				VSC_TP_ROM_XFER_POLL_TIMEOUT_US, false,
				tp->wakeuphost);
	if (ret) {
		dev_err(&tp->spi->dev, "wait rom failed ret: %d\n", ret);
		return ret;
	}

	ret = vsc_tp_dev_xfer(tp, tp->tx_buf, ibuf ? tp->rx_buf : NULL, len);
	if (ret)
		return ret;

	if (ibuf)
		be32_to_cpu_array(ibuf, (__be32 *)tp->rx_buf, words);

	return ret;
}

/**
 * vsc_tp_reset - reset vsc transport layer
 * @tp: vsc_tp device handle
 */
void vsc_tp_reset(struct vsc_tp *tp)
{
	disable_irq(tp->spi->irq);

	/* toggle reset pin */
	gpiod_set_value_cansleep(tp->resetfw, 0);
	msleep(VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS);
	gpiod_set_value_cansleep(tp->resetfw, 1);

	/* wait for ROM */
	msleep(VSC_TP_ROM_BOOTUP_DELAY_MS);

	/*
	 * Set the host wakeup pin to its non-active default
	 * to avoid an unexpected host interrupt.
	 */
	gpiod_set_value_cansleep(tp->wakeupfw, 1);

	atomic_set(&tp->assert_cnt, 0);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_reset, "VSC_TP");

/**
 * vsc_tp_need_read - check if the device has data to send
 * @tp: vsc_tp device handle
 * Return: true if the device has data to send, otherwise false
 */
bool vsc_tp_need_read(struct vsc_tp *tp)
{
	if (!atomic_read(&tp->assert_cnt))
		return false;
	if (!gpiod_get_value_cansleep(tp->wakeuphost))
		return false;
	if (!gpiod_get_value_cansleep(tp->wakeupfw))
		return false;

	return true;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_need_read, "VSC_TP");

/**
 * vsc_tp_register_event_cb - register a callback function to receive events
 * @tp: vsc_tp device handle
 * @event_cb: callback function
 * @context: execution context of the event callback
 * Return: 0 on success, a negative error code otherwise
 */
int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
			     void *context)
{
	guard(mutex)(&tp->event_notify_mutex);

	tp->event_notify = event_cb;
	tp->event_notify_context = context;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, "VSC_TP");

/**
 * vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_synchronize(struct vsc_tp *tp)
{
	synchronize_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_synchronize, "VSC_TP");

/**
 * vsc_tp_intr_enable - enable vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_enable(struct vsc_tp *tp)
{
	enable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_enable, "VSC_TP");

/**
 * vsc_tp_intr_disable - disable vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_disable(struct vsc_tp *tp)
{
	disable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, "VSC_TP");

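/* acpi_dev_for_each_child() callback: latch the first (and only) child ACPI device. */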
static int vsc_tp_match_any(struct acpi_device *adev, void *data)
{
	struct acpi_device **__adev = data;

	*__adev = adev;

	return 1;
}

static int vsc_tp_probe(struct spi_device *spi)
{
	struct vsc_tp *tp;
	struct platform_device_info pinfo = {
		.name = "intel_vsc",
		.data = &tp,
		.size_data = sizeof(tp),
		.id = PLATFORM_DEVID_NONE,
	};
	struct device *dev = &spi->dev;
	struct platform_device *pdev;
	struct acpi_device *adev;
	int ret;

	tp = devm_kzalloc(dev, sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return -ENOMEM;

	tp->tx_buf = devm_kzalloc(dev, sizeof(*tp->tx_buf), GFP_KERNEL);
	if (!tp->tx_buf)
		return -ENOMEM;

	tp->rx_buf = devm_kzalloc(dev, sizeof(*tp->rx_buf), GFP_KERNEL);
	if (!tp->rx_buf)
		return -ENOMEM;

	ret = devm_acpi_dev_add_driver_gpios(dev, vsc_tp_acpi_gpios);
	if (ret)
		return ret;

	tp->wakeuphost = devm_gpiod_get(dev, "wakeuphostint", GPIOD_IN);
	if (IS_ERR(tp->wakeuphost))
		return PTR_ERR(tp->wakeuphost);

	tp->resetfw = devm_gpiod_get(dev, "resetfw", GPIOD_OUT_HIGH);
	if (IS_ERR(tp->resetfw))
		return PTR_ERR(tp->resetfw);

	tp->wakeupfw = devm_gpiod_get(dev, "wakeupfw", GPIOD_OUT_HIGH);
	if (IS_ERR(tp->wakeupfw))
		return PTR_ERR(tp->wakeupfw);

	atomic_set(&tp->assert_cnt, 0);
	init_waitqueue_head(&tp->xfer_wait);
	tp->spi = spi;

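	/*
	 * Disable the lazy-disable optimization so that disable_irq() in
	 * vsc_tp_reset() and vsc_tp_intr_disable() masks the line at the
	 * interrupt chip immediately rather than on the next interrupt.
	 */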
	irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
	ret = request_threaded_irq(spi->irq, NULL, vsc_tp_isr,
				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				   dev_name(dev), tp);
	if (ret)
		return ret;

	mutex_init(&tp->mutex);
	mutex_init(&tp->event_notify_mutex);
	INIT_WORK(&tp->event_work, vsc_tp_event_work);

	/* only one child acpi device */
	ret = acpi_dev_for_each_child(ACPI_COMPANION(dev),
				      vsc_tp_match_any, &adev);
	if (!ret) {
		ret = -ENODEV;
		goto err_destroy_lock;
	}

	pinfo.fwnode = acpi_fwnode_handle(adev);
	pdev = platform_device_register_full(&pinfo);
	if (IS_ERR(pdev)) {
		ret = PTR_ERR(pdev);
		goto err_destroy_lock;
	}

	tp->pdev = pdev;
	spi_set_drvdata(spi, tp);

	return 0;

err_destroy_lock:
	free_irq(spi->irq, tp);

	cancel_work_sync(&tp->event_work);
	mutex_destroy(&tp->event_notify_mutex);
	mutex_destroy(&tp->mutex);

	return ret;
}

/* Note this is also used for shutdown */
static void vsc_tp_remove(struct spi_device *spi)
{
	struct vsc_tp *tp = spi_get_drvdata(spi);

	platform_device_unregister(tp->pdev);

	free_irq(spi->irq, tp);

	cancel_work_sync(&tp->event_work);
	mutex_destroy(&tp->event_notify_mutex);
	mutex_destroy(&tp->mutex);
}

static const struct acpi_device_id vsc_tp_acpi_ids[] = {
	{ "INTC1009" }, /* Raptor Lake */
	{ "INTC1058" }, /* Tiger Lake */
	{ "INTC1094" }, /* Alder Lake */
	{ "INTC10D0" }, /* Meteor Lake */
	{}
};
MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids);

static struct spi_driver vsc_tp_driver = {
	.probe = vsc_tp_probe,
	.remove = vsc_tp_remove,
	.shutdown = vsc_tp_remove,
	.driver = {
		.name = "vsc-tp",
		.acpi_match_table = vsc_tp_acpi_ids,
	},
};
module_spi_driver(vsc_tp_driver);

MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
MODULE_DESCRIPTION("Intel Visual Sensing Controller Transport Layer");
MODULE_LICENSE("GPL");