xref: /linux/drivers/misc/mei/main.c (revision 6093a688a07da07808f0122f9aa2a3eed250d853)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
4  * Intel Management Engine Interface (Intel MEI) Linux driver
5  */
6 
7 #include <linux/module.h>
8 #include <linux/moduleparam.h>
9 #include <linux/kernel.h>
10 #include <linux/device.h>
11 #include <linux/slab.h>
12 #include <linux/fs.h>
13 #include <linux/errno.h>
14 #include <linux/types.h>
15 #include <linux/fcntl.h>
16 #include <linux/poll.h>
17 #include <linux/init.h>
18 #include <linux/ioctl.h>
19 #include <linux/cdev.h>
20 #include <linux/sched/signal.h>
21 #include <linux/compat.h>
22 #include <linux/jiffies.h>
23 #include <linux/interrupt.h>
24 
25 #include <linux/mei.h>
26 
27 #include "mei_dev.h"
28 #include "client.h"
29 
30 static const struct class mei_class = {
31 	.name = "mei",
32 };
33 
34 static dev_t mei_devt;
35 #define MEI_MAX_DEVS  MINORMASK
36 static DEFINE_MUTEX(mei_minor_lock);
37 static DEFINE_IDR(mei_idr);
38 
39 /**
40  * mei_open - the open function
41  *
42  * @inode: pointer to inode structure
43  * @file: pointer to file structure
44  *
45  * Return: 0 on success, <0 on error
46  */
47 static int mei_open(struct inode *inode, struct file *file)
48 {
49 	struct mei_device *dev;
50 	struct mei_cl *cl;
51 
52 	int err;
53 
54 	dev = idr_find(&mei_idr, iminor(inode));
55 	if (!dev)
56 		return -ENODEV;
57 	get_device(&dev->dev);
58 
59 	mutex_lock(&dev->device_lock);
60 
61 	if (dev->dev_state != MEI_DEV_ENABLED) {
62 		dev_dbg(&dev->dev, "dev_state != MEI_ENABLED  dev_state = %s\n",
63 		    mei_dev_state_str(dev->dev_state));
64 		err = -ENODEV;
65 		goto err_unlock;
66 	}
67 
68 	cl = mei_cl_alloc_linked(dev);
69 	if (IS_ERR(cl)) {
70 		err = PTR_ERR(cl);
71 		goto err_unlock;
72 	}
73 
74 	cl->fp = file;
75 	file->private_data = cl;
76 
77 	mutex_unlock(&dev->device_lock);
78 
79 	return nonseekable_open(inode, file);
80 
81 err_unlock:
82 	mutex_unlock(&dev->device_lock);
83 	put_device(&dev->dev);
84 	return err;
85 }
86 
87 /**
88  * mei_cl_vtag_remove_by_fp - remove vtag that corresponds to fp from list
89  *
90  * @cl: host client
91  * @fp: pointer to file structure
92  *
93  */
94 static void mei_cl_vtag_remove_by_fp(const struct mei_cl *cl,
95 				     const struct file *fp)
96 {
97 	struct mei_cl_vtag *vtag_l, *next;
98 
99 	list_for_each_entry_safe(vtag_l, next, &cl->vtag_map, list) {
100 		if (vtag_l->fp == fp) {
101 			list_del(&vtag_l->list);
102 			kfree(vtag_l);
103 			return;
104 		}
105 	}
106 }
107 
108 /**
109  * mei_release - the release function
110  *
111  * @inode: pointer to inode structure
112  * @file: pointer to file structure
113  *
114  * Return: 0 on success, <0 on error
115  */
116 static int mei_release(struct inode *inode, struct file *file)
117 {
118 	struct mei_cl *cl = file->private_data;
119 	struct mei_device *dev;
120 	int rets;
121 
122 	if (WARN_ON(!cl || !cl->dev))
123 		return -ENODEV;
124 
125 	dev = cl->dev;
126 
127 	mutex_lock(&dev->device_lock);
128 
129 	mei_cl_vtag_remove_by_fp(cl, file);
130 
131 	if (!list_empty(&cl->vtag_map)) {
132 		cl_dbg(dev, cl, "not the last vtag\n");
133 		mei_cl_flush_queues(cl, file);
134 		rets = 0;
135 		goto out;
136 	}
137 
138 	rets = mei_cl_disconnect(cl);
139 	/*
140 	 * Check again: This is necessary since disconnect releases the lock
141 	 * and another client can connect in the meantime.
142 	 */
143 	if (!list_empty(&cl->vtag_map)) {
144 		cl_dbg(dev, cl, "not the last vtag after disconnect\n");
145 		mei_cl_flush_queues(cl, file);
146 		goto out;
147 	}
148 
149 	mei_cl_flush_queues(cl, NULL);
150 	cl_dbg(dev, cl, "removing\n");
151 
152 	mei_cl_unlink(cl);
153 	kfree(cl);
154 
155 out:
156 	file->private_data = NULL;
157 
158 	mutex_unlock(&dev->device_lock);
159 	put_device(&dev->dev);
160 	return rets;
161 }
162 
163 
164 /**
165  * mei_read - the read function.
166  *
167  * @file: pointer to file structure
168  * @ubuf: pointer to user buffer
169  * @length: buffer length
170  * @offset: data offset in buffer
171  *
172  * Return: >=0 data length on success, <0 on error
173  */
174 static ssize_t mei_read(struct file *file, char __user *ubuf,
175 			size_t length, loff_t *offset)
176 {
177 	struct mei_cl *cl = file->private_data;
178 	struct mei_device *dev;
179 	struct mei_cl_cb *cb = NULL;
180 	bool nonblock = !!(file->f_flags & O_NONBLOCK);
181 	ssize_t rets;
182 
183 	if (WARN_ON(!cl || !cl->dev))
184 		return -ENODEV;
185 
186 	dev = cl->dev;
187 
188 
189 	mutex_lock(&dev->device_lock);
190 	if (dev->dev_state != MEI_DEV_ENABLED) {
191 		rets = -ENODEV;
192 		goto out;
193 	}
194 
195 	if (length == 0) {
196 		rets = 0;
197 		goto out;
198 	}
199 
200 	if (ubuf == NULL) {
201 		rets = -EMSGSIZE;
202 		goto out;
203 	}
204 
205 	cb = mei_cl_read_cb(cl, file);
206 	if (cb)
207 		goto copy_buffer;
208 
209 	if (*offset > 0)
210 		*offset = 0;
211 
212 	rets = mei_cl_read_start(cl, length, file);
213 	if (rets && rets != -EBUSY) {
214 		cl_dbg(dev, cl, "mei start read failure status = %zd\n", rets);
215 		goto out;
216 	}
217 
218 	if (nonblock) {
219 		rets = -EAGAIN;
220 		goto out;
221 	}
222 
223 	mutex_unlock(&dev->device_lock);
224 	if (wait_event_interruptible(cl->rx_wait,
225 				     mei_cl_read_cb(cl, file) ||
226 				     !mei_cl_is_connected(cl))) {
227 		if (signal_pending(current))
228 			return -EINTR;
229 		return -ERESTARTSYS;
230 	}
231 	mutex_lock(&dev->device_lock);
232 
233 	if (!mei_cl_is_connected(cl)) {
234 		rets = -ENODEV;
235 		goto out;
236 	}
237 
238 	cb = mei_cl_read_cb(cl, file);
239 	if (!cb) {
240 		rets = 0;
241 		goto out;
242 	}
243 
244 copy_buffer:
245 	/* now copy the data to user space */
246 	if (cb->status) {
247 		rets = cb->status;
248 		cl_dbg(dev, cl, "read operation failed %zd\n", rets);
249 		goto free;
250 	}
251 
252 	cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n",
253 	       cb->buf.size, cb->buf_idx, *offset);
254 	if (*offset >= cb->buf_idx) {
255 		rets = 0;
256 		goto free;
257 	}
258 
259 	/* cap the copy length to the data remaining in the buffer;
260 	 * buf_idx may point beyond the requested length */
261 	length = min_t(size_t, length, cb->buf_idx - *offset);
262 
263 	if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
264 		cl_dbg(dev, cl, "failed to copy data to userland\n");
265 		rets = -EFAULT;
266 		goto free;
267 	}
268 
269 	rets = length;
270 	*offset += length;
271 	/* not all data was read, keep the cb */
272 	if (*offset < cb->buf_idx)
273 		goto out;
274 
275 free:
276 	mei_cl_del_rd_completed(cl, cb);
277 	*offset = 0;
278 
279 out:
280 	cl_dbg(dev, cl, "end mei read rets = %zd\n", rets);
281 	mutex_unlock(&dev->device_lock);
282 	return rets;
283 }
284 
285 /**
286  * mei_cl_vtag_by_fp - obtain the vtag by file pointer
287  *
288  * @cl: host client
289  * @fp: pointer to file structure
290  *
291  * Return: vtag value on success, otherwise 0
292  */
293 static u8 mei_cl_vtag_by_fp(const struct mei_cl *cl, const struct file *fp)
294 {
295 	struct mei_cl_vtag *cl_vtag;
296 
297 	if (!fp)
298 		return 0;
299 
300 	list_for_each_entry(cl_vtag, &cl->vtag_map, list)
301 		if (cl_vtag->fp == fp)
302 			return cl_vtag->vtag;
303 	return 0;
304 }
305 
306 /**
307  * mei_write - the write function.
308  *
309  * @file: pointer to file structure
310  * @ubuf: pointer to user buffer
311  * @length: buffer length
312  * @offset: data offset in buffer
313  *
314  * Return: >=0 data length on success, <0 on error
315  */
316 static ssize_t mei_write(struct file *file, const char __user *ubuf,
317 			 size_t length, loff_t *offset)
318 {
319 	struct mei_cl *cl = file->private_data;
320 	struct mei_cl_cb *cb;
321 	struct mei_device *dev;
322 	ssize_t rets;
323 
324 	if (WARN_ON(!cl || !cl->dev))
325 		return -ENODEV;
326 
327 	dev = cl->dev;
328 
329 	mutex_lock(&dev->device_lock);
330 
331 	if (dev->dev_state != MEI_DEV_ENABLED) {
332 		rets = -ENODEV;
333 		goto out;
334 	}
335 
336 	if (!mei_cl_is_connected(cl)) {
337 		cl_dbg(dev, cl, "is not connected");
338 		rets = -ENODEV;
339 		goto out;
340 	}
341 
342 	if (!mei_me_cl_is_active(cl->me_cl)) {
343 		rets = -ENOTTY;
344 		goto out;
345 	}
346 
347 	if (length > mei_cl_mtu(cl)) {
348 		rets = -EFBIG;
349 		goto out;
350 	}
351 
352 	if (length == 0) {
353 		rets = 0;
354 		goto out;
355 	}
356 
357 	while (cl->tx_cb_queued >= dev->tx_queue_limit) {
358 		if (file->f_flags & O_NONBLOCK) {
359 			rets = -EAGAIN;
360 			goto out;
361 		}
362 		mutex_unlock(&dev->device_lock);
363 		rets = wait_event_interruptible(cl->tx_wait,
364 				cl->writing_state == MEI_WRITE_COMPLETE ||
365 				(!mei_cl_is_connected(cl)));
366 		mutex_lock(&dev->device_lock);
367 		if (rets) {
368 			if (signal_pending(current))
369 				rets = -EINTR;
370 			goto out;
371 		}
372 		if (!mei_cl_is_connected(cl)) {
373 			rets = -ENODEV;
374 			goto out;
375 		}
376 	}
377 
378 	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
379 	if (!cb) {
380 		rets = -ENOMEM;
381 		goto out;
382 	}
383 	cb->vtag = mei_cl_vtag_by_fp(cl, file);
384 
385 	rets = copy_from_user(cb->buf.data, ubuf, length);
386 	if (rets) {
387 		cl_dbg(dev, cl, "failed to copy data from userland\n");
388 		rets = -EFAULT;
389 		mei_io_cb_free(cb);
390 		goto out;
391 	}
392 
393 	rets = mei_cl_write(cl, cb, MAX_SCHEDULE_TIMEOUT);
394 out:
395 	mutex_unlock(&dev->device_lock);
396 	return rets;
397 }
398 
399 /**
400  * mei_ioctl_connect_client - the connect to fw client IOCTL function
401  *
402  * @file: private data of the file object
403  * @in_client_uuid: requested UUID for connection
404  * @client: IOCTL connect data, output parameters
405  *
406  * Locking: called under "dev->device_lock" lock
407  *
408  * Return: 0 on success, <0 on failure.
409  */
410 static int mei_ioctl_connect_client(struct file *file,
411 				    const uuid_le *in_client_uuid,
412 				    struct mei_client *client)
413 {
414 	struct mei_device *dev;
415 	struct mei_me_client *me_cl;
416 	struct mei_cl *cl;
417 	int rets;
418 
419 	cl = file->private_data;
420 	dev = cl->dev;
421 
422 	if (cl->state != MEI_FILE_INITIALIZING &&
423 	    cl->state != MEI_FILE_DISCONNECTED)
424 		return  -EBUSY;
425 
426 retry:
427 	/* find ME client we're trying to connect to */
428 	me_cl = mei_me_cl_by_uuid(dev, in_client_uuid);
429 	if (!me_cl) {
430 		cl_dbg(dev, cl, "Cannot connect to FW Client UUID = %pUl\n",
431 			in_client_uuid);
432 		rets = -ENOTTY;
433 		goto end;
434 	}
435 
436 	if (me_cl->props.fixed_address) {
437 		bool forbidden = dev->override_fixed_address ?
438 			 !dev->allow_fixed_address : !dev->hbm_f_fa_supported;
439 		if (forbidden) {
440 			cl_dbg(dev, cl, "Connection forbidden to FW Client UUID = %pUl\n",
441 				in_client_uuid);
442 			rets = -ENOTTY;
443 			goto end;
444 		}
445 	}
446 
447 	cl_dbg(dev, cl, "Connect to FW Client ID = %d\n", me_cl->client_id);
448 	cl_dbg(dev, cl, "FW Client - Protocol Version = %d\n", me_cl->props.protocol_version);
449 	cl_dbg(dev, cl, "FW Client - Max Msg Len = %d\n", me_cl->props.max_msg_length);
450 
451 	/* prepare the output buffer */
452 	client->max_msg_length = me_cl->props.max_msg_length;
453 	client->protocol_version = me_cl->props.protocol_version;
454 	cl_dbg(dev, cl, "Can connect?\n");
455 
456 	rets = mei_cl_connect(cl, me_cl, file);
457 
458 	if (rets && cl->status == -EFAULT &&
459 	    (dev->dev_state == MEI_DEV_RESETTING ||
460 	     dev->dev_state == MEI_DEV_INIT_CLIENTS)) {
461 		/* in link reset, wait for its completion */
462 		mutex_unlock(&dev->device_lock);
463 		rets = wait_event_interruptible_timeout(dev->wait_dev_state,
464 							dev->dev_state == MEI_DEV_ENABLED,
465 							dev->timeouts.link_reset_wait);
466 		mutex_lock(&dev->device_lock);
467 		if (rets < 0) {
468 			if (signal_pending(current))
469 				rets = -EINTR;
470 			goto end;
471 		}
472 		if (dev->dev_state != MEI_DEV_ENABLED) {
473 			rets = -ETIME;
474 			goto end;
475 		}
476 		mei_me_cl_put(me_cl);
477 		goto retry;
478 	}
479 
480 end:
481 	mei_me_cl_put(me_cl);
482 	return rets;
483 }
484 
485 /**
486  * mei_vt_support_check - check if client supports vtags
487  *
488  * @dev: mei_device
489  * @uuid: client UUID
490  *
491  * Locking: called under "dev->device_lock" lock
492  *
493  * Return:
494  *	0 - supported
495  *	-ENOTTY - no such client
496  *	-EOPNOTSUPP - vtags are not supported by client
497  */
498 static int mei_vt_support_check(struct mei_device *dev, const uuid_le *uuid)
499 {
500 	struct mei_me_client *me_cl;
501 	int ret;
502 
503 	if (!dev->hbm_f_vt_supported)
504 		return -EOPNOTSUPP;
505 
506 	me_cl = mei_me_cl_by_uuid(dev, uuid);
507 	if (!me_cl) {
508 		dev_dbg(&dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
509 			uuid);
510 		return -ENOTTY;
511 	}
512 	ret = me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
513 	mei_me_cl_put(me_cl);
514 
515 	return ret;
516 }
517 
518 /**
519  * mei_ioctl_connect_vtag - connect to fw client with vtag IOCTL function
520  *
521  * @file: private data of the file object
522  * @in_client_uuid: requested UUID for connection
523  * @client: IOCTL connect data, output parameters
524  * @vtag: vm tag
525  *
526  * Locking: called under "dev->device_lock" lock
527  *
528  * Return: 0 on success, <0 on failure.
529  */
530 static int mei_ioctl_connect_vtag(struct file *file,
531 				  const uuid_le *in_client_uuid,
532 				  struct mei_client *client,
533 				  u8 vtag)
534 {
535 	struct mei_device *dev;
536 	struct mei_cl *cl;
537 	struct mei_cl *pos;
538 	struct mei_cl_vtag *cl_vtag;
539 
540 	cl = file->private_data;
541 	dev = cl->dev;
542 
543 	cl_dbg(dev, cl, "FW Client %pUl vtag %d\n", in_client_uuid, vtag);
544 
545 	switch (cl->state) {
546 	case MEI_FILE_DISCONNECTED:
547 		if (mei_cl_vtag_by_fp(cl, file) != vtag) {
548 			cl_err(dev, cl, "reconnect with different vtag\n");
549 			return -EINVAL;
550 		}
551 		break;
552 	case MEI_FILE_INITIALIZING:
553 		/* a malicious connect from another thread may have pushed a vtag */
554 		if (!IS_ERR(mei_cl_fp_by_vtag(cl, vtag))) {
555 			cl_err(dev, cl, "vtag already filled\n");
556 			return -EINVAL;
557 		}
558 
559 		list_for_each_entry(pos, &dev->file_list, link) {
560 			if (pos == cl)
561 				continue;
562 			if (!pos->me_cl)
563 				continue;
564 
565 			/* only search for same UUID */
566 			if (uuid_le_cmp(*mei_cl_uuid(pos), *in_client_uuid))
567 				continue;
568 
569 			/* if the tag already exists, try another fp */
570 			if (!IS_ERR(mei_cl_fp_by_vtag(pos, vtag)))
571 				continue;
572 
573 			/* replace cl with the existing one */
574 			cl_dbg(dev, cl, "replacing with existing cl\n");
575 			mei_cl_unlink(cl);
576 			kfree(cl);
577 			file->private_data = pos;
578 			cl = pos;
579 			break;
580 		}
581 
582 		cl_vtag = mei_cl_vtag_alloc(file, vtag);
583 		if (IS_ERR(cl_vtag))
584 			return -ENOMEM;
585 
586 		list_add_tail(&cl_vtag->list, &cl->vtag_map);
587 		break;
588 	default:
589 		return -EBUSY;
590 	}
591 
592 	while (cl->state != MEI_FILE_INITIALIZING &&
593 	       cl->state != MEI_FILE_DISCONNECTED &&
594 	       cl->state != MEI_FILE_CONNECTED) {
595 		mutex_unlock(&dev->device_lock);
596 		wait_event_timeout(cl->wait,
597 				   (cl->state == MEI_FILE_CONNECTED ||
598 				    cl->state == MEI_FILE_DISCONNECTED ||
599 				    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
600 				    cl->state == MEI_FILE_DISCONNECT_REPLY),
601 				   dev->timeouts.cl_connect);
602 		mutex_lock(&dev->device_lock);
603 	}
604 
605 	if (!mei_cl_is_connected(cl))
606 		return mei_ioctl_connect_client(file, in_client_uuid, client);
607 
608 	client->max_msg_length = cl->me_cl->props.max_msg_length;
609 	client->protocol_version = cl->me_cl->props.protocol_version;
610 
611 	return 0;
612 }
613 
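/*
 * Illustrative userspace sketch, not part of this driver: connect to a
 * firmware client with a virtual tag through IOCTL_MEI_CONNECT_CLIENT_VTAG,
 * the path served by mei_ioctl_connect_vtag() above. Compile it separately
 * against the UAPI <linux/mei.h>. The device node, UUID and vtag are supplied
 * by the caller; the vtag must be non-zero and the firmware client must
 * support vtags, otherwise the plain IOCTL_MEI_CONNECT_CLIENT path is used.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mei.h>

static int mei_connect_vtag_example(const char *devnode,
				    const uuid_le *guid, __u8 vtag)
{
	struct mei_connect_client_data_vtag conn;
	int fd;

	fd = open(devnode, O_RDWR);
	if (fd < 0)
		return -1;

	memset(&conn, 0, sizeof(conn));
	conn.connect.in_client_uuid = *guid;
	conn.connect.vtag = vtag;	/* zero is rejected with -EINVAL */

	if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT_VTAG, &conn) < 0) {
		close(fd);
		return -1;
	}

	/* on success the union holds the firmware client properties */
	printf("connected, max_msg_length %u\n",
	       conn.out_client_properties.max_msg_length);
	return fd;
}
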
614 /**
615  * mei_ioctl_client_notify_request - propagate event notification
616  *                                   request to client
617  *
618  * @file: pointer to file structure
619  * @request: 0 - disable, 1 - enable
620  *
621  * Return: 0 on success, <0 on error
622  */
623 static int mei_ioctl_client_notify_request(const struct file *file, u32 request)
624 {
625 	struct mei_cl *cl = file->private_data;
626 
627 	if (request != MEI_HBM_NOTIFICATION_START &&
628 	    request != MEI_HBM_NOTIFICATION_STOP)
629 		return -EINVAL;
630 
631 	return mei_cl_notify_request(cl, file, (u8)request);
632 }
633 
634 /**
635  * mei_ioctl_client_notify_get - wait for and retrieve a notification event
636  *
637  * @file: pointer to file structure
638  * @notify_get: set to 1 if a notification event was received, 0 otherwise
639  *
640  * Return: 0 on success, <0 on error
641  */
642 static int mei_ioctl_client_notify_get(const struct file *file, u32 *notify_get)
643 {
644 	struct mei_cl *cl = file->private_data;
645 	bool notify_ev;
646 	bool block = (file->f_flags & O_NONBLOCK) == 0;
647 	int rets;
648 
649 	rets = mei_cl_notify_get(cl, block, &notify_ev);
650 	if (rets)
651 		return rets;
652 
653 	*notify_get = notify_ev ? 1 : 0;
654 	return 0;
655 }
656 
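/*
 * Illustrative userspace sketch, not part of this driver: arm the firmware
 * event notification with IOCTL_MEI_NOTIFY_SET and fetch it later with
 * IOCTL_MEI_NOTIFY_GET, the paths served by the two helpers above. "fd" is
 * assumed to be a /dev/meiX descriptor already connected to a firmware
 * client; with O_NONBLOCK clear, the GET call blocks until an event arrives.
 */
#include <sys/ioctl.h>
#include <linux/mei.h>

static int mei_notify_example(int fd)
{
	__u32 enable = 1;	/* 1 - enable, 0 - disable, as documented above */
	__u32 event = 0;

	if (ioctl(fd, IOCTL_MEI_NOTIFY_SET, &enable) < 0)
		return -1;

	/* blocks until the firmware client raises a notification */
	if (ioctl(fd, IOCTL_MEI_NOTIFY_GET, &event) < 0)
		return -1;

	return event ? 1 : 0;
}
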
657 /**
658  * mei_ioctl - the IOCTL function
659  *
660  * @file: pointer to file structure
661  * @cmd: ioctl command
662  * @data: pointer to mei message structure
663  *
664  * Return: 0 on success, <0 on error
665  */
666 static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
667 {
668 	struct mei_device *dev;
669 	struct mei_cl *cl = file->private_data;
670 	struct mei_connect_client_data conn;
671 	struct mei_connect_client_data_vtag conn_vtag;
672 	uuid_le cl_uuid;
673 	struct mei_client *props;
674 	u8 vtag;
675 	u32 notify_get, notify_req;
676 	int rets;
677 
678 
679 	if (WARN_ON(!cl || !cl->dev))
680 		return -ENODEV;
681 
682 	dev = cl->dev;
683 
684 	cl_dbg(dev, cl, "IOCTL cmd = 0x%x", cmd);
685 
686 	mutex_lock(&dev->device_lock);
687 	if (dev->dev_state != MEI_DEV_ENABLED) {
688 		rets = -ENODEV;
689 		goto out;
690 	}
691 
692 	switch (cmd) {
693 	case IOCTL_MEI_CONNECT_CLIENT:
694 		cl_dbg(dev, cl, "IOCTL_MEI_CONNECT_CLIENT\n");
695 		if (copy_from_user(&conn, (char __user *)data, sizeof(conn))) {
696 			cl_dbg(dev, cl, "failed to copy data from userland\n");
697 			rets = -EFAULT;
698 			goto out;
699 		}
700 		cl_uuid = conn.in_client_uuid;
701 		props = &conn.out_client_properties;
702 		vtag = 0;
703 
704 		rets = mei_vt_support_check(dev, &cl_uuid);
705 		if (rets == -ENOTTY)
706 			goto out;
707 		if (!rets)
708 			rets = mei_ioctl_connect_vtag(file, &cl_uuid, props,
709 						      vtag);
710 		else
711 			rets = mei_ioctl_connect_client(file, &cl_uuid, props);
712 		if (rets)
713 			goto out;
714 
715 		/* if all is ok, copy the data back to user space */
716 		if (copy_to_user((char __user *)data, &conn, sizeof(conn))) {
717 			cl_dbg(dev, cl, "failed to copy data to userland\n");
718 			rets = -EFAULT;
719 			goto out;
720 		}
721 
722 		break;
723 
724 	case IOCTL_MEI_CONNECT_CLIENT_VTAG:
725 		cl_dbg(dev, cl, "IOCTL_MEI_CONNECT_CLIENT_VTAG\n");
726 		if (copy_from_user(&conn_vtag, (char __user *)data,
727 				   sizeof(conn_vtag))) {
728 			cl_dbg(dev, cl, "failed to copy data from userland\n");
729 			rets = -EFAULT;
730 			goto out;
731 		}
732 
733 		cl_uuid = conn_vtag.connect.in_client_uuid;
734 		props = &conn_vtag.out_client_properties;
735 		vtag = conn_vtag.connect.vtag;
736 
737 		rets = mei_vt_support_check(dev, &cl_uuid);
738 		if (rets == -EOPNOTSUPP)
739 			cl_dbg(dev, cl, "FW Client %pUl does not support vtags\n",
740 				&cl_uuid);
741 		if (rets)
742 			goto out;
743 
744 		if (!vtag) {
745 			cl_dbg(dev, cl, "vtag can't be zero\n");
746 			rets = -EINVAL;
747 			goto out;
748 		}
749 
750 		rets = mei_ioctl_connect_vtag(file, &cl_uuid, props, vtag);
751 		if (rets)
752 			goto out;
753 
754 		/* if all is ok, copy the data back to user space */
755 		if (copy_to_user((char __user *)data, &conn_vtag,
756 				 sizeof(conn_vtag))) {
757 			cl_dbg(dev, cl, "failed to copy data to userland\n");
758 			rets = -EFAULT;
759 			goto out;
760 		}
761 
762 		break;
763 
764 	case IOCTL_MEI_NOTIFY_SET:
765 		cl_dbg(dev, cl, "IOCTL_MEI_NOTIFY_SET\n");
766 		if (copy_from_user(&notify_req,
767 				   (char __user *)data, sizeof(notify_req))) {
768 			cl_dbg(dev, cl, "failed to copy data from userland\n");
769 			rets = -EFAULT;
770 			goto out;
771 		}
772 		rets = mei_ioctl_client_notify_request(file, notify_req);
773 		break;
774 
775 	case IOCTL_MEI_NOTIFY_GET:
776 		cl_dbg(dev, cl, "IOCTL_MEI_NOTIFY_GET\n");
777 		rets = mei_ioctl_client_notify_get(file, &notify_get);
778 		if (rets)
779 			goto out;
780 
781 		cl_dbg(dev, cl, "copy notify data to user\n");
782 		if (copy_to_user((char __user *)data,
783 				&notify_get, sizeof(notify_get))) {
784 			cl_dbg(dev, cl, "failed to copy data to userland\n");
785 			rets = -EFAULT;
786 			goto out;
787 
788 		}
789 		break;
790 
791 	default:
792 		rets = -ENOIOCTLCMD;
793 	}
794 
795 out:
796 	mutex_unlock(&dev->device_lock);
797 	return rets;
798 }
799 
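/*
 * Illustrative userspace sketch, not part of this driver: the character
 * device flow implemented above (open, IOCTL_MEI_CONNECT_CLIENT, write,
 * read). Compile it separately against the UAPI <linux/mei.h>. The UUID
 * and the payload are placeholders; a real application uses the UUID of
 * the firmware client it talks to and a message that client understands.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mei.h>

static int mei_connect_example(void)
{
	/* placeholder UUID - replace with the target firmware client UUID */
	const uuid_le guid = UUID_LE(0x00000000, 0x0000, 0x0000,
				     0x00, 0x00, 0x00, 0x00,
				     0x00, 0x00, 0x00, 0x00);
	struct mei_connect_client_data conn;
	unsigned char buf[128] = { 0 };
	ssize_t len;
	int fd;

	fd = open("/dev/mei0", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&conn, 0, sizeof(conn));
	conn.in_client_uuid = guid;
	if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &conn) < 0)
		goto err;

	/* on success the union holds the firmware client properties */
	printf("max_msg_length %u protocol_version %u\n",
	       conn.out_client_properties.max_msg_length,
	       (unsigned int)conn.out_client_properties.protocol_version);

	/* mei_write() rejects messages larger than the client MTU with -EFBIG */
	len = write(fd, buf, sizeof(buf));
	if (len < 0)
		goto err;

	/* blocking read: waits until the firmware client answers */
	len = read(fd, buf, sizeof(buf));
	if (len < 0)
		goto err;

	close(fd);
	return 0;
err:
	close(fd);
	return -1;
}
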
800 /**
801  * mei_poll - the poll function
802  *
803  * @file: pointer to file structure
804  * @wait: pointer to poll_table structure
805  *
806  * Return: poll mask
807  */
808 static __poll_t mei_poll(struct file *file, poll_table *wait)
809 {
810 	__poll_t req_events = poll_requested_events(wait);
811 	struct mei_cl *cl = file->private_data;
812 	struct mei_device *dev;
813 	__poll_t mask = 0;
814 	bool notify_en;
815 
816 	if (WARN_ON(!cl || !cl->dev))
817 		return EPOLLERR;
818 
819 	dev = cl->dev;
820 
821 	mutex_lock(&dev->device_lock);
822 
823 	notify_en = cl->notify_en && (req_events & EPOLLPRI);
824 
825 	if (dev->dev_state != MEI_DEV_ENABLED ||
826 	    !mei_cl_is_connected(cl)) {
827 		mask = EPOLLERR;
828 		goto out;
829 	}
830 
831 	if (notify_en) {
832 		poll_wait(file, &cl->ev_wait, wait);
833 		if (cl->notify_ev)
834 			mask |= EPOLLPRI;
835 	}
836 
837 	if (req_events & (EPOLLIN | EPOLLRDNORM)) {
838 		poll_wait(file, &cl->rx_wait, wait);
839 
840 		if (mei_cl_read_cb(cl, file))
841 			mask |= EPOLLIN | EPOLLRDNORM;
842 		else
843 			mei_cl_read_start(cl, mei_cl_mtu(cl), file);
844 	}
845 
846 	if (req_events & (EPOLLOUT | EPOLLWRNORM)) {
847 		poll_wait(file, &cl->tx_wait, wait);
848 		if (cl->tx_cb_queued < dev->tx_queue_limit)
849 			mask |= EPOLLOUT | EPOLLWRNORM;
850 	}
851 
852 out:
853 	mutex_unlock(&dev->device_lock);
854 	return mask;
855 }
856 
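/*
 * Illustrative userspace sketch, not part of this driver: wait on a
 * connected /dev/meiX descriptor with poll(), matching the mask computed
 * by mei_poll() above. POLLIN | POLLRDNORM means a received message is
 * ready for read(), POLLOUT | POLLWRNORM means the TX queue has room for
 * another write(), POLLPRI means an armed notification fired, and POLLERR
 * means the device is disabled or the client got disconnected.
 */
#include <poll.h>
#include <stdio.h>

static int mei_poll_example(int fd)
{
	struct pollfd pfd = {
		.fd = fd,
		.events = POLLIN | POLLPRI,
	};
	int ret;

	ret = poll(&pfd, 1, 5000 /* ms */);
	if (ret <= 0)
		return ret;	/* 0 - timeout, <0 - error */

	if (pfd.revents & POLLERR)
		return -1;

	if (pfd.revents & POLLPRI)
		printf("notification pending, fetch it with IOCTL_MEI_NOTIFY_GET\n");
	if (pfd.revents & POLLIN)
		printf("a message is ready, read() will not block\n");

	return 0;
}
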
857 /**
858  * mei_cl_is_write_queued - check if the client has pending writes.
859  *
860  * @cl: writing host client
861  *
862  * Return: true if client is writing, false otherwise.
863  */
864 static bool mei_cl_is_write_queued(struct mei_cl *cl)
865 {
866 	struct mei_device *dev = cl->dev;
867 	struct mei_cl_cb *cb;
868 
869 	list_for_each_entry(cb, &dev->write_list, list)
870 		if (cb->cl == cl)
871 			return true;
872 	list_for_each_entry(cb, &dev->write_waiting_list, list)
873 		if (cb->cl == cl)
874 			return true;
875 	return false;
876 }
877 
878 /**
879  * mei_fsync - the fsync handler
880  *
881  * @fp:       pointer to file structure
882  * @start:    unused
883  * @end:      unused
884  * @datasync: unused
885  *
886  * Return: 0 on success, -ENODEV if client is not connected
887  */
888 static int mei_fsync(struct file *fp, loff_t start, loff_t end, int datasync)
889 {
890 	struct mei_cl *cl = fp->private_data;
891 	struct mei_device *dev;
892 	int rets;
893 
894 	if (WARN_ON(!cl || !cl->dev))
895 		return -ENODEV;
896 
897 	dev = cl->dev;
898 
899 	mutex_lock(&dev->device_lock);
900 
901 	if (dev->dev_state != MEI_DEV_ENABLED || !mei_cl_is_connected(cl)) {
902 		rets = -ENODEV;
903 		goto out;
904 	}
905 
906 	while (mei_cl_is_write_queued(cl)) {
907 		mutex_unlock(&dev->device_lock);
908 		rets = wait_event_interruptible(cl->tx_wait,
909 				cl->writing_state == MEI_WRITE_COMPLETE ||
910 				!mei_cl_is_connected(cl));
911 		mutex_lock(&dev->device_lock);
912 		if (rets) {
913 			if (signal_pending(current))
914 				rets = -EINTR;
915 			goto out;
916 		}
917 		if (!mei_cl_is_connected(cl)) {
918 			rets = -ENODEV;
919 			goto out;
920 		}
921 	}
922 	rets = 0;
923 out:
924 	mutex_unlock(&dev->device_lock);
925 	return rets;
926 }
927 
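/*
 * Illustrative userspace sketch, not part of this driver: fsync() on a
 * connected /dev/meiX descriptor reaches mei_fsync() above and blocks
 * until no write is queued for this client anymore (or the client
 * disconnects, in which case errno is set to ENODEV).
 */
#include <unistd.h>

static int mei_flush_example(int fd)
{
	/* 0 once all queued writes have drained, -1 with errno set otherwise */
	return fsync(fd);
}
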
928 /**
929  * mei_fasync - asynchronous io support
930  *
931  * @fd: file descriptor
932  * @file: pointer to file structure
933  * @band: band bitmap
934  *
935  * Return: negative on error,
936  *         0 if it made no changes,
937  *         and positive if a process was added or deleted
938  */
939 static int mei_fasync(int fd, struct file *file, int band)
940 {
941 
942 	struct mei_cl *cl = file->private_data;
943 
944 	if (!mei_cl_is_connected(cl))
945 		return -ENODEV;
946 
947 	return fasync_helper(fd, file, band, &cl->ev_async);
948 }
949 
950 /**
951  * trc_show - mei device trc attribute show method
952  *
953  * @device: device pointer
954  * @attr: attribute pointer
955  * @buf:  char out buffer
956  *
957  * Return: number of bytes printed into buf or error
958  */
959 static ssize_t trc_show(struct device *device,
960 			struct device_attribute *attr, char *buf)
961 {
962 	struct mei_device *dev = dev_get_drvdata(device);
963 	u32 trc;
964 	int ret;
965 
966 	ret = mei_trc_status(dev, &trc);
967 	if (ret)
968 		return ret;
969 	return sprintf(buf, "%08X\n", trc);
970 }
971 static DEVICE_ATTR_RO(trc);
972 
973 /**
974  * fw_status_show - mei device fw_status attribute show method
975  *
976  * @device: device pointer
977  * @attr: attribute pointer
978  * @buf:  char out buffer
979  *
980  * Return: number of bytes printed into buf or error
981  */
982 static ssize_t fw_status_show(struct device *device,
983 		struct device_attribute *attr, char *buf)
984 {
985 	struct mei_device *dev = dev_get_drvdata(device);
986 	struct mei_fw_status fw_status;
987 	int err, i;
988 	ssize_t cnt = 0;
989 
990 	mutex_lock(&dev->device_lock);
991 	err = mei_fw_status(dev, &fw_status);
992 	mutex_unlock(&dev->device_lock);
993 	if (err) {
994 		dev_err(device, "read fw_status error = %d\n", err);
995 		return err;
996 	}
997 
998 	for (i = 0; i < fw_status.count; i++)
999 		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%08X\n",
1000 				fw_status.status[i]);
1001 	return cnt;
1002 }
1003 static DEVICE_ATTR_RO(fw_status);
1004 
1005 /**
1006  * hbm_ver_show - display HBM protocol version negotiated with FW
1007  *
1008  * @device: device pointer
1009  * @attr: attribute pointer
1010  * @buf:  char out buffer
1011  *
1012  * Return: number of bytes printed into buf or error
1013  */
1014 static ssize_t hbm_ver_show(struct device *device,
1015 			    struct device_attribute *attr, char *buf)
1016 {
1017 	struct mei_device *dev = dev_get_drvdata(device);
1018 	struct hbm_version ver;
1019 
1020 	mutex_lock(&dev->device_lock);
1021 	ver = dev->version;
1022 	mutex_unlock(&dev->device_lock);
1023 
1024 	return sprintf(buf, "%u.%u\n", ver.major_version, ver.minor_version);
1025 }
1026 static DEVICE_ATTR_RO(hbm_ver);
1027 
1028 /**
1029  * hbm_ver_drv_show - display HBM protocol version advertised by driver
1030  *
1031  * @device: device pointer
1032  * @attr: attribute pointer
1033  * @buf:  char out buffer
1034  *
1035  * Return: number of bytes printed into buf or error
1036  */
1037 static ssize_t hbm_ver_drv_show(struct device *device,
1038 				struct device_attribute *attr, char *buf)
1039 {
1040 	return sprintf(buf, "%u.%u\n", HBM_MAJOR_VERSION, HBM_MINOR_VERSION);
1041 }
1042 static DEVICE_ATTR_RO(hbm_ver_drv);
1043 
1044 static ssize_t tx_queue_limit_show(struct device *device,
1045 				   struct device_attribute *attr, char *buf)
1046 {
1047 	struct mei_device *dev = dev_get_drvdata(device);
1048 	u8 size = 0;
1049 
1050 	mutex_lock(&dev->device_lock);
1051 	size = dev->tx_queue_limit;
1052 	mutex_unlock(&dev->device_lock);
1053 
1054 	return sysfs_emit(buf, "%u\n", size);
1055 }
1056 
1057 static ssize_t tx_queue_limit_store(struct device *device,
1058 				    struct device_attribute *attr,
1059 				    const char *buf, size_t count)
1060 {
1061 	struct mei_device *dev = dev_get_drvdata(device);
1062 	u8 limit;
1063 	unsigned int inp;
1064 	int err;
1065 
1066 	err = kstrtouint(buf, 10, &inp);
1067 	if (err)
1068 		return err;
1069 	if (inp > MEI_TX_QUEUE_LIMIT_MAX || inp < MEI_TX_QUEUE_LIMIT_MIN)
1070 		return -EINVAL;
1071 	limit = inp;
1072 
1073 	mutex_lock(&dev->device_lock);
1074 	dev->tx_queue_limit = limit;
1075 	mutex_unlock(&dev->device_lock);
1076 
1077 	return count;
1078 }
1079 static DEVICE_ATTR_RW(tx_queue_limit);
1080 
1081 /**
1082  * fw_ver_show - display ME FW version
1083  *
1084  * @device: device pointer
1085  * @attr: attribute pointer
1086  * @buf:  char out buffer
1087  *
1088  * Return: number of bytes printed into buf or error
1089  */
1090 static ssize_t fw_ver_show(struct device *device,
1091 			   struct device_attribute *attr, char *buf)
1092 {
1093 	struct mei_device *dev = dev_get_drvdata(device);
1094 	struct mei_fw_version *ver;
1095 	ssize_t cnt = 0;
1096 	int i;
1097 
1098 	ver = dev->fw_ver;
1099 
1100 	for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++)
1101 		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u:%u.%u.%u.%u\n",
1102 				 ver[i].platform, ver[i].major, ver[i].minor,
1103 				 ver[i].hotfix, ver[i].buildno);
1104 	return cnt;
1105 }
1106 static DEVICE_ATTR_RO(fw_ver);
1107 
1108 /**
1109  * dev_state_show - display device state
1110  *
1111  * @device: device pointer
1112  * @attr: attribute pointer
1113  * @buf:  char out buffer
1114  *
1115  * Return: number of bytes printed into buf or error
1116  */
1117 static ssize_t dev_state_show(struct device *device,
1118 			      struct device_attribute *attr, char *buf)
1119 {
1120 	struct mei_device *dev = dev_get_drvdata(device);
1121 	enum mei_dev_state dev_state;
1122 
1123 	mutex_lock(&dev->device_lock);
1124 	dev_state = dev->dev_state;
1125 	mutex_unlock(&dev->device_lock);
1126 
1127 	return sprintf(buf, "%s", mei_dev_state_str(dev_state));
1128 }
1129 static DEVICE_ATTR_RO(dev_state);
1130 
1131 /**
1132  * mei_set_devstate - set new device state and notify the sysfs dev_state file
1133  *
1134  * @dev: mei_device
1135  * @state: new device state
1136  */
1137 void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state)
1138 {
1139 	struct device *clsdev;
1140 
1141 	if (dev->dev_state == state)
1142 		return;
1143 
1144 	dev->dev_state = state;
1145 
1146 	wake_up_interruptible_all(&dev->wait_dev_state);
1147 
1148 	if (!dev->cdev)
1149 		return;
1150 
1151 	clsdev = class_find_device_by_devt(&mei_class, dev->cdev->dev);
1152 	if (clsdev) {
1153 		sysfs_notify(&clsdev->kobj, NULL, "dev_state");
1154 		put_device(clsdev);
1155 	}
1156 }
1157 
1158 /**
1159  * kind_show - display device kind
1160  *
1161  * @device: device pointer
1162  * @attr: attribute pointer
1163  * @buf: char out buffer
1164  *
1165  * Return: number of bytes printed into buf or error
1166  */
1167 static ssize_t kind_show(struct device *device,
1168 			 struct device_attribute *attr, char *buf)
1169 {
1170 	struct mei_device *dev = dev_get_drvdata(device);
1171 	ssize_t ret;
1172 
1173 	if (dev->kind)
1174 		ret = sprintf(buf, "%s\n", dev->kind);
1175 	else
1176 		ret = sprintf(buf, "%s\n", "mei");
1177 
1178 	return ret;
1179 }
1180 static DEVICE_ATTR_RO(kind);
1181 
1182 static struct attribute *mei_attrs[] = {
1183 	&dev_attr_fw_status.attr,
1184 	&dev_attr_hbm_ver.attr,
1185 	&dev_attr_hbm_ver_drv.attr,
1186 	&dev_attr_tx_queue_limit.attr,
1187 	&dev_attr_fw_ver.attr,
1188 	&dev_attr_dev_state.attr,
1189 	&dev_attr_trc.attr,
1190 	&dev_attr_kind.attr,
1191 	NULL
1192 };
1193 ATTRIBUTE_GROUPS(mei);
1194 
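/*
 * Illustrative userspace sketch, not part of this driver: the attributes
 * above appear under /sys/class/mei/meiN/ (dev_state, fw_status, fw_ver,
 * hbm_ver, hbm_ver_drv, tx_queue_limit, trc, kind) and need only plain
 * file I/O. The "mei0" node name assumes the first registered device.
 */
#include <stdio.h>

static int mei_sysfs_example(void)
{
	char line[64];
	FILE *f;

	f = fopen("/sys/class/mei/mei0/dev_state", "r");
	if (!f)
		return -1;

	/* dev_state_show() prints the string from mei_dev_state_str() */
	if (fgets(line, sizeof(line), f))
		printf("mei0 state: %s\n", line);

	fclose(f);
	return 0;
}
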
1195 /*
1196  * The file operations structure used for the mei character device.
1197  */
1198 static const struct file_operations mei_fops = {
1199 	.owner = THIS_MODULE,
1200 	.read = mei_read,
1201 	.unlocked_ioctl = mei_ioctl,
1202 	.compat_ioctl = compat_ptr_ioctl,
1203 	.open = mei_open,
1204 	.release = mei_release,
1205 	.write = mei_write,
1206 	.poll = mei_poll,
1207 	.fsync = mei_fsync,
1208 	.fasync = mei_fasync,
1209 };
1210 
1211 /**
1212  * mei_minor_get - obtain next free device minor number
1213  *
1214  * @dev:  device pointer
1215  *
1216  * Return: allocated minor, or -ENOSPC if no free minor left
1217  */
1218 static int mei_minor_get(struct mei_device *dev)
1219 {
1220 	int ret;
1221 
1222 	mutex_lock(&mei_minor_lock);
1223 	ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL);
1224 	if (ret >= 0)
1225 		dev->minor = ret;
1226 	else if (ret == -ENOSPC)
1227 		dev_err(&dev->dev, "too many mei devices\n");
1228 
1229 	mutex_unlock(&mei_minor_lock);
1230 	return ret;
1231 }
1232 
1233 /**
1234  * mei_minor_free - mark device minor number as free
1235  *
1236  * @minor: minor number to free
1237  */
1238 static void mei_minor_free(int minor)
1239 {
1240 	mutex_lock(&mei_minor_lock);
1241 	idr_remove(&mei_idr, minor);
1242 	mutex_unlock(&mei_minor_lock);
1243 }
1244 
1245 static void mei_device_release(struct device *dev)
1246 {
1247 	kfree(dev_get_drvdata(dev));
1248 }
1249 
1250 int mei_register(struct mei_device *dev, struct device *parent)
1251 {
1252 	int ret, devno;
1253 	int minor;
1254 
1255 	ret = mei_minor_get(dev);
1256 	if (ret < 0)
1257 		return ret;
1258 
1259 	minor = dev->minor;
1260 
1261 	/* Fill in the data structures */
1262 	devno = MKDEV(MAJOR(mei_devt), dev->minor);
1263 
1264 	device_initialize(&dev->dev);
1265 	dev->dev.devt = devno;
1266 	dev->dev.class = &mei_class;
1267 	dev->dev.parent = parent;
1268 	dev->dev.groups = mei_groups;
1269 	dev->dev.release = mei_device_release;
1270 	dev_set_drvdata(&dev->dev, dev);
1271 
1272 	dev->cdev = cdev_alloc();
1273 	if (!dev->cdev) {
1274 		ret = -ENOMEM;
1275 		goto err;
1276 	}
1277 	dev->cdev->ops = &mei_fops;
1278 	dev->cdev->owner = parent->driver->owner;
1279 	cdev_set_parent(dev->cdev, &dev->dev.kobj);
1280 
1281 	/* Add the device */
1282 	ret = cdev_add(dev->cdev, devno, 1);
1283 	if (ret) {
1284 		dev_err(parent, "unable to add cdev for device %d:%d\n",
1285 			MAJOR(mei_devt), dev->minor);
1286 		goto err_del_cdev;
1287 	}
1288 
1289 	ret = dev_set_name(&dev->dev, "mei%d", dev->minor);
1290 	if (ret) {
1291 		dev_err(parent, "unable to set name to device %d:%d ret = %d\n",
1292 			MAJOR(mei_devt), dev->minor, ret);
1293 		goto err_del_cdev;
1294 	}
1295 
1296 	ret = device_add(&dev->dev);
1297 	if (ret) {
1298 		dev_err(parent, "unable to add device %d:%d ret = %d\n",
1299 			MAJOR(mei_devt), dev->minor, ret);
1300 		goto err_del_cdev;
1301 	}
1302 
1303 	mei_dbgfs_register(dev, dev_name(&dev->dev));
1304 
1305 	return 0;
1306 
1307 err_del_cdev:
1308 	cdev_del(dev->cdev);
1309 err:
1310 	mei_minor_free(minor);
1311 	return ret;
1312 }
1313 EXPORT_SYMBOL_GPL(mei_register);
1314 
1315 void mei_deregister(struct mei_device *dev)
1316 {
1317 	int devno;
1318 	int minor = dev->minor;
1319 
1320 	devno = dev->cdev->dev;
1321 	cdev_del(dev->cdev);
1322 
1323 	mei_dbgfs_deregister(dev);
1324 
1325 	device_destroy(&mei_class, devno);
1326 
1327 	mei_minor_free(minor);
1328 }
1329 EXPORT_SYMBOL_GPL(mei_deregister);
1330 
1331 static int __init mei_init(void)
1332 {
1333 	int ret;
1334 
1335 	ret = class_register(&mei_class);
1336 	if (ret)
1337 		return ret;
1338 
1339 	ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei");
1340 	if (ret < 0) {
1341 		pr_err("unable to allocate char dev region\n");
1342 		goto err_class;
1343 	}
1344 
1345 	ret = mei_cl_bus_init();
1346 	if (ret < 0) {
1347 		pr_err("unable to initialize bus\n");
1348 		goto err_chrdev;
1349 	}
1350 
1351 	return 0;
1352 
1353 err_chrdev:
1354 	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
1355 err_class:
1356 	class_unregister(&mei_class);
1357 	return ret;
1358 }
1359 
1360 static void __exit mei_exit(void)
1361 {
1362 	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
1363 	class_unregister(&mei_class);
1364 	mei_cl_bus_exit();
1365 }
1366 
1367 module_init(mei_init);
1368 module_exit(mei_exit);
1369 
1370 MODULE_AUTHOR("Intel Corporation");
1371 MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
1372 MODULE_LICENSE("GPL v2");
1373 
1374