xref: /linux/drivers/misc/mei/bus.c (revision 58809f614e0e3f4e12b489bddf680bfeb31c0a20)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2012-2023, Intel Corporation. All rights reserved.
4  * Intel Management Engine Interface (Intel MEI) Linux driver
5  */
6 
7 #include <linux/module.h>
8 #include <linux/device.h>
9 #include <linux/kernel.h>
10 #include <linux/sched/signal.h>
11 #include <linux/init.h>
12 #include <linux/errno.h>
13 #include <linux/slab.h>
14 #include <linux/mutex.h>
15 #include <linux/interrupt.h>
16 #include <linux/scatterlist.h>
17 #include <linux/mei_cl_bus.h>
18 
19 #include "mei_dev.h"
20 #include "client.h"
21 
22 #define to_mei_cl_driver(d) container_of_const(d, struct mei_cl_driver, driver)
23 
24 /**
25  * __mei_cl_send - internal client send (write)
26  *
27  * @cl: host client
28  * @buf: buffer to send
29  * @length: buffer length
30  * @vtag: virtual tag
31  * @mode: sending mode
32  *
33  * Return: written size in bytes or < 0 on error
34  */
35 ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
36 		      unsigned int mode)
37 {
38 	return __mei_cl_send_timeout(cl, buf, length, vtag, mode, MAX_SCHEDULE_TIMEOUT);
39 }
40 
41 /**
42  * __mei_cl_send_timeout - internal client send (write)
43  *
44  * @cl: host client
45  * @buf: buffer to send
46  * @length: buffer length
47  * @vtag: virtual tag
48  * @mode: sending mode
49  * @timeout: send timeout in milliseconds.
50  *           effective only for blocking writes, i.e. when the MEI_CL_IO_TX_BLOCKING mode bit is set.
51  *           set timeout to MAX_SCHEDULE_TIMEOUT for the maximum allowed wait.
52  *
53  * Return: written size in bytes or < 0 on error
54  */
55 ssize_t __mei_cl_send_timeout(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
56 			      unsigned int mode, unsigned long timeout)
57 {
58 	struct mei_device *bus;
59 	struct mei_cl_cb *cb;
60 	ssize_t rets;
61 
62 	if (WARN_ON(!cl || !cl->dev))
63 		return -ENODEV;
64 
65 	bus = cl->dev;
66 
67 	mutex_lock(&bus->device_lock);
68 	if (bus->dev_state != MEI_DEV_ENABLED &&
69 	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
70 		rets = -ENODEV;
71 		goto out;
72 	}
73 
74 	if (!mei_cl_is_connected(cl)) {
75 		rets = -ENODEV;
76 		goto out;
77 	}
78 
79 	/* Check if we have an ME client device */
80 	if (!mei_me_cl_is_active(cl->me_cl)) {
81 		rets = -ENOTTY;
82 		goto out;
83 	}
84 
85 	if (vtag) {
86 		/* Check if vtag is supported by client */
87 		rets = mei_cl_vt_support_check(cl);
88 		if (rets)
89 			goto out;
90 	}
91 
92 	if (length > mei_cl_mtu(cl)) {
93 		rets = -EFBIG;
94 		goto out;
95 	}
96 
97 	while (cl->tx_cb_queued >= bus->tx_queue_limit) {
98 		mutex_unlock(&bus->device_lock);
99 		rets = wait_event_interruptible(cl->tx_wait,
100 				cl->writing_state == MEI_WRITE_COMPLETE ||
101 				(!mei_cl_is_connected(cl)));
102 		mutex_lock(&bus->device_lock);
103 		if (rets) {
104 			if (signal_pending(current))
105 				rets = -EINTR;
106 			goto out;
107 		}
108 		if (!mei_cl_is_connected(cl)) {
109 			rets = -ENODEV;
110 			goto out;
111 		}
112 	}
113 
114 	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
115 	if (!cb) {
116 		rets = -ENOMEM;
117 		goto out;
118 	}
119 	cb->vtag = vtag;
120 
121 	cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
122 	cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
123 	memcpy(cb->buf.data, buf, length);
124 	/* hack: for SGL mode the allocated buffer holds the extended header, not payload data */
125 	if (mode & MEI_CL_IO_SGL) {
126 		cb->ext_hdr = (struct mei_ext_hdr *)cb->buf.data;
127 		cb->buf.data = NULL;
128 		cb->buf.size = 0;
129 	}
130 
131 	rets = mei_cl_write(cl, cb, timeout);
132 
133 	if (mode & MEI_CL_IO_SGL && rets == 0)
134 		rets = length;
135 
136 out:
137 	mutex_unlock(&bus->device_lock);
138 
139 	return rets;
140 }
141 
142 /**
143  * __mei_cl_recv - internal client receive (read)
144  *
145  * @cl: host client
146  * @buf: buffer to receive
147  * @length: buffer length
148  * @vtag: virtual tag
149  * @mode: io mode
150  * @timeout: recv timeout, 0 for infinite timeout
151  *
152  * Return: read size in bytes or < 0 on error
153  */
154 ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
155 		      unsigned int mode, unsigned long timeout)
156 {
157 	struct mei_device *bus;
158 	struct mei_cl_cb *cb;
159 	size_t r_length;
160 	ssize_t rets;
161 	bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK);
162 
163 	if (WARN_ON(!cl || !cl->dev))
164 		return -ENODEV;
165 
166 	bus = cl->dev;
167 
168 	mutex_lock(&bus->device_lock);
169 	if (bus->dev_state != MEI_DEV_ENABLED &&
170 	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
171 		rets = -ENODEV;
172 		goto out;
173 	}
174 
175 	cb = mei_cl_read_cb(cl, NULL);
176 	if (cb)
177 		goto copy;
178 
179 	rets = mei_cl_read_start(cl, length, NULL);
180 	if (rets && rets != -EBUSY)
181 		goto out;
182 
183 	if (nonblock) {
184 		rets = -EAGAIN;
185 		goto out;
186 	}
187 
188 	/* wait on event only if there is no other waiter */
189 	/* synchronized under device mutex */
190 	if (!waitqueue_active(&cl->rx_wait)) {
191 
192 		mutex_unlock(&bus->device_lock);
193 
194 		if (timeout) {
195 			rets = wait_event_interruptible_timeout
196 					(cl->rx_wait,
197 					mei_cl_read_cb(cl, NULL) ||
198 					(!mei_cl_is_connected(cl)),
199 					msecs_to_jiffies(timeout));
200 			if (rets == 0)
201 				return -ETIME;
202 			if (rets < 0) {
203 				if (signal_pending(current))
204 					return -EINTR;
205 				return -ERESTARTSYS;
206 			}
207 		} else {
208 			if (wait_event_interruptible
209 					(cl->rx_wait,
210 					mei_cl_read_cb(cl, NULL) ||
211 					(!mei_cl_is_connected(cl)))) {
212 				if (signal_pending(current))
213 					return -EINTR;
214 				return -ERESTARTSYS;
215 			}
216 		}
217 
218 		mutex_lock(&bus->device_lock);
219 
220 		if (!mei_cl_is_connected(cl)) {
221 			rets = -ENODEV;
222 			goto out;
223 		}
224 	}
225 
226 	cb = mei_cl_read_cb(cl, NULL);
227 	if (!cb) {
228 		rets = 0;
229 		goto out;
230 	}
231 
232 copy:
233 	if (cb->status) {
234 		rets = cb->status;
235 		goto free;
236 	}
237 
238 	/* for the GSC type - copy the extended header to the buffer */
239 	if (cb->ext_hdr && cb->ext_hdr->type == MEI_EXT_HDR_GSC) {
240 		r_length = min_t(size_t, length, cb->ext_hdr->length * sizeof(u32));
241 		memcpy(buf, cb->ext_hdr, r_length);
242 	} else {
243 		r_length = min_t(size_t, length, cb->buf_idx);
244 		memcpy(buf, cb->buf.data, r_length);
245 	}
246 	rets = r_length;
247 
248 	if (vtag)
249 		*vtag = cb->vtag;
250 
251 free:
252 	mei_cl_del_rd_completed(cl, cb);
253 out:
254 	mutex_unlock(&bus->device_lock);
255 
256 	return rets;
257 }
258 
259 /**
260  * mei_cldev_send_vtag - me device send with vtag (write)
261  *
262  * @cldev: me client device
263  * @buf: buffer to send
264  * @length: buffer length
265  * @vtag: virtual tag
266  *
267  * Return:
268  *  * written size in bytes
269  *  * < 0 on error
270  */
271 
272 ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
273 			    size_t length, u8 vtag)
274 {
275 	struct mei_cl *cl = cldev->cl;
276 
277 	return __mei_cl_send(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING);
278 }
279 EXPORT_SYMBOL_GPL(mei_cldev_send_vtag);
280 
281 /**
282  * mei_cldev_send_vtag_timeout - me device send with vtag and timeout (write)
283  *
284  * @cldev: me client device
285  * @buf: buffer to send
286  * @length: buffer length
287  * @vtag: virtual tag
288  * @timeout: send timeout in milliseconds, 0 for infinite timeout
289  *
290  * Return:
291  *  * written size in bytes
292  *  * < 0 on error
293  */
294 
295 ssize_t mei_cldev_send_vtag_timeout(struct mei_cl_device *cldev, const u8 *buf,
296 				    size_t length, u8 vtag, unsigned long timeout)
297 {
298 	struct mei_cl *cl = cldev->cl;
299 
300 	return __mei_cl_send_timeout(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING, timeout);
301 }
302 EXPORT_SYMBOL_GPL(mei_cldev_send_vtag_timeout);
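
/*
 * Usage sketch (hypothetical caller, not part of this file): a client driver
 * sends a message with vtag 0 and bounds the blocking write to two seconds
 * instead of waiting with MAX_SCHEDULE_TIMEOUT. my_send() is illustrative only.
 *
 *	static int my_send(struct mei_cl_device *cldev, const u8 *cmd, size_t len)
 *	{
 *		ssize_t ret;
 *
 *		ret = mei_cldev_send_vtag_timeout(cldev, cmd, len, 0, 2000);
 *		if (ret < 0)
 *			return ret;	// e.g. -ENODEV, -EFBIG, -EINTR
 *		return (size_t)ret == len ? 0 : -EIO;
 *	}
 */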
303 
304 /**
305  * mei_cldev_recv_vtag - client receive with vtag (read)
306  *
307  * @cldev: me client device
308  * @buf: buffer to receive
309  * @length: buffer length
310  * @vtag: virtual tag
311  *
312  * Return:
313  * * read size in bytes
314  * * < 0 on error
315  */
316 
317 ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
318 			    u8 *vtag)
319 {
320 	struct mei_cl *cl = cldev->cl;
321 
322 	return __mei_cl_recv(cl, buf, length, vtag, 0, 0);
323 }
324 EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag);
325 
326 /**
327  * mei_cldev_recv_timeout - client receive with timeout (read)
328  *
329  * @cldev: me client device
330  * @buf: buffer to receive
331  * @length: buffer length
332  * @timeout: recv timeout in milliseconds, 0 for infinite timeout
333  *
334  * Return:
335  * * read size in bytes
336  * *  < 0 on error
337  */
338 ssize_t mei_cldev_recv_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
339 			       unsigned long timeout)
340 {
341 	return mei_cldev_recv_vtag_timeout(cldev, buf, length, NULL, timeout);
342 }
343 EXPORT_SYMBOL_GPL(mei_cldev_recv_timeout);
344 
345 /**
346  * mei_cldev_recv_vtag_timeout - client receive with vtag and timeout (read)
347  *
348  * @cldev: me client device
349  * @buf: buffer to receive
350  * @length: buffer length
351  * @vtag: virtual tag
352  * @timeout: recv timeout in milliseconds, 0 for infinite timeout
353  *
354  * Return:
355  * * read size in bytes
356  * *  < 0 on error
357  */
358 
359 ssize_t mei_cldev_recv_vtag_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
360 				    u8 *vtag, unsigned long timeout)
361 {
362 	struct mei_cl *cl = cldev->cl;
363 
364 	return __mei_cl_recv(cl, buf, length, vtag, 0, timeout);
365 }
366 EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag_timeout);
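
/*
 * Usage sketch (hypothetical caller, not part of this file): read a reply into
 * a local buffer, capture the vtag it arrived with, and give up after five
 * seconds instead of blocking indefinitely.
 *
 *	u8 reply[64];
 *	u8 vtag;
 *	ssize_t ret;
 *
 *	ret = mei_cldev_recv_vtag_timeout(cldev, reply, sizeof(reply), &vtag, 5000);
 *	if (ret == -ETIME)
 *		;	// no reply within 5000 ms
 *	else if (ret >= 0)
 *		;	// ret bytes of payload are in reply[], tagged with vtag
 */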
367 
368 /**
369  * mei_cldev_send - me device send (write)
370  *
371  * @cldev: me client device
372  * @buf: buffer to send
373  * @length: buffer length
374  *
375  * Return:
376  *  * written size in bytes
377  *  * < 0 on error
378  */
379 ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length)
380 {
381 	return mei_cldev_send_vtag(cldev, buf, length, 0);
382 }
383 EXPORT_SYMBOL_GPL(mei_cldev_send);
384 
385 /**
386  * mei_cldev_send_timeout - me device send with timeout (write)
387  *
388  * @cldev: me client device
389  * @buf: buffer to send
390  * @length: buffer length
391  * @timeout: send timeout in milliseconds, 0 for infinite timeout
392  *
393  * Return:
394  *  * written size in bytes
395  *  * < 0 on error
396  */
397 ssize_t mei_cldev_send_timeout(struct mei_cl_device *cldev, const u8 *buf, size_t length,
398 			       unsigned long timeout)
399 {
400 	return mei_cldev_send_vtag_timeout(cldev, buf, length, 0, timeout);
401 }
402 EXPORT_SYMBOL_GPL(mei_cldev_send_timeout);
403 
404 /**
405  * mei_cldev_recv - client receive (read)
406  *
407  * @cldev: me client device
408  * @buf: buffer to receive
409  * @length: buffer length
410  *
411  * Return: read size in bytes or < 0 on error
412  */
413 ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
414 {
415 	return mei_cldev_recv_vtag(cldev, buf, length, NULL);
416 }
417 EXPORT_SYMBOL_GPL(mei_cldev_recv);
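
/*
 * Minimal request/response sketch using the untagged helpers above
 * (hypothetical caller, not part of this file): mei_cldev_send() blocks until
 * the write completes and mei_cldev_recv() blocks until a message arrives or
 * the client disconnects. my_xfer() is illustrative only.
 *
 *	static ssize_t my_xfer(struct mei_cl_device *cldev,
 *			       const u8 *req, size_t req_len,
 *			       u8 *rsp, size_t rsp_len)
 *	{
 *		ssize_t ret;
 *
 *		ret = mei_cldev_send(cldev, req, req_len);
 *		if (ret < 0)
 *			return ret;
 *		return mei_cldev_recv(cldev, rsp, rsp_len);
 *	}
 */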
418 
419 /**
420  * mei_cl_bus_rx_work - dispatch rx event for a bus device
421  *
422  * @work: work
423  */
424 static void mei_cl_bus_rx_work(struct work_struct *work)
425 {
426 	struct mei_cl_device *cldev;
427 	struct mei_device *bus;
428 
429 	cldev = container_of(work, struct mei_cl_device, rx_work);
430 
431 	bus = cldev->bus;
432 
433 	if (cldev->rx_cb)
434 		cldev->rx_cb(cldev);
435 
436 	mutex_lock(&bus->device_lock);
437 	if (mei_cl_is_connected(cldev->cl))
438 		mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
439 	mutex_unlock(&bus->device_lock);
440 }
441 
442 /**
443  * mei_cl_bus_notif_work - dispatch FW notif event for a bus device
444  *
445  * @work: work
446  */
447 static void mei_cl_bus_notif_work(struct work_struct *work)
448 {
449 	struct mei_cl_device *cldev;
450 
451 	cldev = container_of(work, struct mei_cl_device, notif_work);
452 
453 	if (cldev->notif_cb)
454 		cldev->notif_cb(cldev);
455 }
456 
457 /**
458  * mei_cl_bus_notify_event - schedule notify cb on bus client
459  *
460  * @cl: host client
461  *
462  * Return: true if event was scheduled
463  *         false if the client is not waiting for event
464  */
465 bool mei_cl_bus_notify_event(struct mei_cl *cl)
466 {
467 	struct mei_cl_device *cldev = cl->cldev;
468 
469 	if (!cldev || !cldev->notif_cb)
470 		return false;
471 
472 	if (!cl->notify_ev)
473 		return false;
474 
475 	schedule_work(&cldev->notif_work);
476 
477 	cl->notify_ev = false;
478 
479 	return true;
480 }
481 
482 /**
483  * mei_cl_bus_rx_event - schedule rx event
484  *
485  * @cl: host client
486  *
487  * Return: true if event was scheduled
488  *         false if the client is not waiting for event
489  */
490 bool mei_cl_bus_rx_event(struct mei_cl *cl)
491 {
492 	struct mei_cl_device *cldev = cl->cldev;
493 
494 	if (!cldev || !cldev->rx_cb)
495 		return false;
496 
497 	schedule_work(&cldev->rx_work);
498 
499 	return true;
500 }
501 
502 /**
503  * mei_cldev_register_rx_cb - register Rx event callback
504  *
505  * @cldev: me client device
506  * @rx_cb: callback function
507  *
508  * Return: 0 on success
509  *         -EALREADY if a callback is already registered
510  *         <0 on other errors
511  */
512 int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb)
513 {
514 	struct mei_device *bus = cldev->bus;
515 	int ret;
516 
517 	if (!rx_cb)
518 		return -EINVAL;
519 	if (cldev->rx_cb)
520 		return -EALREADY;
521 
522 	cldev->rx_cb = rx_cb;
523 	INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work);
524 
525 	mutex_lock(&bus->device_lock);
526 	if (mei_cl_is_connected(cldev->cl))
527 		ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
528 	else
529 		ret = -ENODEV;
530 	mutex_unlock(&bus->device_lock);
531 	if (ret && ret != -EBUSY) {
532 		cancel_work_sync(&cldev->rx_work);
533 		cldev->rx_cb = NULL;
534 		return ret;
535 	}
536 
537 	return 0;
538 }
539 EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb);
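
/*
 * Usage sketch (hypothetical client driver, not part of this file): register
 * an Rx callback from probe, after mei_cldev_enable(), and drain the data in
 * the callback, which runs in workqueue context; the core re-arms the read
 * after the callback returns (see mei_cl_bus_rx_work() above). my_rx() is
 * illustrative only.
 *
 *	static void my_rx(struct mei_cl_device *cldev)
 *	{
 *		u8 buf[128];
 *		ssize_t ret;
 *
 *		ret = mei_cldev_recv(cldev, buf, sizeof(buf));
 *		if (ret > 0)
 *			;	// process ret bytes from buf
 *	}
 *
 *	// in probe(), after mei_cldev_enable():
 *	ret = mei_cldev_register_rx_cb(cldev, my_rx);
 */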
540 
541 /**
542  * mei_cldev_register_notif_cb - register FW notification event callback
543  *
544  * @cldev: me client device
545  * @notif_cb: callback function
546  *
547  * Return: 0 on success
548  *         -EALREADY if a callback is already registered
549  *         <0 on other errors
550  */
551 int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
552 				mei_cldev_cb_t notif_cb)
553 {
554 	struct mei_device *bus = cldev->bus;
555 	int ret;
556 
557 	if (!notif_cb)
558 		return -EINVAL;
559 
560 	if (cldev->notif_cb)
561 		return -EALREADY;
562 
563 	cldev->notif_cb = notif_cb;
564 	INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work);
565 
566 	mutex_lock(&bus->device_lock);
567 	ret = mei_cl_notify_request(cldev->cl, NULL, 1);
568 	mutex_unlock(&bus->device_lock);
569 	if (ret) {
570 		cancel_work_sync(&cldev->notif_work);
571 		cldev->notif_cb = NULL;
572 		return ret;
573 	}
574 
575 	return 0;
576 }
577 EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb);
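
/*
 * Usage sketch (hypothetical, not part of this file): arm firmware
 * notifications after connecting; the callback runs from the notif_work item
 * scheduled by mei_cl_bus_notify_event(). my_notif() is illustrative only.
 *
 *	static void my_notif(struct mei_cl_device *cldev)
 *	{
 *		// the firmware client raised a notification event
 *	}
 *
 *	// in probe(), after mei_cldev_enable():
 *	ret = mei_cldev_register_notif_cb(cldev, my_notif);
 *	if (ret)
 *		;	// e.g. the firmware may not support notifications
 */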
578 
579 /**
580  * mei_cldev_get_drvdata - driver data getter
581  *
582  * @cldev: mei client device
583  *
584  * Return: driver private data
585  */
586 void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev)
587 {
588 	return dev_get_drvdata(&cldev->dev);
589 }
590 EXPORT_SYMBOL_GPL(mei_cldev_get_drvdata);
591 
592 /**
593  * mei_cldev_set_drvdata - driver data setter
594  *
595  * @cldev: mei client device
596  * @data: data to store
597  */
598 void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data)
599 {
600 	dev_set_drvdata(&cldev->dev, data);
601 }
602 EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata);
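
/*
 * Usage sketch (hypothetical, not part of this file): stash a per-device
 * context in probe and fetch it back wherever only the cldev is available.
 * struct my_ctx is illustrative only.
 *
 *	struct my_ctx *ctx = devm_kzalloc(&cldev->dev, sizeof(*ctx), GFP_KERNEL);
 *
 *	mei_cldev_set_drvdata(cldev, ctx);
 *	...
 *	struct my_ctx *ctx = mei_cldev_get_drvdata(cldev);
 */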
603 
604 /**
605  * mei_cldev_ver - return protocol version of the underlying me client
606  *
607  * @cldev: mei client device
608  *
609  * Return: me client protocol version
610  */
611 u8 mei_cldev_ver(const struct mei_cl_device *cldev)
612 {
613 	return mei_me_cl_ver(cldev->me_cl);
614 }
615 EXPORT_SYMBOL_GPL(mei_cldev_ver);
616 
617 /**
618  * mei_cldev_mtu - maximum message length that the client can send and receive
619  *
620  * @cldev: mei client device
621  *
622  * Return: mtu or 0 if client is not connected
623  */
624 size_t mei_cldev_mtu(const struct mei_cl_device *cldev)
625 {
626 	return mei_cl_mtu(cldev->cl);
627 }
628 EXPORT_SYMBOL_GPL(mei_cldev_mtu);
629 
630 /**
631  * mei_cldev_enabled - check whether the device is enabled
632  *
633  * @cldev: mei client device
634  *
635  * Return: true if me client is initialized and connected
636  */
637 bool mei_cldev_enabled(const struct mei_cl_device *cldev)
638 {
639 	return mei_cl_is_connected(cldev->cl);
640 }
641 EXPORT_SYMBOL_GPL(mei_cldev_enabled);
642 
643 /**
644  * mei_cl_bus_module_get - acquire module of the underlying
645  *    hw driver.
646  *
647  * @cldev: mei client device
648  *
649  * Return: true on success; false if the module was removed.
650  */
651 static bool mei_cl_bus_module_get(struct mei_cl_device *cldev)
652 {
653 	return try_module_get(cldev->bus->dev->driver->owner);
654 }
655 
656 /**
657  * mei_cl_bus_module_put -  release the underlying hw module.
658  *
659  * @cldev: mei client device
660  */
661 static void mei_cl_bus_module_put(struct mei_cl_device *cldev)
662 {
663 	module_put(cldev->bus->dev->driver->owner);
664 }
665 
666 /**
667  * mei_cl_bus_vtag - get bus vtag entry wrapper
668  *     The vtag for a bus client is always the first entry in the map.
669  *
670  * @cl: host client
671  *
672  * Return: bus vtag or NULL
673  */
674 static inline struct mei_cl_vtag *mei_cl_bus_vtag(struct mei_cl *cl)
675 {
676 	return list_first_entry_or_null(&cl->vtag_map,
677 					struct mei_cl_vtag, list);
678 }
679 
680 /**
681  * mei_cl_bus_vtag_alloc - add bus client entry to vtag map
682  *
683  * @cldev: me client device
684  *
685  * Return:
686  * * 0 on success
687  * * -ENOMEM if memory allocation failed
688  */
689 static int mei_cl_bus_vtag_alloc(struct mei_cl_device *cldev)
690 {
691 	struct mei_cl *cl = cldev->cl;
692 	struct mei_cl_vtag *cl_vtag;
693 
694 	/*
695 	 * Bail out if the client does not support vtags
696 	 * or has already allocated one
697 	 */
698 	if (mei_cl_vt_support_check(cl) || mei_cl_bus_vtag(cl))
699 		return 0;
700 
701 	cl_vtag = mei_cl_vtag_alloc(NULL, 0);
702 	if (IS_ERR(cl_vtag))
703 		return -ENOMEM;
704 
705 	list_add_tail(&cl_vtag->list, &cl->vtag_map);
706 
707 	return 0;
708 }
709 
710 /**
711  * mei_cl_bus_vtag_free - remove the bus entry from vtag map
712  *
713  * @cldev: me client device
714  */
715 static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev)
716 {
717 	struct mei_cl *cl = cldev->cl;
718 	struct mei_cl_vtag *cl_vtag;
719 
720 	cl_vtag = mei_cl_bus_vtag(cl);
721 	if (!cl_vtag)
722 		return;
723 
724 	list_del(&cl_vtag->list);
725 	kfree(cl_vtag);
726 }
727 
728 void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
729 {
730 	struct mei_device *bus;
731 	struct mei_cl *cl;
732 	int ret;
733 
734 	if (!cldev || !buffer_id || !size)
735 		return ERR_PTR(-EINVAL);
736 
737 	if (!IS_ALIGNED(size, MEI_FW_PAGE_SIZE)) {
738 		dev_err(&cldev->dev, "Map size should be aligned to %lu\n",
739 			MEI_FW_PAGE_SIZE);
740 		return ERR_PTR(-EINVAL);
741 	}
742 
743 	cl = cldev->cl;
744 	bus = cldev->bus;
745 
746 	mutex_lock(&bus->device_lock);
747 	if (cl->state == MEI_FILE_UNINITIALIZED) {
748 		ret = mei_cl_link(cl);
749 		if (ret)
750 			goto notlinked;
751 		/* update pointers */
752 		cl->cldev = cldev;
753 	}
754 
755 	ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
756 	if (ret)
757 		mei_cl_unlink(cl);
758 notlinked:
759 	mutex_unlock(&bus->device_lock);
760 	if (ret)
761 		return ERR_PTR(ret);
762 	return cl->dma.vaddr;
763 }
764 EXPORT_SYMBOL_GPL(mei_cldev_dma_map);
765 
766 int mei_cldev_dma_unmap(struct mei_cl_device *cldev)
767 {
768 	struct mei_device *bus;
769 	struct mei_cl *cl;
770 	int ret;
771 
772 	if (!cldev)
773 		return -EINVAL;
774 
775 	cl = cldev->cl;
776 	bus = cldev->bus;
777 
778 	mutex_lock(&bus->device_lock);
779 	ret = mei_cl_dma_unmap(cl, NULL);
780 
781 	mei_cl_flush_queues(cl, NULL);
782 	mei_cl_unlink(cl);
783 	mutex_unlock(&bus->device_lock);
784 	return ret;
785 }
786 EXPORT_SYMBOL_GPL(mei_cldev_dma_unmap);
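
/*
 * Usage sketch (hypothetical, not part of this file): buffer_id must be
 * non-zero and size a multiple of MEI_FW_PAGE_SIZE (see the checks in
 * mei_cldev_dma_map() above); since mei_cldev_disable() keeps a DMA-mapped
 * client linked, unmap after disabling. MY_BUFFER_ID is illustrative only.
 *
 *	void *vaddr;
 *
 *	vaddr = mei_cldev_dma_map(cldev, MY_BUFFER_ID, 2 * MEI_FW_PAGE_SIZE);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	...
 *	mei_cldev_disable(cldev);
 *	mei_cldev_dma_unmap(cldev);
 */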
787 
788 /**
789  * mei_cldev_enable - enable me client device
790  *     create connection with me client
791  *
792  * @cldev: me client device
793  *
794  * Return: 0 on success and < 0 on error
795  */
796 int mei_cldev_enable(struct mei_cl_device *cldev)
797 {
798 	struct mei_device *bus = cldev->bus;
799 	struct mei_cl *cl;
800 	int ret;
801 
802 	cl = cldev->cl;
803 
804 	mutex_lock(&bus->device_lock);
805 	if (cl->state == MEI_FILE_UNINITIALIZED) {
806 		ret = mei_cl_link(cl);
807 		if (ret)
808 			goto notlinked;
809 		/* update pointers */
810 		cl->cldev = cldev;
811 	}
812 
813 	if (mei_cl_is_connected(cl)) {
814 		ret = 0;
815 		goto out;
816 	}
817 
818 	if (!mei_me_cl_is_active(cldev->me_cl)) {
819 		dev_err(&cldev->dev, "me client is not active\n");
820 		ret = -ENOTTY;
821 		goto out;
822 	}
823 
824 	ret = mei_cl_bus_vtag_alloc(cldev);
825 	if (ret)
826 		goto out;
827 
828 	ret = mei_cl_connect(cl, cldev->me_cl, NULL);
829 	if (ret < 0) {
830 		dev_err(&cldev->dev, "cannot connect\n");
831 		mei_cl_bus_vtag_free(cldev);
832 	}
833 
834 out:
835 	if (ret)
836 		mei_cl_unlink(cl);
837 notlinked:
838 	mutex_unlock(&bus->device_lock);
839 
840 	return ret;
841 }
842 EXPORT_SYMBOL_GPL(mei_cldev_enable);
843 
844 /**
845  * mei_cldev_unregister_callbacks - internal wrapper for unregistering
846  *  callbacks.
847  *
848  * @cldev: client device
849  */
850 static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev)
851 {
852 	if (cldev->rx_cb) {
853 		cancel_work_sync(&cldev->rx_work);
854 		cldev->rx_cb = NULL;
855 	}
856 
857 	if (cldev->notif_cb) {
858 		cancel_work_sync(&cldev->notif_work);
859 		cldev->notif_cb = NULL;
860 	}
861 }
862 
863 /**
864  * mei_cldev_disable - disable me client device
865  *     disconnect from the me client
866  *
867  * @cldev: me client device
868  *
869  * Return: 0 on success and < 0 on error
870  */
871 int mei_cldev_disable(struct mei_cl_device *cldev)
872 {
873 	struct mei_device *bus;
874 	struct mei_cl *cl;
875 	int err;
876 
877 	if (!cldev)
878 		return -ENODEV;
879 
880 	cl = cldev->cl;
881 
882 	bus = cldev->bus;
883 
884 	mei_cldev_unregister_callbacks(cldev);
885 
886 	mutex_lock(&bus->device_lock);
887 
888 	mei_cl_bus_vtag_free(cldev);
889 
890 	if (!mei_cl_is_connected(cl)) {
891 		dev_dbg(&cldev->dev, "Already disconnected\n");
892 		err = 0;
893 		goto out;
894 	}
895 
896 	err = mei_cl_disconnect(cl);
897 	if (err < 0)
898 		dev_err(&cldev->dev, "Could not disconnect from the ME client\n");
899 
900 out:
901 	/* Flush queues and remove any pending read unless we have mapped DMA */
902 	if (!cl->dma_mapped) {
903 		mei_cl_flush_queues(cl, NULL);
904 		mei_cl_unlink(cl);
905 	}
906 
907 	mutex_unlock(&bus->device_lock);
908 	return err;
909 }
910 EXPORT_SYMBOL_GPL(mei_cldev_disable);
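
/*
 * Usage sketch (hypothetical client driver, not part of this file): the
 * typical probe()/remove() pairing around enable/disable. my_probe(),
 * my_remove() and my_rx() are illustrative only.
 *
 *	static int my_probe(struct mei_cl_device *cldev,
 *			    const struct mei_cl_device_id *id)
 *	{
 *		int ret;
 *
 *		ret = mei_cldev_enable(cldev);
 *		if (ret < 0)
 *			return ret;
 *
 *		ret = mei_cldev_register_rx_cb(cldev, my_rx);
 *		if (ret) {
 *			mei_cldev_disable(cldev);
 *			return ret;
 *		}
 *		return 0;
 *	}
 *
 *	static void my_remove(struct mei_cl_device *cldev)
 *	{
 *		mei_cldev_disable(cldev);
 *	}
 */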
911 
912 /**
913  * mei_cldev_send_gsc_command - sends a gsc command, by sending
914  * a gsc mei message to gsc and receiving a reply from gsc
915  *
916  * @cldev: me client device
917  * @client_id: client id to send the command to
918  * @fence_id: fence id to send the command to
919  * @sg_in: scatter gather list containing addresses for rx message buffer
920  * @total_in_len: total length of data in 'in' sg, can be less than the sum of the buffer sizes
921  * @sg_out: scatter gather list containing addresses for tx message buffer
922  *
923  * Return:
924  *  * written size in bytes
925  *  * < 0 on error
926  */
927 ssize_t mei_cldev_send_gsc_command(struct mei_cl_device *cldev,
928 				   u8 client_id, u32 fence_id,
929 				   struct scatterlist *sg_in,
930 				   size_t total_in_len,
931 				   struct scatterlist *sg_out)
932 {
933 	struct mei_cl *cl;
934 	struct mei_device *bus;
935 	ssize_t ret = 0;
936 
937 	struct mei_ext_hdr_gsc_h2f *ext_hdr;
938 	size_t buf_sz = sizeof(struct mei_ext_hdr_gsc_h2f);
939 	int sg_out_nents, sg_in_nents;
940 	int i;
941 	struct scatterlist *sg;
942 	struct mei_ext_hdr_gsc_f2h rx_msg;
943 	unsigned int sg_len;
944 
945 	if (!cldev || !sg_in || !sg_out)
946 		return -EINVAL;
947 
948 	cl = cldev->cl;
949 	bus = cldev->bus;
950 
951 	dev_dbg(&cldev->dev, "client_id %u, fence_id %u\n", client_id, fence_id);
952 
953 	if (!bus->hbm_f_gsc_supported)
954 		return -EOPNOTSUPP;
955 
956 	sg_out_nents = sg_nents(sg_out);
957 	sg_in_nents = sg_nents(sg_in);
958 	/* at least one entry in tx and rx sgls must be present */
959 	if (sg_out_nents <= 0 || sg_in_nents <= 0)
960 		return -EINVAL;
961 
962 	buf_sz += (sg_out_nents + sg_in_nents) * sizeof(struct mei_gsc_sgl);
963 	ext_hdr = kzalloc(buf_sz, GFP_KERNEL);
964 	if (!ext_hdr)
965 		return -ENOMEM;
966 
967 	/* construct the GSC message */
968 	ext_hdr->hdr.type = MEI_EXT_HDR_GSC;
969 	ext_hdr->hdr.length = buf_sz / sizeof(u32); /* length is in dw */
970 
971 	ext_hdr->client_id = client_id;
972 	ext_hdr->addr_type = GSC_ADDRESS_TYPE_PHYSICAL_SGL;
973 	ext_hdr->fence_id = fence_id;
974 	ext_hdr->input_address_count = sg_in_nents;
975 	ext_hdr->output_address_count = sg_out_nents;
976 	ext_hdr->reserved[0] = 0;
977 	ext_hdr->reserved[1] = 0;
978 
979 	/* copy in-sgl to the message */
980 	for (i = 0, sg = sg_in; i < sg_in_nents; i++, sg++) {
981 		ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
982 		ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
983 		sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
984 		ext_hdr->sgl[i].length = (sg_len <= total_in_len) ? sg_len : total_in_len;
985 		total_in_len -= ext_hdr->sgl[i].length;
986 	}
987 
988 	/* copy out-sgl to the message */
989 	for (i = sg_in_nents, sg = sg_out; i < sg_in_nents + sg_out_nents; i++, sg++) {
990 		ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
991 		ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
992 		sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
993 		ext_hdr->sgl[i].length = sg_len;
994 	}
995 
996 	/* send the message to GSC */
997 	ret = __mei_cl_send(cl, (u8 *)ext_hdr, buf_sz, 0, MEI_CL_IO_SGL);
998 	if (ret < 0) {
999 		dev_err(&cldev->dev, "__mei_cl_send failed, returned %zd\n", ret);
1000 		goto end;
1001 	}
1002 	if (ret != buf_sz) {
1003 		dev_err(&cldev->dev, "__mei_cl_send returned %zd instead of expected %zd\n",
1004 			ret, buf_sz);
1005 		ret = -EIO;
1006 		goto end;
1007 	}
1008 
1009 	/* receive the reply from GSC, note that at this point sg_in should contain the reply */
1010 	ret = __mei_cl_recv(cl, (u8 *)&rx_msg, sizeof(rx_msg), NULL, MEI_CL_IO_SGL, 0);
1011 
1012 	if (ret != sizeof(rx_msg)) {
1013 		dev_err(&cldev->dev, "__mei_cl_recv returned %zd instead of expected %zd\n",
1014 			ret, sizeof(rx_msg));
1015 		if (ret >= 0)
1016 			ret = -EIO;
1017 		goto end;
1018 	}
1019 
1020 	/* check rx_msg.client_id and rx_msg.fence_id match the ones we sent */
1021 	if (rx_msg.client_id != client_id || rx_msg.fence_id != fence_id) {
1022 		dev_err(&cldev->dev, "received client_id/fence_id  %u/%u  instead of %u/%u sent\n",
1023 			rx_msg.client_id, rx_msg.fence_id, client_id, fence_id);
1024 		ret = -EFAULT;
1025 		goto end;
1026 	}
1027 
1028 	dev_dbg(&cldev->dev, "gsc command: successfully written %u bytes\n", rx_msg.written);
1029 	ret = rx_msg.written;
1030 
1031 end:
1032 	kfree(ext_hdr);
1033 	return ret;
1034 }
1035 EXPORT_SYMBOL_GPL(mei_cldev_send_gsc_command);
1036 
1037 /**
1038  * mei_cl_device_find - find matching entry in the driver id table
1039  *
1040  * @cldev: me client device
1041  * @cldrv: me client driver
1042  *
1043  * Return: id on success; NULL if no id matches
1044  */
1045 static const
1046 struct mei_cl_device_id *mei_cl_device_find(const struct mei_cl_device *cldev,
1047 					    const struct mei_cl_driver *cldrv)
1048 {
1049 	const struct mei_cl_device_id *id;
1050 	const uuid_le *uuid;
1051 	u8 version;
1052 	bool match;
1053 
1054 	uuid = mei_me_cl_uuid(cldev->me_cl);
1055 	version = mei_me_cl_ver(cldev->me_cl);
1056 
1057 	id = cldrv->id_table;
1058 	while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
1059 		if (!uuid_le_cmp(*uuid, id->uuid)) {
1060 			match = true;
1061 
1062 			if (cldev->name[0])
1063 				if (strncmp(cldev->name, id->name,
1064 					    sizeof(id->name)))
1065 					match = false;
1066 
1067 			if (id->version != MEI_CL_VERSION_ANY)
1068 				if (id->version != version)
1069 					match = false;
1070 			if (match)
1071 				return id;
1072 		}
1073 
1074 		id++;
1075 	}
1076 
1077 	return NULL;
1078 }
1079 
1080 /**
1081  * mei_cl_device_match  - device match function
1082  *
1083  * @dev: device
1084  * @drv: driver
1085  *
1086  * Return: 1 if a matching device was found, 0 otherwise
1087  */
1088 static int mei_cl_device_match(struct device *dev, const struct device_driver *drv)
1089 {
1090 	const struct mei_cl_device *cldev = to_mei_cl_device(dev);
1091 	const struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
1092 	const struct mei_cl_device_id *found_id;
1093 
1094 	if (!cldev->do_match)
1095 		return 0;
1096 
1097 	if (!cldrv || !cldrv->id_table)
1098 		return 0;
1099 
1100 	found_id = mei_cl_device_find(cldev, cldrv);
1101 	if (found_id)
1102 		return 1;
1103 
1104 	return 0;
1105 }
1106 
1107 /**
1108  * mei_cl_device_probe - bus probe function
1109  *
1110  * @dev: device
1111  *
1112  * Return:  0 on success; < 0 otherwise
1113  */
1114 static int mei_cl_device_probe(struct device *dev)
1115 {
1116 	struct mei_cl_device *cldev;
1117 	struct mei_cl_driver *cldrv;
1118 	const struct mei_cl_device_id *id;
1119 	int ret;
1120 
1121 	cldev = to_mei_cl_device(dev);
1122 	cldrv = to_mei_cl_driver(dev->driver);
1123 
1124 	if (!cldrv || !cldrv->probe)
1125 		return -ENODEV;
1126 
1127 	id = mei_cl_device_find(cldev, cldrv);
1128 	if (!id)
1129 		return -ENODEV;
1130 
1131 	if (!mei_cl_bus_module_get(cldev)) {
1132 		dev_err(&cldev->dev, "get hw module failed");
1133 		return -ENODEV;
1134 	}
1135 
1136 	ret = cldrv->probe(cldev, id);
1137 	if (ret) {
1138 		mei_cl_bus_module_put(cldev);
1139 		return ret;
1140 	}
1141 
1142 	__module_get(THIS_MODULE);
1143 	return 0;
1144 }
1145 
1146 /**
1147  * mei_cl_device_remove - remove device from the bus
1148  *
1149  * @dev: device
1152  */
1153 static void mei_cl_device_remove(struct device *dev)
1154 {
1155 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1156 	struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver);
1157 
1158 	if (cldrv->remove)
1159 		cldrv->remove(cldev);
1160 
1161 	mei_cldev_unregister_callbacks(cldev);
1162 
1163 	mei_cl_bus_module_put(cldev);
1164 	module_put(THIS_MODULE);
1165 }
1166 
1167 static ssize_t name_show(struct device *dev, struct device_attribute *a,
1168 			     char *buf)
1169 {
1170 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1171 
1172 	return sysfs_emit(buf, "%s", cldev->name);
1173 }
1174 static DEVICE_ATTR_RO(name);
1175 
1176 static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
1177 			     char *buf)
1178 {
1179 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1180 	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
1181 
1182 	return sysfs_emit(buf, "%pUl", uuid);
1183 }
1184 static DEVICE_ATTR_RO(uuid);
1185 
1186 static ssize_t version_show(struct device *dev, struct device_attribute *a,
1187 			     char *buf)
1188 {
1189 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1190 	u8 version = mei_me_cl_ver(cldev->me_cl);
1191 
1192 	return sysfs_emit(buf, "%02X", version);
1193 }
1194 static DEVICE_ATTR_RO(version);
1195 
1196 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
1197 			     char *buf)
1198 {
1199 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1200 	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
1201 	u8 version = mei_me_cl_ver(cldev->me_cl);
1202 
1203 	return sysfs_emit(buf, "mei:%s:%pUl:%02X:", cldev->name, uuid, version);
1204 }
1205 static DEVICE_ATTR_RO(modalias);
1206 
1207 static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
1208 			     char *buf)
1209 {
1210 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1211 	u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);
1212 
1213 	return sysfs_emit(buf, "%d", maxconn);
1214 }
1215 static DEVICE_ATTR_RO(max_conn);
1216 
1217 static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
1218 			  char *buf)
1219 {
1220 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1221 	u8 fixed = mei_me_cl_fixed(cldev->me_cl);
1222 
1223 	return sysfs_emit(buf, "%d", fixed);
1224 }
1225 static DEVICE_ATTR_RO(fixed);
1226 
1227 static ssize_t vtag_show(struct device *dev, struct device_attribute *a,
1228 			 char *buf)
1229 {
1230 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1231 	bool vt = mei_me_cl_vt(cldev->me_cl);
1232 
1233 	return sysfs_emit(buf, "%d", vt);
1234 }
1235 static DEVICE_ATTR_RO(vtag);
1236 
1237 static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
1238 			    char *buf)
1239 {
1240 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1241 	u32 maxlen = mei_me_cl_max_len(cldev->me_cl);
1242 
1243 	return sysfs_emit(buf, "%u", maxlen);
1244 }
1245 static DEVICE_ATTR_RO(max_len);
1246 
1247 static struct attribute *mei_cldev_attrs[] = {
1248 	&dev_attr_name.attr,
1249 	&dev_attr_uuid.attr,
1250 	&dev_attr_version.attr,
1251 	&dev_attr_modalias.attr,
1252 	&dev_attr_max_conn.attr,
1253 	&dev_attr_fixed.attr,
1254 	&dev_attr_vtag.attr,
1255 	&dev_attr_max_len.attr,
1256 	NULL,
1257 };
1258 ATTRIBUTE_GROUPS(mei_cldev);
1259 
1260 /**
1261  * mei_cl_device_uevent - me client bus uevent handler
1262  *
1263  * @dev: device
1264  * @env: uevent kobject
1265  *
1266  * Return: 0 on success, -ENOMEM when add_uevent_var fails
1267  */
1268 static int mei_cl_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
1269 {
1270 	const struct mei_cl_device *cldev = to_mei_cl_device(dev);
1271 	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
1272 	u8 version = mei_me_cl_ver(cldev->me_cl);
1273 
1274 	if (add_uevent_var(env, "MEI_CL_VERSION=%d", version))
1275 		return -ENOMEM;
1276 
1277 	if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
1278 		return -ENOMEM;
1279 
1280 	if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name))
1281 		return -ENOMEM;
1282 
1283 	if (add_uevent_var(env, "MODALIAS=mei:%s:%pUl:%02X:",
1284 			   cldev->name, uuid, version))
1285 		return -ENOMEM;
1286 
1287 	return 0;
1288 }
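
/*
 * Example of the MODALIAS string emitted above (hypothetical name and uuid),
 * which userspace matches against the aliases generated from a driver's
 * MODULE_DEVICE_TABLE(mei, ...) entries, mirroring the in-kernel matching
 * done by mei_cl_device_find():
 *
 *	MODALIAS=mei:my_client:01234567-89ab-cdef-0123-456789abcdef:01:
 */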
1289 
1290 static const struct bus_type mei_cl_bus_type = {
1291 	.name		= "mei",
1292 	.dev_groups	= mei_cldev_groups,
1293 	.match		= mei_cl_device_match,
1294 	.probe		= mei_cl_device_probe,
1295 	.remove		= mei_cl_device_remove,
1296 	.uevent		= mei_cl_device_uevent,
1297 };
1298 
1299 static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
1300 {
1301 	if (bus)
1302 		get_device(bus->dev);
1303 
1304 	return bus;
1305 }
1306 
1307 static void mei_dev_bus_put(struct mei_device *bus)
1308 {
1309 	if (bus)
1310 		put_device(bus->dev);
1311 }
1312 
1313 static void mei_cl_bus_dev_release(struct device *dev)
1314 {
1315 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1316 	struct mei_device *mdev = cldev->cl->dev;
1317 	struct mei_cl *cl;
1318 
1319 	mei_cl_flush_queues(cldev->cl, NULL);
1320 	mei_me_cl_put(cldev->me_cl);
1321 	mei_dev_bus_put(cldev->bus);
1322 
1323 	list_for_each_entry(cl, &mdev->file_list, link)
1324 		WARN_ON(cl == cldev->cl);
1325 
1326 	kfree(cldev->cl);
1327 	kfree(cldev);
1328 }
1329 
1330 static const struct device_type mei_cl_device_type = {
1331 	.release = mei_cl_bus_dev_release,
1332 };
1333 
1334 /**
1335  * mei_cl_bus_set_name - set device name for me client device
1336  *  <controller>-<client device>
1337  *  Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
1338  *
1339  * @cldev: me client device
1340  */
1341 static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
1342 {
1343 	dev_set_name(&cldev->dev, "%s-%pUl",
1344 		     dev_name(cldev->bus->dev),
1345 		     mei_me_cl_uuid(cldev->me_cl));
1346 }
1347 
1348 /**
1349  * mei_cl_bus_dev_alloc - initialize and allocate mei client device
1350  *
1351  * @bus: mei device
1352  * @me_cl: me client
1353  *
1354  * Return: allocated device structure or NULL on allocation failure
1355  */
1356 static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
1357 						  struct mei_me_client *me_cl)
1358 {
1359 	struct mei_cl_device *cldev;
1360 	struct mei_cl *cl;
1361 
1362 	cldev = kzalloc(sizeof(*cldev), GFP_KERNEL);
1363 	if (!cldev)
1364 		return NULL;
1365 
1366 	cl = mei_cl_allocate(bus);
1367 	if (!cl) {
1368 		kfree(cldev);
1369 		return NULL;
1370 	}
1371 
1372 	device_initialize(&cldev->dev);
1373 	cldev->dev.parent = bus->dev;
1374 	cldev->dev.bus    = &mei_cl_bus_type;
1375 	cldev->dev.type   = &mei_cl_device_type;
1376 	cldev->bus        = mei_dev_bus_get(bus);
1377 	cldev->me_cl      = mei_me_cl_get(me_cl);
1378 	cldev->cl         = cl;
1379 	mei_cl_bus_set_name(cldev);
1380 	cldev->is_added   = 0;
1381 	INIT_LIST_HEAD(&cldev->bus_list);
1382 	device_enable_async_suspend(&cldev->dev);
1383 
1384 	return cldev;
1385 }
1386 
1387 /**
1388  * mei_cl_bus_dev_setup - setup me client device
1389  *    run fix up routines and set the device name
1390  *
1391  * @bus: mei device
1392  * @cldev: me client device
1393  *
1394  * Return: true if the device is eligible for enumeration
1395  */
1396 static bool mei_cl_bus_dev_setup(struct mei_device *bus,
1397 				 struct mei_cl_device *cldev)
1398 {
1399 	cldev->do_match = 1;
1400 	mei_cl_bus_dev_fixup(cldev);
1401 
1402 	/* the device name can change during fix up */
1403 	if (cldev->do_match)
1404 		mei_cl_bus_set_name(cldev);
1405 
1406 	return cldev->do_match == 1;
1407 }
1408 
1409 /**
1410  * mei_cl_bus_dev_add - add me client device
1411  *
1412  * @cldev: me client device
1413  *
1414  * Return: 0 on success; < 0 on failure
1415  */
1416 static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
1417 {
1418 	int ret;
1419 
1420 	dev_dbg(&cldev->dev, "adding %pUL:%02X\n",
1421 		mei_me_cl_uuid(cldev->me_cl),
1422 		mei_me_cl_ver(cldev->me_cl));
1423 	ret = device_add(&cldev->dev);
1424 	if (!ret)
1425 		cldev->is_added = 1;
1426 
1427 	return ret;
1428 }
1429 
1430 /**
1431  * mei_cl_bus_dev_stop - stop the device by unbinding its driver
1432  *
1433  * @cldev: me client device
1434  */
1435 static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
1436 {
1437 	cldev->do_match = 0;
1438 	if (cldev->is_added)
1439 		device_release_driver(&cldev->dev);
1440 }
1441 
1442 /**
1443  * mei_cl_bus_dev_destroy - destroy me client device object
1444  *
1445  * @cldev: me client device
1446  *
1447  * Locking: called under "dev->cl_bus_lock" lock
1448  */
1449 static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
1450 {
1451 
1452 	WARN_ON(!mutex_is_locked(&cldev->bus->cl_bus_lock));
1453 
1454 	if (!cldev->is_added)
1455 		return;
1456 
1457 	device_del(&cldev->dev);
1458 
1459 	list_del_init(&cldev->bus_list);
1460 
1461 	cldev->is_added = 0;
1462 	put_device(&cldev->dev);
1463 }
1464 
1465 /**
1466  * mei_cl_bus_remove_device - remove a device from the bus
1467  *
1468  * @cldev: me client device
1469  */
1470 static void mei_cl_bus_remove_device(struct mei_cl_device *cldev)
1471 {
1472 	mei_cl_bus_dev_stop(cldev);
1473 	mei_cl_bus_dev_destroy(cldev);
1474 }
1475 
1476 /**
1477  * mei_cl_bus_remove_devices - remove all devices from the bus
1478  *
1479  * @bus: mei device
1480  */
1481 void mei_cl_bus_remove_devices(struct mei_device *bus)
1482 {
1483 	struct mei_cl_device *cldev, *next;
1484 
1485 	mutex_lock(&bus->cl_bus_lock);
1486 	list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
1487 		mei_cl_bus_remove_device(cldev);
1488 	mutex_unlock(&bus->cl_bus_lock);
1489 }
1490 
1491 
1492 /**
1493  * mei_cl_bus_dev_init - allocate and initialize a mei client device
1494  *     based on the me client
1495  *
1496  * @bus: mei device
1497  * @me_cl: me client
1498  *
1499  * Locking: called under "dev->cl_bus_lock" lock
1500  */
1501 static void mei_cl_bus_dev_init(struct mei_device *bus,
1502 				struct mei_me_client *me_cl)
1503 {
1504 	struct mei_cl_device *cldev;
1505 
1506 	WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));
1507 
1508 	dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));
1509 
1510 	if (me_cl->bus_added)
1511 		return;
1512 
1513 	cldev = mei_cl_bus_dev_alloc(bus, me_cl);
1514 	if (!cldev)
1515 		return;
1516 
1517 	me_cl->bus_added = true;
1518 	list_add_tail(&cldev->bus_list, &bus->device_list);
1519 
1520 }
1521 
1522 /**
1523  * mei_cl_bus_rescan - scan the me clients list and create
1524  *    devices for eligible clients
1525  *
1526  * @bus: mei device
1527  */
1528 static void mei_cl_bus_rescan(struct mei_device *bus)
1529 {
1530 	struct mei_cl_device *cldev, *n;
1531 	struct mei_me_client *me_cl;
1532 
1533 	mutex_lock(&bus->cl_bus_lock);
1534 
1535 	down_read(&bus->me_clients_rwsem);
1536 	list_for_each_entry(me_cl, &bus->me_clients, list)
1537 		mei_cl_bus_dev_init(bus, me_cl);
1538 	up_read(&bus->me_clients_rwsem);
1539 
1540 	list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {
1541 
1542 		if (!mei_me_cl_is_active(cldev->me_cl)) {
1543 			mei_cl_bus_remove_device(cldev);
1544 			continue;
1545 		}
1546 
1547 		if (cldev->is_added)
1548 			continue;
1549 
1550 		if (mei_cl_bus_dev_setup(bus, cldev))
1551 			mei_cl_bus_dev_add(cldev);
1552 		else {
1553 			list_del_init(&cldev->bus_list);
1554 			put_device(&cldev->dev);
1555 		}
1556 	}
1557 	mutex_unlock(&bus->cl_bus_lock);
1558 
1559 	dev_dbg(bus->dev, "rescan end");
1560 }
1561 
1562 void mei_cl_bus_rescan_work(struct work_struct *work)
1563 {
1564 	struct mei_device *bus =
1565 		container_of(work, struct mei_device, bus_rescan_work);
1566 
1567 	mei_cl_bus_rescan(bus);
1568 }
1569 
1570 int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
1571 				struct module *owner)
1572 {
1573 	int err;
1574 
1575 	cldrv->driver.name = cldrv->name;
1576 	cldrv->driver.owner = owner;
1577 	cldrv->driver.bus = &mei_cl_bus_type;
1578 
1579 	err = driver_register(&cldrv->driver);
1580 	if (err)
1581 		return err;
1582 
1583 	pr_debug("mei: driver [%s] registered\n", cldrv->driver.name);
1584 
1585 	return 0;
1586 }
1587 EXPORT_SYMBOL_GPL(__mei_cldev_driver_register);
1588 
1589 void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv)
1590 {
1591 	driver_unregister(&cldrv->driver);
1592 
1593 	pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name);
1594 }
1595 EXPORT_SYMBOL_GPL(mei_cldev_driver_unregister);
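
/*
 * Registration sketch for a client driver (hypothetical names and uuid, not
 * part of this file): the uuid/version pair in the id table is what
 * mei_cl_device_find() matches against, and module_mei_cl_driver() from
 * <linux/mei_cl_bus.h> wraps the register/unregister helpers above in the
 * usual module_driver() boilerplate.
 *
 *	#define MY_UUID UUID_LE(0x01234567, 0x89ab, 0xcdef, \
 *				0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef)
 *
 *	static const struct mei_cl_device_id my_id_table[] = {
 *		{ .uuid = MY_UUID, .version = MEI_CL_VERSION_ANY },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(mei, my_id_table);
 *
 *	static struct mei_cl_driver my_driver = {
 *		.id_table = my_id_table,
 *		.name = KBUILD_MODNAME,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *	module_mei_cl_driver(my_driver);
 */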
1596 
1597 
1598 int __init mei_cl_bus_init(void)
1599 {
1600 	return bus_register(&mei_cl_bus_type);
1601 }
1602 
1603 void __exit mei_cl_bus_exit(void)
1604 {
1605 	bus_unregister(&mei_cl_bus_type);
1606 }
1607