1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2012-2023, Intel Corporation. All rights reserved.
4 * Intel Management Engine Interface (Intel MEI) Linux driver
5 */
6
7 #include <linux/module.h>
8 #include <linux/device.h>
9 #include <linux/kernel.h>
10 #include <linux/sched/signal.h>
11 #include <linux/init.h>
12 #include <linux/errno.h>
13 #include <linux/slab.h>
14 #include <linux/mutex.h>
15 #include <linux/interrupt.h>
16 #include <linux/scatterlist.h>
17 #include <linux/mei_cl_bus.h>
18
19 #include "mei_dev.h"
20 #include "client.h"
21
22 #define to_mei_cl_driver(d) container_of_const(d, struct mei_cl_driver, driver)
23
24 /**
25 * __mei_cl_send - internal client send (write)
26 *
27 * @cl: host client
28 * @buf: buffer to send
29 * @length: buffer length
30 * @vtag: virtual tag
31 * @mode: sending mode
32 *
33 * Return: written size bytes or < 0 on error
34 */
__mei_cl_send(struct mei_cl * cl,const u8 * buf,size_t length,u8 vtag,unsigned int mode)35 ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
36 unsigned int mode)
37 {
38 return __mei_cl_send_timeout(cl, buf, length, vtag, mode, MAX_SCHEDULE_TIMEOUT);
39 }
40
/**
 * __mei_cl_send_timeout - internal client send (write)
 *
 * @cl: host client
 * @buf: buffer to send
 * @length: buffer length
 * @vtag: virtual tag
 * @mode: sending mode
 * @timeout: send timeout in milliseconds.
 *           effective only for blocking writes: the MEI_CL_IO_TX_BLOCKING mode bit is set.
 *           set timeout to MAX_SCHEDULE_TIMEOUT for the maximum allowed wait.
 *
 * Return: written size bytes or < 0 on error
 */
ssize_t __mei_cl_send_timeout(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
			      unsigned int mode, unsigned long timeout)
{
	struct mei_device *bus;
	struct mei_cl_cb *cb;
	ssize_t rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	bus = cl->dev;

	mutex_lock(&bus->device_lock);
	/*
	 * NOTE(review): POWERING_DOWN is also accepted here — presumably to
	 * let bus clients send final messages during shutdown; confirm.
	 */
	if (bus->dev_state != MEI_DEV_ENABLED &&
	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
		rets = -ENODEV;
		goto out;
	}

	if (!mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	/* Check if we have an ME client device */
	if (!mei_me_cl_is_active(cl->me_cl)) {
		rets = -ENOTTY;
		goto out;
	}

	if (vtag) {
		/* Check if vtag is supported by client */
		rets = mei_cl_vt_support_check(cl);
		if (rets)
			goto out;
	}

	/* a single write may not exceed the client MTU */
	if (length > mei_cl_mtu(cl)) {
		rets = -EFBIG;
		goto out;
	}

	/*
	 * Tx flow control: wait for room in the tx queue.
	 * The device lock is dropped for the wait and re-taken after;
	 * the connection state must therefore be re-checked.
	 */
	while (cl->tx_cb_queued >= bus->tx_queue_limit) {
		mutex_unlock(&bus->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
						cl->writing_state == MEI_WRITE_COMPLETE ||
						(!mei_cl_is_connected(cl)));
		mutex_lock(&bus->device_lock);
		if (rets) {
			/* interrupted wait: report -EINTR when a signal is pending */
			if (signal_pending(current))
				rets = -EINTR;
			goto out;
		}
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}
	cb->vtag = vtag;

	cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
	cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
	memcpy(cb->buf.data, buf, length);
	/* hack we point data to header */
	if (mode & MEI_CL_IO_SGL) {
		cb->ext_hdr = (struct mei_ext_hdr *)cb->buf.data;
		cb->buf.data = NULL;
		cb->buf.size = 0;
	}

	rets = mei_cl_write(cl, cb, timeout);

	/* for SGL mode a 0 return from mei_cl_write() means full success */
	if (mode & MEI_CL_IO_SGL && rets == 0)
		rets = length;

out:
	mutex_unlock(&bus->device_lock);

	return rets;
}
141
/**
 * __mei_cl_recv - internal client receive (read)
 *
 * @cl: host client
 * @buf: buffer to receive
 * @length: buffer length
 * @vtag: virtual tag
 * @mode: io mode
 * @timeout: recv timeout, 0 for infinite timeout
 *
 * Return: read size in bytes or < 0 on error
 */
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
		      unsigned int mode, unsigned long timeout)
{
	struct mei_device *bus;
	struct mei_cl_cb *cb;
	size_t r_length;
	ssize_t rets;
	bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK);

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	bus = cl->dev;

	mutex_lock(&bus->device_lock);
	if (bus->dev_state != MEI_DEV_ENABLED &&
	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
		rets = -ENODEV;
		goto out;
	}

	/* a completed read may already be queued; consume it without waiting */
	cb = mei_cl_read_cb(cl, NULL);
	if (cb)
		goto copy;

	/* -EBUSY means a read is already in flight; fall through and wait for it */
	rets = mei_cl_read_start(cl, length, NULL);
	if (rets && rets != -EBUSY)
		goto out;

	if (nonblock) {
		rets = -EAGAIN;
		goto out;
	}

	/* wait on event only if there is no other waiter */
	/* synchronized under device mutex */
	if (!waitqueue_active(&cl->rx_wait)) {

		mutex_unlock(&bus->device_lock);

		if (timeout) {
			rets = wait_event_interruptible_timeout
					(cl->rx_wait,
					mei_cl_read_cb(cl, NULL) ||
					(!mei_cl_is_connected(cl)),
					msecs_to_jiffies(timeout));
			if (rets == 0)
				return -ETIME;
			if (rets < 0) {
				/* map signal wakeup to -EINTR, anything else to -ERESTARTSYS */
				if (signal_pending(current))
					return -EINTR;
				return -ERESTARTSYS;
			}
		} else {
			if (wait_event_interruptible
					(cl->rx_wait,
					mei_cl_read_cb(cl, NULL) ||
					(!mei_cl_is_connected(cl)))) {
				if (signal_pending(current))
					return -EINTR;
				return -ERESTARTSYS;
			}
		}

		mutex_lock(&bus->device_lock);

		/* the lock was dropped during the wait; re-check the connection */
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_read_cb(cl, NULL);
	if (!cb) {
		/* woken without data — presumably consumed by another waiter */
		rets = 0;
		goto out;
	}

copy:
	if (cb->status) {
		rets = cb->status;
		goto free;
	}

	/* for the GSC type - copy the extended header to the buffer */
	if (cb->ext_hdr && cb->ext_hdr->type == MEI_EXT_HDR_GSC) {
		/* ext_hdr->length is in dwords */
		r_length = min_t(size_t, length, cb->ext_hdr->length * sizeof(u32));
		memcpy(buf, cb->ext_hdr, r_length);
	} else {
		r_length = min_t(size_t, length, cb->buf_idx);
		memcpy(buf, cb->buf.data, r_length);
	}
	rets = r_length;

	if (vtag)
		*vtag = cb->vtag;

free:
	mei_cl_del_rd_completed(cl, cb);
out:
	mutex_unlock(&bus->device_lock);

	return rets;
}
258
259 /**
260 * mei_cldev_send_vtag - me device send with vtag (write)
261 *
262 * @cldev: me client device
263 * @buf: buffer to send
264 * @length: buffer length
265 * @vtag: virtual tag
266 *
267 * Return:
268 * * written size in bytes
269 * * < 0 on error
270 */
271
mei_cldev_send_vtag(struct mei_cl_device * cldev,const u8 * buf,size_t length,u8 vtag)272 ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
273 size_t length, u8 vtag)
274 {
275 struct mei_cl *cl = cldev->cl;
276
277 return __mei_cl_send(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING);
278 }
279 EXPORT_SYMBOL_GPL(mei_cldev_send_vtag);
280
281 /**
282 * mei_cldev_send_vtag_timeout - me device send with vtag and timeout (write)
283 *
284 * @cldev: me client device
285 * @buf: buffer to send
286 * @length: buffer length
287 * @vtag: virtual tag
288 * @timeout: send timeout in milliseconds, 0 for infinite timeout
289 *
290 * Return:
291 * * written size in bytes
292 * * < 0 on error
293 */
294
mei_cldev_send_vtag_timeout(struct mei_cl_device * cldev,const u8 * buf,size_t length,u8 vtag,unsigned long timeout)295 ssize_t mei_cldev_send_vtag_timeout(struct mei_cl_device *cldev, const u8 *buf,
296 size_t length, u8 vtag, unsigned long timeout)
297 {
298 struct mei_cl *cl = cldev->cl;
299
300 return __mei_cl_send_timeout(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING, timeout);
301 }
302 EXPORT_SYMBOL_GPL(mei_cldev_send_vtag_timeout);
303
304 /**
305 * mei_cldev_recv_vtag - client receive with vtag (read)
306 *
307 * @cldev: me client device
308 * @buf: buffer to receive
309 * @length: buffer length
310 * @vtag: virtual tag
311 *
312 * Return:
313 * * read size in bytes
314 * * < 0 on error
315 */
316
mei_cldev_recv_vtag(struct mei_cl_device * cldev,u8 * buf,size_t length,u8 * vtag)317 ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
318 u8 *vtag)
319 {
320 struct mei_cl *cl = cldev->cl;
321
322 return __mei_cl_recv(cl, buf, length, vtag, 0, 0);
323 }
324 EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag);
325
326 /**
327 * mei_cldev_recv_timeout - client receive with timeout (read)
328 *
329 * @cldev: me client device
330 * @buf: buffer to receive
331 * @length: buffer length
332 * @timeout: send timeout in milliseconds, 0 for infinite timeout
333 *
334 * Return:
335 * * read size in bytes
336 * * < 0 on error
337 */
mei_cldev_recv_timeout(struct mei_cl_device * cldev,u8 * buf,size_t length,unsigned long timeout)338 ssize_t mei_cldev_recv_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
339 unsigned long timeout)
340 {
341 return mei_cldev_recv_vtag_timeout(cldev, buf, length, NULL, timeout);
342 }
343 EXPORT_SYMBOL_GPL(mei_cldev_recv_timeout);
344
345 /**
346 * mei_cldev_recv_vtag_timeout - client receive with vtag (read)
347 *
348 * @cldev: me client device
349 * @buf: buffer to receive
350 * @length: buffer length
351 * @vtag: virtual tag
352 * @timeout: recv timeout in milliseconds, 0 for infinite timeout
353 *
354 * Return:
355 * * read size in bytes
356 * * < 0 on error
357 */
358
mei_cldev_recv_vtag_timeout(struct mei_cl_device * cldev,u8 * buf,size_t length,u8 * vtag,unsigned long timeout)359 ssize_t mei_cldev_recv_vtag_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
360 u8 *vtag, unsigned long timeout)
361 {
362 struct mei_cl *cl = cldev->cl;
363
364 return __mei_cl_recv(cl, buf, length, vtag, 0, timeout);
365 }
366 EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag_timeout);
367
368 /**
369 * mei_cldev_send - me device send (write)
370 *
371 * @cldev: me client device
372 * @buf: buffer to send
373 * @length: buffer length
374 *
375 * Return:
376 * * written size in bytes
377 * * < 0 on error
378 */
mei_cldev_send(struct mei_cl_device * cldev,const u8 * buf,size_t length)379 ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length)
380 {
381 return mei_cldev_send_vtag(cldev, buf, length, 0);
382 }
383 EXPORT_SYMBOL_GPL(mei_cldev_send);
384
385 /**
386 * mei_cldev_send_timeout - me device send with timeout (write)
387 *
388 * @cldev: me client device
389 * @buf: buffer to send
390 * @length: buffer length
391 * @timeout: send timeout in milliseconds, 0 for infinite timeout
392 *
393 * Return:
394 * * written size in bytes
395 * * < 0 on error
396 */
mei_cldev_send_timeout(struct mei_cl_device * cldev,const u8 * buf,size_t length,unsigned long timeout)397 ssize_t mei_cldev_send_timeout(struct mei_cl_device *cldev, const u8 *buf, size_t length,
398 unsigned long timeout)
399 {
400 return mei_cldev_send_vtag_timeout(cldev, buf, length, 0, timeout);
401 }
402 EXPORT_SYMBOL_GPL(mei_cldev_send_timeout);
403
404 /**
405 * mei_cldev_recv - client receive (read)
406 *
407 * @cldev: me client device
408 * @buf: buffer to receive
409 * @length: buffer length
410 *
411 * Return: read size in bytes of < 0 on error
412 */
mei_cldev_recv(struct mei_cl_device * cldev,u8 * buf,size_t length)413 ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
414 {
415 return mei_cldev_recv_vtag(cldev, buf, length, NULL);
416 }
417 EXPORT_SYMBOL_GPL(mei_cldev_recv);
418
419 /**
420 * mei_cl_bus_rx_work - dispatch rx event for a bus device
421 *
422 * @work: work
423 */
mei_cl_bus_rx_work(struct work_struct * work)424 static void mei_cl_bus_rx_work(struct work_struct *work)
425 {
426 struct mei_cl_device *cldev;
427 struct mei_device *bus;
428
429 cldev = container_of(work, struct mei_cl_device, rx_work);
430
431 bus = cldev->bus;
432
433 if (cldev->rx_cb)
434 cldev->rx_cb(cldev);
435
436 mutex_lock(&bus->device_lock);
437 if (mei_cl_is_connected(cldev->cl))
438 mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
439 mutex_unlock(&bus->device_lock);
440 }
441
442 /**
443 * mei_cl_bus_notif_work - dispatch FW notif event for a bus device
444 *
445 * @work: work
446 */
mei_cl_bus_notif_work(struct work_struct * work)447 static void mei_cl_bus_notif_work(struct work_struct *work)
448 {
449 struct mei_cl_device *cldev;
450
451 cldev = container_of(work, struct mei_cl_device, notif_work);
452
453 if (cldev->notif_cb)
454 cldev->notif_cb(cldev);
455 }
456
457 /**
458 * mei_cl_bus_notify_event - schedule notify cb on bus client
459 *
460 * @cl: host client
461 *
462 * Return: true if event was scheduled
463 * false if the client is not waiting for event
464 */
mei_cl_bus_notify_event(struct mei_cl * cl)465 bool mei_cl_bus_notify_event(struct mei_cl *cl)
466 {
467 struct mei_cl_device *cldev = cl->cldev;
468
469 if (!cldev || !cldev->notif_cb)
470 return false;
471
472 if (!cl->notify_ev)
473 return false;
474
475 schedule_work(&cldev->notif_work);
476
477 cl->notify_ev = false;
478
479 return true;
480 }
481
482 /**
483 * mei_cl_bus_rx_event - schedule rx event
484 *
485 * @cl: host client
486 *
487 * Return: true if event was scheduled
488 * false if the client is not waiting for event
489 */
mei_cl_bus_rx_event(struct mei_cl * cl)490 bool mei_cl_bus_rx_event(struct mei_cl *cl)
491 {
492 struct mei_cl_device *cldev = cl->cldev;
493
494 if (!cldev || !cldev->rx_cb)
495 return false;
496
497 schedule_work(&cldev->rx_work);
498
499 return true;
500 }
501
/**
 * mei_cldev_register_rx_cb - register Rx event callback
 *
 * @cldev: me client devices
 * @rx_cb: callback function
 *
 * Return: 0 on success
 *         -EALREADY if a callback is already registered
 *         <0 on other errors
 */
int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb)
{
	struct mei_device *bus = cldev->bus;
	int ret;

	if (!rx_cb)
		return -EINVAL;
	if (cldev->rx_cb)
		return -EALREADY;

	/* publish the callback before arming the first read */
	cldev->rx_cb = rx_cb;
	INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work);

	mutex_lock(&bus->device_lock);
	if (mei_cl_is_connected(cldev->cl))
		ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
	else
		ret = -ENODEV;
	mutex_unlock(&bus->device_lock);
	/* -EBUSY: a read is already pending, which is fine here */
	if (ret && ret != -EBUSY) {
		/* roll back: cancel any work already scheduled and unpublish */
		cancel_work_sync(&cldev->rx_work);
		cldev->rx_cb = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb);
540
/**
 * mei_cldev_register_notif_cb - register FW notification event callback
 *
 * @cldev: me client devices
 * @notif_cb: callback function
 *
 * Return: 0 on success
 *         -EALREADY if a callback is already registered
 *         <0 on other errors
 */
int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
				mei_cldev_cb_t notif_cb)
{
	struct mei_device *bus = cldev->bus;
	int ret;

	if (!notif_cb)
		return -EINVAL;

	if (cldev->notif_cb)
		return -EALREADY;

	/* publish the callback before requesting notifications from FW */
	cldev->notif_cb = notif_cb;
	INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work);

	mutex_lock(&bus->device_lock);
	/* 1 = enable notifications */
	ret = mei_cl_notify_request(cldev->cl, NULL, 1);
	mutex_unlock(&bus->device_lock);
	if (ret) {
		/* roll back: cancel any scheduled work and unpublish */
		cancel_work_sync(&cldev->notif_work);
		cldev->notif_cb = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb);
578
579 /**
580 * mei_cldev_get_drvdata - driver data getter
581 *
582 * @cldev: mei client device
583 *
584 * Return: driver private data
585 */
mei_cldev_get_drvdata(const struct mei_cl_device * cldev)586 void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev)
587 {
588 return dev_get_drvdata(&cldev->dev);
589 }
590 EXPORT_SYMBOL_GPL(mei_cldev_get_drvdata);
591
592 /**
593 * mei_cldev_set_drvdata - driver data setter
594 *
595 * @cldev: mei client device
596 * @data: data to store
597 */
mei_cldev_set_drvdata(struct mei_cl_device * cldev,void * data)598 void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data)
599 {
600 dev_set_drvdata(&cldev->dev, data);
601 }
602 EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata);
603
604 /**
605 * mei_cldev_uuid - return uuid of the underlying me client
606 *
607 * @cldev: mei client device
608 *
609 * Return: me client uuid
610 */
mei_cldev_uuid(const struct mei_cl_device * cldev)611 const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev)
612 {
613 return mei_me_cl_uuid(cldev->me_cl);
614 }
615 EXPORT_SYMBOL_GPL(mei_cldev_uuid);
616
617 /**
618 * mei_cldev_ver - return protocol version of the underlying me client
619 *
620 * @cldev: mei client device
621 *
622 * Return: me client protocol version
623 */
mei_cldev_ver(const struct mei_cl_device * cldev)624 u8 mei_cldev_ver(const struct mei_cl_device *cldev)
625 {
626 return mei_me_cl_ver(cldev->me_cl);
627 }
628 EXPORT_SYMBOL_GPL(mei_cldev_ver);
629
630 /**
631 * mei_cldev_mtu - max message that client can send and receive
632 *
633 * @cldev: mei client device
634 *
635 * Return: mtu or 0 if client is not connected
636 */
mei_cldev_mtu(const struct mei_cl_device * cldev)637 size_t mei_cldev_mtu(const struct mei_cl_device *cldev)
638 {
639 return mei_cl_mtu(cldev->cl);
640 }
641 EXPORT_SYMBOL_GPL(mei_cldev_mtu);
642
643 /**
644 * mei_cldev_enabled - check whether the device is enabled
645 *
646 * @cldev: mei client device
647 *
648 * Return: true if me client is initialized and connected
649 */
mei_cldev_enabled(const struct mei_cl_device * cldev)650 bool mei_cldev_enabled(const struct mei_cl_device *cldev)
651 {
652 return mei_cl_is_connected(cldev->cl);
653 }
654 EXPORT_SYMBOL_GPL(mei_cldev_enabled);
655
656 /**
657 * mei_cl_bus_module_get - acquire module of the underlying
658 * hw driver.
659 *
660 * @cldev: mei client device
661 *
662 * Return: true on success; false if the module was removed.
663 */
mei_cl_bus_module_get(struct mei_cl_device * cldev)664 static bool mei_cl_bus_module_get(struct mei_cl_device *cldev)
665 {
666 return try_module_get(cldev->bus->parent->driver->owner);
667 }
668
669 /**
670 * mei_cl_bus_module_put - release the underlying hw module.
671 *
672 * @cldev: mei client device
673 */
mei_cl_bus_module_put(struct mei_cl_device * cldev)674 static void mei_cl_bus_module_put(struct mei_cl_device *cldev)
675 {
676 module_put(cldev->bus->parent->driver->owner);
677 }
678
679 /**
680 * mei_cl_bus_vtag - get bus vtag entry wrapper
681 * The tag for bus client is always first.
682 *
683 * @cl: host client
684 *
685 * Return: bus vtag or NULL
686 */
mei_cl_bus_vtag(struct mei_cl * cl)687 static inline struct mei_cl_vtag *mei_cl_bus_vtag(struct mei_cl *cl)
688 {
689 return list_first_entry_or_null(&cl->vtag_map,
690 struct mei_cl_vtag, list);
691 }
692
693 /**
694 * mei_cl_bus_vtag_alloc - add bus client entry to vtag map
695 *
696 * @cldev: me client device
697 *
698 * Return:
699 * * 0 on success
700 * * -ENOMEM if memory allocation failed
701 */
mei_cl_bus_vtag_alloc(struct mei_cl_device * cldev)702 static int mei_cl_bus_vtag_alloc(struct mei_cl_device *cldev)
703 {
704 struct mei_cl *cl = cldev->cl;
705 struct mei_cl_vtag *cl_vtag;
706
707 /*
708 * Bail out if the client does not supports vtags
709 * or has already allocated one
710 */
711 if (mei_cl_vt_support_check(cl) || mei_cl_bus_vtag(cl))
712 return 0;
713
714 cl_vtag = mei_cl_vtag_alloc(NULL, 0);
715 if (IS_ERR(cl_vtag))
716 return -ENOMEM;
717
718 list_add_tail(&cl_vtag->list, &cl->vtag_map);
719
720 return 0;
721 }
722
723 /**
724 * mei_cl_bus_vtag_free - remove the bus entry from vtag map
725 *
726 * @cldev: me client device
727 */
mei_cl_bus_vtag_free(struct mei_cl_device * cldev)728 static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev)
729 {
730 struct mei_cl *cl = cldev->cl;
731 struct mei_cl_vtag *cl_vtag;
732
733 cl_vtag = mei_cl_bus_vtag(cl);
734 if (!cl_vtag)
735 return;
736
737 list_del(&cl_vtag->list);
738 kfree(cl_vtag);
739 }
740
/**
 * mei_cldev_dma_map - allocate a DMA buffer and map it for the client
 *
 * @cldev: me client device
 * @buffer_id: id of the buffer to be mapped, must be non-zero
 * @size: size of the buffer, must be a multiple of MEI_FW_PAGE_SIZE
 *
 * Return: virtual address of the mapped buffer or an ERR_PTR on failure
 */
void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int ret;

	if (!cldev || !buffer_id || !size)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(size, MEI_FW_PAGE_SIZE)) {
		dev_err(&cldev->dev, "Map size should be aligned to %lu\n",
			MEI_FW_PAGE_SIZE);
		return ERR_PTR(-EINVAL);
	}

	cl = cldev->cl;
	bus = cldev->bus;

	mutex_lock(&bus->device_lock);
	/* lazily link the client on first use */
	if (cl->state == MEI_FILE_UNINITIALIZED) {
		ret = mei_cl_link(cl);
		if (ret)
			goto notlinked;
		/* update pointers */
		cl->cldev = cldev;
	}

	/* on failure the client is unlinked again */
	ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
	if (ret)
		mei_cl_unlink(cl);
notlinked:
	mutex_unlock(&bus->device_lock);
	if (ret)
		return ERR_PTR(ret);
	return cl->dma.vaddr;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_map);
778
/**
 * mei_cldev_dma_unmap - unmap the client DMA buffer
 *
 * @cldev: me client device
 *
 * Return: 0 on success and < 0 on error
 */
int mei_cldev_dma_unmap(struct mei_cl_device *cldev)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int ret;

	if (!cldev)
		return -EINVAL;

	cl = cldev->cl;
	bus = cldev->bus;

	mutex_lock(&bus->device_lock);
	ret = mei_cl_dma_unmap(cl, NULL);

	/* drop any pending io and undo the link done by mei_cldev_dma_map() */
	mei_cl_flush_queues(cl, NULL);
	mei_cl_unlink(cl);
	mutex_unlock(&bus->device_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_unmap);
800
/**
 * mei_cldev_enable - enable me client device
 *     create connection with me client
 *
 * @cldev: me client device
 *
 * Return: 0 on success and < 0 on error
 */
int mei_cldev_enable(struct mei_cl_device *cldev)
{
	struct mei_device *bus = cldev->bus;
	struct mei_cl *cl;
	int ret;

	cl = cldev->cl;

	mutex_lock(&bus->device_lock);
	/* lazily link the client on first enable */
	if (cl->state == MEI_FILE_UNINITIALIZED) {
		ret = mei_cl_link(cl);
		if (ret)
			goto notlinked;
		/* update pointers */
		cl->cldev = cldev;
	}

	/* already connected: nothing to do */
	if (mei_cl_is_connected(cl)) {
		ret = 0;
		goto out;
	}

	if (!mei_me_cl_is_active(cldev->me_cl)) {
		dev_err(&cldev->dev, "me client is not active\n");
		ret = -ENOTTY;
		goto out;
	}

	ret = mei_cl_bus_vtag_alloc(cldev);
	if (ret)
		goto out;

	ret = mei_cl_connect(cl, cldev->me_cl, NULL);
	if (ret < 0) {
		dev_dbg(&cldev->dev, "cannot connect\n");
		/* undo the vtag allocation done just above */
		mei_cl_bus_vtag_free(cldev);
	}

out:
	/* on any failure undo the link established in this call */
	if (ret)
		mei_cl_unlink(cl);
notlinked:
	mutex_unlock(&bus->device_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_enable);
856
/**
 * mei_cldev_unregister_callbacks - internal wrapper for unregistering
 *  callbacks.
 *
 * @cldev: client device
 */
static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev)
{
	/* cancel before clearing so no in-flight work runs after clearing */
	if (cldev->rx_cb) {
		cancel_work_sync(&cldev->rx_work);
		cldev->rx_cb = NULL;
	}

	if (cldev->notif_cb) {
		cancel_work_sync(&cldev->notif_work);
		cldev->notif_cb = NULL;
	}
}
875
/**
 * mei_cldev_disable - disable me client device
 *     disconnect form the me client
 *
 * @cldev: me client device
 *
 * Return: 0 on success and < 0 on error
 */
int mei_cldev_disable(struct mei_cl_device *cldev)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int err;

	if (!cldev)
		return -ENODEV;

	cl = cldev->cl;

	bus = cldev->bus;

	/* stop rx/notif callbacks before tearing the connection down */
	mei_cldev_unregister_callbacks(cldev);

	mutex_lock(&bus->device_lock);

	mei_cl_bus_vtag_free(cldev);

	if (!mei_cl_is_connected(cl)) {
		dev_dbg(&cldev->dev, "Already disconnected\n");
		err = 0;
		goto out;
	}

	err = mei_cl_disconnect(cl);
	if (err < 0)
		dev_err(&cldev->dev, "Could not disconnect from the ME client\n");

out:
	/* Flush queues and remove any pending read unless we have mapped DMA */
	if (!cl->dma_mapped) {
		mei_cl_flush_queues(cl, NULL);
		mei_cl_unlink(cl);
	}

	mutex_unlock(&bus->device_lock);
	return err;
}
EXPORT_SYMBOL_GPL(mei_cldev_disable);
924
925 /**
926 * mei_cldev_send_gsc_command - sends a gsc command, by sending
927 * a gsl mei message to gsc and receiving reply from gsc
928 *
929 * @cldev: me client device
930 * @client_id: client id to send the command to
931 * @fence_id: fence id to send the command to
932 * @sg_in: scatter gather list containing addresses for rx message buffer
933 * @total_in_len: total length of data in 'in' sg, can be less than the sum of buffers sizes
934 * @sg_out: scatter gather list containing addresses for tx message buffer
935 *
936 * Return:
937 * * written size in bytes
938 * * < 0 on error
939 */
mei_cldev_send_gsc_command(struct mei_cl_device * cldev,u8 client_id,u32 fence_id,struct scatterlist * sg_in,size_t total_in_len,struct scatterlist * sg_out)940 ssize_t mei_cldev_send_gsc_command(struct mei_cl_device *cldev,
941 u8 client_id, u32 fence_id,
942 struct scatterlist *sg_in,
943 size_t total_in_len,
944 struct scatterlist *sg_out)
945 {
946 struct mei_cl *cl;
947 struct mei_device *bus;
948 ssize_t ret = 0;
949
950 struct mei_ext_hdr_gsc_h2f *ext_hdr;
951 size_t buf_sz = sizeof(struct mei_ext_hdr_gsc_h2f);
952 int sg_out_nents, sg_in_nents;
953 int i;
954 struct scatterlist *sg;
955 struct mei_ext_hdr_gsc_f2h rx_msg;
956 unsigned int sg_len;
957
958 if (!cldev || !sg_in || !sg_out)
959 return -EINVAL;
960
961 cl = cldev->cl;
962 bus = cldev->bus;
963
964 dev_dbg(&cldev->dev, "client_id %u, fence_id %u\n", client_id, fence_id);
965
966 if (!bus->hbm_f_gsc_supported)
967 return -EOPNOTSUPP;
968
969 sg_out_nents = sg_nents(sg_out);
970 sg_in_nents = sg_nents(sg_in);
971 /* at least one entry in tx and rx sgls must be present */
972 if (sg_out_nents <= 0 || sg_in_nents <= 0)
973 return -EINVAL;
974
975 buf_sz += (sg_out_nents + sg_in_nents) * sizeof(struct mei_gsc_sgl);
976 ext_hdr = kzalloc(buf_sz, GFP_KERNEL);
977 if (!ext_hdr)
978 return -ENOMEM;
979
980 /* construct the GSC message */
981 ext_hdr->hdr.type = MEI_EXT_HDR_GSC;
982 ext_hdr->hdr.length = buf_sz / sizeof(u32); /* length is in dw */
983
984 ext_hdr->client_id = client_id;
985 ext_hdr->addr_type = GSC_ADDRESS_TYPE_PHYSICAL_SGL;
986 ext_hdr->fence_id = fence_id;
987 ext_hdr->input_address_count = sg_in_nents;
988 ext_hdr->output_address_count = sg_out_nents;
989 ext_hdr->reserved[0] = 0;
990 ext_hdr->reserved[1] = 0;
991
992 /* copy in-sgl to the message */
993 for (i = 0, sg = sg_in; i < sg_in_nents; i++, sg++) {
994 ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
995 ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
996 sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
997 ext_hdr->sgl[i].length = (sg_len <= total_in_len) ? sg_len : total_in_len;
998 total_in_len -= ext_hdr->sgl[i].length;
999 }
1000
1001 /* copy out-sgl to the message */
1002 for (i = sg_in_nents, sg = sg_out; i < sg_in_nents + sg_out_nents; i++, sg++) {
1003 ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
1004 ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
1005 sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
1006 ext_hdr->sgl[i].length = sg_len;
1007 }
1008
1009 /* send the message to GSC */
1010 ret = __mei_cl_send(cl, (u8 *)ext_hdr, buf_sz, 0, MEI_CL_IO_SGL);
1011 if (ret < 0) {
1012 dev_err(&cldev->dev, "__mei_cl_send failed, returned %zd\n", ret);
1013 goto end;
1014 }
1015 if (ret != buf_sz) {
1016 dev_err(&cldev->dev, "__mei_cl_send returned %zd instead of expected %zd\n",
1017 ret, buf_sz);
1018 ret = -EIO;
1019 goto end;
1020 }
1021
1022 /* receive the reply from GSC, note that at this point sg_in should contain the reply */
1023 ret = __mei_cl_recv(cl, (u8 *)&rx_msg, sizeof(rx_msg), NULL, MEI_CL_IO_SGL, 0);
1024
1025 if (ret != sizeof(rx_msg)) {
1026 dev_err(&cldev->dev, "__mei_cl_recv returned %zd instead of expected %zd\n",
1027 ret, sizeof(rx_msg));
1028 if (ret >= 0)
1029 ret = -EIO;
1030 goto end;
1031 }
1032
1033 /* check rx_msg.client_id and rx_msg.fence_id match the ones we send */
1034 if (rx_msg.client_id != client_id || rx_msg.fence_id != fence_id) {
1035 dev_err(&cldev->dev, "received client_id/fence_id %u/%u instead of %u/%u sent\n",
1036 rx_msg.client_id, rx_msg.fence_id, client_id, fence_id);
1037 ret = -EFAULT;
1038 goto end;
1039 }
1040
1041 dev_dbg(&cldev->dev, "gsc command: successfully written %u bytes\n", rx_msg.written);
1042 ret = rx_msg.written;
1043
1044 end:
1045 kfree(ext_hdr);
1046 return ret;
1047 }
1048 EXPORT_SYMBOL_GPL(mei_cldev_send_gsc_command);
1049
1050 /**
1051 * mei_cl_device_find - find matching entry in the driver id table
1052 *
1053 * @cldev: me client device
1054 * @cldrv: me client driver
1055 *
1056 * Return: id on success; NULL if no id is matching
1057 */
1058 static const
mei_cl_device_find(const struct mei_cl_device * cldev,const struct mei_cl_driver * cldrv)1059 struct mei_cl_device_id *mei_cl_device_find(const struct mei_cl_device *cldev,
1060 const struct mei_cl_driver *cldrv)
1061 {
1062 const struct mei_cl_device_id *id;
1063 const uuid_le *uuid;
1064 u8 version;
1065 bool match;
1066
1067 uuid = mei_me_cl_uuid(cldev->me_cl);
1068 version = mei_me_cl_ver(cldev->me_cl);
1069
1070 id = cldrv->id_table;
1071 while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
1072 if (!uuid_le_cmp(*uuid, id->uuid)) {
1073 match = true;
1074
1075 if (cldev->name[0])
1076 if (strncmp(cldev->name, id->name,
1077 sizeof(id->name)))
1078 match = false;
1079
1080 if (id->version != MEI_CL_VERSION_ANY)
1081 if (id->version != version)
1082 match = false;
1083 if (match)
1084 return id;
1085 }
1086
1087 id++;
1088 }
1089
1090 return NULL;
1091 }
1092
1093 /**
1094 * mei_cl_device_match - device match function
1095 *
1096 * @dev: device
1097 * @drv: driver
1098 *
1099 * Return: 1 if matching device was found 0 otherwise
1100 */
mei_cl_device_match(struct device * dev,const struct device_driver * drv)1101 static int mei_cl_device_match(struct device *dev, const struct device_driver *drv)
1102 {
1103 const struct mei_cl_device *cldev = to_mei_cl_device(dev);
1104 const struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
1105 const struct mei_cl_device_id *found_id;
1106
1107 if (!cldev->do_match)
1108 return 0;
1109
1110 if (!cldrv || !cldrv->id_table)
1111 return 0;
1112
1113 found_id = mei_cl_device_find(cldev, cldrv);
1114 if (found_id)
1115 return 1;
1116
1117 return 0;
1118 }
1119
1120 /**
1121 * mei_cl_device_probe - bus probe function
1122 *
1123 * @dev: device
1124 *
1125 * Return: 0 on success; < 0 otherwise
1126 */
mei_cl_device_probe(struct device * dev)1127 static int mei_cl_device_probe(struct device *dev)
1128 {
1129 struct mei_cl_device *cldev;
1130 struct mei_cl_driver *cldrv;
1131 const struct mei_cl_device_id *id;
1132 int ret;
1133
1134 cldev = to_mei_cl_device(dev);
1135 cldrv = to_mei_cl_driver(dev->driver);
1136
1137 if (!cldrv || !cldrv->probe)
1138 return -ENODEV;
1139
1140 id = mei_cl_device_find(cldev, cldrv);
1141 if (!id)
1142 return -ENODEV;
1143
1144 if (!mei_cl_bus_module_get(cldev)) {
1145 dev_err(&cldev->dev, "get hw module failed");
1146 return -ENODEV;
1147 }
1148
1149 ret = cldrv->probe(cldev, id);
1150 if (ret) {
1151 mei_cl_bus_module_put(cldev);
1152 return ret;
1153 }
1154
1155 __module_get(THIS_MODULE);
1156 return 0;
1157 }
1158
/**
 * mei_cl_device_remove - remove device from the bus
 *
 * @dev: device
 */
static void mei_cl_device_remove(struct device *dev)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver);

	if (cldrv->remove)
		cldrv->remove(cldev);

	mei_cldev_unregister_callbacks(cldev);

	/* release the module references taken in mei_cl_device_probe() */
	mei_cl_bus_module_put(cldev);
	module_put(THIS_MODULE);
}
1179
name_show(struct device * dev,struct device_attribute * a,char * buf)1180 static ssize_t name_show(struct device *dev, struct device_attribute *a,
1181 char *buf)
1182 {
1183 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1184
1185 return sysfs_emit(buf, "%s", cldev->name);
1186 }
1187 static DEVICE_ATTR_RO(name);
1188
uuid_show(struct device * dev,struct device_attribute * a,char * buf)1189 static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
1190 char *buf)
1191 {
1192 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1193 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
1194
1195 return sysfs_emit(buf, "%pUl", uuid);
1196 }
1197 static DEVICE_ATTR_RO(uuid);
1198
version_show(struct device * dev,struct device_attribute * a,char * buf)1199 static ssize_t version_show(struct device *dev, struct device_attribute *a,
1200 char *buf)
1201 {
1202 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1203 u8 version = mei_me_cl_ver(cldev->me_cl);
1204
1205 return sysfs_emit(buf, "%02X", version);
1206 }
1207 static DEVICE_ATTR_RO(version);
1208
modalias_show(struct device * dev,struct device_attribute * a,char * buf)1209 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
1210 char *buf)
1211 {
1212 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1213 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
1214 u8 version = mei_me_cl_ver(cldev->me_cl);
1215
1216 return sysfs_emit(buf, "mei:%s:%pUl:%02X:", cldev->name, uuid, version);
1217 }
1218 static DEVICE_ATTR_RO(modalias);
1219
max_conn_show(struct device * dev,struct device_attribute * a,char * buf)1220 static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
1221 char *buf)
1222 {
1223 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1224 u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);
1225
1226 return sysfs_emit(buf, "%d", maxconn);
1227 }
1228 static DEVICE_ATTR_RO(max_conn);
1229
fixed_show(struct device * dev,struct device_attribute * a,char * buf)1230 static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
1231 char *buf)
1232 {
1233 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1234 u8 fixed = mei_me_cl_fixed(cldev->me_cl);
1235
1236 return sysfs_emit(buf, "%d", fixed);
1237 }
1238 static DEVICE_ATTR_RO(fixed);
1239
vtag_show(struct device * dev,struct device_attribute * a,char * buf)1240 static ssize_t vtag_show(struct device *dev, struct device_attribute *a,
1241 char *buf)
1242 {
1243 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1244 bool vt = mei_me_cl_vt(cldev->me_cl);
1245
1246 return sysfs_emit(buf, "%d", vt);
1247 }
1248 static DEVICE_ATTR_RO(vtag);
1249
max_len_show(struct device * dev,struct device_attribute * a,char * buf)1250 static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
1251 char *buf)
1252 {
1253 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1254 u32 maxlen = mei_me_cl_max_len(cldev->me_cl);
1255
1256 return sysfs_emit(buf, "%u", maxlen);
1257 }
1258 static DEVICE_ATTR_RO(max_len);
1259
/* default sysfs attributes exposed for every mei client device */
static struct attribute *mei_cldev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_uuid.attr,
	&dev_attr_version.attr,
	&dev_attr_modalias.attr,
	&dev_attr_max_conn.attr,
	&dev_attr_fixed.attr,
	&dev_attr_vtag.attr,
	&dev_attr_max_len.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mei_cldev);
1272
/**
 * mei_cl_device_uevent - me client bus uevent handler
 *
 * @dev: device
 * @env: uevent kobject
 *
 * Return: 0 on success, -ENOMEM when add_uevent_var() fails
 */
static int mei_cl_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
	u8 version = mei_me_cl_ver(cldev->me_cl);

	/* each add_uevent_var() returns non-zero on env buffer exhaustion */
	if (add_uevent_var(env, "MEI_CL_VERSION=%d", version))
		return -ENOMEM;

	if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
		return -ENOMEM;

	if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name))
		return -ENOMEM;

	/* must stay in sync with modalias_show() */
	if (add_uevent_var(env, "MODALIAS=mei:%s:%pUl:%02X:",
			   cldev->name, uuid, version))
		return -ENOMEM;

	return 0;
}
1302
/* the mei client bus: binds mei client devices to mei client drivers */
static const struct bus_type mei_cl_bus_type = {
	.name		= "mei",
	.dev_groups	= mei_cldev_groups,
	.match		= mei_cl_device_match,
	.probe		= mei_cl_device_probe,
	.remove		= mei_cl_device_remove,
	.uevent		= mei_cl_device_uevent,
};
1311
mei_dev_bus_get(struct mei_device * bus)1312 static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
1313 {
1314 if (bus) {
1315 get_device(&bus->dev);
1316 get_device(bus->parent);
1317 }
1318
1319 return bus;
1320 }
1321
mei_dev_bus_put(struct mei_device * bus)1322 static void mei_dev_bus_put(struct mei_device *bus)
1323 {
1324 if (bus) {
1325 put_device(bus->parent);
1326 put_device(&bus->dev);
1327 }
1328 }
1329
static void mei_cl_bus_dev_release(struct device *dev)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	struct mei_device *mdev = cldev->cl->dev;
	struct mei_cl *cl;

	/* discard any queued callbacks before freeing the host client */
	mei_cl_flush_queues(cldev->cl, NULL);
	mei_me_cl_put(cldev->me_cl);
	mei_dev_bus_put(cldev->bus);

	/*
	 * Sanity check: the host client must no longer be linked on the
	 * device file list; a hit here would indicate a use-after-free
	 * once cldev->cl is freed below.
	 */
	list_for_each_entry(cl, &mdev->file_list, link)
		WARN_ON(cl == cldev->cl);

	kfree(cldev->cl);
	kfree(cldev);
}
1346
/* device type for mei client devices; provides the release callback */
static const struct device_type mei_cl_device_type = {
	.release = mei_cl_bus_dev_release,
};
1350
1351 /**
1352 * mei_cl_bus_set_name - set device name for me client device
1353 * <controller>-<client device>
1354 * Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
1355 *
1356 * @cldev: me client device
1357 */
mei_cl_bus_set_name(struct mei_cl_device * cldev)1358 static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
1359 {
1360 dev_set_name(&cldev->dev, "%s-%pUl",
1361 dev_name(cldev->bus->parent),
1362 mei_me_cl_uuid(cldev->me_cl));
1363 }
1364
1365 /**
1366 * mei_cl_bus_dev_alloc - initialize and allocate mei client device
1367 *
1368 * @bus: mei device
1369 * @me_cl: me client
1370 *
1371 * Return: allocated device structure or NULL on allocation failure
1372 */
mei_cl_bus_dev_alloc(struct mei_device * bus,struct mei_me_client * me_cl)1373 static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
1374 struct mei_me_client *me_cl)
1375 {
1376 struct mei_cl_device *cldev;
1377 struct mei_cl *cl;
1378
1379 cldev = kzalloc_obj(*cldev);
1380 if (!cldev)
1381 return NULL;
1382
1383 cl = mei_cl_allocate(bus);
1384 if (!cl) {
1385 kfree(cldev);
1386 return NULL;
1387 }
1388
1389 device_initialize(&cldev->dev);
1390 cldev->dev.parent = bus->parent;
1391 cldev->dev.bus = &mei_cl_bus_type;
1392 cldev->dev.type = &mei_cl_device_type;
1393 cldev->bus = mei_dev_bus_get(bus);
1394 cldev->me_cl = mei_me_cl_get(me_cl);
1395 cldev->cl = cl;
1396 mei_cl_bus_set_name(cldev);
1397 cldev->is_added = 0;
1398 INIT_LIST_HEAD(&cldev->bus_list);
1399 device_enable_async_suspend(&cldev->dev);
1400
1401 return cldev;
1402 }
1403
1404 /**
1405 * mei_cl_bus_dev_setup - setup me client device
1406 * run fix up routines and set the device name
1407 *
1408 * @bus: mei device
1409 * @cldev: me client device
1410 *
1411 * Return: true if the device is eligible for enumeration
1412 */
mei_cl_bus_dev_setup(struct mei_device * bus,struct mei_cl_device * cldev)1413 static bool mei_cl_bus_dev_setup(struct mei_device *bus,
1414 struct mei_cl_device *cldev)
1415 {
1416 cldev->do_match = 1;
1417 mei_cl_bus_dev_fixup(cldev);
1418
1419 /* the device name can change during fix up */
1420 if (cldev->do_match)
1421 mei_cl_bus_set_name(cldev);
1422
1423 return cldev->do_match == 1;
1424 }
1425
1426 /**
1427 * mei_cl_bus_dev_add - add me client devices
1428 *
1429 * @cldev: me client device
1430 *
1431 * Return: 0 on success; < 0 on failure
1432 */
mei_cl_bus_dev_add(struct mei_cl_device * cldev)1433 static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
1434 {
1435 int ret;
1436
1437 dev_dbg(&cldev->dev, "adding %pUL:%02X\n",
1438 mei_me_cl_uuid(cldev->me_cl),
1439 mei_me_cl_ver(cldev->me_cl));
1440 ret = device_add(&cldev->dev);
1441 if (!ret)
1442 cldev->is_added = 1;
1443
1444 return ret;
1445 }
1446
1447 /**
1448 * mei_cl_bus_dev_stop - stop the driver
1449 *
1450 * @cldev: me client device
1451 */
mei_cl_bus_dev_stop(struct mei_cl_device * cldev)1452 static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
1453 {
1454 cldev->do_match = 0;
1455 if (cldev->is_added)
1456 device_release_driver(&cldev->dev);
1457 }
1458
/**
 * mei_cl_bus_dev_destroy - destroy me client device object
 *
 * @cldev: me client device
 *
 * Locking: called under "dev->cl_bus_lock" lock
 */
static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
{

	WARN_ON(!mutex_is_locked(&cldev->bus->cl_bus_lock));

	/* nothing to undo unless device_add() succeeded */
	if (!cldev->is_added)
		return;

	device_del(&cldev->dev);

	list_del_init(&cldev->bus_list);

	cldev->is_added = 0;
	/* drop the reference held by the bus device list */
	put_device(&cldev->dev);
}
1481
/**
 * mei_cl_bus_remove_device - remove a device from the bus
 *
 * @cldev: me client device
 */
static void mei_cl_bus_remove_device(struct mei_cl_device *cldev)
{
	/* unbind the driver first, then tear down the device */
	mei_cl_bus_dev_stop(cldev);
	mei_cl_bus_dev_destroy(cldev);
}
1492
/**
 * mei_cl_bus_remove_devices - remove all devices from the bus
 *
 * @bus: mei device
 */
void mei_cl_bus_remove_devices(struct mei_device *bus)
{
	struct mei_cl_device *cldev, *next;

	mutex_lock(&bus->cl_bus_lock);
	/* safe iteration: mei_cl_bus_remove_device() may unlink entries */
	list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
		mei_cl_bus_remove_device(cldev);
	mutex_unlock(&bus->cl_bus_lock);
}
1507
1508
/**
 * mei_cl_bus_dev_init - allocate and initialize a mei client device
 *	based on the me client
 *
 * @bus: mei device
 * @me_cl: me client
 *
 * Locking: called under "dev->cl_bus_lock" lock
 */
static void mei_cl_bus_dev_init(struct mei_device *bus,
				struct mei_me_client *me_cl)
{
	struct mei_cl_device *cldev;

	WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));

	dev_dbg(&bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));

	/* each me client gets at most one bus device */
	if (me_cl->bus_added)
		return;

	cldev = mei_cl_bus_dev_alloc(bus, me_cl);
	if (!cldev)
		return;

	me_cl->bus_added = true;
	list_add_tail(&cldev->bus_list, &bus->device_list);

}
1538
/**
 * mei_cl_bus_rescan - scan the me clients list and create
 *	devices for eligible clients
 *
 * @bus: mei device
 */
mei_cl_bus_rescan(struct mei_device * bus)1545 static void mei_cl_bus_rescan(struct mei_device *bus)
1546 {
1547 struct mei_cl_device *cldev, *n;
1548 struct mei_me_client *me_cl;
1549
1550 mutex_lock(&bus->cl_bus_lock);
1551
1552 down_read(&bus->me_clients_rwsem);
1553 list_for_each_entry(me_cl, &bus->me_clients, list)
1554 mei_cl_bus_dev_init(bus, me_cl);
1555 up_read(&bus->me_clients_rwsem);
1556
1557 list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {
1558
1559 if (!mei_me_cl_is_active(cldev->me_cl)) {
1560 mei_cl_bus_remove_device(cldev);
1561 continue;
1562 }
1563
1564 if (cldev->is_added)
1565 continue;
1566
1567 if (mei_cl_bus_dev_setup(bus, cldev))
1568 mei_cl_bus_dev_add(cldev);
1569 else {
1570 list_del_init(&cldev->bus_list);
1571 put_device(&cldev->dev);
1572 }
1573 }
1574 mutex_unlock(&bus->cl_bus_lock);
1575
1576 dev_dbg(&bus->dev, "rescan end");
1577 }
1578
mei_cl_bus_rescan_work(struct work_struct * work)1579 void mei_cl_bus_rescan_work(struct work_struct *work)
1580 {
1581 struct mei_device *bus =
1582 container_of(work, struct mei_device, bus_rescan_work);
1583
1584 mei_cl_bus_rescan(bus);
1585 }
1586
__mei_cldev_driver_register(struct mei_cl_driver * cldrv,struct module * owner)1587 int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
1588 struct module *owner)
1589 {
1590 int err;
1591
1592 cldrv->driver.name = cldrv->name;
1593 cldrv->driver.owner = owner;
1594 cldrv->driver.bus = &mei_cl_bus_type;
1595
1596 err = driver_register(&cldrv->driver);
1597 if (err)
1598 return err;
1599
1600 pr_debug("mei: driver [%s] registered\n", cldrv->driver.name);
1601
1602 return 0;
1603 }
1604 EXPORT_SYMBOL_GPL(__mei_cldev_driver_register);
1605
/* unhook a client driver from the mei bus */
void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv)
{
	driver_unregister(&cldrv->driver);

	pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name);
}
EXPORT_SYMBOL_GPL(mei_cldev_driver_unregister);
1613
1614
/* register the mei client bus with the driver core */
int __init mei_cl_bus_init(void)
{
	return bus_register(&mei_cl_bus_type);
}
1619
/* unregister the mei client bus from the driver core */
void __exit mei_cl_bus_exit(void)
{
	bus_unregister(&mei_cl_bus_type);
}
1624