1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
4 * Intel Management Engine Interface (Intel MEI) Linux driver
5 */
6
7 #include <linux/sched/signal.h>
8 #include <linux/wait.h>
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/pm_runtime.h>
12 #include <linux/dma-mapping.h>
13
14 #include <linux/mei.h>
15
16 #include "mei_dev.h"
17 #include "hbm.h"
18 #include "client.h"
19
20 /**
21 * mei_me_cl_init - initialize me client
22 *
23 * @me_cl: me client
24 */
25 void mei_me_cl_init(struct mei_me_client *me_cl)
26 {
27 INIT_LIST_HEAD(&me_cl->list);
28 kref_init(&me_cl->refcnt);
29 }
30
31 /**
32 * mei_me_cl_get - increases me client refcount
33 *
34 * @me_cl: me client
35 *
36 * Locking: called under "dev->device_lock" lock
37 *
38 * Return: me client or NULL
39 */
40 struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
41 {
42 if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
43 return me_cl;
44
45 return NULL;
46 }
47
48 /**
49 * mei_me_cl_release - free me client
50 *
51 * @ref: me_client refcount
52 *
53 * Locking: called under "dev->device_lock" lock
54 */
55 static void mei_me_cl_release(struct kref *ref)
56 {
57 struct mei_me_client *me_cl =
58 container_of(ref, struct mei_me_client, refcnt);
59
60 kfree(me_cl);
61 }
62
63 /**
64 * mei_me_cl_put - decrease me client refcount and free client if necessary
65 *
66 * @me_cl: me client
67 *
68 * Locking: called under "dev->device_lock" lock
69 */
70 void mei_me_cl_put(struct mei_me_client *me_cl)
71 {
72 if (me_cl)
73 kref_put(&me_cl->refcnt, mei_me_cl_release);
74 }
75
76 /**
77 * __mei_me_cl_del - delete me client from the list and decrease
78 * reference counter
79 *
80 * @dev: mei device
81 * @me_cl: me client
82 *
83 * Locking: dev->me_clients_rwsem
84 */
85 static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
86 {
87 if (!me_cl)
88 return;
89
90 list_del_init(&me_cl->list);
91 mei_me_cl_put(me_cl);
92 }
93
94 /**
95 * mei_me_cl_del - delete me client from the list and decrease
96 * reference counter
97 *
98 * @dev: mei device
99 * @me_cl: me client
100 */
101 void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
102 {
103 down_write(&dev->me_clients_rwsem);
104 __mei_me_cl_del(dev, me_cl);
105 up_write(&dev->me_clients_rwsem);
106 }
107
108 /**
109 * mei_me_cl_add - add me client to the list
110 *
111 * @dev: mei device
112 * @me_cl: me client
113 */
114 void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
115 {
116 down_write(&dev->me_clients_rwsem);
117 list_add(&me_cl->list, &dev->me_clients);
118 up_write(&dev->me_clients_rwsem);
119 }
120
121 /**
122 * __mei_me_cl_by_uuid - locate me client by uuid
123 * increases ref count
124 *
125 * @dev: mei device
126 * @uuid: me client uuid
127 *
128 * Return: me client or NULL if not found
129 *
130 * Locking: dev->me_clients_rwsem
131 */
132 static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
133 const uuid_le *uuid)
134 {
135 struct mei_me_client *me_cl;
136 const uuid_le *pn;
137
138 WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
139
140 list_for_each_entry(me_cl, &dev->me_clients, list) {
141 pn = &me_cl->props.protocol_name;
142 if (uuid_le_cmp(*uuid, *pn) == 0)
143 return mei_me_cl_get(me_cl);
144 }
145
146 return NULL;
147 }
148
149 /**
150 * mei_me_cl_by_uuid - locate me client by uuid
151 * increases ref count
152 *
153 * @dev: mei device
154 * @uuid: me client uuid
155 *
156 * Return: me client or NULL if not found
157 *
158 * Locking: dev->me_clients_rwsem
159 */
160 struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
161 const uuid_le *uuid)
162 {
163 struct mei_me_client *me_cl;
164
165 down_read(&dev->me_clients_rwsem);
166 me_cl = __mei_me_cl_by_uuid(dev, uuid);
167 up_read(&dev->me_clients_rwsem);
168
169 return me_cl;
170 }
171
172 /**
173 * mei_me_cl_by_id - locate me client by client id
174 * increases ref count
175 *
176 * @dev: the device structure
177 * @client_id: me client id
178 *
179 * Return: me client or NULL if not found
180 *
181 * Locking: dev->me_clients_rwsem
182 */
183 struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
184 {
185
186 struct mei_me_client *__me_cl, *me_cl = NULL;
187
188 down_read(&dev->me_clients_rwsem);
189 list_for_each_entry(__me_cl, &dev->me_clients, list) {
190 if (__me_cl->client_id == client_id) {
191 me_cl = mei_me_cl_get(__me_cl);
192 break;
193 }
194 }
195 up_read(&dev->me_clients_rwsem);
196
197 return me_cl;
198 }
199
200 /**
201 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
202 * increases ref count
203 *
204 * @dev: the device structure
205 * @uuid: me client uuid
206 * @client_id: me client id
207 *
208 * Return: me client or null if not found
209 *
210 * Locking: dev->me_clients_rwsem
211 */
212 static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
213 const uuid_le *uuid, u8 client_id)
214 {
215 struct mei_me_client *me_cl;
216 const uuid_le *pn;
217
218 WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
219
220 list_for_each_entry(me_cl, &dev->me_clients, list) {
221 pn = &me_cl->props.protocol_name;
222 if (uuid_le_cmp(*uuid, *pn) == 0 &&
223 me_cl->client_id == client_id)
224 return mei_me_cl_get(me_cl);
225 }
226
227 return NULL;
228 }
229
230
231 /**
232 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
233 * increases ref count
234 *
235 * @dev: the device structure
236 * @uuid: me client uuid
237 * @client_id: me client id
238 *
239 * Return: me client or null if not found
240 */
241 struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
242 const uuid_le *uuid, u8 client_id)
243 {
244 struct mei_me_client *me_cl;
245
246 down_read(&dev->me_clients_rwsem);
247 me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
248 up_read(&dev->me_clients_rwsem);
249
250 return me_cl;
251 }
252
253 /**
254 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
255 *
256 * @dev: the device structure
257 * @uuid: me client uuid
258 *
259 * Locking: called under "dev->device_lock" lock
260 */
261 void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
262 {
263 struct mei_me_client *me_cl;
264
265 dev_dbg(&dev->dev, "remove %pUl\n", uuid);
266
267 down_write(&dev->me_clients_rwsem);
268 me_cl = __mei_me_cl_by_uuid(dev, uuid);
269 __mei_me_cl_del(dev, me_cl);
270 mei_me_cl_put(me_cl);
271 up_write(&dev->me_clients_rwsem);
272 }
273
274 /**
275 * mei_me_cl_rm_all - remove all me clients
276 *
277 * @dev: the device structure
278 *
279 * Locking: called under "dev->device_lock" lock
280 */
281 void mei_me_cl_rm_all(struct mei_device *dev)
282 {
283 struct mei_me_client *me_cl, *next;
284
285 down_write(&dev->me_clients_rwsem);
286 list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
287 __mei_me_cl_del(dev, me_cl);
288 up_write(&dev->me_clients_rwsem);
289 }
290
291 /**
292 * mei_io_cb_free - free mei_cb_private related memory
293 *
294 * @cb: mei callback struct
295 */
296 void mei_io_cb_free(struct mei_cl_cb *cb)
297 {
298 if (cb == NULL)
299 return;
300
301 list_del(&cb->list);
302 kvfree(cb->buf.data);
303 kfree(cb->ext_hdr);
304 kfree(cb);
305 }
306
307 /**
308 * mei_tx_cb_enqueue - queue tx callback
309 *
310 * @cb: mei callback struct
311 * @head: an instance of list to queue on
312 *
313 * Locking: called under "dev->device_lock" lock
314 */
315 static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
316 struct list_head *head)
317 {
318 list_add_tail(&cb->list, head);
319 cb->cl->tx_cb_queued++;
320 }
321
322 /**
323 * mei_tx_cb_dequeue - dequeue tx callback
324 *
325 * @cb: mei callback struct to dequeue and free
326 *
327 * Locking: called under "dev->device_lock" lock
328 */
329 static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
330 {
331 if (!WARN_ON(cb->cl->tx_cb_queued == 0))
332 cb->cl->tx_cb_queued--;
333
334 mei_io_cb_free(cb);
335 }
336
337 /**
338 * mei_cl_set_read_by_fp - set pending_read flag to vtag struct for given fp
339 *
340 * @cl: mei client
341 * @fp: pointer to file structure
342 *
343 * Locking: called under "dev->device_lock" lock
344 */
345 static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
346 const struct file *fp)
347 {
348 struct mei_cl_vtag *cl_vtag;
349
350 list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
351 if (cl_vtag->fp == fp) {
352 cl_vtag->pending_read = true;
353 return;
354 }
355 }
356 }
357
358 /**
359 * mei_io_cb_init - allocate and initialize io callback
360 *
361 * @cl: mei client
362 * @type: operation type
363 * @fp: pointer to file structure
364 *
365 * Return: mei_cl_cb pointer or NULL;
366 */
367 static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
368 enum mei_cb_file_ops type,
369 const struct file *fp)
370 {
371 struct mei_cl_cb *cb;
372
373 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
374 if (!cb)
375 return NULL;
376
377 INIT_LIST_HEAD(&cb->list);
378 cb->fp = fp;
379 cb->cl = cl;
380 cb->buf_idx = 0;
381 cb->fop_type = type;
382 cb->vtag = 0;
383 cb->ext_hdr = NULL;
384
385 return cb;
386 }
387
388 /**
389 * mei_io_list_flush_cl - removes cbs belonging to the cl.
390 *
391 * @head: an instance of our list structure
392 * @cl: host client
393 */
394 static void mei_io_list_flush_cl(struct list_head *head,
395 const struct mei_cl *cl)
396 {
397 struct mei_cl_cb *cb, *next;
398
399 list_for_each_entry_safe(cb, next, head, list) {
400 if (cl == cb->cl) {
401 list_del_init(&cb->list);
402 if (cb->fop_type == MEI_FOP_READ)
403 mei_io_cb_free(cb);
404 }
405 }
406 }
407
408 /**
409 * mei_io_tx_list_free_cl - removes cb belonging to the cl and free them
410 *
411 * @head: An instance of our list structure
412 * @cl: host client
413 * @fp: file pointer (matching cb file object), may be NULL
414 */
415 static void mei_io_tx_list_free_cl(struct list_head *head,
416 const struct mei_cl *cl,
417 const struct file *fp)
418 {
419 struct mei_cl_cb *cb, *next;
420
421 list_for_each_entry_safe(cb, next, head, list) {
422 if (cl == cb->cl && (!fp || fp == cb->fp))
423 mei_tx_cb_dequeue(cb);
424 }
425 }
426
427 /**
428 * mei_io_list_free_fp - free cb from a list that matches file pointer
429 *
430 * @head: io list
431 * @fp: file pointer (matching cb file object), may be NULL
432 */
433 static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
434 {
435 struct mei_cl_cb *cb, *next;
436
437 list_for_each_entry_safe(cb, next, head, list)
438 if (!fp || fp == cb->fp)
439 mei_io_cb_free(cb);
440 }
441
442 /**
443 * mei_cl_free_pending - free pending cb
444 *
445 * @cl: host client
446 */
447 static void mei_cl_free_pending(struct mei_cl *cl)
448 {
449 struct mei_cl_cb *cb;
450
451 cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
452 mei_io_cb_free(cb);
453 }
454
455 /**
456 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
457 *
458 * @cl: host client
459 * @length: size of the buffer
460 * @fop_type: operation type
461 * @fp: associated file pointer (might be NULL)
462 *
463 * Return: cb on success and NULL on failure
464 */
465 struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
466 enum mei_cb_file_ops fop_type,
467 const struct file *fp)
468 {
469 struct mei_cl_cb *cb;
470
471 cb = mei_io_cb_init(cl, fop_type, fp);
472 if (!cb)
473 return NULL;
474
475 if (length == 0)
476 return cb;
477
478 cb->buf.data = kvmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
479 if (!cb->buf.data) {
480 mei_io_cb_free(cb);
481 return NULL;
482 }
483 cb->buf.size = length;
484
485 return cb;
486 }
487
488 /**
489 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
490 * and enqueuing of the control commands cb
491 *
492 * @cl: host client
493 * @length: size of the buffer
494 * @fop_type: operation type
495 * @fp: associated file pointer (might be NULL)
496 *
497 * Return: cb on success and NULL on failure
498 * Locking: called under "dev->device_lock" lock
499 */
500 struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
501 enum mei_cb_file_ops fop_type,
502 const struct file *fp)
503 {
504 struct mei_cl_cb *cb;
505
506 /* for RX always allocate at least client's mtu */
507 if (length)
508 length = max_t(size_t, length, mei_cl_mtu(cl));
509
510 cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
511 if (!cb)
512 return NULL;
513
514 list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
515 return cb;
516 }
517
518 /**
519 * mei_cl_read_cb - find this cl's callback in the read list
520 * for a specific file
521 *
522 * @cl: host client
523 * @fp: file pointer (matching cb file object), may be NULL
524 *
525 * Return: cb on success, NULL if cb is not found
526 */
527 struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp)
528 {
529 struct mei_cl_cb *cb;
530 struct mei_cl_cb *ret_cb = NULL;
531
532 spin_lock(&cl->rd_completed_lock);
533 list_for_each_entry(cb, &cl->rd_completed, list)
534 if (!fp || fp == cb->fp) {
535 ret_cb = cb;
536 break;
537 }
538 spin_unlock(&cl->rd_completed_lock);
539 return ret_cb;
540 }
541
542 /**
543 * mei_cl_flush_queues - flushes queue lists belonging to cl.
544 *
545 * @cl: host client
546 * @fp: file pointer (matching cb file object), may be NULL
547 *
548 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
549 */
550 int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
551 {
552 struct mei_device *dev;
553
554 if (WARN_ON(!cl || !cl->dev))
555 return -EINVAL;
556
557 dev = cl->dev;
558
559 cl_dbg(dev, cl, "remove list entry belonging to cl\n");
560 mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp);
561 mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp);
562 /* free pending and control cb only in final flush */
563 if (!fp) {
564 mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
565 mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
566 mei_cl_free_pending(cl);
567 }
568 spin_lock(&cl->rd_completed_lock);
569 mei_io_list_free_fp(&cl->rd_completed, fp);
570 spin_unlock(&cl->rd_completed_lock);
571
572 return 0;
573 }
574
575 /**
576 * mei_cl_init - initializes cl.
577 *
578 * @cl: host client to be initialized
579 * @dev: mei device
580 */
581 static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
582 {
583 memset(cl, 0, sizeof(*cl));
584 init_waitqueue_head(&cl->wait);
585 init_waitqueue_head(&cl->rx_wait);
586 init_waitqueue_head(&cl->tx_wait);
587 init_waitqueue_head(&cl->ev_wait);
588 INIT_LIST_HEAD(&cl->vtag_map);
589 spin_lock_init(&cl->rd_completed_lock);
590 INIT_LIST_HEAD(&cl->rd_completed);
591 INIT_LIST_HEAD(&cl->rd_pending);
592 INIT_LIST_HEAD(&cl->link);
593 cl->writing_state = MEI_IDLE;
594 cl->state = MEI_FILE_UNINITIALIZED;
595 cl->dev = dev;
596 }
597
598 /**
599 * mei_cl_allocate - allocates cl structure and sets it up.
600 *
601 * @dev: mei device
602 * Return: The allocated host client or NULL on failure
603 */
604 struct mei_cl *mei_cl_allocate(struct mei_device *dev)
605 {
606 struct mei_cl *cl;
607
608 cl = kmalloc(sizeof(*cl), GFP_KERNEL);
609 if (!cl)
610 return NULL;
611
612 mei_cl_init(cl, dev);
613
614 return cl;
615 }
616
617 /**
618 * mei_cl_link - allocate host id in the host map
619 *
620 * @cl: host client
621 *
622 * Return: 0 on success
623 * -EINVAL on incorrect values
624 * -EMFILE if open count exceeded.
625 */
626 int mei_cl_link(struct mei_cl *cl)
627 {
628 struct mei_device *dev;
629 int id;
630
631 if (WARN_ON(!cl || !cl->dev))
632 return -EINVAL;
633
634 dev = cl->dev;
635
636 id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
637 if (id >= MEI_CLIENTS_MAX) {
638 dev_err(&dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
639 return -EMFILE;
640 }
641
642 if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
643 dev_err(&dev->dev, "open_handle_count exceeded %d",
644 MEI_MAX_OPEN_HANDLE_COUNT);
645 return -EMFILE;
646 }
647
648 dev->open_handle_count++;
649
650 cl->host_client_id = id;
651 list_add_tail(&cl->link, &dev->file_list);
652
653 set_bit(id, dev->host_clients_map);
654
655 cl->state = MEI_FILE_INITIALIZING;
656
657 cl_dbg(dev, cl, "link cl\n");
658 return 0;
659 }
660
661 /**
662 * mei_cl_unlink - remove host client from the list
663 *
664 * @cl: host client
665 *
666 * Return: always 0
667 */
668 int mei_cl_unlink(struct mei_cl *cl)
669 {
670 struct mei_device *dev;
671
672 /* don't shout on error exit path */
673 if (!cl)
674 return 0;
675
676 if (WARN_ON(!cl->dev))
677 return 0;
678
679 dev = cl->dev;
680
681 cl_dbg(dev, cl, "unlink client");
682
683 if (cl->state == MEI_FILE_UNINITIALIZED)
684 return 0;
685
686 if (dev->open_handle_count > 0)
687 dev->open_handle_count--;
688
689 /* never clear the 0 bit */
690 if (cl->host_client_id)
691 clear_bit(cl->host_client_id, dev->host_clients_map);
692
693 list_del_init(&cl->link);
694
695 cl->state = MEI_FILE_UNINITIALIZED;
696 cl->writing_state = MEI_IDLE;
697
698 WARN_ON(!list_empty(&cl->rd_completed) ||
699 !list_empty(&cl->rd_pending) ||
700 !list_empty(&cl->link));
701
702 return 0;
703 }
704
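/**
 * mei_host_client_init - set the device to the enabled state and
 *	kick off bus rescan and runtime autosuspend
 *
 * @dev: the device structure
 */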
705 void mei_host_client_init(struct mei_device *dev)
706 {
707 mei_set_devstate(dev, MEI_DEV_ENABLED);
708 dev->reset_count = 0;
709
710 schedule_work(&dev->bus_rescan_work);
711
712 dev_dbg(&dev->dev, "rpm: autosuspend\n");
713 pm_request_autosuspend(dev->parent);
714 }
715
716 /**
717 * mei_hbuf_acquire - try to acquire host buffer
718 *
719 * @dev: the device structure
720 * Return: true if host buffer was acquired
721 */
722 bool mei_hbuf_acquire(struct mei_device *dev)
723 {
724 if (mei_pg_state(dev) == MEI_PG_ON ||
725 mei_pg_in_transition(dev)) {
726 dev_dbg(&dev->dev, "device is in pg\n");
727 return false;
728 }
729
730 if (!dev->hbuf_is_ready) {
731 dev_dbg(&dev->dev, "hbuf is not ready\n");
732 return false;
733 }
734
735 dev->hbuf_is_ready = false;
736
737 return true;
738 }
739
740 /**
741 * mei_cl_wake_all - wake up readers, writers and event waiters so
742 * they can be interrupted
743 *
744 * @cl: host client
745 */
746 static void mei_cl_wake_all(struct mei_cl *cl)
747 {
748 struct mei_device *dev = cl->dev;
749
750 /* synchronized under device mutex */
751 if (waitqueue_active(&cl->rx_wait)) {
752 cl_dbg(dev, cl, "Waking up reading client!\n");
753 wake_up_interruptible(&cl->rx_wait);
754 }
755 /* synchronized under device mutex */
756 if (waitqueue_active(&cl->tx_wait)) {
757 cl_dbg(dev, cl, "Waking up writing client!\n");
758 wake_up_interruptible(&cl->tx_wait);
759 }
760 /* synchronized under device mutex */
761 if (waitqueue_active(&cl->ev_wait)) {
762 cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
763 wake_up_interruptible(&cl->ev_wait);
764 }
765 /* synchronized under device mutex */
766 if (waitqueue_active(&cl->wait)) {
767 cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
768 wake_up(&cl->wait);
769 }
770 }
771
772 /**
773 * mei_cl_set_disconnected - set disconnected state and clear
774 * associated states and resources
775 *
776 * @cl: host client
777 */
778 static void mei_cl_set_disconnected(struct mei_cl *cl)
779 {
780 struct mei_device *dev = cl->dev;
781
782 if (cl->state == MEI_FILE_DISCONNECTED ||
783 cl->state <= MEI_FILE_INITIALIZING)
784 return;
785
786 cl->state = MEI_FILE_DISCONNECTED;
787 mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
788 mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
789 mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
790 mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
791 mei_cl_wake_all(cl);
792 cl->rx_flow_ctrl_creds = 0;
793 cl->tx_flow_ctrl_creds = 0;
794 cl->timer_count = 0;
795
796 if (!cl->me_cl)
797 return;
798
799 if (!WARN_ON(cl->me_cl->connect_count == 0))
800 cl->me_cl->connect_count--;
801
802 if (cl->me_cl->connect_count == 0)
803 cl->me_cl->tx_flow_ctrl_creds = 0;
804
805 mei_me_cl_put(cl->me_cl);
806 cl->me_cl = NULL;
807 }
808
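/**
 * mei_cl_set_connecting - bind the host client to an me client and
 *	move it to the connecting state
 *
 * @cl: host client
 * @me_cl: me client
 *
 * Return: 0 on success, -ENOENT if the me client reference cannot be
 *	taken, -EBUSY if a fixed address client is already connected
 */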
809 static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
810 {
811 if (!mei_me_cl_get(me_cl))
812 return -ENOENT;
813
814 /* only one connection is allowed for fixed address clients */
815 if (me_cl->props.fixed_address) {
816 if (me_cl->connect_count) {
817 mei_me_cl_put(me_cl);
818 return -EBUSY;
819 }
820 }
821
822 cl->me_cl = me_cl;
823 cl->state = MEI_FILE_CONNECTING;
824 cl->me_cl->connect_count++;
825
826 return 0;
827 }
828
829 /**
830 * mei_cl_send_disconnect - send disconnect request
831 *
832 * @cl: host client
833 * @cb: callback block
834 *
835 * Return: 0, OK; otherwise, error.
836 */
837 static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
838 {
839 struct mei_device *dev;
840 int ret;
841
842 dev = cl->dev;
843
844 ret = mei_hbm_cl_disconnect_req(dev, cl);
845 cl->status = ret;
846 if (ret) {
847 cl->state = MEI_FILE_DISCONNECT_REPLY;
848 return ret;
849 }
850
851 list_move_tail(&cb->list, &dev->ctrl_rd_list);
852 cl->timer_count = dev->timeouts.connect;
853 mei_schedule_stall_timer(dev);
854
855 return 0;
856 }
857
858 /**
859 * mei_cl_irq_disconnect - processes close related operation from
860 * interrupt thread context - send disconnect request
861 *
862 * @cl: client
863 * @cb: callback block.
864 * @cmpl_list: complete list.
865 *
866 * Return: 0, OK; otherwise, error.
867 */
868 int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
869 struct list_head *cmpl_list)
870 {
871 struct mei_device *dev = cl->dev;
872 u32 msg_slots;
873 int slots;
874 int ret;
875
876 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
877 slots = mei_hbuf_empty_slots(dev);
878 if (slots < 0)
879 return -EOVERFLOW;
880
881 if ((u32)slots < msg_slots)
882 return -EMSGSIZE;
883
884 ret = mei_cl_send_disconnect(cl, cb);
885 if (ret)
886 list_move_tail(&cb->list, cmpl_list);
887
888 return ret;
889 }
890
891 /**
892 * __mei_cl_disconnect - disconnect host client from the me one
893 * internal function runtime pm has to be already acquired
894 *
895 * @cl: host client
896 *
897 * Return: 0 on success, <0 on failure.
898 */
899 static int __mei_cl_disconnect(struct mei_cl *cl)
900 {
901 struct mei_device *dev;
902 struct mei_cl_cb *cb;
903 int rets;
904
905 dev = cl->dev;
906
907 cl->state = MEI_FILE_DISCONNECTING;
908
909 cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
910 if (!cb) {
911 rets = -ENOMEM;
912 goto out;
913 }
914
915 if (mei_hbuf_acquire(dev)) {
916 rets = mei_cl_send_disconnect(cl, cb);
917 if (rets) {
918 cl_err(dev, cl, "failed to disconnect.\n");
919 goto out;
920 }
921 }
922
923 mutex_unlock(&dev->device_lock);
924 wait_event_timeout(cl->wait,
925 cl->state == MEI_FILE_DISCONNECT_REPLY ||
926 cl->state == MEI_FILE_DISCONNECTED,
927 dev->timeouts.cl_connect);
928 mutex_lock(&dev->device_lock);
929
930 rets = cl->status;
931 if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
932 cl->state != MEI_FILE_DISCONNECTED) {
933 cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
934 rets = -ETIME;
935 }
936
937 out:
938 /* we disconnect also on error */
939 mei_cl_set_disconnected(cl);
940 if (!rets)
941 cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
942
943 mei_io_cb_free(cb);
944 return rets;
945 }
946
947 /**
948 * mei_cl_disconnect - disconnect host client from the me one
949 *
950 * @cl: host client
951 *
952 * Locking: called under "dev->device_lock" lock
953 *
954 * Return: 0 on success, <0 on failure.
955 */
956 int mei_cl_disconnect(struct mei_cl *cl)
957 {
958 struct mei_device *dev;
959 int rets;
960
961 if (WARN_ON(!cl || !cl->dev))
962 return -ENODEV;
963
964 dev = cl->dev;
965
966 cl_dbg(dev, cl, "disconnecting");
967
968 if (!mei_cl_is_connected(cl))
969 return 0;
970
971 if (mei_cl_is_fixed_address(cl)) {
972 mei_cl_set_disconnected(cl);
973 return 0;
974 }
975
976 if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
977 dev->dev_state == MEI_DEV_POWER_DOWN) {
978 cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
979 mei_cl_set_disconnected(cl);
980 return 0;
981 }
982
983 rets = pm_runtime_get(dev->parent);
984 if (rets < 0 && rets != -EINPROGRESS) {
985 pm_runtime_put_noidle(dev->parent);
986 cl_err(dev, cl, "rpm: get failed %d\n", rets);
987 return rets;
988 }
989
990 rets = __mei_cl_disconnect(cl);
991
992 cl_dbg(dev, cl, "rpm: autosuspend\n");
993 pm_runtime_put_autosuspend(dev->parent);
994
995 return rets;
996 }
997
998
999 /**
1000 * mei_cl_is_other_connecting - checks if other
1001 * client with the same me client id is connecting
1002 *
1003 * @cl: private data of the file object
1004 *
1005 * Return: true if another client with the same me client id is connecting, false otherwise.
1006 */
1007 static bool mei_cl_is_other_connecting(struct mei_cl *cl)
1008 {
1009 struct mei_device *dev;
1010 struct mei_cl_cb *cb;
1011
1012 dev = cl->dev;
1013
1014 list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
1015 if (cb->fop_type == MEI_FOP_CONNECT &&
1016 mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
1017 return true;
1018 }
1019
1020 return false;
1021 }
1022
1023 /**
1024 * mei_cl_send_connect - send connect request
1025 *
1026 * @cl: host client
1027 * @cb: callback block
1028 *
1029 * Return: 0, OK; otherwise, error.
1030 */
1031 static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
1032 {
1033 struct mei_device *dev;
1034 int ret;
1035
1036 dev = cl->dev;
1037
1038 ret = mei_hbm_cl_connect_req(dev, cl);
1039 cl->status = ret;
1040 if (ret) {
1041 cl->state = MEI_FILE_DISCONNECT_REPLY;
1042 return ret;
1043 }
1044
1045 list_move_tail(&cb->list, &dev->ctrl_rd_list);
1046 cl->timer_count = dev->timeouts.connect;
1047 mei_schedule_stall_timer(dev);
1048 return 0;
1049 }
1050
1051 /**
1052 * mei_cl_irq_connect - send connect request in irq_thread context
1053 *
1054 * @cl: host client
1055 * @cb: callback block
1056 * @cmpl_list: complete list
1057 *
1058 * Return: 0, OK; otherwise, error.
1059 */
1060 int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
1061 struct list_head *cmpl_list)
1062 {
1063 struct mei_device *dev = cl->dev;
1064 u32 msg_slots;
1065 int slots;
1066 int rets;
1067
1068 if (mei_cl_is_other_connecting(cl))
1069 return 0;
1070
1071 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
1072 slots = mei_hbuf_empty_slots(dev);
1073 if (slots < 0)
1074 return -EOVERFLOW;
1075
1076 if ((u32)slots < msg_slots)
1077 return -EMSGSIZE;
1078
1079 rets = mei_cl_send_connect(cl, cb);
1080 if (rets)
1081 list_move_tail(&cb->list, cmpl_list);
1082
1083 return rets;
1084 }
1085
1086 /**
1087 * mei_cl_connect - connect host client to the me one
1088 *
1089 * @cl: host client
1090 * @me_cl: me client
1091 * @fp: pointer to file structure
1092 *
1093 * Locking: called under "dev->device_lock" lock
1094 *
1095 * Return: 0 on success, <0 on failure.
1096 */
1097 int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
1098 const struct file *fp)
1099 {
1100 struct mei_device *dev;
1101 struct mei_cl_cb *cb;
1102 int rets;
1103
1104 if (WARN_ON(!cl || !cl->dev || !me_cl))
1105 return -ENODEV;
1106
1107 dev = cl->dev;
1108
1109 rets = mei_cl_set_connecting(cl, me_cl);
1110 if (rets)
1111 goto nortpm;
1112
1113 if (mei_cl_is_fixed_address(cl)) {
1114 cl->state = MEI_FILE_CONNECTED;
1115 rets = 0;
1116 goto nortpm;
1117 }
1118
1119 rets = pm_runtime_get(dev->parent);
1120 if (rets < 0 && rets != -EINPROGRESS) {
1121 pm_runtime_put_noidle(dev->parent);
1122 cl_err(dev, cl, "rpm: get failed %d\n", rets);
1123 goto nortpm;
1124 }
1125
1126 cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
1127 if (!cb) {
1128 rets = -ENOMEM;
1129 goto out;
1130 }
1131
1132 /* run hbuf acquire last so we don't have to undo */
1133 if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
1134 rets = mei_cl_send_connect(cl, cb);
1135 if (rets)
1136 goto out;
1137 }
1138
1139 mutex_unlock(&dev->device_lock);
1140 wait_event_timeout(cl->wait,
1141 (cl->state == MEI_FILE_CONNECTED ||
1142 cl->state == MEI_FILE_DISCONNECTED ||
1143 cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
1144 cl->state == MEI_FILE_DISCONNECT_REPLY),
1145 dev->timeouts.cl_connect);
1146 mutex_lock(&dev->device_lock);
1147
1148 if (!mei_cl_is_connected(cl)) {
1149 if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
1150 mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
1151 mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
1152 /* ignore disconnect return value;
1153 * in case of failure reset will be invoked
1154 */
1155 __mei_cl_disconnect(cl);
1156 rets = -EFAULT;
1157 goto out;
1158 }
1159
1160 /* timeout or something went really wrong */
1161 if (!cl->status)
1162 cl->status = -EFAULT;
1163 }
1164
1165 rets = cl->status;
1166 out:
1167 cl_dbg(dev, cl, "rpm: autosuspend\n");
1168 pm_runtime_put_autosuspend(dev->parent);
1169
1170 mei_io_cb_free(cb);
1171
1172 nortpm:
1173 if (!mei_cl_is_connected(cl))
1174 mei_cl_set_disconnected(cl);
1175
1176 return rets;
1177 }
1178
1179 /**
1180 * mei_cl_alloc_linked - allocate and link host client
1181 *
1182 * @dev: the device structure
1183 *
1184 * Return: cl on success, ERR_PTR on failure
1185 */
1186 struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
1187 {
1188 struct mei_cl *cl;
1189 int ret;
1190
1191 cl = mei_cl_allocate(dev);
1192 if (!cl) {
1193 ret = -ENOMEM;
1194 goto err;
1195 }
1196
1197 ret = mei_cl_link(cl);
1198 if (ret)
1199 goto err;
1200
1201 return cl;
1202 err:
1203 kfree(cl);
1204 return ERR_PTR(ret);
1205 }
1206
1207 /**
1208 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
1209 *
1210 * @cl: host client
1211 *
1212 * Return: 1 if tx_flow_ctrl_creds >0, 0 - otherwise.
1213 */
1214 static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
1215 {
1216 if (WARN_ON(!cl || !cl->me_cl))
1217 return -EINVAL;
1218
1219 if (cl->tx_flow_ctrl_creds > 0)
1220 return 1;
1221
1222 if (mei_cl_is_fixed_address(cl))
1223 return 1;
1224
1225 if (mei_cl_is_single_recv_buf(cl)) {
1226 if (cl->me_cl->tx_flow_ctrl_creds > 0)
1227 return 1;
1228 }
1229 return 0;
1230 }
1231
1232 /**
1233 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
1234 * for a client
1235 *
1236 * @cl: host client
1237 *
1238 * Return:
1239 * 0 on success
1240 * -EINVAL when ctrl credits are <= 0
1241 */
1242 static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
1243 {
1244 if (WARN_ON(!cl || !cl->me_cl))
1245 return -EINVAL;
1246
1247 if (mei_cl_is_fixed_address(cl))
1248 return 0;
1249
1250 if (mei_cl_is_single_recv_buf(cl)) {
1251 if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
1252 return -EINVAL;
1253 cl->me_cl->tx_flow_ctrl_creds--;
1254 } else {
1255 if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
1256 return -EINVAL;
1257 cl->tx_flow_ctrl_creds--;
1258 }
1259 return 0;
1260 }
1261
1262 /**
1263 * mei_cl_vtag_alloc - allocate and fill the vtag structure
1264 *
1265 * @fp: pointer to file structure
1266 * @vtag: vm tag
1267 *
1268 * Return:
1269 * * Pointer to allocated struct - on success
1270 * * ERR_PTR(-ENOMEM) on memory allocation failure
1271 */
1272 struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
1273 {
1274 struct mei_cl_vtag *cl_vtag;
1275
1276 cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
1277 if (!cl_vtag)
1278 return ERR_PTR(-ENOMEM);
1279
1280 INIT_LIST_HEAD(&cl_vtag->list);
1281 cl_vtag->vtag = vtag;
1282 cl_vtag->fp = fp;
1283
1284 return cl_vtag;
1285 }
1286
1287 /**
1288 * mei_cl_fp_by_vtag - obtain the file pointer by vtag
1289 *
1290 * @cl: host client
1291 * @vtag: virtual tag
1292 *
1293 * Return:
1294 * * A file pointer - on success
1295 * * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
1296 */
1297 const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
1298 {
1299 struct mei_cl_vtag *vtag_l;
1300
1301 list_for_each_entry(vtag_l, &cl->vtag_map, list)
1302 /* The client on bus has one fixed fp */
1303 if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
1304 vtag_l->vtag == vtag)
1305 return vtag_l->fp;
1306
1307 return ERR_PTR(-ENOENT);
1308 }
1309
1310 /**
1311 * mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag
1312 *
1313 * @cl: host client
1314 * @vtag: vm tag
1315 */
1316 static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
1317 {
1318 struct mei_cl_vtag *vtag_l;
1319
1320 list_for_each_entry(vtag_l, &cl->vtag_map, list) {
1321 /* The client on bus has one fixed vtag map */
1322 if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
1323 vtag_l->vtag == vtag) {
1324 vtag_l->pending_read = false;
1325 break;
1326 }
1327 }
1328 }
1329
1330 /**
1331 * mei_cl_read_vtag_add_fc - add flow control for next pending reader
1332 * in the vtag list
1333 *
1334 * @cl: host client
1335 */
1336 static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
1337 {
1338 struct mei_cl_vtag *cl_vtag;
1339
1340 list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
1341 if (cl_vtag->pending_read) {
1342 if (mei_cl_enqueue_ctrl_wr_cb(cl,
1343 mei_cl_mtu(cl),
1344 MEI_FOP_READ,
1345 cl_vtag->fp))
1346 cl->rx_flow_ctrl_creds++;
1347 break;
1348 }
1349 }
1350 }
1351
1352 /**
1353 * mei_cl_vt_support_check - check if client support vtags
1354 *
1355 * @cl: host client
1356 *
1357 * Return:
1358 * * 0 - supported, or not connected at all
1359 * * -EOPNOTSUPP - vtags are not supported by client
1360 */
1361 int mei_cl_vt_support_check(const struct mei_cl *cl)
1362 {
1363 struct mei_device *dev = cl->dev;
1364
1365 if (!dev->hbm_f_vt_supported)
1366 return -EOPNOTSUPP;
1367
1368 if (!cl->me_cl)
1369 return 0;
1370
1371 return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
1372 }
1373
1374 /**
1375 * mei_cl_add_rd_completed - add read completed callback to list with lock
1376 * and vtag check
1377 *
1378 * @cl: host client
1379 * @cb: callback block
1380 *
1381 */
1382 void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
1383 {
1384 const struct file *fp;
1385
1386 if (!mei_cl_vt_support_check(cl)) {
1387 fp = mei_cl_fp_by_vtag(cl, cb->vtag);
1388 if (IS_ERR(fp)) {
1389 /* client already disconnected, discarding */
1390 mei_io_cb_free(cb);
1391 return;
1392 }
1393 cb->fp = fp;
1394 mei_cl_reset_read_by_vtag(cl, cb->vtag);
1395 mei_cl_read_vtag_add_fc(cl);
1396 }
1397
1398 spin_lock(&cl->rd_completed_lock);
1399 list_add_tail(&cb->list, &cl->rd_completed);
1400 spin_unlock(&cl->rd_completed_lock);
1401 }
1402
1403 /**
1404 * mei_cl_del_rd_completed - free read completed callback with lock
1405 *
1406 * @cl: host client
1407 * @cb: callback block
1408 *
1409 */
1410 void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
1411 {
1412 spin_lock(&cl->rd_completed_lock);
1413 mei_io_cb_free(cb);
1414 spin_unlock(&cl->rd_completed_lock);
1415 }
1416
1417 /**
1418 * mei_cl_notify_fop2req - convert fop to proper request
1419 *
1420 * @fop: client notification start response command
1421 *
1422 * Return: MEI_HBM_NOTIFICATION_START/STOP
1423 */
1424 u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
1425 {
1426 if (fop == MEI_FOP_NOTIFY_START)
1427 return MEI_HBM_NOTIFICATION_START;
1428 else
1429 return MEI_HBM_NOTIFICATION_STOP;
1430 }
1431
1432 /**
1433 * mei_cl_notify_req2fop - convert notification request to file operation type
1434 *
1435 * @req: hbm notification request type
1436 *
1437 * Return: MEI_FOP_NOTIFY_START/STOP
1438 */
1439 enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
1440 {
1441 if (req == MEI_HBM_NOTIFICATION_START)
1442 return MEI_FOP_NOTIFY_START;
1443 else
1444 return MEI_FOP_NOTIFY_STOP;
1445 }
1446
1447 /**
1448 * mei_cl_irq_notify - send notification request in irq_thread context
1449 *
1450 * @cl: client
1451 * @cb: callback block.
1452 * @cmpl_list: complete list.
1453 *
1454 * Return: 0 on success and error otherwise.
1455 */
1456 int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
1457 struct list_head *cmpl_list)
1458 {
1459 struct mei_device *dev = cl->dev;
1460 u32 msg_slots;
1461 int slots;
1462 int ret;
1463 bool request;
1464
1465 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
1466 slots = mei_hbuf_empty_slots(dev);
1467 if (slots < 0)
1468 return -EOVERFLOW;
1469
1470 if ((u32)slots < msg_slots)
1471 return -EMSGSIZE;
1472
1473 request = mei_cl_notify_fop2req(cb->fop_type);
1474 ret = mei_hbm_cl_notify_req(dev, cl, request);
1475 if (ret) {
1476 cl->status = ret;
1477 list_move_tail(&cb->list, cmpl_list);
1478 return ret;
1479 }
1480
1481 list_move_tail(&cb->list, &dev->ctrl_rd_list);
1482 return 0;
1483 }
1484
1485 /**
1486 * mei_cl_notify_request - send notification stop/start request
1487 *
1488 * @cl: host client
1489 * @fp: associate request with file
1490 * @request: 1 for start or 0 for stop
1491 *
1492 * Locking: called under "dev->device_lock" lock
1493 *
1494 * Return: 0 on success and error otherwise.
1495 */
1496 int mei_cl_notify_request(struct mei_cl *cl,
1497 const struct file *fp, u8 request)
1498 {
1499 struct mei_device *dev;
1500 struct mei_cl_cb *cb;
1501 enum mei_cb_file_ops fop_type;
1502 int rets;
1503
1504 if (WARN_ON(!cl || !cl->dev))
1505 return -ENODEV;
1506
1507 dev = cl->dev;
1508
1509 if (!dev->hbm_f_ev_supported) {
1510 cl_dbg(dev, cl, "notifications not supported\n");
1511 return -EOPNOTSUPP;
1512 }
1513
1514 if (!mei_cl_is_connected(cl))
1515 return -ENODEV;
1516
1517 rets = pm_runtime_get(dev->parent);
1518 if (rets < 0 && rets != -EINPROGRESS) {
1519 pm_runtime_put_noidle(dev->parent);
1520 cl_err(dev, cl, "rpm: get failed %d\n", rets);
1521 return rets;
1522 }
1523
1524 fop_type = mei_cl_notify_req2fop(request);
1525 cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
1526 if (!cb) {
1527 rets = -ENOMEM;
1528 goto out;
1529 }
1530
1531 if (mei_hbuf_acquire(dev)) {
1532 if (mei_hbm_cl_notify_req(dev, cl, request)) {
1533 rets = -ENODEV;
1534 goto out;
1535 }
1536 list_move_tail(&cb->list, &dev->ctrl_rd_list);
1537 }
1538
1539 mutex_unlock(&dev->device_lock);
1540 wait_event_timeout(cl->wait,
1541 cl->notify_en == request ||
1542 cl->status ||
1543 !mei_cl_is_connected(cl),
1544 dev->timeouts.cl_connect);
1545 mutex_lock(&dev->device_lock);
1546
1547 if (cl->notify_en != request && !cl->status)
1548 cl->status = -EFAULT;
1549
1550 rets = cl->status;
1551
1552 out:
1553 cl_dbg(dev, cl, "rpm: autosuspend\n");
1554 pm_runtime_put_autosuspend(dev->parent);
1555
1556 mei_io_cb_free(cb);
1557 return rets;
1558 }
1559
1560 /**
1561 * mei_cl_notify - raise notification
1562 *
1563 * @cl: host client
1564 *
1565 * Locking: called under "dev->device_lock" lock
1566 */
1567 void mei_cl_notify(struct mei_cl *cl)
1568 {
1569 struct mei_device *dev;
1570
1571 if (!cl || !cl->dev)
1572 return;
1573
1574 dev = cl->dev;
1575
1576 if (!cl->notify_en)
1577 return;
1578
1579 cl_dbg(dev, cl, "notify event");
1580 cl->notify_ev = true;
1581 if (!mei_cl_bus_notify_event(cl))
1582 wake_up_interruptible(&cl->ev_wait);
1583
1584 if (cl->ev_async)
1585 kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
1586
1587 }
1588
1589 /**
1590 * mei_cl_notify_get - get or wait for notification event
1591 *
1592 * @cl: host client
1593 * @block: this request is blocking
1594 * @notify_ev: true if notification event was received
1595 *
1596 * Locking: called under "dev->device_lock" lock
1597 *
1598 * Return: 0 on success and error otherwise.
1599 */
1600 int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
1601 {
1602 struct mei_device *dev;
1603 int rets;
1604
1605 *notify_ev = false;
1606
1607 if (WARN_ON(!cl || !cl->dev))
1608 return -ENODEV;
1609
1610 dev = cl->dev;
1611
1612 if (!dev->hbm_f_ev_supported) {
1613 cl_dbg(dev, cl, "notifications not supported\n");
1614 return -EOPNOTSUPP;
1615 }
1616
1617 if (!mei_cl_is_connected(cl))
1618 return -ENODEV;
1619
1620 if (cl->notify_ev)
1621 goto out;
1622
1623 if (!block)
1624 return -EAGAIN;
1625
1626 mutex_unlock(&dev->device_lock);
1627 rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
1628 mutex_lock(&dev->device_lock);
1629
1630 if (rets < 0)
1631 return rets;
1632
1633 out:
1634 *notify_ev = cl->notify_ev;
1635 cl->notify_ev = false;
1636 return 0;
1637 }
1638
1639 /**
1640 * mei_cl_read_start - the start read client message function.
1641 *
1642 * @cl: host client
1643 * @length: number of bytes to read
1644 * @fp: pointer to file structure
1645 *
1646 * Return: 0 on success, <0 on failure.
1647 */
1648 int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
1649 {
1650 struct mei_device *dev;
1651 struct mei_cl_cb *cb;
1652 int rets;
1653
1654 if (WARN_ON(!cl || !cl->dev))
1655 return -ENODEV;
1656
1657 dev = cl->dev;
1658
1659 if (!mei_cl_is_connected(cl))
1660 return -ENODEV;
1661
1662 if (!mei_me_cl_is_active(cl->me_cl)) {
1663 cl_err(dev, cl, "no such me client\n");
1664 return -ENOTTY;
1665 }
1666
1667 if (mei_cl_is_fixed_address(cl))
1668 return 0;
1669
1670 /* HW currently supports only one pending read */
1671 if (cl->rx_flow_ctrl_creds) {
1672 mei_cl_set_read_by_fp(cl, fp);
1673 return -EBUSY;
1674 }
1675
1676 cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
1677 if (!cb)
1678 return -ENOMEM;
1679
1680 mei_cl_set_read_by_fp(cl, fp);
1681
1682 rets = pm_runtime_get(dev->parent);
1683 if (rets < 0 && rets != -EINPROGRESS) {
1684 pm_runtime_put_noidle(dev->parent);
1685 cl_err(dev, cl, "rpm: get failed %d\n", rets);
1686 goto nortpm;
1687 }
1688
1689 rets = 0;
1690 if (mei_hbuf_acquire(dev)) {
1691 rets = mei_hbm_cl_flow_control_req(dev, cl);
1692 if (rets < 0)
1693 goto out;
1694
1695 list_move_tail(&cb->list, &cl->rd_pending);
1696 }
1697 cl->rx_flow_ctrl_creds++;
1698
1699 out:
1700 cl_dbg(dev, cl, "rpm: autosuspend\n");
1701 pm_runtime_put_autosuspend(dev->parent);
1702 nortpm:
1703 if (rets)
1704 mei_io_cb_free(cb);
1705
1706 return rets;
1707 }
1708
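/**
 * mei_ext_hdr_set_vtag - fill the vtag extended header
 *
 * @ext: extended header buffer to fill
 * @vtag: vm tag
 *
 * Return: the length of the vtag extended header in slots
 */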
1709 static inline u8 mei_ext_hdr_set_vtag(void *ext, u8 vtag)
1710 {
1711 struct mei_ext_hdr_vtag *vtag_hdr = ext;
1712
1713 vtag_hdr->hdr.type = MEI_EXT_HDR_VTAG;
1714 vtag_hdr->hdr.length = mei_data2slots(sizeof(*vtag_hdr));
1715 vtag_hdr->vtag = vtag;
1716 vtag_hdr->reserved = 0;
1717 return vtag_hdr->hdr.length;
1718 }
1719
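/**
 * mei_ext_hdr_is_gsc - check whether the extended header is a gsc header
 *
 * @ext: extended header
 *
 * Return: true if the header is present and its type is MEI_EXT_HDR_GSC
 */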
1720 static inline bool mei_ext_hdr_is_gsc(struct mei_ext_hdr *ext)
1721 {
1722 return ext && ext->type == MEI_EXT_HDR_GSC;
1723 }
1724
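/**
 * mei_ext_hdr_set_gsc - copy the gsc extended header into the message header
 *
 * @ext: destination extended header
 * @gsc_hdr: gsc extended header to copy
 *
 * Return: the length of the copied gsc extended header, as stored in
 *	its length field
 */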
1725 static inline u8 mei_ext_hdr_set_gsc(struct mei_ext_hdr *ext, struct mei_ext_hdr *gsc_hdr)
1726 {
1727 memcpy(ext, gsc_hdr, mei_ext_hdr_len(gsc_hdr));
1728 return ext->length;
1729 }
1730
1731 /**
1732 * mei_msg_hdr_init - allocate and initialize mei message header
1733 *
1734 * @cb: message callback structure
1735 *
1736 * Return: a pointer to initialized header or ERR_PTR on failure
1737 */
1738 static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
1739 {
1740 size_t hdr_len;
1741 struct mei_ext_meta_hdr *meta;
1742 struct mei_msg_hdr *mei_hdr;
1743 bool is_ext, is_hbm, is_gsc, is_vtag;
1744 struct mei_ext_hdr *next_ext;
1745
1746 if (!cb)
1747 return ERR_PTR(-EINVAL);
1748
1749 /* Extended header for vtag is attached only on the first fragment */
1750 is_vtag = (cb->vtag && cb->buf_idx == 0);
1751 is_hbm = cb->cl->me_cl->client_id == 0;
1752 is_gsc = ((!is_hbm) && cb->cl->dev->hbm_f_gsc_supported && mei_ext_hdr_is_gsc(cb->ext_hdr));
1753 is_ext = is_vtag || is_gsc;
1754
1755 /* Compute extended header size */
1756 hdr_len = sizeof(*mei_hdr);
1757
1758 if (!is_ext)
1759 goto setup_hdr;
1760
1761 hdr_len += sizeof(*meta);
1762 if (is_vtag)
1763 hdr_len += sizeof(struct mei_ext_hdr_vtag);
1764
1765 if (is_gsc)
1766 hdr_len += mei_ext_hdr_len(cb->ext_hdr);
1767
1768 setup_hdr:
1769 mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
1770 if (!mei_hdr)
1771 return ERR_PTR(-ENOMEM);
1772
1773 mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
1774 mei_hdr->me_addr = mei_cl_me_id(cb->cl);
1775 mei_hdr->internal = cb->internal;
1776 mei_hdr->extended = is_ext;
1777
1778 if (!is_ext)
1779 goto out;
1780
1781 meta = (struct mei_ext_meta_hdr *)mei_hdr->extension;
1782 meta->size = 0;
1783 next_ext = (struct mei_ext_hdr *)meta->hdrs;
1784 if (is_vtag) {
1785 meta->count++;
1786 meta->size += mei_ext_hdr_set_vtag(next_ext, cb->vtag);
1787 next_ext = mei_ext_next(next_ext);
1788 }
1789
1790 if (is_gsc) {
1791 meta->count++;
1792 meta->size += mei_ext_hdr_set_gsc(next_ext, cb->ext_hdr);
1793 next_ext = mei_ext_next(next_ext);
1794 }
1795
1796 out:
1797 mei_hdr->length = hdr_len - sizeof(*mei_hdr);
1798 return mei_hdr;
1799 }
1800
1801 /**
1802 * mei_cl_irq_write - write a message to device
1803 * from the interrupt thread context
1804 *
1805 * @cl: client
1806 * @cb: callback block.
1807 * @cmpl_list: complete list.
1808 *
1809 * Return: 0, OK; otherwise error.
1810 */
1811 int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1812 struct list_head *cmpl_list)
1813 {
1814 struct mei_device *dev;
1815 struct mei_msg_data *buf;
1816 struct mei_msg_hdr *mei_hdr = NULL;
1817 size_t hdr_len;
1818 size_t hbuf_len, dr_len;
1819 size_t buf_len = 0;
1820 size_t data_len;
1821 int hbuf_slots;
1822 u32 dr_slots;
1823 u32 dma_len;
1824 int rets;
1825 bool first_chunk;
1826 const void *data = NULL;
1827
1828 if (WARN_ON(!cl || !cl->dev))
1829 return -ENODEV;
1830
1831 dev = cl->dev;
1832
1833 buf = &cb->buf;
1834
1835 first_chunk = cb->buf_idx == 0;
1836
1837 rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
1838 if (rets < 0)
1839 goto err;
1840
1841 if (rets == 0) {
1842 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
1843 return 0;
1844 }
1845
1846 if (buf->data) {
1847 buf_len = buf->size - cb->buf_idx;
1848 data = buf->data + cb->buf_idx;
1849 }
1850 hbuf_slots = mei_hbuf_empty_slots(dev);
1851 if (hbuf_slots < 0) {
1852 rets = -EOVERFLOW;
1853 goto err;
1854 }
1855
1856 hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
1857 dr_slots = mei_dma_ring_empty_slots(dev);
1858 dr_len = mei_slots2data(dr_slots);
1859
1860 mei_hdr = mei_msg_hdr_init(cb);
1861 if (IS_ERR(mei_hdr)) {
1862 rets = PTR_ERR(mei_hdr);
1863 mei_hdr = NULL;
1864 goto err;
1865 }
1866
1867 hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
1868
1869 /**
1870 * Split the message only if we can write the whole host buffer,
1871 * otherwise wait for the next time the host buffer is empty.
1872 */
1873 if (hdr_len + buf_len <= hbuf_len) {
1874 data_len = buf_len;
1875 mei_hdr->msg_complete = 1;
1876 } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
1877 mei_hdr->dma_ring = 1;
1878 if (buf_len > dr_len)
1879 buf_len = dr_len;
1880 else
1881 mei_hdr->msg_complete = 1;
1882
1883 data_len = sizeof(dma_len);
1884 dma_len = buf_len;
1885 data = &dma_len;
1886 } else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
1887 buf_len = hbuf_len - hdr_len;
1888 data_len = buf_len;
1889 } else {
1890 kfree(mei_hdr);
1891 return 0;
1892 }
1893 mei_hdr->length += data_len;
1894
1895 if (mei_hdr->dma_ring && buf->data)
1896 mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
1897 rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
1898
1899 if (rets)
1900 goto err;
1901
1902 cl->status = 0;
1903 cl->writing_state = MEI_WRITING;
1904 cb->buf_idx += buf_len;
1905
1906 if (first_chunk) {
1907 if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
1908 rets = -EIO;
1909 goto err;
1910 }
1911 }
1912
1913 if (mei_hdr->msg_complete)
1914 list_move_tail(&cb->list, &dev->write_waiting_list);
1915
1916 kfree(mei_hdr);
1917 return 0;
1918
1919 err:
1920 kfree(mei_hdr);
1921 cl->status = rets;
1922 list_move_tail(&cb->list, cmpl_list);
1923 return rets;
1924 }
1925
1926 /**
1927 * mei_cl_write - submit a write cb to mei device
1928 * assumes device_lock is locked
1929 *
1930 * @cl: host client
1931 * @cb: write callback with filled data
1932 * @timeout: send timeout in milliseconds.
1933 * effective only for blocking writes: the cb->blocking is set.
1934 * set timeout to MAX_SCHEDULE_TIMEOUT for the maximum allowed wait.
1935 *
1936 * Return: number of bytes sent on success, <0 on failure.
1937 */
1938 ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long timeout)
1939 {
1940 struct mei_device *dev;
1941 struct mei_msg_data *buf;
1942 struct mei_msg_hdr *mei_hdr = NULL;
1943 size_t hdr_len;
1944 size_t hbuf_len, dr_len;
1945 size_t buf_len;
1946 size_t data_len;
1947 int hbuf_slots;
1948 u32 dr_slots;
1949 u32 dma_len;
1950 ssize_t rets;
1951 bool blocking;
1952 const void *data;
1953
1954 if (WARN_ON(!cl || !cl->dev))
1955 return -ENODEV;
1956
1957 if (WARN_ON(!cb))
1958 return -EINVAL;
1959
1960 dev = cl->dev;
1961
1962 buf = &cb->buf;
1963 buf_len = buf->size;
1964
1965 cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);
1966
1967 blocking = cb->blocking;
1968 data = buf->data;
1969
1970 rets = pm_runtime_get(dev->parent);
1971 if (rets < 0 && rets != -EINPROGRESS) {
1972 pm_runtime_put_noidle(dev->parent);
1973 cl_err(dev, cl, "rpm: get failed %zd\n", rets);
1974 goto free;
1975 }
1976
1977 cb->buf_idx = 0;
1978 cl->writing_state = MEI_IDLE;
1979
1980
1981 rets = mei_cl_tx_flow_ctrl_creds(cl);
1982 if (rets < 0)
1983 goto err;
1984
1985 mei_hdr = mei_msg_hdr_init(cb);
1986 if (IS_ERR(mei_hdr)) {
1987 rets = PTR_ERR(mei_hdr);
1988 mei_hdr = NULL;
1989 goto err;
1990 }
1991
1992 hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
1993
1994 if (rets == 0) {
1995 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
1996 rets = buf_len;
1997 goto out;
1998 }
1999
2000 if (!mei_hbuf_acquire(dev)) {
2001 cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
2002 rets = buf_len;
2003 goto out;
2004 }
2005
2006 hbuf_slots = mei_hbuf_empty_slots(dev);
2007 if (hbuf_slots < 0) {
2008 buf_len = -EOVERFLOW;
2009 goto out;
2010 }
2011
2012 hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
2013 dr_slots = mei_dma_ring_empty_slots(dev);
2014 dr_len = mei_slots2data(dr_slots);
2015
2016 if (hdr_len + buf_len <= hbuf_len) {
2017 data_len = buf_len;
2018 mei_hdr->msg_complete = 1;
2019 } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
2020 mei_hdr->dma_ring = 1;
2021 if (buf_len > dr_len)
2022 buf_len = dr_len;
2023 else
2024 mei_hdr->msg_complete = 1;
2025
2026 data_len = sizeof(dma_len);
2027 dma_len = buf_len;
2028 data = &dma_len;
2029 } else {
2030 buf_len = hbuf_len - hdr_len;
2031 data_len = buf_len;
2032 }
2033
2034 mei_hdr->length += data_len;
2035
2036 if (mei_hdr->dma_ring && buf->data)
2037 mei_dma_ring_write(dev, buf->data, buf_len);
2038 rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
2039
2040 if (rets)
2041 goto err;
2042
2043 rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
2044 if (rets)
2045 goto err;
2046
2047 cl->writing_state = MEI_WRITING;
2048 cb->buf_idx = buf_len;
2049 /* restore return value */
2050 buf_len = buf->size;
2051
2052 out:
2053 if (mei_hdr->msg_complete)
2054 mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
2055 else
2056 mei_tx_cb_enqueue(cb, &dev->write_list);
2057
2058 cb = NULL;
2059 if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
2060
2061 mutex_unlock(&dev->device_lock);
2062 rets = wait_event_interruptible_timeout(cl->tx_wait,
2063 cl->writing_state == MEI_WRITE_COMPLETE ||
2064 (!mei_cl_is_connected(cl)),
2065 msecs_to_jiffies(timeout));
2066 mutex_lock(&dev->device_lock);
2067 /* clean all queue on timeout as something fatal happened */
2068 if (rets == 0) {
2069 rets = -ETIME;
2070 mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
2071 mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
2072 }
2073 /* wait_event_interruptible returns -ERESTARTSYS */
2074 if (rets > 0)
2075 rets = 0;
2076 if (rets) {
2077 if (signal_pending(current))
2078 rets = -EINTR;
2079 goto err;
2080 }
2081 if (cl->writing_state != MEI_WRITE_COMPLETE) {
2082 rets = -EFAULT;
2083 goto err;
2084 }
2085 }
2086
2087 rets = buf_len;
2088 err:
2089 cl_dbg(dev, cl, "rpm: autosuspend\n");
2090 pm_runtime_put_autosuspend(dev->parent);
2091 free:
2092 mei_io_cb_free(cb);
2093
2094 kfree(mei_hdr);
2095
2096 return rets;
2097 }
2098
2099 /**
2100 * mei_cl_complete - processes completed operation for a client
2101 *
2102 * @cl: private data of the file object.
2103 * @cb: callback block.
2104 */
2105 void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
2106 {
2107 struct mei_device *dev = cl->dev;
2108
2109 switch (cb->fop_type) {
2110 case MEI_FOP_WRITE:
2111 mei_tx_cb_dequeue(cb);
2112 cl->writing_state = MEI_WRITE_COMPLETE;
2113 if (waitqueue_active(&cl->tx_wait))
2114 wake_up_interruptible(&cl->tx_wait);
2115 else
2116 pm_request_autosuspend(dev->parent);
2117 break;
2118
2119 case MEI_FOP_READ:
2120 mei_cl_add_rd_completed(cl, cb);
2121 if (!mei_cl_is_fixed_address(cl) &&
2122 !WARN_ON(!cl->rx_flow_ctrl_creds))
2123 cl->rx_flow_ctrl_creds--;
2124 if (!mei_cl_bus_rx_event(cl))
2125 wake_up_interruptible(&cl->rx_wait);
2126 break;
2127
2128 case MEI_FOP_CONNECT:
2129 case MEI_FOP_DISCONNECT:
2130 case MEI_FOP_NOTIFY_STOP:
2131 case MEI_FOP_NOTIFY_START:
2132 case MEI_FOP_DMA_MAP:
2133 case MEI_FOP_DMA_UNMAP:
2134 if (waitqueue_active(&cl->wait))
2135 wake_up(&cl->wait);
2136
2137 break;
2138 case MEI_FOP_DISCONNECT_RSP:
2139 mei_io_cb_free(cb);
2140 mei_cl_set_disconnected(cl);
2141 break;
2142 default:
2143 BUG_ON(0);
2144 }
2145 }
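/*
 * The MEI_FOP_WRITE branch above pairs with the blocking wait in
 * mei_cl_write(): a writer sleeping on cl->tx_wait is woken once
 * writing_state reaches MEI_WRITE_COMPLETE, while a write with no waiter
 * just requests runtime autosuspend.
 */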
2146
2147
2148 /**
2149 * mei_cl_all_disconnect - disconnect forcefully all connected clients
2150 *
2151 * @dev: mei device
2152 */
2153 void mei_cl_all_disconnect(struct mei_device *dev)
2154 {
2155 struct mei_cl *cl;
2156
2157 list_for_each_entry(cl, &dev->file_list, link)
2158 mei_cl_set_disconnected(cl);
2159 }
2160 EXPORT_SYMBOL_GPL(mei_cl_all_disconnect);
2161
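/**
 * mei_cl_dma_map_find - find the client owning a mapped DMA buffer id
 *
 * @dev: mei device
 * @buffer_id: dma buffer id to look up
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: the owning host client or NULL if the id is not in use
 */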
2162 static struct mei_cl *mei_cl_dma_map_find(struct mei_device *dev, u8 buffer_id)
2163 {
2164 struct mei_cl *cl;
2165
2166 list_for_each_entry(cl, &dev->file_list, link)
2167 if (cl->dma.buffer_id == buffer_id)
2168 return cl;
2169 return NULL;
2170 }
2171
2172 /**
2173 * mei_cl_irq_dma_map - send client dma map request in irq_thread context
2174 *
2175 * @cl: client
2176 * @cb: callback block.
2177 * @cmpl_list: complete list.
2178 *
2179 * Return: 0 on success, an error code otherwise.
2180 */
2181 int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb,
2182 struct list_head *cmpl_list)
2183 {
2184 struct mei_device *dev = cl->dev;
2185 u32 msg_slots;
2186 int slots;
2187 int ret;
2188
2189 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_map_request));
2190 slots = mei_hbuf_empty_slots(dev);
2191 if (slots < 0)
2192 return -EOVERFLOW;
2193
2194 if ((u32)slots < msg_slots)
2195 return -EMSGSIZE;
2196
2197 ret = mei_hbm_cl_dma_map_req(dev, cl);
2198 if (ret) {
2199 cl->status = ret;
2200 list_move_tail(&cb->list, cmpl_list);
2201 return ret;
2202 }
2203
2204 list_move_tail(&cb->list, &dev->ctrl_rd_list);
2205 return 0;
2206 }
2207
2208 /**
2209 * mei_cl_irq_dma_unmap - send client dma unmap request in irq_thread context
2210 *
2211 * @cl: client
2212 * @cb: callback block.
2213 * @cmpl_list: complete list.
2214 *
2215 * Return: 0 on success, an error code otherwise.
2216 */
2217 int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb,
2218 struct list_head *cmpl_list)
2219 {
2220 struct mei_device *dev = cl->dev;
2221 u32 msg_slots;
2222 int slots;
2223 int ret;
2224
2225 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_unmap_request));
2226 slots = mei_hbuf_empty_slots(dev);
2227 if (slots < 0)
2228 return -EOVERFLOW;
2229
2230 if ((u32)slots < msg_slots)
2231 return -EMSGSIZE;
2232
2233 ret = mei_hbm_cl_dma_unmap_req(dev, cl);
2234 if (ret) {
2235 cl->status = ret;
2236 list_move_tail(&cb->list, cmpl_list);
2237 return ret;
2238 }
2239
2240 list_move_tail(&cb->list, &dev->ctrl_rd_list);
2241 return 0;
2242 }
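/*
 * Both irq-context DMA request senders above follow the common
 * mei_cl_irq_* pattern: translate the HBM request size into hardware
 * slots, return -EOVERFLOW if the host buffer state is inconsistent and
 * -EMSGSIZE if there is not yet enough room (the callback is left in
 * place so a later interrupt can retry), and on a successful send move
 * the callback to dev->ctrl_rd_list to wait for the firmware response.
 */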
2243
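/**
 * mei_cl_dma_alloc - allocate the client's coherent DMA buffer
 *
 * @cl: host client
 * @buf_id: buffer id to record for the allocation
 * @size: size of the buffer in bytes
 *
 * The buffer comes from the device-managed (dmam_*) API, so it is also
 * released automatically when the device is unbound.
 *
 * Return: 0 on success, -ENOMEM if the allocation fails
 */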
2244 static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size)
2245 {
2246 cl->dma.vaddr = dmam_alloc_coherent(&cl->dev->dev, size,
2247 &cl->dma.daddr, GFP_KERNEL);
2248 if (!cl->dma.vaddr)
2249 return -ENOMEM;
2250
2251 cl->dma.buffer_id = buf_id;
2252 cl->dma.size = size;
2253
2254 return 0;
2255 }
2256
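/**
 * mei_cl_dma_free - release the client's coherent DMA buffer
 *
 * @cl: host client
 *
 * Frees the buffer through the device-managed API and resets the client's
 * DMA bookkeeping (buffer id, size and addresses).
 */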
2257 static void mei_cl_dma_free(struct mei_cl *cl)
2258 {
2259 cl->dma.buffer_id = 0;
2260 dmam_free_coherent(&cl->dev->dev,
2261 cl->dma.size, cl->dma.vaddr, cl->dma.daddr);
2262 cl->dma.size = 0;
2263 cl->dma.vaddr = NULL;
2264 cl->dma.daddr = 0;
2265 }
2266
2267 /**
2268 * mei_cl_dma_alloc_and_map - send client dma map request
2269 *
2270 * @cl: host client
2271 * @fp: pointer to file structure
2272 * @buffer_id: id of the mapped buffer
2273 * @size: size of the buffer
2274 *
2275 * Locking: called under "dev->device_lock" lock
2276 *
2277 * Return:
2278 * * -ENODEV
2279 * * -EINVAL
2280 * * -EOPNOTSUPP
2281 * * -EPROTO
2282 * * -ENOMEM
2283 */
2284 int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
2285 u8 buffer_id, size_t size)
2286 {
2287 struct mei_device *dev;
2288 struct mei_cl_cb *cb;
2289 int rets;
2290
2291 if (WARN_ON(!cl || !cl->dev))
2292 return -ENODEV;
2293
2294 dev = cl->dev;
2295
2296 if (!dev->hbm_f_cd_supported) {
2297 cl_dbg(dev, cl, "client dma is not supported\n");
2298 return -EOPNOTSUPP;
2299 }
2300
2301 if (buffer_id == 0)
2302 return -EINVAL;
2303
2304 if (mei_cl_is_connected(cl))
2305 return -EPROTO;
2306
2307 if (cl->dma_mapped)
2308 return -EPROTO;
2309
2310 if (mei_cl_dma_map_find(dev, buffer_id)) {
2311 cl_dbg(dev, cl, "client dma with id %d is already allocated\n",
2312 buffer_id);
2313 return -EPROTO;
2314 }
2315
2316 rets = pm_runtime_get(dev->parent);
2317 if (rets < 0 && rets != -EINPROGRESS) {
2318 pm_runtime_put_noidle(dev->parent);
2319 cl_err(dev, cl, "rpm: get failed %d\n", rets);
2320 return rets;
2321 }
2322
2323 rets = mei_cl_dma_alloc(cl, buffer_id, size);
2324 if (rets) {
2325 pm_runtime_put_noidle(dev->parent);
2326 return rets;
2327 }
2328
2329 cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_MAP, fp);
2330 if (!cb) {
2331 rets = -ENOMEM;
2332 goto out;
2333 }
2334
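/*
 * If the host buffer is free, send the DMA map request immediately and
 * park the callback on ctrl_rd_list to await the firmware response;
 * otherwise the callback stays on the control write list and
 * mei_cl_irq_dma_map() sends it from the interrupt thread once buffer
 * space becomes available.
 */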
2335 if (mei_hbuf_acquire(dev)) {
2336 if (mei_hbm_cl_dma_map_req(dev, cl)) {
2337 rets = -ENODEV;
2338 goto out;
2339 }
2340 list_move_tail(&cb->list, &dev->ctrl_rd_list);
2341 }
2342
2343 cl->status = 0;
2344
2345 mutex_unlock(&dev->device_lock);
2346 wait_event_timeout(cl->wait,
2347 cl->dma_mapped || cl->status,
2348 dev->timeouts.cl_connect);
2349 mutex_lock(&dev->device_lock);
2350
2351 if (!cl->dma_mapped && !cl->status)
2352 cl->status = -EFAULT;
2353
2354 rets = cl->status;
2355
2356 out:
2357 if (rets)
2358 mei_cl_dma_free(cl);
2359
2360 cl_dbg(dev, cl, "rpm: autosuspend\n");
2361 pm_runtime_put_autosuspend(dev->parent);
2362
2363 mei_io_cb_free(cb);
2364 return rets;
2365 }
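/*
 * Minimal usage sketch for the map/unmap pair (hypothetical caller; the
 * function name, buffer id and size are illustrative only). Both calls
 * expect dev->device_lock to be held and the client to be unconnected,
 * as documented above:
 *
 *	static int example_client_dma_setup(struct mei_cl *cl,
 *					    const struct file *fp)
 *	{
 *		int ret;
 *
 *		ret = mei_cl_dma_alloc_and_map(cl, fp, 1, SZ_4K);
 *		if (ret)
 *			return ret;
 *		... use cl->dma.vaddr / cl->dma.daddr here ...
 *		return mei_cl_dma_unmap(cl, fp);
 *	}
 */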
2366
2367 /**
2368 * mei_cl_dma_unmap - send client dma unmap request
2369 *
2370 * @cl: host client
2371 * @fp: pointer to file structure
2372 *
2373 * Locking: called under "dev->device_lock" lock
2374 *
2375 * Return: 0 on success, an error code otherwise.
2376 */
2377 int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
2378 {
2379 struct mei_device *dev;
2380 struct mei_cl_cb *cb;
2381 int rets;
2382
2383 if (WARN_ON(!cl || !cl->dev))
2384 return -ENODEV;
2385
2386 dev = cl->dev;
2387
2388 if (!dev->hbm_f_cd_supported) {
2389 cl_dbg(dev, cl, "client dma is not supported\n");
2390 return -EOPNOTSUPP;
2391 }
2392
2393 /* do not allow unmap for connected client */
2394 if (mei_cl_is_connected(cl))
2395 return -EPROTO;
2396
2397 if (!cl->dma_mapped)
2398 return -EPROTO;
2399
2400 rets = pm_runtime_get(dev->parent);
2401 if (rets < 0 && rets != -EINPROGRESS) {
2402 pm_runtime_put_noidle(dev->parent);
2403 cl_err(dev, cl, "rpm: get failed %d\n", rets);
2404 return rets;
2405 }
2406
2407 cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_UNMAP, fp);
2408 if (!cb) {
2409 rets = -ENOMEM;
2410 goto out;
2411 }
2412
2413 if (mei_hbuf_acquire(dev)) {
2414 if (mei_hbm_cl_dma_unmap_req(dev, cl)) {
2415 rets = -ENODEV;
2416 goto out;
2417 }
2418 list_move_tail(&cb->list, &dev->ctrl_rd_list);
2419 }
2420
2421 cl->status = 0;
2422
2423 mutex_unlock(&dev->device_lock);
2424 wait_event_timeout(cl->wait,
2425 !cl->dma_mapped || cl->status,
2426 dev->timeouts.cl_connect);
2427 mutex_lock(&dev->device_lock);
2428
2429 if (cl->dma_mapped && !cl->status)
2430 cl->status = -EFAULT;
2431
2432 rets = cl->status;
2433
2434 if (!rets)
2435 mei_cl_dma_free(cl);
2436 out:
2437 cl_dbg(dev, cl, "rpm: autosuspend\n");
2438 pm_runtime_put_autosuspend(dev->parent);
2439
2440 mei_io_cb_free(cb);
2441 return rets;
2442 }
2443