xref: /linux/drivers/misc/mei/interrupt.c (revision e926301b39a07f587ff8c66354a2e2ee4c29162c)
1 /*
2  *
3  * Intel Management Engine Interface (Intel MEI) Linux driver
4  * Copyright (c) 2003-2012, Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  */
16 
17 
18 #include <linux/pci.h>
19 #include <linux/kthread.h>
20 #include <linux/interrupt.h>
21 #include <linux/fs.h>
22 #include <linux/jiffies.h>
23 
24 #include <linux/mei.h>
25 
26 #include "mei_dev.h"
27 #include "hbm.h"
28 #include "hw-me.h"
29 #include "client.h"
30 
31 
32 /**
33  * mei_complete_handler - processes completed operation.
34  *
35  * @cl: private data of the file object.
36  * @cb_pos: callback block.
37  */
38 void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
39 {
40 	if (cb_pos->fop_type == MEI_FOP_WRITE) {
41 		mei_io_cb_free(cb_pos);
42 		cb_pos = NULL;
43 		cl->writing_state = MEI_WRITE_COMPLETE;
44 		if (waitqueue_active(&cl->tx_wait))
45 			wake_up_interruptible(&cl->tx_wait);
46 
47 	} else if (cb_pos->fop_type == MEI_FOP_READ &&
48 			MEI_READING == cl->reading_state) {
49 		cl->reading_state = MEI_READ_COMPLETE;
50 		if (waitqueue_active(&cl->rx_wait))
51 			wake_up_interruptible(&cl->rx_wait);
52 
53 	}
54 }
55 
56 /**
57  * _mei_irq_thread_state_ok - checks if mei header matches file private data
58  *
59  * @cl: private data of the file object
60  * @mei_hdr: header of mei client message
61  *
62  * returns !=0 if matches, 0 if no match.
63  */
64 static int _mei_irq_thread_state_ok(struct mei_cl *cl,
65 				struct mei_msg_hdr *mei_hdr)
66 {
67 	return (cl->host_client_id == mei_hdr->host_addr &&
68 		cl->me_client_id == mei_hdr->me_addr &&
69 		cl->state == MEI_FILE_CONNECTED &&
70 		MEI_READ_COMPLETE != cl->reading_state);
71 }
72 
/**
 * mei_irq_thread_read_client_message - bottom half read routine after ISR to
 * handle the read mei client message data processing.
 *
 * Finds the pending read cb whose client matches @mei_hdr, copies the
 * payload into its response buffer at the current buf_idx, and moves the
 * cb to @complete_list once the message is complete. If no cb claims the
 * message, the payload is read into a scratch buffer and discarded.
 *
 * @complete_list: An instance of our list structure
 * @dev: the device structure
 * @mei_hdr: header of mei client message
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_irq_thread_read_client_message(struct mei_cl_cb *complete_list,
		struct mei_device *dev,
		struct mei_msg_hdr *mei_hdr)
{
	struct mei_cl *cl;
	struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
	/* stays NULL when no matching cb is found -> message is discarded */
	unsigned char *buffer = NULL;

	dev_dbg(&dev->pdev->dev, "start client msg\n");
	if (list_empty(&dev->read_list.list))
		goto quit;

	list_for_each_entry_safe(cb_pos, cb_next, &dev->read_list.list, list) {
		cl = cb_pos->cl;
		if (cl && _mei_irq_thread_state_ok(cl, mei_hdr)) {
			cl->reading_state = MEI_READING;
			/* append after any previously received fragments */
			buffer = cb_pos->response_buffer.data + cb_pos->buf_idx;

			if (cb_pos->response_buffer.size <
					mei_hdr->length + cb_pos->buf_idx) {
				dev_dbg(&dev->pdev->dev, "message overflow.\n");
				list_del(&cb_pos->list);
				/* NOTE(review): returning here leaves the
				 * message payload unread in the hardware
				 * buffer — confirm the caller recovers
				 * (e.g. via reset) rather than stalling. */
				return -ENOMEM;
			}
			if (buffer)
				mei_read_slots(dev, buffer, mei_hdr->length);

			cb_pos->buf_idx += mei_hdr->length;
			if (mei_hdr->msg_complete) {
				cl->status = 0;
				list_del(&cb_pos->list);
				dev_dbg(&dev->pdev->dev,
					"completed read H cl = %d, ME cl = %d, length = %lu\n",
					cl->host_client_id,
					cl->me_client_id,
					cb_pos->buf_idx);

				list_add_tail(&cb_pos->list,
						&complete_list->list);
			}

			/* at most one cb can match this header */
			break;
		}

	}

quit:
	dev_dbg(&dev->pdev->dev, "message read\n");
	if (!buffer) {
		/* nobody claimed the message: drain it into the scratch
		 * buffer so the hardware read pointer still advances */
		mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
		dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n",
				MEI_HDR_PRM(mei_hdr));
	}

	return 0;
}
139 
140 /**
141  * _mei_irq_thread_close - processes close related operation.
142  *
143  * @dev: the device structure.
144  * @slots: free slots.
145  * @cb_pos: callback block.
146  * @cl: private data of the file object.
147  * @cmpl_list: complete list.
148  *
149  * returns 0, OK; otherwise, error.
150  */
151 static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
152 				struct mei_cl_cb *cb_pos,
153 				struct mei_cl *cl,
154 				struct mei_cl_cb *cmpl_list)
155 {
156 	u32 msg_slots =
157 		mei_data2slots(sizeof(struct hbm_client_connect_request));
158 
159 	if (*slots < msg_slots)
160 		return -EMSGSIZE;
161 
162 	*slots -= msg_slots;
163 
164 	if (mei_hbm_cl_disconnect_req(dev, cl)) {
165 		cl->status = 0;
166 		cb_pos->buf_idx = 0;
167 		list_move_tail(&cb_pos->list, &cmpl_list->list);
168 		return -EIO;
169 	}
170 
171 	cl->state = MEI_FILE_DISCONNECTING;
172 	cl->status = 0;
173 	cb_pos->buf_idx = 0;
174 	list_move_tail(&cb_pos->list, &dev->ctrl_rd_list.list);
175 	cl->timer_count = MEI_CONNECT_TIMEOUT;
176 
177 	return 0;
178 }
179 
180 
181 /**
182  * _mei_hb_read - processes read related operation.
183  *
184  * @dev: the device structure.
185  * @slots: free slots.
186  * @cb_pos: callback block.
187  * @cl: private data of the file object.
188  * @cmpl_list: complete list.
189  *
190  * returns 0, OK; otherwise, error.
191  */
192 static int _mei_irq_thread_read(struct mei_device *dev,	s32 *slots,
193 			struct mei_cl_cb *cb_pos,
194 			struct mei_cl *cl,
195 			struct mei_cl_cb *cmpl_list)
196 {
197 	u32 msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
198 
199 	if (*slots < msg_slots) {
200 		/* return the cancel routine */
201 		list_del(&cb_pos->list);
202 		return -EMSGSIZE;
203 	}
204 
205 	*slots -= msg_slots;
206 
207 	if (mei_hbm_cl_flow_control_req(dev, cl)) {
208 		cl->status = -ENODEV;
209 		cb_pos->buf_idx = 0;
210 		list_move_tail(&cb_pos->list, &cmpl_list->list);
211 		return -ENODEV;
212 	}
213 	list_move_tail(&cb_pos->list, &dev->read_list.list);
214 
215 	return 0;
216 }
217 
218 
219 /**
220  * _mei_irq_thread_ioctl - processes ioctl related operation.
221  *
222  * @dev: the device structure.
223  * @slots: free slots.
224  * @cb_pos: callback block.
225  * @cl: private data of the file object.
226  * @cmpl_list: complete list.
227  *
228  * returns 0, OK; otherwise, error.
229  */
230 static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
231 			struct mei_cl_cb *cb_pos,
232 			struct mei_cl *cl,
233 			struct mei_cl_cb *cmpl_list)
234 {
235 	u32 msg_slots =
236 		mei_data2slots(sizeof(struct hbm_client_connect_request));
237 
238 	if (*slots < msg_slots) {
239 		/* return the cancel routine */
240 		list_del(&cb_pos->list);
241 		return -EMSGSIZE;
242 	}
243 
244 	*slots -=  msg_slots;
245 
246 	cl->state = MEI_FILE_CONNECTING;
247 
248 	if (mei_hbm_cl_connect_req(dev, cl)) {
249 		cl->status = -ENODEV;
250 		cb_pos->buf_idx = 0;
251 		list_del(&cb_pos->list);
252 		return -ENODEV;
253 	} else {
254 		list_move_tail(&cb_pos->list, &dev->ctrl_rd_list.list);
255 		cl->timer_count = MEI_CONNECT_TIMEOUT;
256 	}
257 	return 0;
258 }
259 
/**
 * mei_irq_thread_write_complete - write messages to device.
 *
 * Transmits as much as possible of the cb's request buffer: the whole
 * remainder if it fits in the free slots, one full-host-buffer fragment
 * if the buffer is completely empty, or nothing otherwise.
 *
 * @dev: the device structure.
 * @slots: free slots.
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
			struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
{
	struct mei_msg_hdr mei_hdr;
	struct mei_cl *cl = cb->cl;
	/* bytes of the request still waiting to be transmitted */
	size_t len = cb->request_buffer.size - cb->buf_idx;
	u32 msg_slots = mei_data2slots(len);

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;

	if (*slots >= msg_slots) {
		/* whole remainder fits: send it as the final fragment */
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	/* Split the message only if we can write the whole host buffer */
	} else if (*slots == dev->hbuf_depth) {
		msg_slots = *slots;
		/* payload = buffer capacity minus the header itself */
		len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
		mei_hdr.length = len;
		mei_hdr.msg_complete = 0;
	} else {
		/* wait for next time the host buffer is empty */
		return 0;
	}

	dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n",
			cb->request_buffer.size, cb->buf_idx);
	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));

	*slots -=  msg_slots;
	if (mei_write_message(dev, &mei_hdr,
			cb->request_buffer.data + cb->buf_idx)) {
		cl->status = -ENODEV;
		list_move_tail(&cb->list, &cmpl_list->list);
		return -ENODEV;
	}

	/* NOTE(review): on failure here the cb stays on the write list
	 * with buf_idx already reflecting the sent fragment? No — buf_idx
	 * is only advanced below; confirm the caller's error handling. */
	if (mei_cl_flow_ctrl_reduce(cl))
		return -ENODEV;

	cl->status = 0;
	cb->buf_idx += mei_hdr.length;
	/* final fragment sent: wait for the write-complete interrupt */
	if (mei_hdr.msg_complete)
		list_move_tail(&cb->list, &dev->write_waiting_list.list);

	return 0;
}
318 
/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 * handle the read processing.
 *
 * Reads one message header (or resumes a cached one), validates it,
 * and dispatches the payload to the HBM layer, the AMTHI client, or a
 * regular client read.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 * @slots: slots to read.
 *
 * returns 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
		struct mei_cl_cb *cmpl_list, s32 *slots)
{
	struct mei_msg_hdr *mei_hdr;
	struct mei_cl *cl_pos = NULL;
	struct mei_cl *cl_next = NULL;
	int ret = 0;

	/* fetch a fresh header from the hardware unless one cached from
	 * a previous (partial) pass is still pending */
	if (!dev->rd_msg_hdr) {
		dev->rd_msg_hdr = mei_read_hdr(dev);
		dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
		/* the header itself consumed one slot */
		(*slots)--;
		dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
	}
	/* reinterpret the cached header word as a message header */
	mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

	/* a set reserved field or an all-zero header means corruption */
	if (mei_hdr->reserved || !dev->rd_msg_hdr) {
		dev_dbg(&dev->pdev->dev, "corrupted message header.\n");
		ret = -EBADMSG;
		goto end;
	}

	/* a client-addressed message must match a known file client */
	if (mei_hdr->host_addr || mei_hdr->me_addr) {
		list_for_each_entry_safe(cl_pos, cl_next,
					&dev->file_list, link) {
			dev_dbg(&dev->pdev->dev,
					"list_for_each_entry_safe read host"
					" client = %d, ME client = %d\n",
					cl_pos->host_client_id,
					cl_pos->me_client_id);
			if (cl_pos->host_client_id == mei_hdr->host_addr &&
			    cl_pos->me_client_id == mei_hdr->me_addr)
				break;
		}

		/* loop ran to completion without a match */
		if (&cl_pos->link == &dev->file_list) {
			dev_dbg(&dev->pdev->dev, "corrupted message header\n");
			ret = -EBADMSG;
			goto end;
		}
	}
	/* not enough slots yet for the payload: keep the cached header
	 * and retry on the next interrupt */
	if (((*slots) * sizeof(u32)) < mei_hdr->length) {
		dev_dbg(&dev->pdev->dev,
				"we can't read the message slots =%08x.\n",
				*slots);
		/* we can't read the message */
		ret = -ERANGE;
		goto end;
	}

	/* decide where to read the message too */
	if (!mei_hdr->host_addr) {
		/* host address 0 addresses the bus itself: HBM traffic */
		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_bus_message.\n");
		mei_hbm_dispatch(dev, mei_hdr);
		dev_dbg(&dev->pdev->dev, "end mei_irq_thread_read_bus_message.\n");
	} else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
		   (MEI_FILE_CONNECTED == dev->iamthif_cl.state) &&
		   (dev->iamthif_state == MEI_IAMTHIF_READING)) {
		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n");

		dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

		ret = mei_amthif_irq_read_message(cmpl_list, dev, mei_hdr);
		if (ret)
			goto end;
	} else {
		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_client_message.\n");
		ret = mei_irq_thread_read_client_message(cmpl_list,
							 dev, mei_hdr);
		if (ret)
			goto end;

	}

	/* reset the number of slots and header */
	*slots = mei_count_full_read_slots(dev);
	dev->rd_msg_hdr = 0;

	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_dbg(&dev->pdev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
		ret = -ERANGE;
		goto end;
	}
end:
	return ret;
}
418 
419 
/**
 * mei_irq_write_handler -  dispatch write requests
 *  after irq received
 *
 * Runs all pending host-to-firmware transmissions in priority order:
 * write completions, watchdog stop wakeups, a buffered extra message,
 * a pending watchdog message, control (connect/disconnect/flow-control)
 * requests, and finally regular client writes.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 *
 * returns 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
{

	struct mei_cl *cl;
	struct mei_cl_cb *pos = NULL, *next = NULL;
	struct mei_cl_cb *list;
	s32 slots;
	int ret;

	/* nothing can be written until the host buffer is empty */
	if (!mei_hbuf_is_ready(dev)) {
		dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");
		return 0;
	}
	slots = mei_hbuf_empty_slots(dev);
	if (slots <= 0)
		return -EMSGSIZE;

	/* complete all waiting for write CB */
	dev_dbg(&dev->pdev->dev, "complete all waiting for write cb.\n");

	list = &dev->write_waiting_list;
	list_for_each_entry_safe(pos, next, &list->list, list) {
		cl = pos->cl;
		if (cl == NULL)
			continue;

		cl->status = 0;
		list_del(&pos->list);
		/* NOTE(review): a cb that fails the condition below is
		 * unlinked here and never re-queued — confirm it is
		 * freed elsewhere and not leaked. */
		if (MEI_WRITING == cl->writing_state &&
		    pos->fop_type == MEI_FOP_WRITE &&
		    cl != &dev->iamthif_cl) {
			dev_dbg(&dev->pdev->dev, "MEI WRITE COMPLETE\n");
			cl->writing_state = MEI_WRITE_COMPLETE;
			list_add_tail(&pos->list, &cmpl_list->list);
		}
		if (cl == &dev->iamthif_cl) {
			dev_dbg(&dev->pdev->dev, "check iamthif flow control.\n");
			if (dev->iamthif_flow_control_pending) {
				ret = mei_amthif_irq_read(dev, &slots);
				if (ret)
					return ret;
			}
		}
	}

	/* the watchdog stopped: let any waiter in wd-stop proceed */
	if (dev->wd_state == MEI_WD_STOPPING) {
		dev->wd_state = MEI_WD_IDLE;
		wake_up_interruptible(&dev->wait_stop_wd);
	}

	/* a single buffered out-of-band message takes priority */
	if (dev->wr_ext_msg.hdr.length) {
		mei_write_message(dev, &dev->wr_ext_msg.hdr,
				dev->wr_ext_msg.data);
		slots -= mei_data2slots(dev->wr_ext_msg.hdr.length);
		dev->wr_ext_msg.hdr.length = 0;
	}
	if (dev->dev_state == MEI_DEV_ENABLED) {
		/* send a pending watchdog message if credits allow */
		if (dev->wd_pending &&
		    mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
			if (mei_wd_send(dev))
				dev_dbg(&dev->pdev->dev, "wd send failed.\n");
			else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl))
				return -ENODEV;

			dev->wd_pending = false;

			/* account for the slots the wd message consumed */
			if (dev->wd_state == MEI_WD_RUNNING)
				slots -= mei_data2slots(MEI_WD_START_MSG_SIZE);
			else
				slots -= mei_data2slots(MEI_WD_STOP_MSG_SIZE);
		}
	}

	/* complete control write list CB */
	dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(pos, next, &dev->ctrl_wr_list.list, list) {
		cl = pos->cl;
		if (!cl) {
			list_del(&pos->list);
			return -ENODEV;
		}
		switch (pos->fop_type) {
		case MEI_FOP_CLOSE:
			/* send disconnect message */
			ret = _mei_irq_thread_close(dev, &slots, pos,
						cl, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_READ:
			/* send flow control message */
			ret = _mei_irq_thread_read(dev, &slots, pos,
						cl, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_IOCTL:
			/* connect message */
			if (mei_cl_is_other_connecting(cl))
				continue;
			ret = _mei_irq_thread_ioctl(dev, &slots, pos,
						cl, cmpl_list);
			if (ret)
				return ret;

			break;

		default:
			/* no other fop types belong on the control list */
			BUG();
		}

	}
	/* complete  write list CB */
	dev_dbg(&dev->pdev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(pos, next, &dev->write_list.list, list) {
		cl = pos->cl;
		if (cl == NULL)
			continue;
		if (mei_cl_flow_ctrl_creds(cl) <= 0) {
			dev_dbg(&dev->pdev->dev,
				"No flow control credentials for client %d, not sending.\n",
				cl->host_client_id);
			continue;
		}

		if (cl == &dev->iamthif_cl)
			ret = mei_amthif_irq_write_complete(dev, &slots,
							pos, cmpl_list);
		else
			ret = mei_irq_thread_write_complete(dev, &slots, pos,
						cmpl_list);
		if (ret)
			return ret;

	}
	return 0;
}
568 
569 
570 
/**
 * mei_timer - timer function.
 *
 * Periodic watchdog for the device: decrements the various per-device
 * and per-client timeout counters and resets the device when one of
 * them expires. Re-arms itself every 2 seconds.
 *
 * @work: pointer to the work_struct structure
 *
 * NOTE: This function is called by timer interrupt work
 */
void mei_timer(struct work_struct *work)
{
	unsigned long timeout;
	struct mei_cl *cl_pos = NULL;
	struct mei_cl *cl_next = NULL;
	struct mei_cl_cb  *cb_pos = NULL;
	struct mei_cl_cb  *cb_next = NULL;

	struct mei_device *dev = container_of(work,
					struct mei_device, timer_work.work);


	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		/* while initializing, only watch the client-init handshake
		 * timeout; reset the device if it expires */
		if (dev->dev_state == MEI_DEV_INIT_CLIENTS) {
			if (dev->init_clients_timer) {
				if (--dev->init_clients_timer == 0) {
					dev_dbg(&dev->pdev->dev, "IMEI reset due to init clients timeout ,init clients state = %d.\n",
						dev->init_clients_state);
					mei_reset(dev, 1);
				}
			}
		}
		goto out;
	}
	/*** connect/disconnect timeouts ***/
	list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
		if (cl_pos->timer_count) {
			if (--cl_pos->timer_count == 0) {
				dev_dbg(&dev->pdev->dev, "HECI reset due to connect/disconnect timeout.\n");
				mei_reset(dev, 1);
				goto out;
			}
		}
	}

	/* AMTHI request stalled in firmware: reset and restart the queue */
	if (dev->iamthif_stall_timer) {
		if (--dev->iamthif_stall_timer == 0) {
			dev_dbg(&dev->pdev->dev, "resetting because of hang to amthi.\n");
			mei_reset(dev, 1);
			dev->iamthif_msg_buf_size = 0;
			dev->iamthif_msg_buf_index = 0;
			dev->iamthif_canceled = false;
			dev->iamthif_ioctl = true;
			dev->iamthif_state = MEI_IAMTHIF_IDLE;
			dev->iamthif_timer = 0;

			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object = NULL;
			mei_amthif_run_next_cmd(dev);
		}
	}

	if (dev->iamthif_timer) {

		timeout = dev->iamthif_timer +
			mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

		dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
				dev->iamthif_timer);
		dev_dbg(&dev->pdev->dev, "timeout = %ld\n", timeout);
		dev_dbg(&dev->pdev->dev, "jiffies = %ld\n", jiffies);
		if (time_after(jiffies, timeout)) {
			/*
			 * User didn't read the AMTHI data on time (15sec)
			 * freeing AMTHI for other requests
			 */

			dev_dbg(&dev->pdev->dev, "freeing AMTHI for other requests\n");

			/* drop every completed-but-unread AMTHI read cb */
			list_for_each_entry_safe(cb_pos, cb_next,
				&dev->amthif_rd_complete_list.list, list) {

				cl_pos = cb_pos->file_object->private_data;

				/* Finding the AMTHI entry. */
				if (cl_pos == &dev->iamthif_cl)
					list_del(&cb_pos->list);
			}
			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			/* NOTE(review): iamthif_file_object is dereferenced
			 * without a NULL check — presumably non-NULL whenever
			 * iamthif_timer is armed; confirm. */
			dev->iamthif_file_object->private_data = NULL;
			dev->iamthif_file_object = NULL;
			dev->iamthif_timer = 0;
			mei_amthif_run_next_cmd(dev);

		}
	}
out:
	/* re-arm ourselves: the polling period is 2 seconds */
	schedule_delayed_work(&dev->timer_work, 2 * HZ);
	mutex_unlock(&dev->device_lock);
}
673 
674