xref: /linux/drivers/misc/mei/interrupt.c (revision f37130533f68711fd6bae2c79950b8e72002bad6)
1 /*
2  *
3  * Intel Management Engine Interface (Intel MEI) Linux driver
4  * Copyright (c) 2003-2012, Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  */
16 
17 
18 #include <linux/pci.h>
19 #include <linux/kthread.h>
20 #include <linux/interrupt.h>
21 #include <linux/fs.h>
22 #include <linux/jiffies.h>
23 
24 #include <linux/mei.h>
25 
26 #include "mei_dev.h"
27 #include "hbm.h"
28 #include "hw-me.h"
29 #include "client.h"
30 
31 
32 /**
33  * mei_irq_complete_handler - processes completed operation.
34  *
35  * @cl: private data of the file object.
36  * @cb_pos: callback block.
37  */
38 void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
39 {
40 	if (cb_pos->fop_type == MEI_FOP_WRITE) {
41 		mei_io_cb_free(cb_pos);
42 		cb_pos = NULL;
43 		cl->writing_state = MEI_WRITE_COMPLETE;
44 		if (waitqueue_active(&cl->tx_wait))
45 			wake_up_interruptible(&cl->tx_wait);
46 
47 	} else if (cb_pos->fop_type == MEI_FOP_READ &&
48 			MEI_READING == cl->reading_state) {
49 		cl->reading_state = MEI_READ_COMPLETE;
50 		if (waitqueue_active(&cl->rx_wait))
51 			wake_up_interruptible(&cl->rx_wait);
52 
53 	}
54 }
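/*
 * Note: the read and write handlers below only move finished callbacks onto
 * a completion list; this routine is what finally wakes a reader or writer
 * sleeping on cl->rx_wait / cl->tx_wait, and it is assumed to be run by the
 * interrupt thread for each entry on that completion list.
 */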
55 
56 /**
57  * _mei_irq_thread_state_ok - checks if mei header matches file private data
58  *
59  * @cl: private data of the file object
60  * @mei_hdr: header of mei client message
61  *
62  * returns !=0 if matches, 0 if no match.
63  */
64 static int _mei_irq_thread_state_ok(struct mei_cl *cl,
65 				struct mei_msg_hdr *mei_hdr)
66 {
67 	return (cl->host_client_id == mei_hdr->host_addr &&
68 		cl->me_client_id == mei_hdr->me_addr &&
69 		cl->state == MEI_FILE_CONNECTED &&
70 		MEI_READ_COMPLETE != cl->reading_state);
71 }
72 
73 /**
74  * mei_irq_thread_read_client_message - bottom half read routine after ISR
75  * that handles reading of a client message into a pending read callback.
76  *
77  * @complete_list: list of completed read callbacks
78  * @dev: the device structure
79  * @mei_hdr: header of mei client message
80  *
81  * returns 0 on success, <0 on failure.
82  */
83 static int mei_irq_thread_read_client_message(struct mei_cl_cb *complete_list,
84 		struct mei_device *dev,
85 		struct mei_msg_hdr *mei_hdr)
86 {
87 	struct mei_cl *cl;
88 	struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
89 	unsigned char *buffer = NULL;
90 
91 	dev_dbg(&dev->pdev->dev, "start client msg\n");
92 	if (list_empty(&dev->read_list.list))
93 		goto quit;
94 
95 	list_for_each_entry_safe(cb_pos, cb_next, &dev->read_list.list, list) {
96 		cl = cb_pos->cl;
97 		if (cl && _mei_irq_thread_state_ok(cl, mei_hdr)) {
98 			cl->reading_state = MEI_READING;
99 			buffer = cb_pos->response_buffer.data + cb_pos->buf_idx;
100 
101 			if (cb_pos->response_buffer.size <
102 					mei_hdr->length + cb_pos->buf_idx) {
103 				dev_dbg(&dev->pdev->dev, "message overflow.\n");
104 				list_del(&cb_pos->list);
105 				return -ENOMEM;
106 			}
107 			if (buffer)
108 				mei_read_slots(dev, buffer, mei_hdr->length);
109 
110 			cb_pos->buf_idx += mei_hdr->length;
111 			if (mei_hdr->msg_complete) {
112 				cl->status = 0;
113 				list_del(&cb_pos->list);
114 				dev_dbg(&dev->pdev->dev,
115 					"completed read H cl = %d, ME cl = %d, length = %lu\n",
116 					cl->host_client_id,
117 					cl->me_client_id,
118 					cb_pos->buf_idx);
119 
120 				list_add_tail(&cb_pos->list,
121 						&complete_list->list);
122 			}
123 
124 			break;
125 		}
126 
127 	}
128 
129 quit:
130 	dev_dbg(&dev->pdev->dev, "message read\n");
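	/*
	 * If no pending read callback matched the header, the payload still
	 * has to be drained from the hardware read buffer so that its slots
	 * are freed; it is read into the scratch rd_msg_buf and discarded.
	 */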
131 	if (!buffer) {
132 		mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
133 		dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n",
134 				MEI_HDR_PRM(mei_hdr));
135 	}
136 
137 	return 0;
138 }
139 
140 /**
141  * _mei_irq_thread_close - processes close related operation.
142  *
143  * @dev: the device structure.
144  * @slots: free slots.
145  * @cb_pos: callback block.
146  * @cl: private data of the file object.
147  * @cmpl_list: complete list.
148  *
149  * returns 0 on success, <0 on failure.
150  */
151 static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
152 				struct mei_cl_cb *cb_pos,
153 				struct mei_cl *cl,
154 				struct mei_cl_cb *cmpl_list)
155 {
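	/*
	 * Not enough room in the host buffer for a header plus a disconnect
	 * request on this pass; the callback stays on the control write list,
	 * so the close is retried on a later run of the write handler.
	 */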
156 	if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
157 			sizeof(struct hbm_client_connect_request)))
158 		return -EBADMSG;
159 
160 	*slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
161 
162 	if (mei_hbm_cl_disconnect_req(dev, cl)) {
163 		cl->status = 0;
164 		cb_pos->buf_idx = 0;
165 		list_move_tail(&cb_pos->list, &cmpl_list->list);
166 		return -EMSGSIZE;
167 	} else {
168 		cl->state = MEI_FILE_DISCONNECTING;
169 		cl->status = 0;
170 		cb_pos->buf_idx = 0;
171 		list_move_tail(&cb_pos->list, &dev->ctrl_rd_list.list);
172 		cl->timer_count = MEI_CONNECT_TIMEOUT;
173 	}
174 
175 	return 0;
176 }
177 
178 
179 /**
180  * _mei_irq_thread_read - processes read related operation.
181  *
182  * @dev: the device structure.
183  * @slots: free slots.
184  * @cb_pos: callback block.
185  * @cl: private data of the file object.
186  * @cmpl_list: complete list.
187  *
188  * returns 0 on success, <0 on failure.
189  */
190 static int _mei_irq_thread_read(struct mei_device *dev,	s32 *slots,
191 			struct mei_cl_cb *cb_pos,
192 			struct mei_cl *cl,
193 			struct mei_cl_cb *cmpl_list)
194 {
195 	if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
196 			sizeof(struct hbm_flow_control))) {
197 		/* not enough slots: remove the callback (cancel) */
198 		list_del(&cb_pos->list);
199 		return -EBADMSG;
200 	}
201 
202 	*slots -= mei_data2slots(sizeof(struct hbm_flow_control));
203 
204 	if (mei_hbm_cl_flow_control_req(dev, cl)) {
205 		cl->status = -ENODEV;
206 		cb_pos->buf_idx = 0;
207 		list_move_tail(&cb_pos->list, &cmpl_list->list);
208 		return -ENODEV;
209 	}
210 	list_move_tail(&cb_pos->list, &dev->read_list.list);
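	/*
	 * The callback now waits on dev->read_list; once the ME client uses
	 * the flow control credit granted above and sends data, it is matched
	 * against this entry in mei_irq_thread_read_client_message().
	 */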
211 
212 	return 0;
213 }
214 
215 
216 /**
217  * _mei_irq_thread_ioctl - processes ioctl related operation.
218  *
219  * @dev: the device structure.
220  * @slots: free slots.
221  * @cb_pos: callback block.
222  * @cl: private data of the file object.
223  * @cmpl_list: complete list.
224  *
225  * returns 0 on success, <0 on failure.
226  */
227 static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
228 			struct mei_cl_cb *cb_pos,
229 			struct mei_cl *cl,
230 			struct mei_cl_cb *cmpl_list)
231 {
232 	if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
233 			sizeof(struct hbm_client_connect_request))) {
234 		/* not enough slots: remove the callback (cancel) */
235 		list_del(&cb_pos->list);
236 		return -EBADMSG;
237 	}
238 
239 	cl->state = MEI_FILE_CONNECTING;
240 	*slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
241 	if (mei_hbm_cl_connect_req(dev, cl)) {
242 		cl->status = -ENODEV;
243 		cb_pos->buf_idx = 0;
244 		list_del(&cb_pos->list);
245 		return -ENODEV;
246 	} else {
247 		list_move_tail(&cb_pos->list, &dev->ctrl_rd_list.list);
248 		cl->timer_count = MEI_CONNECT_TIMEOUT;
249 	}
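	/*
	 * The connect response is awaited on dev->ctrl_rd_list; timer_count
	 * arms the connect/disconnect timeout that mei_timer() escalates to
	 * a device reset if the ME side never answers.
	 */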
250 	return 0;
251 }
252 
253 /**
254  * mei_irq_thread_write_complete - write messages to device.
255  *
256  * @dev: the device structure.
257  * @slots: free slots.
258  * @cb: callback block.
259  * @cmpl_list: complete list.
260  *
261  * returns 0 on success, <0 on failure.
262  */
263 static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
264 			struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
265 {
266 	struct mei_msg_hdr mei_hdr;
267 	struct mei_cl *cl = cb->cl;
268 	size_t len = cb->request_buffer.size - cb->buf_idx;
269 	size_t msg_slots = mei_data2slots(len);
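	/*
	 * Slot arithmetic sketch: one slot is a u32, and mei_data2slots() is
	 * assumed here to round sizeof(struct mei_msg_hdr) + len up to whole
	 * slots; e.g. a 10 byte remainder with a 4 byte header needs
	 * DIV_ROUND_UP(14, 4) = 4 slots.
	 */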
270 
271 	mei_hdr.host_addr = cl->host_client_id;
272 	mei_hdr.me_addr = cl->me_client_id;
273 	mei_hdr.reserved = 0;
274 
275 	if (*slots >= msg_slots) {
276 		mei_hdr.length = len;
277 		mei_hdr.msg_complete = 1;
278 	/* Split the message only if we can write the whole host buffer */
279 	} else if (*slots == dev->hbuf_depth) {
280 		msg_slots = *slots;
281 		len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
282 		mei_hdr.length = len;
283 		mei_hdr.msg_complete = 0;
284 	} else {
285 		/* wait for next time the host buffer is empty */
286 		return 0;
287 	}
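	/*
	 * When the message is split, only mei_hdr.length bytes go out now
	 * with msg_complete = 0; cb->buf_idx (updated below) records how far
	 * we got and the remainder is written on a later pass, once the host
	 * buffer is reported empty again.
	 */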
288 
289 	dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n",
290 			cb->request_buffer.size, cb->buf_idx);
291 	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
292 
293 	*slots -=  msg_slots;
294 	if (mei_write_message(dev, &mei_hdr,
295 			cb->request_buffer.data + cb->buf_idx)) {
296 		cl->status = -ENODEV;
297 		list_move_tail(&cb->list, &cmpl_list->list);
298 		return -ENODEV;
299 	}
300 
301 	if (mei_cl_flow_ctrl_reduce(cl))
302 		return -ENODEV;
303 
304 	cl->status = 0;
305 	cb->buf_idx += mei_hdr.length;
306 	if (mei_hdr.msg_complete)
307 		list_move_tail(&cb->list, &dev->write_waiting_list.list);
308 
309 	return 0;
310 }
311 
312 /**
313  * mei_irq_read_handler - bottom half read routine after ISR to
314  * handle the read processing.
315  *
316  * @dev: the device structure
317  * @cmpl_list: list of completed callbacks
318  * @slots: slots to read.
319  *
320  * returns 0 on success, <0 on failure.
321  */
322 int mei_irq_read_handler(struct mei_device *dev,
323 		struct mei_cl_cb *cmpl_list, s32 *slots)
324 {
325 	struct mei_msg_hdr *mei_hdr;
326 	struct mei_cl *cl_pos = NULL;
327 	struct mei_cl *cl_next = NULL;
328 	int ret = 0;
329 
330 	if (!dev->rd_msg_hdr) {
331 		dev->rd_msg_hdr = mei_read_hdr(dev);
332 		dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
333 		(*slots)--;
334 		dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
335 	}
336 	mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
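	/*
	 * dev->rd_msg_hdr caches the 32-bit header just read, so a message
	 * whose payload cannot be consumed in this pass (the -ERANGE path
	 * below) is re-examined with the same header on the next invocation
	 * instead of the header being re-read from the hardware.
	 */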
337 	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
338 
339 	if (mei_hdr->reserved || !dev->rd_msg_hdr) {
340 		dev_dbg(&dev->pdev->dev, "corrupted message header.\n");
341 		ret = -EBADMSG;
342 		goto end;
343 	}
344 
345 	if (mei_hdr->host_addr || mei_hdr->me_addr) {
346 		list_for_each_entry_safe(cl_pos, cl_next,
347 					&dev->file_list, link) {
348 			dev_dbg(&dev->pdev->dev,
349 					"list_for_each_entry_safe read host"
350 					" client = %d, ME client = %d\n",
351 					cl_pos->host_client_id,
352 					cl_pos->me_client_id);
353 			if (cl_pos->host_client_id == mei_hdr->host_addr &&
354 			    cl_pos->me_client_id == mei_hdr->me_addr)
355 				break;
356 		}
357 
358 		if (&cl_pos->link == &dev->file_list) {
359 			dev_dbg(&dev->pdev->dev, "corrupted message header\n");
360 			ret = -EBADMSG;
361 			goto end;
362 		}
363 	}
364 	if (((*slots) * sizeof(u32)) < mei_hdr->length) {
365 		dev_dbg(&dev->pdev->dev,
366 				"we can't read the message slots =%08x.\n",
367 				*slots);
368 		/* we can't read the message */
369 		ret = -ERANGE;
370 		goto end;
371 	}
372 
373 	/* decide where to read the message to */
374 	if (!mei_hdr->host_addr) {
375 		dev_dbg(&dev->pdev->dev, "call mei_hbm_dispatch.\n");
376 		mei_hbm_dispatch(dev, mei_hdr);
377 		dev_dbg(&dev->pdev->dev, "end mei_hbm_dispatch.\n");
378 	} else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
379 		   (MEI_FILE_CONNECTED == dev->iamthif_cl.state) &&
380 		   (dev->iamthif_state == MEI_IAMTHIF_READING)) {
381 		dev_dbg(&dev->pdev->dev, "call mei_amthif_irq_read_message.\n");
382 
383 		dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
384 
385 		ret = mei_amthif_irq_read_message(cmpl_list, dev, mei_hdr);
386 		if (ret)
387 			goto end;
388 	} else {
389 		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_client_message.\n");
390 		ret = mei_irq_thread_read_client_message(cmpl_list,
391 							 dev, mei_hdr);
392 		if (ret)
393 			goto end;
394 
395 	}
396 
397 	/* reset the number of slots and header */
398 	*slots = mei_count_full_read_slots(dev);
399 	dev->rd_msg_hdr = 0;
400 
401 	if (*slots == -EOVERFLOW) {
402 		/* overflow - reset */
403 		dev_dbg(&dev->pdev->dev, "resetting due to slots overflow.\n");
404 		/* set the event since message has been read */
405 		ret = -ERANGE;
406 		goto end;
407 	}
408 end:
409 	return ret;
410 }
411 
412 
413 /**
414  * mei_irq_write_handler - dispatch write requests
415  * after an irq has been received
416  *
417  * @dev: the device structure
418  * @cmpl_list: list of completed callbacks
419  *
420  * returns 0 on success, <0 on failure.
421  */
422 int mei_irq_write_handler(struct mei_device *dev,
423 				struct mei_cl_cb *cmpl_list)
424 {
425 
426 	struct mei_cl *cl;
427 	struct mei_cl_cb *pos = NULL, *next = NULL;
428 	struct mei_cl_cb *list;
429 	s32 slots;
430 	int ret;
431 
432 	if (!mei_hbuf_is_ready(dev)) {
433 		dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");
434 		return 0;
435 	}
436 	slots = mei_hbuf_empty_slots(dev);
437 	if (slots <= 0)
438 		return -EMSGSIZE;
439 
440 	/* complete all waiting for write CB */
441 	dev_dbg(&dev->pdev->dev, "complete all waiting for write cb.\n");
442 
443 	list = &dev->write_waiting_list;
444 	list_for_each_entry_safe(pos, next, &list->list, list) {
445 		cl = pos->cl;
446 		if (cl == NULL)
447 			continue;
448 
449 		cl->status = 0;
450 		list_del(&pos->list);
451 		if (MEI_WRITING == cl->writing_state &&
452 		    pos->fop_type == MEI_FOP_WRITE &&
453 		    cl != &dev->iamthif_cl) {
454 			dev_dbg(&dev->pdev->dev, "MEI WRITE COMPLETE\n");
455 			cl->writing_state = MEI_WRITE_COMPLETE;
456 			list_add_tail(&pos->list, &cmpl_list->list);
457 		}
458 		if (cl == &dev->iamthif_cl) {
459 			dev_dbg(&dev->pdev->dev, "check iamthif flow control.\n");
460 			if (dev->iamthif_flow_control_pending) {
461 				ret = mei_amthif_irq_read(dev, &slots);
462 				if (ret)
463 					return ret;
464 			}
465 		}
466 	}
467 
468 	if (dev->wd_state == MEI_WD_STOPPING) {
469 		dev->wd_state = MEI_WD_IDLE;
470 		wake_up_interruptible(&dev->wait_stop_wd);
471 	}
472 
473 	if (dev->wr_ext_msg.hdr.length) {
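	/*
	 * dev->wr_ext_msg holds a single out-of-band message queued by other
	 * code paths; a non-zero header length serves as its "pending" flag
	 * and is cleared below once the message has been pushed out.
	 */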
474 		mei_write_message(dev, &dev->wr_ext_msg.hdr,
475 				dev->wr_ext_msg.data);
476 		slots -= mei_data2slots(dev->wr_ext_msg.hdr.length);
477 		dev->wr_ext_msg.hdr.length = 0;
478 	}
479 	if (dev->dev_state == MEI_DEV_ENABLED) {
480 		if (dev->wd_pending &&
481 		    mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
482 			if (mei_wd_send(dev))
483 				dev_dbg(&dev->pdev->dev, "wd send failed.\n");
484 			else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl))
485 				return -ENODEV;
486 
487 			dev->wd_pending = false;
488 
489 			if (dev->wd_state == MEI_WD_RUNNING)
490 				slots -= mei_data2slots(MEI_WD_START_MSG_SIZE);
491 			else
492 				slots -= mei_data2slots(MEI_WD_STOP_MSG_SIZE);
493 		}
494 	}
495 
496 	/* complete control write list CB */
497 	dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
498 	list_for_each_entry_safe(pos, next, &dev->ctrl_wr_list.list, list) {
499 		cl = pos->cl;
500 		if (!cl) {
501 			list_del(&pos->list);
502 			return -ENODEV;
503 		}
504 		switch (pos->fop_type) {
505 		case MEI_FOP_CLOSE:
506 			/* send disconnect message */
507 			ret = _mei_irq_thread_close(dev, &slots, pos,
508 						cl, cmpl_list);
509 			if (ret)
510 				return ret;
511 
512 			break;
513 		case MEI_FOP_READ:
514 			/* send flow control message */
515 			ret = _mei_irq_thread_read(dev, &slots, pos,
516 						cl, cmpl_list);
517 			if (ret)
518 				return ret;
519 
520 			break;
521 		case MEI_FOP_IOCTL:
522 			/* connect message */
523 			if (mei_cl_is_other_connecting(cl))
524 				continue;
525 			ret = _mei_irq_thread_ioctl(dev, &slots, pos,
526 						cl, cmpl_list);
527 			if (ret)
528 				return ret;
529 
530 			break;
531 
532 		default:
533 			BUG();
534 		}
535 
536 	}
537 	/* complete  write list CB */
538 	dev_dbg(&dev->pdev->dev, "complete write list cb.\n");
539 	list_for_each_entry_safe(pos, next, &dev->write_list.list, list) {
540 		cl = pos->cl;
541 		if (cl == NULL)
542 			continue;
543 		if (mei_cl_flow_ctrl_creds(cl) <= 0) {
544 			dev_dbg(&dev->pdev->dev,
545 				"No flow control credentials for client %d, not sending.\n",
546 				cl->host_client_id);
547 			continue;
548 		}
549 
550 		if (cl == &dev->iamthif_cl)
551 			ret = mei_amthif_irq_write_complete(dev, &slots,
552 							pos, cmpl_list);
553 		else
554 			ret = mei_irq_thread_write_complete(dev, &slots, pos,
555 						cmpl_list);
556 		if (ret)
557 			return ret;
558 
559 	}
560 	return 0;
561 }
562 
563 
564 
565 /**
566  * mei_timer - timer function.
567  *
568  * @work: pointer to the work_struct structure
569  *
570  * NOTE: This function is run from the periodic timer_work delayed work
571  */
572 void mei_timer(struct work_struct *work)
573 {
574 	unsigned long timeout;
575 	struct mei_cl *cl_pos = NULL;
576 	struct mei_cl *cl_next = NULL;
577 	struct mei_cl_cb  *cb_pos = NULL;
578 	struct mei_cl_cb  *cb_next = NULL;
579 
580 	struct mei_device *dev = container_of(work,
581 					struct mei_device, timer_work.work);
582 
583 
584 	mutex_lock(&dev->device_lock);
585 	if (dev->dev_state != MEI_DEV_ENABLED) {
586 		if (dev->dev_state == MEI_DEV_INIT_CLIENTS) {
587 			if (dev->init_clients_timer) {
588 				if (--dev->init_clients_timer == 0) {
589 					dev_dbg(&dev->pdev->dev, "MEI reset due to init clients timeout, init clients state = %d.\n",
590 						dev->init_clients_state);
591 					mei_reset(dev, 1);
592 				}
593 			}
594 		}
595 		goto out;
596 	}
597 	/*** connect/disconnect timeouts ***/
598 	list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
599 		if (cl_pos->timer_count) {
600 			if (--cl_pos->timer_count == 0) {
601 				dev_dbg(&dev->pdev->dev, "MEI reset due to connect/disconnect timeout.\n");
602 				mei_reset(dev, 1);
603 				goto out;
604 			}
605 		}
606 	}
607 
608 	if (dev->iamthif_stall_timer) {
609 		if (--dev->iamthif_stall_timer == 0) {
610 			dev_dbg(&dev->pdev->dev, "resetting due to amthi hang.\n");
611 			mei_reset(dev, 1);
612 			dev->iamthif_msg_buf_size = 0;
613 			dev->iamthif_msg_buf_index = 0;
614 			dev->iamthif_canceled = false;
615 			dev->iamthif_ioctl = true;
616 			dev->iamthif_state = MEI_IAMTHIF_IDLE;
617 			dev->iamthif_timer = 0;
618 
619 			mei_io_cb_free(dev->iamthif_current_cb);
620 			dev->iamthif_current_cb = NULL;
621 
622 			dev->iamthif_file_object = NULL;
623 			mei_amthif_run_next_cmd(dev);
624 		}
625 	}
626 
627 	if (dev->iamthif_timer) {
628 
629 		timeout = dev->iamthif_timer +
630 			mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
631 
632 		dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
633 				dev->iamthif_timer);
634 		dev_dbg(&dev->pdev->dev, "timeout = %ld\n", timeout);
635 		dev_dbg(&dev->pdev->dev, "jiffies = %ld\n", jiffies);
636 		if (time_after(jiffies, timeout)) {
637 			/*
638 			 * User didn't read the AMTHI data on time (15sec)
639 			 * freeing AMTHI for other requests
640 			 */
641 
642 			dev_dbg(&dev->pdev->dev, "freeing AMTHI for other requests\n");
643 
644 			list_for_each_entry_safe(cb_pos, cb_next,
645 				&dev->amthif_rd_complete_list.list, list) {
646 
647 				cl_pos = cb_pos->file_object->private_data;
648 
649 				/* Finding the AMTHI entry. */
650 				if (cl_pos == &dev->iamthif_cl)
651 					list_del(&cb_pos->list);
652 			}
653 			mei_io_cb_free(dev->iamthif_current_cb);
654 			dev->iamthif_current_cb = NULL;
655 
656 			dev->iamthif_file_object->private_data = NULL;
657 			dev->iamthif_file_object = NULL;
658 			dev->iamthif_timer = 0;
659 			mei_amthif_run_next_cmd(dev);
660 
661 		}
662 	}
663 out:
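	/*
	 * Re-arm the periodic housekeeping work: each counter decremented
	 * above ticks roughly once every two seconds (2 * HZ).
	 */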
664 	schedule_delayed_work(&dev->timer_work, 2 * HZ);
665 	mutex_unlock(&dev->device_lock);
666 }
667 
668