/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */


#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"


/**
 * mei_irq_compl_handler - dispatch complete handlers
 *      for the completed callbacks
 *
 * @dev: mei device
 * @cmpl_list: list of completed cbs
 */
void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list)
{
        struct mei_cl_cb *cb, *next;
        struct mei_cl *cl;

        list_for_each_entry_safe(cb, next, cmpl_list, list) {
                cl = cb->cl;
                list_del_init(&cb->list);

                dev_dbg(dev->dev, "completing call back.\n");
                mei_cl_complete(cl, cb);
        }
}
EXPORT_SYMBOL_GPL(mei_irq_compl_handler);

/**
 * mei_cl_hbm_equal - check if hbm is addressed to the client
 *
 * @cl: host client
 * @mei_hdr: header of mei client message
 *
 * Return: true if matches, false otherwise
 */
static inline int mei_cl_hbm_equal(struct mei_cl *cl,
                                   struct mei_msg_hdr *mei_hdr)
{
        return mei_cl_host_addr(cl) == mei_hdr->host_addr &&
                mei_cl_me_id(cl) == mei_hdr->me_addr;
}

/**
 * mei_irq_discard_msg - discard received message
 *
 * @dev: mei device
 * @hdr: message header
 */
static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
{
        /*
         * no need to check for size as it is guaranteed
         * that length fits into rd_msg_buf
         */
        mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
        dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
                MEI_HDR_PRM(hdr));
}

/**
 * mei_cl_irq_read_msg - process client message
 *
 * @cl: reading client
 * @mei_hdr: header of mei client message
 * @cmpl_list: completion list
 *
 * Return: always 0
 */
static int mei_cl_irq_read_msg(struct mei_cl *cl,
                               struct mei_msg_hdr *mei_hdr,
                               struct list_head *cmpl_list)
{
        struct mei_device *dev = cl->dev;
        struct mei_cl_cb *cb;
        size_t buf_sz;

        cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
        if (!cb) {
                if (!mei_cl_is_fixed_address(cl)) {
                        cl_err(dev, cl, "pending read cb not found\n");
                        goto discard;
                }
                cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp);
                if (!cb)
                        goto discard;
                list_add_tail(&cb->list, &cl->rd_pending);
        }

        if (!mei_cl_is_connected(cl)) {
                cl_dbg(dev, cl, "not connected\n");
                cb->status = -ENODEV;
                goto discard;
        }

        buf_sz = mei_hdr->length + cb->buf_idx;
        /* catch for integer overflow */
        if (buf_sz < cb->buf_idx) {
                cl_err(dev, cl, "message is too big len %d idx %zu\n",
                       mei_hdr->length, cb->buf_idx);
                cb->status = -EMSGSIZE;
                goto discard;
        }

        if (cb->buf.size < buf_sz) {
                cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
                       cb->buf.size, mei_hdr->length, cb->buf_idx);
                cb->status = -EMSGSIZE;
                goto discard;
        }

        mei_read_slots(dev, cb->buf.data + cb->buf_idx, mei_hdr->length);

        cb->buf_idx += mei_hdr->length;

        if (mei_hdr->msg_complete) {
                cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
                list_move_tail(&cb->list, cmpl_list);
        } else {
                pm_runtime_mark_last_busy(dev->dev);
                pm_request_autosuspend(dev->dev);
        }

        return 0;

discard:
        if (cb)
                list_move_tail(&cb->list, cmpl_list);
        mei_irq_discard_msg(dev, mei_hdr);
        return 0;
}

/**
 * mei_cl_irq_disconnect_rsp - send disconnection response message
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
                                     struct list_head *cmpl_list)
{
        struct mei_device *dev = cl->dev;
        u32 msg_slots;
        int slots;
        int ret;

        slots = mei_hbuf_empty_slots(dev);
        msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response));

        if (slots < msg_slots)
                return -EMSGSIZE;

        ret = mei_hbm_cl_disconnect_rsp(dev, cl);
        list_move_tail(&cb->list, cmpl_list);

        return ret;
}

/**
 * mei_cl_irq_read - processes client read related operation from the
 *      interrupt thread context - request for flow control credits
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
                           struct list_head *cmpl_list)
{
        struct mei_device *dev = cl->dev;
        u32 msg_slots;
        int slots;
        int ret;

        if (!list_empty(&cl->rd_pending))
                return 0;

        msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
        slots = mei_hbuf_empty_slots(dev);

        if (slots < msg_slots)
                return -EMSGSIZE;

        ret = mei_hbm_cl_flow_control_req(dev, cl);
        if (ret) {
                cl->status = ret;
                cb->buf_idx = 0;
                list_move_tail(&cb->list, cmpl_list);
                return ret;
        }

        list_move_tail(&cb->list, &cl->rd_pending);

        return 0;
}

/**
 * hdr_is_hbm - check if the header is an hbm (bus) message header
 *
 * @mei_hdr: header of mei message
 *
 * Return: true if both host and me addresses are zero, false otherwise
 */
static inline bool hdr_is_hbm(struct mei_msg_hdr *mei_hdr)
{
        return mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0;
}

/**
 * hdr_is_fixed - check if the header addresses a fixed address client
 *
 * @mei_hdr: header of mei message
 *
 * Return: true if the host address is zero and the me address is
 *      non-zero, false otherwise
 */
static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
{
        return mei_hdr->host_addr == 0 && mei_hdr->me_addr != 0;
}

/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 * handle the read processing.
 *
 * @dev: the device structure
 * @cmpl_list: list of completed cbs
 * @slots: slots to read.
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
                         struct list_head *cmpl_list, s32 *slots)
{
        struct mei_msg_hdr *mei_hdr;
        struct mei_cl *cl;
        int ret;

        if (!dev->rd_msg_hdr) {
                dev->rd_msg_hdr = mei_read_hdr(dev);
                (*slots)--;
                dev_dbg(dev->dev, "slots =%08x.\n", *slots);
        }
        mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
        dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

        if (mei_hdr->reserved || !dev->rd_msg_hdr) {
                dev_err(dev->dev, "corrupted message header 0x%08X\n",
                        dev->rd_msg_hdr);
                ret = -EBADMSG;
                goto end;
        }

        if (mei_slots2data(*slots) < mei_hdr->length) {
                dev_err(dev->dev, "less data available than length=%08x.\n",
                        *slots);
                /* we can't read the message */
                ret = -ENODATA;
                goto end;
        }

        /* HBM message */
        if (hdr_is_hbm(mei_hdr)) {
                ret = mei_hbm_dispatch(dev, mei_hdr);
                if (ret) {
                        dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
                                ret);
                        goto end;
                }
                goto reset_slots;
        }

        /* find recipient cl */
        list_for_each_entry(cl, &dev->file_list, link) {
                if (mei_cl_hbm_equal(cl, mei_hdr)) {
                        cl_dbg(dev, cl, "got a message\n");
                        break;
                }
        }

        /* if no recipient cl was found we assume corrupted header */
        if (&cl->link == &dev->file_list) {
                /* A message for a fixed address client that is not
                 * connected should be silently discarded
                 */
                if (hdr_is_fixed(mei_hdr)) {
                        mei_irq_discard_msg(dev, mei_hdr);
                        ret = 0;
                        goto reset_slots;
                }
                dev_err(dev->dev, "no destination client found 0x%08X\n",
                        dev->rd_msg_hdr);
                ret = -EBADMSG;
                goto end;
        }

        ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);


reset_slots:
        /* reset the number of slots and header */
        *slots = mei_count_full_read_slots(dev);
        dev->rd_msg_hdr = 0;

        if (*slots == -EOVERFLOW) {
                /* overflow - reset */
                dev_err(dev->dev, "resetting due to slots overflow.\n");
                /* set the event since message has been read */
                ret = -ERANGE;
                goto end;
        }
end:
        return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);


/**
 * mei_irq_write_handler - dispatch write requests
 *      after irq received
 *
 * @dev: the device structure
 * @cmpl_list: list of completed cbs
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
{

        struct mei_cl *cl;
        struct mei_cl_cb *cb, *next;
        s32 slots;
        int ret;


        if (!mei_hbuf_acquire(dev))
                return 0;

        slots = mei_hbuf_empty_slots(dev);
        if (slots <= 0)
                return -EMSGSIZE;

        /* complete all waiting for write CB */
        dev_dbg(dev->dev, "complete all waiting for write cb.\n");

        list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) {
                cl = cb->cl;

                cl->status = 0;
                cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
                cl->writing_state = MEI_WRITE_COMPLETE;
                list_move_tail(&cb->list, cmpl_list);
        }

        /* complete control write list CB */
        dev_dbg(dev->dev, "complete control write list cb.\n");
        list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) {
                cl = cb->cl;
                switch (cb->fop_type) {
                case MEI_FOP_DISCONNECT:
                        /* send disconnect message */
                        ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
                        if (ret)
                                return ret;

                        break;
                case MEI_FOP_READ:
                        /* send flow control message */
                        ret = mei_cl_irq_read(cl, cb, cmpl_list);
                        if (ret)
                                return ret;

                        break;
                case MEI_FOP_CONNECT:
                        /* connect message */
                        ret = mei_cl_irq_connect(cl, cb, cmpl_list);
                        if (ret)
                                return ret;

                        break;
                case MEI_FOP_DISCONNECT_RSP:
                        /* send disconnect resp */
                        ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
                        if (ret)
                                return ret;
                        break;

                case MEI_FOP_NOTIFY_START:
                case MEI_FOP_NOTIFY_STOP:
                        ret = mei_cl_irq_notify(cl, cb, cmpl_list);
                        if (ret)
                                return ret;
                        break;
                default:
                        BUG();
                }

        }
        /* complete write list CB */
        dev_dbg(dev->dev, "complete write list cb.\n");
        list_for_each_entry_safe(cb, next, &dev->write_list, list) {
                cl = cb->cl;
                ret = mei_cl_irq_write(cl, cb, cmpl_list);
                if (ret)
                        return ret;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);


/**
 * mei_connect_timeout - connect/disconnect timeouts
 *
 * @cl: host client
 */
static void mei_connect_timeout(struct mei_cl *cl)
{
        struct mei_device *dev = cl->dev;

        if (cl->state == MEI_FILE_CONNECTING) {
                if (dev->hbm_f_dot_supported) {
                        cl->state = MEI_FILE_DISCONNECT_REQUIRED;
                        wake_up(&cl->wait);
                        return;
                }
        }
        mei_reset(dev);
}

#define MEI_STALL_TIMER_FREQ (2 * HZ)
/**
 * mei_schedule_stall_timer - re-arm stall_timer work
 *
 * Schedule stall timer
 *
 * @dev: the device structure
 */
void mei_schedule_stall_timer(struct mei_device *dev)
{
        schedule_delayed_work(&dev->timer_work, MEI_STALL_TIMER_FREQ);
}

/**
 * mei_timer - timer function.
 *
 * @work: pointer to the work_struct structure
 *
 */
void mei_timer(struct work_struct *work)
{
        struct mei_cl *cl;
        struct mei_device *dev = container_of(work,
                                        struct mei_device, timer_work.work);
        bool reschedule_timer = false;

        mutex_lock(&dev->device_lock);

        /* Catch interrupt stalls during HBM init handshake */
        if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
            dev->hbm_state != MEI_HBM_IDLE) {

                if (dev->init_clients_timer) {
                        if (--dev->init_clients_timer == 0) {
                                dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n",
                                        dev->hbm_state);
                                mei_reset(dev);
                                goto out;
                        }
                        reschedule_timer = true;
                }
        }

        if (dev->dev_state != MEI_DEV_ENABLED)
                goto out;

        /*** connect/disconnect timeouts ***/
        list_for_each_entry(cl, &dev->file_list, link) {
                if (cl->timer_count) {
                        if (--cl->timer_count == 0) {
                                dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
                                mei_connect_timeout(cl);
                                goto out;
                        }
                        reschedule_timer = true;
                }
        }

out:
        if (dev->dev_state != MEI_DEV_DISABLED && reschedule_timer)
                mei_schedule_stall_timer(dev);

        mutex_unlock(&dev->device_lock);
}