// SPDX-License-Identifier: GPL-2.0-only
/*
 * ISHTP bus layer messages handling
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include "ishtp-dev.h"
#include "hbm.h"
#include "client.h"
#include "loader.h"

/**
 * ishtp_hbm_fw_cl_allocate() - Allocate FW clients
 * @dev: ISHTP device instance
 *
 * Allocates storage for fw clients
 */
static void ishtp_hbm_fw_cl_allocate(struct ishtp_device *dev)
{
	struct ishtp_fw_client *clients;
	int b;

	/* count how many ISH clients we have */
	for_each_set_bit(b, dev->fw_clients_map, ISHTP_CLIENTS_MAX)
		dev->fw_clients_num++;

	if (dev->fw_clients_num <= 0)
		return;

	/* allocate storage for fw clients representation */
	clients = kcalloc(dev->fw_clients_num, sizeof(struct ishtp_fw_client),
			  GFP_KERNEL);
	if (!clients) {
		dev->dev_state = ISHTP_DEV_RESETTING;
		ish_hw_reset(dev);
		return;
	}
	dev->fw_clients = clients;
}

/**
 * ishtp_hbm_cl_hdr() - construct client hbm header
 * @cl: client
 * @hbm_cmd: host bus message command
 * @buf: buffer for cl header
 * @len: buffer length
 *
 * Initialize HBM buffer
 */
static inline void ishtp_hbm_cl_hdr(struct ishtp_cl *cl, uint8_t hbm_cmd,
	void *buf, size_t len)
{
	struct ishtp_hbm_cl_cmd *cmd = buf;

	memset(cmd, 0, len);

	cmd->hbm_cmd = hbm_cmd;
	cmd->host_addr = cl->host_client_id;
	cmd->fw_addr = cl->fw_client_id;
}

/**
 * ishtp_hbm_cl_addr_equal() - Compare client address
 * @cl: client
 * @buf: Client command buffer
 *
 * Compare client address with the address in command buffer
 *
 * Return: True if they have the same address
 */
static inline bool ishtp_hbm_cl_addr_equal(struct ishtp_cl *cl, void *buf)
{
	struct ishtp_hbm_cl_cmd *cmd = buf;

	return cl->host_client_id == cmd->host_addr &&
		cl->fw_client_id == cmd->fw_addr;
}

/**
 * ishtp_hbm_start_wait() - Wait for HBM start message
 * @dev: ISHTP device instance
 *
 * Wait for HBM start message from firmware
 *
 * Return: 0 if HBM start is/was received else timeout error
 */
int ishtp_hbm_start_wait(struct ishtp_device *dev)
{
	int ret;

	if (dev->hbm_state > ISHTP_HBM_START)
		return 0;

	dev_dbg(dev->devc, "Going to wait for ishtp start. hbm_state=%08X\n",
		dev->hbm_state);
	ret = wait_event_interruptible_timeout(dev->wait_hbm_recvd_msg,
					dev->hbm_state >= ISHTP_HBM_STARTED,
					(ISHTP_INTEROP_TIMEOUT * HZ));

	dev_dbg(dev->devc,
		"Woke up from waiting for ishtp start. hbm_state=%08X\n",
		dev->hbm_state);

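	/*
	 * wait_event_interruptible_timeout() returns 0 on timeout and a
	 * negative value when interrupted by a signal; if hbm_state has not
	 * advanced past ISHTP_HBM_START by now, treat both cases as a failed
	 * start handshake.
	 */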
	if (ret <= 0 && (dev->hbm_state <= ISHTP_HBM_START)) {
		dev->hbm_state = ISHTP_HBM_IDLE;
		dev_err(dev->devc,
			"waiting for ishtp start failed. ret=%d hbm_state=%08X\n",
			ret, dev->hbm_state);
		return -ETIMEDOUT;
	}
	return 0;
}

/**
 * ishtp_hbm_start_req() - Send HBM start message
 * @dev: ISHTP device instance
 *
 * Send HBM start message to firmware
 *
 * Return: 0 if success else error code
 */
int ishtp_hbm_start_req(struct ishtp_device *dev)
{
	struct ishtp_msg_hdr hdr;
	struct hbm_host_version_request start_req = { 0 };

	ishtp_hbm_hdr(&hdr, sizeof(start_req));

	/* host start message */
	start_req.hbm_cmd = HOST_START_REQ_CMD;
	start_req.host_version.major_version = HBM_MAJOR_VERSION;
	start_req.host_version.minor_version = HBM_MINOR_VERSION;

	/*
	 * (!) Response to HBM start may be so quick that this thread would get
	 * preempted BEFORE managing to set hbm_state = ISHTP_HBM_START.
	 * So set it at first, change back to ISHTP_HBM_IDLE upon failure
	 */
	dev->hbm_state = ISHTP_HBM_START;
	if (ishtp_write_message(dev, &hdr, &start_req)) {
		dev_err(dev->devc, "version message send failed\n");
		dev->dev_state = ISHTP_DEV_RESETTING;
		dev->hbm_state = ISHTP_HBM_IDLE;
		ish_hw_reset(dev);
		return -ENODEV;
	}

	return 0;
}

/**
 * ishtp_hbm_enum_clients_req() - Send client enum req
 * @dev: ISHTP device instance
 *
 * Send enumeration client request message
 */
void ishtp_hbm_enum_clients_req(struct ishtp_device *dev)
{
	struct ishtp_msg_hdr hdr;
	struct hbm_host_enum_request enum_req = { 0 };

	/* enumerate clients */
	ishtp_hbm_hdr(&hdr, sizeof(enum_req));
	enum_req.hbm_cmd = HOST_ENUM_REQ_CMD;

	if (ishtp_write_message(dev, &hdr, &enum_req)) {
		dev->dev_state = ISHTP_DEV_RESETTING;
		dev_err(dev->devc, "enumeration request send failed\n");
		ish_hw_reset(dev);
	}
	dev->hbm_state = ISHTP_HBM_ENUM_CLIENTS;
}

/**
 * ishtp_hbm_prop_req() - Request property
 * @dev: ISHTP device instance
 *
 * Request property for a single client
 *
 * Return: 0 if success else error code
 */
static int ishtp_hbm_prop_req(struct ishtp_device *dev)
{
	struct ishtp_msg_hdr hdr;
	struct hbm_props_request prop_req = { 0 };
	unsigned long next_client_index;
	uint8_t client_num;

	client_num = dev->fw_client_presentation_num;

	next_client_index = find_next_bit(dev->fw_clients_map,
		ISHTP_CLIENTS_MAX, dev->fw_client_index);

	/* We got all client properties */
	if (next_client_index == ISHTP_CLIENTS_MAX) {
		dev->hbm_state = ISHTP_HBM_WORKING;
		dev->dev_state = ISHTP_DEV_ENABLED;

		for (dev->fw_client_presentation_num = 1;
			dev->fw_client_presentation_num < client_num + 1;
			++dev->fw_client_presentation_num)
			/* Add new client device */
			ishtp_bus_new_client(dev);
		return 0;
	}

	dev->fw_clients[client_num].client_id = next_client_index;

	ishtp_hbm_hdr(&hdr, sizeof(prop_req));

	prop_req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
	prop_req.address = next_client_index;

	if (ishtp_write_message(dev, &hdr, &prop_req)) {
		dev->dev_state = ISHTP_DEV_RESETTING;
		dev_err(dev->devc, "properties request send failed\n");
		ish_hw_reset(dev);
		return -EIO;
	}

	dev->fw_client_index = next_client_index;

	return 0;
}

/**
 * ishtp_hbm_stop_req() - Send HBM stop
 * @dev: ISHTP device instance
 *
 * Send stop request message
 */
static void ishtp_hbm_stop_req(struct ishtp_device *dev)
{
	struct ishtp_msg_hdr hdr;
	struct hbm_host_stop_request stop_req = { 0 };

	ishtp_hbm_hdr(&hdr, sizeof(stop_req));

	stop_req.hbm_cmd = HOST_STOP_REQ_CMD;
	stop_req.reason = DRIVER_STOP_REQUEST;

	ishtp_write_message(dev, &hdr, &stop_req);
}

/**
 * ishtp_hbm_cl_flow_control_req() - Send flow control request
 * @dev: ISHTP device instance
 * @cl: ISHTP client instance
 *
 * Send flow control request
 *
 * Return: 0 if success else error code
 */
int ishtp_hbm_cl_flow_control_req(struct ishtp_device *dev,
	struct ishtp_cl *cl)
{
	struct ishtp_msg_hdr hdr;
	struct hbm_flow_control flow_ctrl;
	const size_t len = sizeof(flow_ctrl);
	int rv;
	unsigned long flags;

	spin_lock_irqsave(&cl->fc_spinlock, flags);

	ishtp_hbm_hdr(&hdr, len);
	ishtp_hbm_cl_hdr(cl, ISHTP_FLOW_CONTROL_CMD, &flow_ctrl, len);

	/*
	 * Sync possible race when RB recycle and packet receive paths
	 * both try to send an out FC
	 */
	if (cl->out_flow_ctrl_creds) {
		spin_unlock_irqrestore(&cl->fc_spinlock, flags);
		return 0;
	}

	cl->recv_msg_num_frags = 0;

	rv = ishtp_write_message(dev, &hdr, &flow_ctrl);
	if (!rv) {
		++cl->out_flow_ctrl_creds;
		++cl->out_flow_ctrl_cnt;
		cl->ts_out_fc = ktime_get();
		if (cl->ts_rx) {
			ktime_t ts_diff = ktime_sub(cl->ts_out_fc, cl->ts_rx);

			if (ktime_after(ts_diff, cl->ts_max_fc_delay))
				cl->ts_max_fc_delay = ts_diff;
		}
	} else {
		++cl->err_send_fc;
	}

	spin_unlock_irqrestore(&cl->fc_spinlock, flags);
	return rv;
}

/**
 * ishtp_hbm_cl_disconnect_req() - Send disconnect request
 * @dev: ISHTP device instance
 * @cl: ISHTP client instance
 *
 * Send disconnect message to fw
 *
 * Return: 0 if success else error code
 */
int ishtp_hbm_cl_disconnect_req(struct ishtp_device *dev, struct ishtp_cl *cl)
{
	struct ishtp_msg_hdr hdr;
	struct hbm_client_connect_request disconn_req;
	const size_t len = sizeof(disconn_req);

	ishtp_hbm_hdr(&hdr, len);
	ishtp_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, &disconn_req, len);

	return ishtp_write_message(dev, &hdr, &disconn_req);
}

/**
 * ishtp_hbm_cl_disconnect_res() - Get disconnect response
 * @dev: ISHTP device instance
 * @rs: Response message
 *
 * Received disconnect response from fw
 */
static void ishtp_hbm_cl_disconnect_res(struct ishtp_device *dev,
	struct hbm_client_connect_response *rs)
{
	struct ishtp_cl *cl = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(cl, &dev->cl_list, link) {
		if (!rs->status && ishtp_hbm_cl_addr_equal(cl, rs)) {
			cl->state = ISHTP_CL_DISCONNECTED;
			wake_up_interruptible(&cl->wait_ctrl_res);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}

/**
 * ishtp_hbm_cl_connect_req() - Send connect request
 * @dev: ISHTP device instance
 * @cl: client device instance
 *
 * Send connection request to specific fw client
 *
 * Return: 0 if success else error code
 */
int ishtp_hbm_cl_connect_req(struct ishtp_device *dev, struct ishtp_cl *cl)
{
	struct ishtp_msg_hdr hdr;
	struct hbm_client_connect_request conn_req;
	const size_t len = sizeof(conn_req);

	ishtp_hbm_hdr(&hdr, len);
	ishtp_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, &conn_req, len);

	return ishtp_write_message(dev, &hdr, &conn_req);
}

/**
 * ishtp_hbm_cl_connect_res() - Get connect response
 * @dev: ISHTP device instance
 * @rs: Response message
 *
 * Received connect response from fw
 */
static void ishtp_hbm_cl_connect_res(struct ishtp_device *dev,
	struct hbm_client_connect_response *rs)
{
	struct ishtp_cl *cl = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(cl, &dev->cl_list, link) {
		if (ishtp_hbm_cl_addr_equal(cl, rs)) {
			if (!rs->status) {
				cl->state = ISHTP_CL_CONNECTED;
				cl->status = 0;
			} else {
				cl->state = ISHTP_CL_DISCONNECTED;
				cl->status = -ENODEV;
			}
			wake_up_interruptible(&cl->wait_ctrl_res);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}

/**
 * ishtp_hbm_fw_disconnect_req() - Receive disconnect request
 * @dev: ISHTP device instance
 * @disconnect_req: disconnect request structure
 *
 * Disconnect request bus message from the fw. Send disconnect response.
 */
static void ishtp_hbm_fw_disconnect_req(struct ishtp_device *dev,
	struct hbm_client_connect_request *disconnect_req)
{
	struct ishtp_cl *cl;
	const size_t len = sizeof(struct hbm_client_connect_response);
	unsigned long flags;
	struct ishtp_msg_hdr hdr;
	unsigned char data[4];	/* All HBM messages are 4 bytes */

	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(cl, &dev->cl_list, link) {
		if (ishtp_hbm_cl_addr_equal(cl, disconnect_req)) {
			cl->state = ISHTP_CL_DISCONNECTED;

			/* send disconnect response */
			ishtp_hbm_hdr(&hdr, len);
			ishtp_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, data,
				len);
			ishtp_write_message(dev, &hdr, data);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}

/**
 * ishtp_hbm_dma_xfer_ack() - Receive transfer ACK
 * @dev: ISHTP device instance
 * @dma_xfer: HBM transfer message
 *
 * Receive ack for ISHTP-over-DMA client message
 */
static void ishtp_hbm_dma_xfer_ack(struct ishtp_device *dev,
	struct dma_xfer_hbm *dma_xfer)
{
	void *msg;
	uint64_t offs;
	struct ishtp_msg_hdr *ishtp_hdr =
		(struct ishtp_msg_hdr *)&dev->ishtp_msg_hdr;
	unsigned int msg_offs;
	struct ishtp_cl *cl;

	for (msg_offs = 0; msg_offs < ishtp_hdr->length;
		msg_offs += sizeof(struct dma_xfer_hbm)) {
		offs = dma_xfer->msg_addr - dev->ishtp_host_dma_tx_buf_phys;
		if (offs > dev->ishtp_host_dma_tx_buf_size) {
			dev_err(dev->devc, "Bad DMA Tx ack message address\n");
			return;
		}
		if (dma_xfer->msg_length >
			dev->ishtp_host_dma_tx_buf_size - offs) {
			dev_err(dev->devc, "Bad DMA Tx ack message size\n");
			return;
		}

		/* logical address of the acked mem */
		msg = (unsigned char *)dev->ishtp_host_dma_tx_buf + offs;
		ishtp_cl_release_dma_acked_mem(dev, msg, dma_xfer->msg_length);

		list_for_each_entry(cl, &dev->cl_list, link) {
			if (cl->fw_client_id == dma_xfer->fw_client_id &&
			    cl->host_client_id == dma_xfer->host_client_id)
				/*
				 * in case that a single ack may be sent
				 * over several dma transfers, and the last msg
				 * addr was inside the acked memory, but not in
				 * its start
				 */
				if (cl->last_dma_addr >=
					(unsigned char *)msg &&
					cl->last_dma_addr <
					(unsigned char *)msg +
					dma_xfer->msg_length) {
					cl->last_dma_acked = 1;

					if (!list_empty(&cl->tx_list.list) &&
						cl->ishtp_flow_ctrl_creds) {
						/*
						 * start sending the first msg
						 */
						ishtp_cl_send_msg(dev, cl);
					}
				}
		}
		++dma_xfer;
	}
}

/**
 * ishtp_hbm_dma_xfer() - Receive DMA transfer message
 * @dev: ISHTP device instance
 * @dma_xfer: HBM transfer message
 *
 * Receive ISHTP-over-DMA client message
 */
static void ishtp_hbm_dma_xfer(struct ishtp_device *dev,
	struct dma_xfer_hbm *dma_xfer)
{
	void *msg;
	uint64_t offs;
	struct ishtp_msg_hdr hdr;
	struct ishtp_msg_hdr *ishtp_hdr =
		(struct ishtp_msg_hdr *)&dev->ishtp_msg_hdr;
	struct dma_xfer_hbm *prm = dma_xfer;
	unsigned int msg_offs;

	for (msg_offs = 0; msg_offs < ishtp_hdr->length;
		msg_offs += sizeof(struct dma_xfer_hbm)) {

		offs = dma_xfer->msg_addr - dev->ishtp_host_dma_rx_buf_phys;
		if (offs > dev->ishtp_host_dma_rx_buf_size) {
			dev_err(dev->devc, "Bad DMA Rx message address\n");
			return;
		}
		if (dma_xfer->msg_length >
			dev->ishtp_host_dma_rx_buf_size - offs) {
			dev_err(dev->devc, "Bad DMA Rx message size\n");
			return;
		}
		msg = dev->ishtp_host_dma_rx_buf + offs;
		recv_ishtp_cl_msg_dma(dev, msg, dma_xfer);
		dma_xfer->hbm = DMA_XFER_ACK;	/* Prepare for response */
		++dma_xfer;
	}

	/* Send DMA_XFER_ACK [...] */
	ishtp_hbm_hdr(&hdr, ishtp_hdr->length);
	ishtp_write_message(dev, &hdr, (unsigned char *)prm);
}

/**
 * ishtp_hbm_dispatch() - HBM dispatch function
 * @dev: ISHTP device instance
 * @hdr: bus message
 *
 * Bottom half read routine after ISR to handle the read bus message cmd
 * processing
 */
void ishtp_hbm_dispatch(struct ishtp_device *dev,
	struct ishtp_bus_message *hdr)
{
	struct ishtp_bus_message *ishtp_msg;
	struct ishtp_fw_client *fw_client;
	struct hbm_host_version_response *version_res;
	struct hbm_client_connect_response *connect_res;
	struct hbm_client_connect_response *disconnect_res;
	struct hbm_client_connect_request *disconnect_req;
	struct hbm_props_response *props_res;
	struct hbm_host_enum_response *enum_res;
	struct ishtp_msg_hdr ishtp_hdr;
	struct dma_alloc_notify dma_alloc_notify;
	struct dma_xfer_hbm *dma_xfer;

	ishtp_msg = hdr;

	switch (ishtp_msg->hbm_cmd) {
	case HOST_START_RES_CMD:
		version_res = (struct hbm_host_version_response *)ishtp_msg;
		if (!version_res->host_version_supported) {
			dev->version = version_res->fw_max_version;

			dev->hbm_state = ISHTP_HBM_STOPPED;
			ishtp_hbm_stop_req(dev);
			return;
		}

		/* Start firmware loading process if it has loader capability */
		if (version_res->host_version_supported & ISHTP_SUPPORT_CAP_LOADER)
			schedule_work(&dev->work_fw_loader);

		dev->version.major_version = HBM_MAJOR_VERSION;
		dev->version.minor_version = HBM_MINOR_VERSION;
		if (dev->dev_state == ISHTP_DEV_INIT_CLIENTS &&
			dev->hbm_state == ISHTP_HBM_START) {
			dev->hbm_state = ISHTP_HBM_STARTED;
			ishtp_hbm_enum_clients_req(dev);
		} else {
			dev_err(dev->devc,
				"reset: wrong host start response\n");
			/* BUG: why do we arrive here? */
			ish_hw_reset(dev);
			return;
		}

		wake_up_interruptible(&dev->wait_hbm_recvd_msg);
		break;

	case CLIENT_CONNECT_RES_CMD:
		connect_res = (struct hbm_client_connect_response *)ishtp_msg;
		ishtp_hbm_cl_connect_res(dev, connect_res);
		break;

	case CLIENT_DISCONNECT_RES_CMD:
		disconnect_res =
			(struct hbm_client_connect_response *)ishtp_msg;
		ishtp_hbm_cl_disconnect_res(dev, disconnect_res);
		break;

	case HOST_CLIENT_PROPERTIES_RES_CMD:
		props_res = (struct hbm_props_response *)ishtp_msg;
		fw_client = &dev->fw_clients[dev->fw_client_presentation_num];

		if (props_res->status || !dev->fw_clients) {
			dev_err(dev->devc,
				"reset: properties response hbm wrong status\n");
			ish_hw_reset(dev);
			return;
		}

		if (fw_client->client_id != props_res->address) {
			dev_err(dev->devc,
				"reset: host properties response address mismatch [%02X %02X]\n",
				fw_client->client_id, props_res->address);
			ish_hw_reset(dev);
			return;
		}

		if (dev->dev_state != ISHTP_DEV_INIT_CLIENTS ||
			dev->hbm_state != ISHTP_HBM_CLIENT_PROPERTIES) {
			dev_err(dev->devc,
				"reset: unexpected properties response\n");
			ish_hw_reset(dev);
			return;
		}

		fw_client->props = props_res->client_properties;
		dev->fw_client_index++;
		dev->fw_client_presentation_num++;

		/* request property for the next client */
		ishtp_hbm_prop_req(dev);

		if (dev->dev_state != ISHTP_DEV_ENABLED)
			break;

		if (!ishtp_use_dma_transfer())
			break;

		dev_dbg(dev->devc, "Requesting to use DMA\n");
		ishtp_cl_alloc_dma_buf(dev);
		if (dev->ishtp_host_dma_rx_buf) {
			const size_t len = sizeof(dma_alloc_notify);

			memset(&dma_alloc_notify, 0, sizeof(dma_alloc_notify));
			dma_alloc_notify.hbm = DMA_BUFFER_ALLOC_NOTIFY;
			dma_alloc_notify.buf_size =
				dev->ishtp_host_dma_rx_buf_size;
			dma_alloc_notify.buf_address =
				dev->ishtp_host_dma_rx_buf_phys;
			ishtp_hbm_hdr(&ishtp_hdr, len);
			ishtp_write_message(dev, &ishtp_hdr,
				(unsigned char *)&dma_alloc_notify);
		}

		break;

	case HOST_ENUM_RES_CMD:
		enum_res = (struct hbm_host_enum_response *)ishtp_msg;
		memcpy(dev->fw_clients_map, enum_res->valid_addresses, 32);
		if (dev->dev_state == ISHTP_DEV_INIT_CLIENTS &&
			dev->hbm_state == ISHTP_HBM_ENUM_CLIENTS) {
			dev->fw_client_presentation_num = 0;
			dev->fw_client_index = 0;

			ishtp_hbm_fw_cl_allocate(dev);
			dev->hbm_state = ISHTP_HBM_CLIENT_PROPERTIES;

			/* first property request */
			ishtp_hbm_prop_req(dev);
		} else {
			dev_err(dev->devc,
				"reset: unexpected enumeration response hbm\n");
			ish_hw_reset(dev);
			return;
		}
		break;

	case HOST_STOP_RES_CMD:
		if (dev->hbm_state != ISHTP_HBM_STOPPED)
			dev_err(dev->devc, "unexpected stop response\n");

		dev->dev_state = ISHTP_DEV_DISABLED;
		dev_info(dev->devc, "reset: FW stop response\n");
		ish_hw_reset(dev);
		break;

	case CLIENT_DISCONNECT_REQ_CMD:
		/* search for client */
		disconnect_req =
			(struct hbm_client_connect_request *)ishtp_msg;
		ishtp_hbm_fw_disconnect_req(dev, disconnect_req);
		break;

	case FW_STOP_REQ_CMD:
		dev->hbm_state = ISHTP_HBM_STOPPED;
		break;

	case DMA_BUFFER_ALLOC_RESPONSE:
		dev->ishtp_host_dma_enabled = 1;
		break;

	case DMA_XFER:
		dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
		if (!dev->ishtp_host_dma_enabled) {
			dev_err(dev->devc,
				"DMA XFER requested but DMA is not enabled\n");
			break;
		}
		ishtp_hbm_dma_xfer(dev, dma_xfer);
		break;

	case DMA_XFER_ACK:
		dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
		if (!dev->ishtp_host_dma_enabled ||
			!dev->ishtp_host_dma_tx_buf) {
			dev_err(dev->devc,
				"DMA XFER acked but DMA Tx is not enabled\n");
			break;
		}
		ishtp_hbm_dma_xfer_ack(dev, dma_xfer);
		break;

	default:
		dev_err(dev->devc, "unknown HBM: %u\n",
			(unsigned int)ishtp_msg->hbm_cmd);

		break;
	}
}

/**
 * bh_hbm_work_fn() - HBM work function
 * @work: work struct
 *
 * Bottom half processing work function (instead of thread handler)
 * for processing hbm messages
 */
void bh_hbm_work_fn(struct work_struct *work)
{
	unsigned long flags;
	struct ishtp_device *dev;
	unsigned char hbm[IPC_PAYLOAD_SIZE];

	dev = container_of(work, struct ishtp_device, bh_hbm_work);
	spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
	if (dev->rd_msg_fifo_head != dev->rd_msg_fifo_tail) {
		memcpy(hbm, dev->rd_msg_fifo + dev->rd_msg_fifo_head,
			IPC_PAYLOAD_SIZE);
		dev->rd_msg_fifo_head =
			(dev->rd_msg_fifo_head + IPC_PAYLOAD_SIZE) %
			(RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
		spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
		ishtp_hbm_dispatch(dev, (struct ishtp_bus_message *)hbm);
	} else {
		spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
	}
}

/**
 * recv_hbm() - Receive HBM message
 * @dev: ISHTP device instance
 * @ishtp_hdr: received bus message
 *
 * Receive and process ISHTP bus messages in ISR context. This will schedule
 * work function to process message
 */
void recv_hbm(struct ishtp_device *dev, struct ishtp_msg_hdr *ishtp_hdr)
{
	uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
	struct ishtp_bus_message *ishtp_msg =
		(struct ishtp_bus_message *)rd_msg_buf;
	unsigned long flags;

	dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);

	/* Flow control - handle in place */
	if (ishtp_msg->hbm_cmd == ISHTP_FLOW_CONTROL_CMD) {
		struct hbm_flow_control *flow_control =
			(struct hbm_flow_control *)ishtp_msg;
		struct ishtp_cl *cl = NULL;
		unsigned long flags, tx_flags;

		spin_lock_irqsave(&dev->cl_list_lock, flags);
		list_for_each_entry(cl, &dev->cl_list, link) {
			if (cl->host_client_id == flow_control->host_addr &&
				cl->fw_client_id ==
				flow_control->fw_addr) {
				/*
				 * NOTE: It's valid only for counting
				 * flow-control implementation to receive a
				 * FC in the middle of sending. Meanwhile not
				 * supported
				 */
				if (cl->ishtp_flow_ctrl_creds)
					dev_err(dev->devc,
						"recv extra FC from FW client %u (host client %u) (FC count was %d)\n",
						(unsigned int)cl->fw_client_id,
						(unsigned int)cl->host_client_id,
						cl->ishtp_flow_ctrl_creds);
				else {
					++cl->ishtp_flow_ctrl_creds;
					++cl->ishtp_flow_ctrl_cnt;
					cl->last_ipc_acked = 1;
					spin_lock_irqsave(
						&cl->tx_list_spinlock,
						tx_flags);
					if (!list_empty(&cl->tx_list.list)) {
						/*
						 * start sending the first msg
						 * = the callback function
						 */
						spin_unlock_irqrestore(
							&cl->tx_list_spinlock,
							tx_flags);
						ishtp_cl_send_msg(dev, cl);
					} else {
						spin_unlock_irqrestore(
							&cl->tx_list_spinlock,
							tx_flags);
					}
				}
				break;
			}
		}
		spin_unlock_irqrestore(&dev->cl_list_lock, flags);
		goto eoi;
	}

	/*
	 * Some messages that are safe for ISR processing and important
	 * to be done "quickly" and in-order, go here
	 */
	if (ishtp_msg->hbm_cmd == CLIENT_CONNECT_RES_CMD ||
		ishtp_msg->hbm_cmd == CLIENT_DISCONNECT_RES_CMD ||
		ishtp_msg->hbm_cmd == CLIENT_DISCONNECT_REQ_CMD ||
		ishtp_msg->hbm_cmd == DMA_XFER) {
		ishtp_hbm_dispatch(dev, ishtp_msg);
		goto eoi;
	}

	/*
	 * All other HBMs go here.
	 * We schedule HBMs for processing serially by using system wq,
	 * possibly there will be multiple HBMs scheduled at the same time.
	 */
	spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
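	/*
	 * The read FIFO keeps one slot free: it is full when advancing the
	 * tail by one IPC_PAYLOAD_SIZE slot would make it equal to the head,
	 * so head == tail always means "empty". On overflow the HBM is
	 * dropped.
	 */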
	if ((dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) %
		(RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE) ==
		dev->rd_msg_fifo_head) {
		spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
		dev_err(dev->devc, "BH buffer overflow, dropping HBM %u\n",
			(unsigned int)ishtp_msg->hbm_cmd);
		goto eoi;
	}
	memcpy(dev->rd_msg_fifo + dev->rd_msg_fifo_tail, ishtp_msg,
		ishtp_hdr->length);
	dev->rd_msg_fifo_tail = (dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) %
		(RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
	spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
	schedule_work(&dev->bh_hbm_work);
eoi:
	return;
}

/**
 * ishtp_loader_recv_msg() - Receive a message from the ISHTP device
 * @dev: The ISHTP device
 * @buf: The buffer containing the message
 */
static void ishtp_loader_recv_msg(struct ishtp_device *dev, void *buf)
{
	if (dev->fw_loader_rx_buf)
		memcpy(dev->fw_loader_rx_buf, buf, dev->fw_loader_rx_size);

	dev->fw_loader_received = true;
	wake_up_interruptible(&dev->wait_loader_recvd_msg);
}

/**
 * recv_fixed_cl_msg() - Receive fixed client message
 * @dev: ISHTP device instance
 * @ishtp_hdr: received bus message
 *
 * Receive and process ISHTP fixed client messages (address == 0)
 * in ISR context
 */
void recv_fixed_cl_msg(struct ishtp_device *dev,
	struct ishtp_msg_hdr *ishtp_hdr)
{
	uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];

	dev->print_log(dev,
		"%s() got fixed client msg from client #%d\n",
		__func__, ishtp_hdr->fw_addr);
	dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
	if (ishtp_hdr->fw_addr == ISHTP_SYSTEM_STATE_CLIENT_ADDR) {
		struct ish_system_states_header *msg_hdr =
			(struct ish_system_states_header *)rd_msg_buf;

		if (msg_hdr->cmd == SYSTEM_STATE_SUBSCRIBE)
			ishtp_send_resume(dev);
		/* if FW request arrived here, the system is not suspended */
		else
			dev_err(dev->devc, "unknown fixed client msg [%02X]\n",
				msg_hdr->cmd);
	} else if (ishtp_hdr->fw_addr == ISHTP_LOADER_CLIENT_ADDR) {
		ishtp_loader_recv_msg(dev, rd_msg_buf);
	}
}

/**
 * fix_cl_hdr() - Initialize fixed client header
 * @hdr: message header
 * @length: length of message
 * @cl_addr: Client address
 *
 * Initialize message header for fixed client
 */
static inline void fix_cl_hdr(struct ishtp_msg_hdr *hdr, size_t length,
	uint8_t cl_addr)
{
	hdr->host_addr = 0;
	hdr->fw_addr = cl_addr;
	hdr->length = length;
	hdr->msg_complete = 1;
	hdr->reserved = 0;
}

/*** Suspend and resume notification ***/

static uint32_t current_state;
static uint32_t supported_states = SUSPEND_STATE_BIT | CONNECTED_STANDBY_STATE_BIT;

/**
 * ishtp_send_suspend() - Send suspend message to FW
 * @dev: ISHTP device instance
 *
 * Send suspend message to FW. This is useful for system freeze (non S3) case
 */
void ishtp_send_suspend(struct ishtp_device *dev)
{
	struct ishtp_msg_hdr ishtp_hdr;
	struct ish_system_states_status state_status_msg;
	const size_t len = sizeof(struct ish_system_states_status);

	fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);

	memset(&state_status_msg, 0, len);
	state_status_msg.hdr.cmd = SYSTEM_STATE_STATUS;
	state_status_msg.supported_states = supported_states;
	current_state |= (SUSPEND_STATE_BIT | CONNECTED_STANDBY_STATE_BIT);
	dev->print_log(dev, "%s() sends SUSPEND notification\n", __func__);
	state_status_msg.states_status = current_state;

	ishtp_write_message(dev, &ishtp_hdr,
		(unsigned char *)&state_status_msg);
}
EXPORT_SYMBOL(ishtp_send_suspend);

/**
 * ishtp_send_resume() - Send resume message to FW
 * @dev: ISHTP device instance
 *
 * Send resume message to FW. This is useful for system freeze (non S3) case
 */
void ishtp_send_resume(struct ishtp_device *dev)
{
	struct ishtp_msg_hdr ishtp_hdr;
	struct ish_system_states_status state_status_msg;
	const size_t len = sizeof(struct ish_system_states_status);

	fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);

	memset(&state_status_msg, 0, len);
	state_status_msg.hdr.cmd = SYSTEM_STATE_STATUS;
	state_status_msg.supported_states = supported_states;
	current_state &= ~(CONNECTED_STANDBY_STATE_BIT | SUSPEND_STATE_BIT);
	dev->print_log(dev, "%s() sends RESUME notification\n", __func__);
	state_status_msg.states_status = current_state;

	ishtp_write_message(dev, &ishtp_hdr,
		(unsigned char *)&state_status_msg);
}
EXPORT_SYMBOL(ishtp_send_resume);

/**
 * ishtp_query_subscribers() - Send query subscribers message
 * @dev: ISHTP device instance
 *
 * Send message to query subscribers
 */
void ishtp_query_subscribers(struct ishtp_device *dev)
{
	struct ishtp_msg_hdr ishtp_hdr;
	struct ish_system_states_query_subscribers query_subscribers_msg;
	const size_t len = sizeof(struct ish_system_states_query_subscribers);

	fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);

	memset(&query_subscribers_msg, 0, len);
	query_subscribers_msg.hdr.cmd = SYSTEM_STATE_QUERY_SUBSCRIBERS;

	ishtp_write_message(dev, &ishtp_hdr,
		(unsigned char *)&query_subscribers_msg);
}