/*
 * ISHTP client logic
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "hbm.h"
#include "client.h"

/**
 * ishtp_read_list_flush() - Flush read queue
 * @cl: ishtp client instance
 *
 * Used to remove all entries from the read queue for a client
 */
static void ishtp_read_list_flush(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *next;
	unsigned long flags;

	spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
	list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
		if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
		}
	spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
}

/**
 * ishtp_cl_flush_queues() - Flush all queues for a client
 * @cl: ishtp client instance
 *
 * Used to remove all queues for a client. This is called when a client device
 * needs reset due to error, S3 resume or during module removal
 *
 * Return: 0 on success else -EINVAL if device is NULL
 */
int ishtp_cl_flush_queues(struct ishtp_cl *cl)
{
	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	ishtp_read_list_flush(cl);

	return 0;
}
EXPORT_SYMBOL(ishtp_cl_flush_queues);

/**
 * ishtp_cl_init() - Initialize all fields of a client device
 * @cl: ishtp client instance
 * @dev: ishtp device
 *
 * Initializes a client device's fields: spinlocks, queues etc.
 * This function is called during new client creation
 */
static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
{
	memset(cl, 0, sizeof(struct ishtp_cl));
	init_waitqueue_head(&cl->wait_ctrl_res);
	spin_lock_init(&cl->free_list_spinlock);
	spin_lock_init(&cl->in_process_spinlock);
	spin_lock_init(&cl->tx_list_spinlock);
	spin_lock_init(&cl->tx_free_list_spinlock);
	spin_lock_init(&cl->fc_spinlock);
	INIT_LIST_HEAD(&cl->link);
	cl->dev = dev;

	INIT_LIST_HEAD(&cl->free_rb_list.list);
	INIT_LIST_HEAD(&cl->tx_list.list);
	INIT_LIST_HEAD(&cl->tx_free_list.list);
	INIT_LIST_HEAD(&cl->in_process_list.list);

	cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
	cl->tx_ring_size = CL_DEF_TX_RING_SIZE;

	/* dma */
	cl->last_tx_path = CL_TX_PATH_IPC;
	cl->last_dma_acked = 1;
	cl->last_dma_addr = NULL;
	cl->last_ipc_acked = 1;
}

/**
 * ishtp_cl_allocate() - Allocate a client structure and set it up
 * @dev: ishtp device
 *
 * Allocate memory for a new client device and initialize each field.
 *
 * Return: The allocated client instance or NULL on failure
 */
struct ishtp_cl *ishtp_cl_allocate(struct ishtp_device *dev)
{
	struct ishtp_cl *cl;

	cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	ishtp_cl_init(cl, dev);
	return cl;
}
EXPORT_SYMBOL(ishtp_cl_allocate);
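
/*
 * Illustrative sketch (not part of this file): a client driver would
 * typically pair allocation with ishtp_cl_free() on its teardown path,
 * assuming it already holds a valid ishtp_device pointer in 'dev':
 *
 *	struct ishtp_cl *cl;
 *
 *	cl = ishtp_cl_allocate(dev);
 *	if (!cl)
 *		return -ENOMEM;
 *	...
 *	ishtp_cl_free(cl);
 */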

/**
 * ishtp_cl_free() - Free a client device
 * @cl: client device instance
 *
 * Frees a client device
 */
void ishtp_cl_free(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	unsigned long flags;

	if (!cl)
		return;

	dev = cl->dev;
	if (!dev)
		return;

	spin_lock_irqsave(&dev->cl_list_lock, flags);
	ishtp_cl_free_rx_ring(cl);
	ishtp_cl_free_tx_ring(cl);
	kfree(cl);
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_free);

/**
 * ishtp_cl_link() - Reserve a host id and link the client instance
 * @cl: client device instance
 * @id: host client id to use. It can be ISHTP_HOST_CLIENT_ID_ANY if any
 *	available id can be used
 *
 * This allocates a single bit in the hostmap. This function will make sure
 * that not too many client sessions are opened at the same time. Once
 * allocated, the client device instance is added to the ishtp device's
 * client list
 *
 * Return: 0 or error code on failure
 */
int ishtp_cl_link(struct ishtp_cl *cl, int id)
{
	struct ishtp_device *dev;
	unsigned long flags, flags_cl;
	int ret = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	spin_lock_irqsave(&dev->device_lock, flags);

	if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) {
		ret = -EMFILE;
		goto unlock_dev;
	}

	/* If Id is not assigned get one */
	if (id == ISHTP_HOST_CLIENT_ID_ANY)
		id = find_first_zero_bit(dev->host_clients_map,
			ISHTP_CLIENTS_MAX);

	if (id >= ISHTP_CLIENTS_MAX) {
		spin_unlock_irqrestore(&dev->device_lock, flags);
		dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX);
		return -ENOENT;
	}

	dev->open_handle_count++;
	cl->host_client_id = id;
	spin_lock_irqsave(&dev->cl_list_lock, flags_cl);
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		ret = -ENODEV;
		goto unlock_cl;
	}
	list_add_tail(&cl->link, &dev->cl_list);
	set_bit(id, dev->host_clients_map);
	cl->state = ISHTP_CL_INITIALIZING;

unlock_cl:
	spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl);
unlock_dev:
	spin_unlock_irqrestore(&dev->device_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ishtp_cl_link);

/**
 * ishtp_cl_unlink() - Unlink a client from the client device list
 * @cl: client device instance
 *
 * Remove a previously linked client from the ishtp device's client list
 */
void ishtp_cl_unlink(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl *pos;
	unsigned long flags;

	/* don't shout on error exit path */
	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	spin_lock_irqsave(&dev->device_lock, flags);
	if (dev->open_handle_count > 0) {
		clear_bit(cl->host_client_id, dev->host_clients_map);
		dev->open_handle_count--;
	}
	spin_unlock_irqrestore(&dev->device_lock, flags);

	/*
	 * This checks that 'cl' is actually linked into the device's
	 * structure, before attempting 'list_del'
	 */
	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(pos, &dev->cl_list, link)
		if (cl->host_client_id == pos->host_client_id) {
			list_del_init(&pos->link);
			break;
		}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_unlink);
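
/*
 * Illustrative sketch (not part of this file): linking usually follows
 * allocation directly, and unlinking mirrors it on teardown:
 *
 *	rv = ishtp_cl_link(cl, ISHTP_HOST_CLIENT_ID_ANY);
 *	if (rv) {
 *		ishtp_cl_free(cl);
 *		return rv;
 *	}
 *	...
 *	ishtp_cl_unlink(cl);
 *	ishtp_cl_free(cl);
 */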

/**
 * ishtp_cl_disconnect() - Send disconnect request to firmware
 * @cl: client device instance
 *
 * Send a disconnect request for a client to firmware.
 *
 * Return: 0 if successful disconnect response from the firmware or error
 * code on failure
 */
int ishtp_cl_disconnect(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	int err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev->print_log(dev, "%s() state %d\n", __func__, cl->state);

	if (cl->state != ISHTP_CL_DISCONNECTING) {
		dev->print_log(dev, "%s() Disconnect is not allowed\n",
			__func__);
		return 0;
	}

	if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
		dev->print_log(dev, "%s() Failed to disconnect\n", __func__);
		dev_err(&cl->device->dev, "failed to disconnect.\n");
		return -ENODEV;
	}

	err = wait_event_interruptible_timeout(cl->wait_ctrl_res,
			(dev->dev_state != ISHTP_DEV_ENABLED ||
			cl->state == ISHTP_CL_DISCONNECTED),
			ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));

	/*
	 * If FW reset arrived, this will happen. Don't check cl->,
	 * as 'cl' may be freed already
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
			__func__);
		return -ENODEV;
	}

	if (cl->state == ISHTP_CL_DISCONNECTED) {
		dev->print_log(dev, "%s() successful\n", __func__);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL(ishtp_cl_disconnect);
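
/*
 * Illustrative sketch (not part of this file): per the state check above,
 * the caller is expected to move the client into ISHTP_CL_DISCONNECTING
 * first, otherwise ishtp_cl_disconnect() returns 0 without sending
 * anything:
 *
 *	cl->state = ISHTP_CL_DISCONNECTING;
 *	rv = ishtp_cl_disconnect(cl);
 */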

/**
 * ishtp_cl_is_other_connecting() - Check if another client is connecting
 * @cl: client device instance
 *
 * Checks if another client with the same fw client id is connecting
 *
 * Return: true if another client is connecting else false
 */
static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl *pos;
	unsigned long flags;

	if (WARN_ON(!cl || !cl->dev))
		return false;

	dev = cl->dev;
	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(pos, &dev->cl_list, link) {
		if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
				cl->fw_client_id == pos->fw_client_id) {
			spin_unlock_irqrestore(&dev->cl_list_lock, flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);

	return false;
}

/**
 * ishtp_cl_connect() - Send connect request to firmware
 * @cl: client device instance
 *
 * Send a connect request for a client to firmware. If successful it will
 * also allocate RX and TX ring buffers
 *
 * Return: 0 if successful connect response from the firmware and able
 * to bind and allocate ring buffers or error code on failure
 */
int ishtp_cl_connect(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);

	if (ishtp_cl_is_other_connecting(cl)) {
		dev->print_log(dev, "%s() Busy\n", __func__);
		return -EBUSY;
	}

	if (ishtp_hbm_cl_connect_req(dev, cl)) {
		dev->print_log(dev, "%s() HBM connect req fail\n", __func__);
		return -ENODEV;
	}

	rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
			(dev->dev_state == ISHTP_DEV_ENABLED &&
			(cl->state == ISHTP_CL_CONNECTED ||
			 cl->state == ISHTP_CL_DISCONNECTED)),
			ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));
	/*
	 * If FW reset arrived, this will happen. Don't check cl->,
	 * as 'cl' may be freed already
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
			__func__);
		return -EFAULT;
	}

	if (cl->state != ISHTP_CL_CONNECTED) {
		dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n",
			__func__);
		return -EFAULT;
	}

	rets = cl->status;
	if (rets) {
		dev->print_log(dev, "%s() Invalid status\n", __func__);
		return rets;
	}

	rets = ishtp_cl_device_bind(cl);
	if (rets) {
		dev->print_log(dev, "%s() Bind error\n", __func__);
		ishtp_cl_disconnect(cl);
		return rets;
	}

	rets = ishtp_cl_alloc_rx_ring(cl);
	if (rets) {
		dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__);
		/* if failed allocation, disconnect */
		ishtp_cl_disconnect(cl);
		return rets;
	}

	rets = ishtp_cl_alloc_tx_ring(cl);
	if (rets) {
		dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__);
		/* if failed allocation, disconnect */
		ishtp_cl_free_rx_ring(cl);
		ishtp_cl_disconnect(cl);
		return rets;
	}

	/* Upon successful connection and allocation, emit flow-control */
	rets = ishtp_cl_read_start(cl);

	dev->print_log(dev, "%s() successful\n", __func__);

	return rets;
}
EXPORT_SYMBOL(ishtp_cl_connect);
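
/*
 * Illustrative sketch (not part of this file): a typical client probe
 * sequence, assuming the fw client was already enumerated (e.g. looked up
 * with ishtp_fw_cl_by_id()/ishtp_fw_cl_by_uuid() from bus code) and its
 * client id stored in 'fw_client_id':
 *
 *	cl = ishtp_cl_allocate(dev);
 *	rv = ishtp_cl_link(cl, ISHTP_HOST_CLIENT_ID_ANY);
 *	cl->fw_client_id = fw_client_id;
 *	cl->state = ISHTP_CL_CONNECTING;
 *	rv = ishtp_cl_connect(cl);	// also allocates rings, sends first FC
 *
 * (error handling omitted for brevity)
 */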

/**
 * ishtp_cl_read_start() - Prepare to read client message
 * @cl: client device instance
 *
 * Get a free buffer from the pool of free read buffers and add it to the
 * device read list, so that incoming message contents can be stored in it.
 * Send a flow control request to the firmware so that it is able to send
 * the next message.
 *
 * Return: 0 if successful or error code on failure
 */
int ishtp_cl_read_start(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl_rb *rb;
	int rets;
	int i;
	unsigned long flags;
	unsigned long dev_flags;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (cl->state != ISHTP_CL_CONNECTED)
		return -ENODEV;

	if (dev->dev_state != ISHTP_DEV_ENABLED)
		return -ENODEV;

	i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
	if (i < 0) {
		dev_err(&cl->device->dev, "no such fw client %d\n",
			cl->fw_client_id);
		return -ENODEV;
	}

	/* The current rb is the head of the free rb list */
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	if (list_empty(&cl->free_rb_list.list)) {
		dev_warn(&cl->device->dev,
			"[ishtp-ish] Rx buffers pool is empty\n");
		rets = -ENOMEM;
		rb = NULL;
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
		goto out;
	}
	rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
	list_del_init(&rb->list);
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	rb->cl = cl;
	rb->buf_idx = 0;

	INIT_LIST_HEAD(&rb->list);
	rets = 0;

	/*
	 * This must be BEFORE sending flow control -
	 * response in ISR may come too fast...
	 */
	spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
	list_add_tail(&rb->list, &dev->read_list.list);
	spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
	if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
		rets = -ENODEV;
		goto out;
	}
out:
	/* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
	if (rets && rb) {
		spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
		list_del(&rb->list);
		spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);

		spin_lock_irqsave(&cl->free_list_spinlock, flags);
		list_add_tail(&rb->list, &cl->free_rb_list.list);
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	}
	return rets;
}
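
/*
 * Illustrative note (not part of this file): after the initial call made
 * from ishtp_cl_connect(), client drivers usually do not invoke
 * ishtp_cl_read_start() directly; returning a consumed buffer via
 * ishtp_cl_io_rb_recycle() is expected to re-arm the read and emit the
 * next flow-control credit when needed:
 *
 *	ishtp_cl_io_rb_recycle(rb);	// rb fully processed by the client
 */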

/**
 * ishtp_cl_send() - Send a message to firmware
 * @cl: client device instance
 * @buf: message buffer
 * @length: length of message
 *
 * If the client is in the correct state to send a message, this function
 * gets a buffer from the tx ring buffers, copies the message data into it
 * and calls ishtp_cl_send_msg() to send the message
 *
 * Return: 0 if successful or error code on failure
 */
int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
{
	struct ishtp_device *dev;
	int id;
	struct ishtp_cl_tx_ring *cl_msg;
	int have_msg_to_send = 0;
	unsigned long tx_flags, tx_free_flags;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (cl->state != ISHTP_CL_CONNECTED) {
		++cl->err_send_msg;
		return -EPIPE;
	}

	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		++cl->err_send_msg;
		return -ENODEV;
	}

	/* Check if we have fw client device */
	id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
	if (id < 0) {
		++cl->err_send_msg;
		return -ENOENT;
	}

	if (length > dev->fw_clients[id].props.max_msg_length) {
		++cl->err_send_msg;
		return -EMSGSIZE;
	}

	/* No free bufs */
	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	if (list_empty(&cl->tx_free_list.list)) {
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
			tx_free_flags);
		++cl->err_send_msg;
		return -ENOMEM;
	}

	cl_msg = list_first_entry(&cl->tx_free_list.list,
		struct ishtp_cl_tx_ring, list);
	if (!cl_msg->send_buf.data) {
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
			tx_free_flags);
		return -EIO;
		/* Should not happen, as free list is pre-allocated */
	}
	/*
	 * This is safe, as 'length' is already checked for not exceeding
	 * max ISHTP message size per client
	 */
	list_del_init(&cl_msg->list);
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
	memcpy(cl_msg->send_buf.data, buf, length);
	cl_msg->send_buf.size = length;
	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	have_msg_to_send = !list_empty(&cl->tx_list.list);
	list_add_tail(&cl_msg->list, &cl->tx_list.list);
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
		ishtp_cl_send_msg(dev, cl);

	return 0;
}
EXPORT_SYMBOL(ishtp_cl_send);
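
/*
 * Illustrative sketch (not part of this file): a connected client simply
 * hands a filled buffer to ishtp_cl_send(); queueing, flow control and
 * IPC/DMA path selection all happen behind this call. 'request' and
 * 'request_len' are hypothetical names:
 *
 *	rv = ishtp_cl_send(cl, request, request_len);
 *	if (rv)
 *		dev_err(&cl->device->dev, "send failed: %d\n", rv);
 */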

/**
 * ishtp_cl_read_complete() - read complete
 * @rb: Pointer to client request block
 *
 * If the message is completely received call ishtp_cl_bus_rx_event()
 * to process message
 */
static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
{
	unsigned long flags;
	int schedule_work_flag = 0;
	struct ishtp_cl *cl = rb->cl;

	spin_lock_irqsave(&cl->in_process_spinlock, flags);
	/*
	 * if in-process list is empty, then need to schedule
	 * the processing thread
	 */
	schedule_work_flag = list_empty(&cl->in_process_list.list);
	list_add_tail(&rb->list, &cl->in_process_list.list);
	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);

	if (schedule_work_flag)
		ishtp_cl_bus_rx_event(cl->device);
}

/**
 * ipc_tx_callback() - IPC tx callback function
 * @prm: Pointer to client device instance
 *
 * Send message over IPC, either for the first time or as a callback on
 * previous message completion
 */
static void ipc_tx_callback(void *prm)
{
	struct ishtp_cl *cl = prm;
	struct ishtp_cl_tx_ring *cl_msg;
	size_t rem;
	struct ishtp_device *dev = (cl ? cl->dev : NULL);
	struct ishtp_msg_hdr ishtp_hdr;
	unsigned long tx_flags, tx_free_flags;
	unsigned char *pmsg;

	if (!dev)
		return;

	/*
	 * Other conditions if some critical error has
	 * occurred before this callback is called
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED)
		return;

	if (cl->state != ISHTP_CL_CONNECTED)
		return;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	if (list_empty(&cl->tx_list.list)) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	if (!cl->sending) {
		--cl->ishtp_flow_ctrl_creds;
		cl->last_ipc_acked = 0;
		cl->last_tx_path = CL_TX_PATH_IPC;
		cl->sending = 1;
	}

	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
		list);
	rem = cl_msg->send_buf.size - cl->tx_offs;

	ishtp_hdr.host_addr = cl->host_client_id;
	ishtp_hdr.fw_addr = cl->fw_client_id;
	ishtp_hdr.reserved = 0;
	pmsg = cl_msg->send_buf.data + cl->tx_offs;

	if (rem <= dev->mtu) {
		ishtp_hdr.length = rem;
		ishtp_hdr.msg_complete = 1;
		cl->sending = 0;
		list_del_init(&cl_msg->list);	/* Must be before write */
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		/* Submit to IPC queue with no callback */
		ishtp_write_message(dev, &ishtp_hdr, pmsg);
		spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
		list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
			tx_free_flags);
	} else {
		/* Send IPC fragment */
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		cl->tx_offs += dev->mtu;
		ishtp_hdr.length = dev->mtu;
		ishtp_hdr.msg_complete = 0;
		ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
	}
}
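
/*
 * Illustrative walk-through (not part of this file): with a hypothetical
 * IPC MTU of 128 bytes, a 300-byte message leaves ipc_tx_callback() three
 * times: two 128-byte fragments with msg_complete == 0 (each re-arming
 * itself as the completion callback), then a final 44-byte fragment with
 * msg_complete == 1, after which the ring buffer returns to tx_free_list:
 *
 *	rem = 300 - 0   -> fragment of 128, tx_offs = 128
 *	rem = 300 - 128 -> fragment of 128, tx_offs = 256
 *	rem = 300 - 256 -> 44 <= mtu, last fragment, msg_complete = 1
 */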

/**
 * ishtp_cl_send_msg_ipc() - Send message using IPC
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send message over IPC, not using DMA
 */
static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
				  struct ishtp_cl *cl)
{
	/* If last DMA message wasn't acked yet, leave this one in Tx queue */
	if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
		return;

	cl->tx_offs = 0;
	ipc_tx_callback(cl);
	++cl->send_msg_cnt_ipc;
}

/**
 * ishtp_cl_send_msg_dma() - Send message using DMA
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send message using DMA
 */
static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
				  struct ishtp_cl *cl)
{
	struct ishtp_msg_hdr hdr;
	struct dma_xfer_hbm dma_xfer;
	unsigned char *msg_addr;
	int off;
	struct ishtp_cl_tx_ring *cl_msg;
	unsigned long tx_flags, tx_free_flags;

	/* If last IPC message wasn't acked yet, leave this one in Tx queue */
	if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
		return;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	if (list_empty(&cl->tx_list.list)) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
		list);

	msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size);
	if (!msg_addr) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		if (dev->transfer_path == CL_TX_PATH_DEFAULT)
			ishtp_cl_send_msg_ipc(dev, cl);
		return;
	}

	list_del_init(&cl_msg->list);	/* Must be before write */
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	--cl->ishtp_flow_ctrl_creds;
	cl->last_dma_acked = 0;
	cl->last_dma_addr = msg_addr;
	cl->last_tx_path = CL_TX_PATH_DMA;

	/* write msg to dma buf */
	memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);

	/* send dma_xfer hbm msg */
	off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
	ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
	dma_xfer.hbm = DMA_XFER;
	dma_xfer.fw_client_id = cl->fw_client_id;
	dma_xfer.host_client_id = cl->host_client_id;
	dma_xfer.reserved = 0;
	dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
	dma_xfer.msg_length = cl_msg->send_buf.size;
	dma_xfer.reserved2 = 0;
	ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
	++cl->send_msg_cnt_dma;
}

/**
 * ishtp_cl_send_msg() - Send message using DMA or IPC
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send message using DMA or IPC based on transfer_path
 */
void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
{
	if (dev->transfer_path == CL_TX_PATH_DMA)
		ishtp_cl_send_msg_dma(dev, cl);
	else
		ishtp_cl_send_msg_ipc(dev, cl);
}

/**
 * recv_ishtp_cl_msg() - Receive client message
 * @dev: ISHTP device instance
 * @ishtp_hdr: Pointer to message header
 *
 * Receive and dispatch ISHTP client messages. This function executes in ISR
 * context
 */
void recv_ishtp_cl_msg(struct ishtp_device *dev,
		       struct ishtp_msg_hdr *ishtp_hdr)
{
	struct ishtp_cl *cl;
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *new_rb;
	unsigned char *buffer = NULL;
	struct ishtp_cl_rb *complete_rb = NULL;
	unsigned long dev_flags;
	unsigned long flags;
	int rb_count;

	if (ishtp_hdr->reserved) {
		dev_err(dev->devc, "corrupted message header.\n");
		goto eoi;
	}

	if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) {
		dev_err(dev->devc,
			"ISHTP message length in hdr exceeds IPC MTU\n");
		goto eoi;
	}

	spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
	rb_count = -1;
	list_for_each_entry(rb, &dev->read_list.list, list) {
		++rb_count;
		cl = rb->cl;
		if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
				cl->fw_client_id == ishtp_hdr->fw_addr) ||
				!(cl->state == ISHTP_CL_CONNECTED))
			continue;

		/* If no Rx buffer is allocated, disband the rb */
		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
			spin_unlock_irqrestore(&dev->read_list_spinlock,
				dev_flags);
			dev_err(&cl->device->dev,
				"Rx buffer is not allocated.\n");
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
			cl->status = -ENOMEM;
			goto eoi;
		}

		/*
		 * If the message buffer is overflown (exceeds max. client
		 * msg size), drop the message and return the buffer to the
		 * free pool. Do we need to disconnect such a client? (We
		 * don't send back FC, so communication will be stuck anyway)
		 */
		if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
			spin_unlock_irqrestore(&dev->read_list_spinlock,
				dev_flags);
			dev_err(&cl->device->dev,
				"message overflow. size %d len %d idx %ld\n",
				rb->buffer.size, ishtp_hdr->length,
				rb->buf_idx);
			list_del(&rb->list);
			ishtp_cl_io_rb_recycle(rb);
			cl->status = -EIO;
			goto eoi;
		}

		buffer = rb->buffer.data + rb->buf_idx;
		dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length);

		rb->buf_idx += ishtp_hdr->length;
		if (ishtp_hdr->msg_complete) {
			/* Last fragment in message - it's complete */
			cl->status = 0;
			list_del(&rb->list);
			complete_rb = rb;

			--cl->out_flow_ctrl_creds;
			/*
			 * the whole msg arrived, send a new FC, and add a new
			 * rb buffer for the next coming msg
			 */
			spin_lock_irqsave(&cl->free_list_spinlock, flags);

			if (!list_empty(&cl->free_rb_list.list)) {
				new_rb = list_entry(cl->free_rb_list.list.next,
					struct ishtp_cl_rb, list);
				list_del_init(&new_rb->list);
				spin_unlock_irqrestore(&cl->free_list_spinlock,
					flags);
				new_rb->cl = cl;
				new_rb->buf_idx = 0;
				INIT_LIST_HEAD(&new_rb->list);
				list_add_tail(&new_rb->list,
					&dev->read_list.list);

				ishtp_hbm_cl_flow_control_req(dev, cl);
			} else {
				spin_unlock_irqrestore(&cl->free_list_spinlock,
					flags);
			}
		}
		/* One more fragment in message (even if this was last) */
		++cl->recv_msg_num_frags;

		/*
		 * We can safely break here (and in BH too),
		 * a single input message can go only to a single request!
		 */
		break;
	}

	spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
	/* If it's nobody's message, just read and discard it */
	if (!buffer) {
		uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];

		dev_err(dev->devc, "Dropped Rx msg - no request\n");
		dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
		goto eoi;
	}

	if (complete_rb) {
		getnstimeofday(&cl->ts_rx);
		++cl->recv_msg_cnt_ipc;
		ishtp_cl_read_complete(complete_rb);
	}
eoi:
	return;
}
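
/*
 * Illustrative sketch (not part of this file): the bus rx event triggered
 * from ishtp_cl_read_complete() typically lands in a client driver
 * callback that drains in_process_list and recycles each buffer, which
 * re-arms the read via ishtp_cl_read_start(). 'process_payload' is a
 * hypothetical client-side handler:
 *
 *	while (!list_empty(&cl->in_process_list.list)) {
 *		rb = list_first_entry(&cl->in_process_list.list,
 *			struct ishtp_cl_rb, list);
 *		list_del_init(&rb->list);
 *		process_payload(rb->buffer.data, rb->buf_idx);
 *		ishtp_cl_io_rb_recycle(rb);
 *	}
 *
 * (locking with cl->in_process_spinlock omitted for brevity)
 */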

/**
 * recv_ishtp_cl_msg_dma() - Receive client message via DMA
 * @dev: ISHTP device instance
 * @msg: message pointer
 * @hbm: hbm buffer
 *
 * Receive and dispatch ISHTP client messages using DMA. This function
 * executes in ISR context
 */
void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
			   struct dma_xfer_hbm *hbm)
{
	struct ishtp_cl *cl;
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *new_rb;
	unsigned char *buffer = NULL;
	struct ishtp_cl_rb *complete_rb = NULL;
	unsigned long dev_flags;
	unsigned long flags;

	spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
	list_for_each_entry(rb, &dev->read_list.list, list) {
		cl = rb->cl;
		if (!cl || !(cl->host_client_id == hbm->host_client_id &&
				cl->fw_client_id == hbm->fw_client_id) ||
				!(cl->state == ISHTP_CL_CONNECTED))
			continue;

		/*
		 * If no Rx buffer is allocated, disband the rb
		 */
		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
			spin_unlock_irqrestore(&dev->read_list_spinlock,
				dev_flags);
			dev_err(&cl->device->dev,
				"response buffer is not allocated.\n");
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
			cl->status = -ENOMEM;
			goto eoi;
		}

		/*
		 * If the message buffer is overflown (exceeds max. client
		 * msg size), drop the message and return the buffer to the
		 * free pool. Do we need to disconnect such a client? (We
		 * don't send back FC, so communication will be stuck anyway)
		 */
		if (rb->buffer.size < hbm->msg_length) {
			spin_unlock_irqrestore(&dev->read_list_spinlock,
				dev_flags);
			dev_err(&cl->device->dev,
				"message overflow. size %d len %d idx %ld\n",
				rb->buffer.size, hbm->msg_length, rb->buf_idx);
			list_del(&rb->list);
			ishtp_cl_io_rb_recycle(rb);
			cl->status = -EIO;
			goto eoi;
		}

		buffer = rb->buffer.data;
		memcpy(buffer, msg, hbm->msg_length);
		rb->buf_idx = hbm->msg_length;

		/* Last fragment in message - it's complete */
		cl->status = 0;
		list_del(&rb->list);
		complete_rb = rb;

		--cl->out_flow_ctrl_creds;
		/*
		 * the whole msg arrived, send a new FC, and add a new
		 * rb buffer for the next coming msg
		 */
		spin_lock_irqsave(&cl->free_list_spinlock, flags);

		if (!list_empty(&cl->free_rb_list.list)) {
			new_rb = list_entry(cl->free_rb_list.list.next,
				struct ishtp_cl_rb, list);
			list_del_init(&new_rb->list);
			spin_unlock_irqrestore(&cl->free_list_spinlock,
				flags);
			new_rb->cl = cl;
			new_rb->buf_idx = 0;
			INIT_LIST_HEAD(&new_rb->list);
			list_add_tail(&new_rb->list,
				&dev->read_list.list);

			ishtp_hbm_cl_flow_control_req(dev, cl);
		} else {
			spin_unlock_irqrestore(&cl->free_list_spinlock,
				flags);
		}

		/* One more fragment in message (this is always last) */
		++cl->recv_msg_num_frags;

		/*
		 * We can safely break here (and in BH too),
		 * a single input message can go only to a single request!
		 */
		break;
	}

	spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
	/* If it's nobody's message, just read and discard it */
	if (!buffer) {
		dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n");
		goto eoi;
	}

	if (complete_rb) {
		getnstimeofday(&cl->ts_rx);
		++cl->recv_msg_cnt_dma;
		ishtp_cl_read_complete(complete_rb);
	}
eoi:
	return;
}