1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * ssi_protocol.c 4 * 5 * Implementation of the SSI McSAAB improved protocol. 6 * 7 * Copyright (C) 2010 Nokia Corporation. All rights reserved. 8 * Copyright (C) 2013 Sebastian Reichel <sre@kernel.org> 9 * 10 * Contact: Carlos Chinea <carlos.chinea@nokia.com> 11 */ 12 13 #include <linux/atomic.h> 14 #include <linux/clk.h> 15 #include <linux/device.h> 16 #include <linux/err.h> 17 #include <linux/gpio.h> 18 #include <linux/if_ether.h> 19 #include <linux/if_arp.h> 20 #include <linux/if_phonet.h> 21 #include <linux/init.h> 22 #include <linux/irq.h> 23 #include <linux/list.h> 24 #include <linux/module.h> 25 #include <linux/netdevice.h> 26 #include <linux/notifier.h> 27 #include <linux/scatterlist.h> 28 #include <linux/skbuff.h> 29 #include <linux/slab.h> 30 #include <linux/spinlock.h> 31 #include <linux/timer.h> 32 #include <linux/hsi/hsi.h> 33 #include <linux/hsi/ssi_protocol.h> 34 35 #define SSIP_TXQUEUE_LEN 100 36 #define SSIP_MAX_MTU 65535 37 #define SSIP_DEFAULT_MTU 4000 38 #define PN_MEDIA_SOS 21 39 #define SSIP_MIN_PN_HDR 6 /* FIXME: Revisit */ 40 #define SSIP_WDTOUT 2000 /* FIXME: has to be 500 msecs */ 41 #define SSIP_KATOUT 15 /* 15 msecs */ 42 #define SSIP_MAX_CMDS 5 /* Number of pre-allocated commands buffers */ 43 #define SSIP_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1) 44 #define SSIP_CMT_LOADER_SYNC 0x11223344 45 /* 46 * SSI protocol command definitions 47 */ 48 #define SSIP_COMMAND(data) ((data) >> 28) 49 #define SSIP_PAYLOAD(data) ((data) & 0xfffffff) 50 /* Commands */ 51 #define SSIP_SW_BREAK 0 52 #define SSIP_BOOTINFO_REQ 1 53 #define SSIP_BOOTINFO_RESP 2 54 #define SSIP_WAKETEST_RESULT 3 55 #define SSIP_START_TRANS 4 56 #define SSIP_READY 5 57 /* Payloads */ 58 #define SSIP_DATA_VERSION(data) ((data) & 0xff) 59 #define SSIP_LOCAL_VERID 1 60 #define SSIP_WAKETEST_OK 0 61 #define SSIP_WAKETEST_FAILED 1 62 #define SSIP_PDU_LENGTH(data) (((data) >> 8) & 0xffff) 63 #define SSIP_MSG_ID(data) ((data) & 0xff) 
/* Generic Command */
#define SSIP_CMD(cmd, payload)	(((cmd) << 28) | ((payload) & 0xfffffff))
/* Commands for the control channel */
#define SSIP_BOOTINFO_REQ_CMD(ver) \
		SSIP_CMD(SSIP_BOOTINFO_REQ, SSIP_DATA_VERSION(ver))
#define SSIP_BOOTINFO_RESP_CMD(ver) \
		SSIP_CMD(SSIP_BOOTINFO_RESP, SSIP_DATA_VERSION(ver))
#define SSIP_START_TRANS_CMD(pdulen, id) \
		SSIP_CMD(SSIP_START_TRANS, (((pdulen) << 8) | SSIP_MSG_ID(id)))
#define SSIP_READY_CMD		SSIP_CMD(SSIP_READY, 0)
#define SSIP_SWBREAK_CMD	SSIP_CMD(SSIP_SW_BREAK, 0)

/* Bit index in ssi_protocol.flags tracking an in-progress wake line test */
#define SSIP_WAKETEST_FLAG 0

/* Main state machine states */
enum {
	INIT,
	HANDSHAKE,
	ACTIVE,
};

/* Send state machine states */
enum {
	SEND_IDLE,
	WAIT4READY,
	SEND_READY,
	SENDING,
	SENDING_SWBREAK,
};

/* Receive state machine states */
enum {
	RECV_IDLE,
	RECV_READY,
	RECEIVING,
};

/**
 * struct ssi_protocol - SSI protocol (McSAAB) data
 * @main_state: Main state machine
 * @send_state: TX state machine
 * @recv_state: RX state machine
 * @flags: Flags, currently only used to follow wake line test
 * @rxid: RX data id
 * @txid: TX data id
 * @txqueue_len: TX queue length
 * @tx_wd: TX watchdog
 * @rx_wd: RX watchdog
 * @keep_alive: Workaround for SSI HW bug
 * @lock: To serialize access to this struct
 * @netdev: Phonet network device
 * @txqueue: TX data queue
 * @cmdqueue: Queue of free commands
 * @work: &struct work_struct for scheduled work
 * @cl: HSI client own reference
 * @link: Link for ssip_list
 * @tx_usecnt: Refcount to keep track the slaves that use the wake line
 * @channel_id_cmd: HSI channel id for command stream
 * @channel_id_data: HSI channel id for data stream
 */
struct ssi_protocol {
	unsigned int		main_state;
	unsigned int		send_state;
	unsigned int		recv_state;
	unsigned long		flags;
	u8			rxid;
	u8			txid;
	unsigned int		txqueue_len;
	struct timer_list	tx_wd;
	struct timer_list	rx_wd;
	struct timer_list	keep_alive; /* wake-up workaround */
	spinlock_t		lock;
	struct net_device	*netdev;
	struct list_head	txqueue;
	struct list_head	cmdqueue;
	struct work_struct	work;
	struct hsi_client	*cl;
	struct list_head	link;
	atomic_t		tx_usecnt;
	int			channel_id_cmd;
	int			channel_id_data;
};

/* List of ssi protocol instances */
static LIST_HEAD(ssip_list);

static void ssip_rxcmd_complete(struct hsi_msg *msg);

/* Store the 32-bit command word into @msg's single-entry sg buffer. */
static inline void ssip_set_cmd(struct hsi_msg *msg, u32 cmd)
{
	u32 *data;

	data = sg_virt(msg->sgt.sgl);
	*data = cmd;
}

/* Read back the 32-bit command word from @msg's single-entry sg buffer. */
static inline u32 ssip_get_cmd(struct hsi_msg *msg)
{
	u32 *data;

	data = sg_virt(msg->sgt.sgl);

	return *data;
}

/*
 * Map an skb onto an HSI message scatterlist: one sg entry for the
 * linear head plus one per page fragment. The msg must have been
 * allocated with exactly nr_frags + 1 entries.
 */
static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
{
	skb_frag_t *frag;
	struct scatterlist *sg;
	int i;

	BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1));

	sg = msg->sgt.sgl;
	sg_set_buf(sg, skb->data, skb_headlen(skb));
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		sg = sg_next(sg);
		BUG_ON(!sg);
		frag = &skb_shinfo(skb)->frags[i];
		sg_set_page(sg, skb_frag_page(frag), skb_frag_size(frag),
				skb_frag_off(frag));
	}
}

/* Free a data message together with the skb stored in its context. */
static void ssip_free_data(struct hsi_msg *msg)
{
	struct sk_buff *skb;

	skb = msg->context;
	pr_debug("free data: msg %p context %p skb %p\n", msg, msg->context,
								skb);
	msg->destructor = NULL;
	dev_kfree_skb(skb);
	hsi_free_msg(msg);
}

/*
 * Allocate an HSI data message wrapping @skb on the data channel.
 * The skb is kept in msg->context and released by ssip_free_data().
 * Returns NULL on allocation failure.
 */
static struct hsi_msg *ssip_alloc_data(struct ssi_protocol *ssi,
					struct sk_buff *skb, gfp_t flags)
{
	struct hsi_msg *msg;

	msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags);
	if (!msg)
		return NULL;
	ssip_skb_to_msg(skb, msg);
	msg->destructor = ssip_free_data;
	msg->channel = ssi->channel_id_data;
	msg->context = skb;

	return msg;
}

/* Return a command message to the free command pool. */
static inline void ssip_release_cmd(struct hsi_msg *msg)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl);

	dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg));
	spin_lock_bh(&ssi->lock);
	list_add_tail(&msg->link, &ssi->cmdqueue);
	spin_unlock_bh(&ssi->lock);
}

/*
 * Take a command message from the pre-allocated pool. The pool is sized
 * (SSIP_MAX_CMDS) so that it never runs empty in correct operation,
 * hence the BUG_ON instead of error handling.
 */
static struct hsi_msg *ssip_claim_cmd(struct ssi_protocol *ssi)
{
	struct hsi_msg *msg;

	BUG_ON(list_empty(&ssi->cmdqueue));

	spin_lock_bh(&ssi->lock);
	msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
	list_del(&msg->link);
	spin_unlock_bh(&ssi->lock);
	msg->destructor = ssip_release_cmd;

	return msg;
}

/* Free every command message (and its buffer) in the command pool. */
static void ssip_free_cmds(struct ssi_protocol *ssi)
{
	struct hsi_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
		list_del(&msg->link);
		msg->destructor = NULL;
		kfree(sg_virt(msg->sgt.sgl));
		hsi_free_msg(msg);
	}
}

/*
 * Pre-allocate SSIP_MAX_CMDS single-word command messages on the control
 * channel. On failure everything already allocated is freed and -ENOMEM
 * is returned.
 */
static int ssip_alloc_cmds(struct ssi_protocol *ssi)
{
	struct hsi_msg *msg;
	u32 *buf;
	unsigned int i;

	for (i = 0; i < SSIP_MAX_CMDS; i++) {
		msg = hsi_alloc_msg(1, GFP_KERNEL);
		if (!msg)
			goto out;
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			hsi_free_msg(msg);
			goto out;
		}
		sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
		msg->channel = ssi->channel_id_cmd;
		list_add_tail(&msg->link, &ssi->cmdqueue);
	}

	return 0;
out:
	ssip_free_cmds(ssi);

	return -ENOMEM;
}

/*
 * Move the RX state machine to @state and (re)arm or stop the RX
 * watchdog and keep-alive timers accordingly. Called with ssi->lock held.
 */
static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state)
{
	ssi->recv_state = state;
	switch (state) {
	case RECV_IDLE:
		del_timer(&ssi->rx_wd);
		if (ssi->send_state == SEND_IDLE)
			del_timer(&ssi->keep_alive);
		break;
	case RECV_READY:
		/* CMT speech workaround */
		if (atomic_read(&ssi->tx_usecnt))
			break;
		fallthrough;
	case RECEIVING:
		mod_timer(&ssi->keep_alive, jiffies +
						msecs_to_jiffies(SSIP_KATOUT));
		mod_timer(&ssi->rx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		break;
	default:
		break;
	}
}

/*
 * Move the TX state machine to @state and (re)arm or stop the TX
 * watchdog and keep-alive timers accordingly. Called with ssi->lock held.
 */
static void ssip_set_txstate(struct ssi_protocol *ssi, unsigned int state)
{
	ssi->send_state = state;
	switch (state) {
	case SEND_IDLE:
	case SEND_READY:
		del_timer(&ssi->tx_wd);
		if (ssi->recv_state == RECV_IDLE)
			del_timer(&ssi->keep_alive);
		break;
	case WAIT4READY:
	case SENDING:
	case SENDING_SWBREAK:
		mod_timer(&ssi->keep_alive,
				jiffies + msecs_to_jiffies(SSIP_KATOUT));
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		break;
	default:
		break;
	}
}

/*
 * Look up the McSAAB master client sharing the same HSI port (parent
 * device) as @slave. Returns ERR_PTR(-ENODEV) if none is registered.
 * NOTE(review): ssip_list is walked without locking here — presumably
 * relies on registration ordering; confirm against the HSI core.
 */
struct hsi_client *ssip_slave_get_master(struct hsi_client *slave)
{
	struct hsi_client *master = ERR_PTR(-ENODEV);
	struct ssi_protocol *ssi;

	list_for_each_entry(ssi, &ssip_list, link)
		if (slave->device.parent == ssi->cl->device.parent) {
			master = ssi->cl;
			break;
		}

	return master;
}
EXPORT_SYMBOL_GPL(ssip_slave_get_master);

/*
 * Called by a slave client (e.g. cmt-speech) to raise the TX wake line.
 * Bumps tx_usecnt so the line is kept up until the matching
 * ssip_slave_stop_tx().
 */
int ssip_slave_start_tx(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	dev_dbg(&master->device, "start TX %d\n", atomic_read(&ssi->tx_usecnt));
	spin_lock_bh(&ssi->lock);
	if (ssi->send_state == SEND_IDLE) {
		ssip_set_txstate(ssi, WAIT4READY);
		hsi_start_tx(master);
	}
	spin_unlock_bh(&ssi->lock);
	atomic_inc(&ssi->tx_usecnt);

	return 0;
}
EXPORT_SYMBOL_GPL(ssip_slave_start_tx);

/*
 * Counterpart of ssip_slave_start_tx(): drop the wake line refcount and
 * lower the TX wake line once the last user is gone and no transfer is
 * in flight.
 */
int ssip_slave_stop_tx(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	WARN_ON_ONCE(atomic_read(&ssi->tx_usecnt) == 0);

	if (atomic_dec_and_test(&ssi->tx_usecnt)) {
		spin_lock_bh(&ssi->lock);
		if ((ssi->send_state == SEND_READY) ||
			(ssi->send_state == WAIT4READY)) {
			ssip_set_txstate(ssi, SEND_IDLE);
			hsi_stop_tx(master);
		}
		spin_unlock_bh(&ssi->lock);
	}
	dev_dbg(&master->device, "stop TX %d\n", atomic_read(&ssi->tx_usecnt));

	return 0;
}
EXPORT_SYMBOL_GPL(ssip_slave_stop_tx);

/* Report whether the Phonet interface of the master is up. */
int ssip_slave_running(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);
	return netif_running(ssi->netdev);
}
EXPORT_SYMBOL_GPL(ssip_slave_running);

/*
 * Reset the whole protocol instance: drop carrier, flush the HSI port,
 * lower the wake line, stop all timers, return to INIT state and free
 * any queued TX data messages.
 */
static void ssip_reset(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct list_head *head, *tmp;
	struct hsi_msg *msg;

	if (netif_running(ssi->netdev))
		netif_carrier_off(ssi->netdev);
	hsi_flush(cl);
	spin_lock_bh(&ssi->lock);
	if (ssi->send_state != SEND_IDLE)
		hsi_stop_tx(cl);
	spin_unlock_bh(&ssi->lock);
	if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
		ssi_waketest(cl, 0); /* FIXME: To be removed */
	spin_lock_bh(&ssi->lock);
	del_timer(&ssi->rx_wd);
	del_timer(&ssi->tx_wd);
	del_timer(&ssi->keep_alive);
	ssi->main_state = 0;
	ssi->send_state = 0;
	ssi->recv_state = 0;
	ssi->flags = 0;
	ssi->rxid = 0;
	ssi->txid = 0;
	list_for_each_safe(head, tmp, &ssi->txqueue) {
		msg = list_entry(head, struct hsi_msg, link);
		dev_dbg(&cl->device, "Pending TX data\n");
		list_del(head);
		ssip_free_data(msg);
	}
	ssi->txqueue_len = 0;
	spin_unlock_bh(&ssi->lock);
}

/* Dump the current protocol state to the kernel log (error path aid). */
static void ssip_dump_state(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	spin_lock_bh(&ssi->lock);
	dev_err(&cl->device, "Main state: %d\n", ssi->main_state);
	dev_err(&cl->device, "Recv state: %d\n", ssi->recv_state);
	dev_err(&cl->device, "Send state: %d\n", ssi->send_state);
	dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ?
							"Online" : "Offline");
	dev_err(&cl->device, "Wake test %d\n",
				test_bit(SSIP_WAKETEST_FLAG, &ssi->flags));
	dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid);
	dev_err(&cl->device, "Data TX id: %d\n", ssi->txid);

	list_for_each_entry(msg, &ssi->txqueue, link)
		dev_err(&cl->device, "pending TX data (%p)\n", msg);
	spin_unlock_bh(&ssi->lock);
}

/*
 * Fatal protocol error handler: dump state, reset everything and re-arm
 * a command read so the handshake can restart.
 */
static void ssip_error(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	ssip_dump_state(cl);
	ssip_reset(cl);
	msg = ssip_claim_cmd(ssi);
	msg->complete = ssip_rxcmd_complete;
	hsi_async_read(cl, msg);
}

/*
 * Keep-alive timer callback (SSI HW bug workaround): keeps re-arming
 * itself while either state machine is busy. Runs in timer (softirq)
 * context, hence spin_lock() rather than spin_lock_bh().
 */
static void ssip_keep_alive(struct timer_list *t)
{
	struct ssi_protocol *ssi = from_timer(ssi, t, keep_alive);
	struct hsi_client *cl = ssi->cl;

	dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n",
		ssi->main_state, ssi->recv_state, ssi->send_state);

	spin_lock(&ssi->lock);
	if (ssi->recv_state == RECV_IDLE)
		switch (ssi->send_state) {
		case SEND_READY:
			if (atomic_read(&ssi->tx_usecnt) == 0)
				break;
			fallthrough;
			/*
			 * Workaround for cmt-speech in that case
			 * we rely on audio timers.
			 */
		case SEND_IDLE:
			spin_unlock(&ssi->lock);
			return;
		}
	mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT));
	spin_unlock(&ssi->lock);
}

/* RX watchdog expiry: treat as a fatal protocol error. */
static void ssip_rx_wd(struct timer_list *t)
{
	struct ssi_protocol *ssi = from_timer(ssi, t, rx_wd);
	struct hsi_client *cl = ssi->cl;

	dev_err(&cl->device, "Watchdog triggered\n");
	ssip_error(cl);
}

/* TX watchdog expiry (also used as boot handshake timeout): fatal error. */
static void ssip_tx_wd(struct timer_list *t)
{
	struct ssi_protocol *ssi = from_timer(ssi, t, tx_wd);
	struct hsi_client *cl = ssi->cl;

	dev_err(&cl->device, "Watchdog triggered\n");
	ssip_error(cl);
}

/*
 * Kick off the boot handshake: send BOOTINFO_REQ with our version and
 * queue the first command read.
 */
static void ssip_send_bootinfo_req_cmd(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	dev_dbg(&cl->device, "Issuing BOOT INFO REQ command\n");
	msg = ssip_claim_cmd(ssi);
	ssip_set_cmd(msg, SSIP_BOOTINFO_REQ_CMD(SSIP_LOCAL_VERID));
	msg->complete = ssip_release_cmd;
	hsi_async_write(cl, msg);
	dev_dbg(&cl->device, "Issuing RX command\n");
	msg = ssip_claim_cmd(ssi);
	msg->complete = ssip_rxcmd_complete;
	hsi_async_read(cl, msg);
}

/* RX wake line went up: acknowledge with a READY command when ACTIVE. */
static void ssip_start_rx(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state,
						ssi->recv_state);
	spin_lock_bh(&ssi->lock);
	/*
	 * We can have two UP events in a row due to a short low
	 * high transition. Therefore we need to ignore the second UP event.
	 */
	if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) {
		spin_unlock_bh(&ssi->lock);
		return;
	}
	ssip_set_rxstate(ssi, RECV_READY);
	spin_unlock_bh(&ssi->lock);

	msg = ssip_claim_cmd(ssi);
	ssip_set_cmd(msg, SSIP_READY_CMD);
	msg->complete = ssip_release_cmd;
	dev_dbg(&cl->device, "Send READY\n");
	hsi_async_write(cl, msg);
}

/* RX wake line went down: return the receiver to idle when ACTIVE. */
static void ssip_stop_rx(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state);
	spin_lock_bh(&ssi->lock);
	if (likely(ssi->main_state == ACTIVE))
		ssip_set_rxstate(ssi, RECV_IDLE);
	spin_unlock_bh(&ssi->lock);
}

/*
 * Destructor for a START_TRANS command whose transfer was aborted:
 * free the attached data message, then recycle the command.
 */
static void ssip_free_strans(struct hsi_msg *msg)
{
	ssip_free_data(msg->context);
	ssip_release_cmd(msg);
}

/* START_TRANS command sent: follow up with the actual data message. */
static void ssip_strans_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *data;

	data = msg->context;
	ssip_release_cmd(msg);
	spin_lock_bh(&ssi->lock);
	ssip_set_txstate(ssi, SENDING);
	spin_unlock_bh(&ssi->lock);
	hsi_async_write(cl, data);
}

/*
 * Dequeue the next pending data message and send its START_TRANS command
 * (the data itself goes out from ssip_strans_complete()). Returns 0 when
 * the queue is empty, otherwise the hsi_async_write() result.
 */
static int ssip_xmit(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg, *dmsg;
	struct sk_buff *skb;

	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		spin_unlock_bh(&ssi->lock);
		return 0;
	}
	dmsg = list_first_entry(&ssi->txqueue, struct hsi_msg, link);
	list_del(&dmsg->link);
	ssi->txqueue_len--;
	spin_unlock_bh(&ssi->lock);

	msg = ssip_claim_cmd(ssi);
	skb = dmsg->context;
	msg->context = dmsg;
	msg->complete = ssip_strans_complete;
	msg->destructor = ssip_free_strans;

	spin_lock_bh(&ssi->lock);
	ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len),
								ssi->txid));
	ssi->txid++;
	ssip_set_txstate(ssi, SENDING);
	spin_unlock_bh(&ssi->lock);

	dev_dbg(&cl->device, "Send STRANS (%d frames)\n",
						SSIP_BYTES_TO_FRAMES(skb->len));

	return hsi_async_write(cl, msg);
}

/* In soft IRQ context */
static void ssip_pn_rx(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	if (unlikely(!netif_running(dev))) {
		dev_dbg(&dev->dev, "Drop RX packet\n");
		dev->stats.rx_dropped++;
		dev_kfree_skb(skb);
		return;
	}
	if (unlikely(!pskb_may_pull(skb, SSIP_MIN_PN_HDR))) {
		dev_dbg(&dev->dev, "Error drop RX packet\n");
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	/* length field is exchanged in network byte order */
	((u16 *)skb->data)[2] = ntohs(((u16 *)skb->data)[2]);
	dev_dbg(&dev->dev, "RX length fixed (%04x -> %u)\n",
			((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2]));

	skb->protocol = htons(ETH_P_PHONET);
	skb_reset_mac_header(skb);
	__skb_pull(skb, 1);
	netif_rx(skb);
}

/* Data read finished: hand the skb up to the Phonet stack. */
static void ssip_rx_data_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct sk_buff *skb;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "RX data error\n");
		ssip_free_data(msg);
		ssip_error(cl);
		return;
	}
	del_timer(&ssi->rx_wd); /* FIXME: Revisit */
	skb = msg->context;
	ssip_pn_rx(skb);
	hsi_free_msg(msg);
}

/*
 * Handle BOOTINFO_REQ from the modem: enter HANDSHAKE, start the wake
 * line test, arm the boot watchdog and answer with BOOTINFO_RESP.
 */
static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	/* Workaround: Ignore CMT Loader message leftover */
	if (cmd == SSIP_CMT_LOADER_SYNC)
		return;

	switch (ssi->main_state) {
	case ACTIVE:
		dev_err(&cl->device, "Boot info req on active state\n");
		ssip_error(cl);
		fallthrough;
	case INIT:
	case HANDSHAKE:
		spin_lock_bh(&ssi->lock);
		ssi->main_state = HANDSHAKE;
		spin_unlock_bh(&ssi->lock);

		if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
			ssi_waketest(cl, 1); /* FIXME: To be removed */

		spin_lock_bh(&ssi->lock);
		/* Start boot handshake watchdog */
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Send BOOTINFO_RESP\n");
		if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
			dev_warn(&cl->device, "boot info req verid mismatch\n");
		msg = ssip_claim_cmd(ssi);
		ssip_set_cmd(msg, SSIP_BOOTINFO_RESP_CMD(SSIP_LOCAL_VERID));
		msg->complete = ssip_release_cmd;
		hsi_async_write(cl, msg);
		break;
	default:
		dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state);
		break;
	}
}

/* Handle BOOTINFO_RESP: (re)arm the boot watchdog while not yet ACTIVE. */
static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
		dev_warn(&cl->device, "boot info resp verid mismatch\n");

	spin_lock_bh(&ssi->lock);
	if (ssi->main_state != ACTIVE)
		/* Use tx_wd as a boot watchdog in non ACTIVE state */
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
	else
		dev_dbg(&cl->device, "boot info resp ignored M(%d)\n",
							ssi->main_state);
	spin_unlock_bh(&ssi->lock);
}

/*
 * Handle WAKETEST_RESULT: end of the wake line test. On success the
 * link goes ACTIVE and the Phonet queue is woken; on failure the
 * protocol is reset via ssip_error().
 */
static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	unsigned int wkres = SSIP_PAYLOAD(cmd);

	spin_lock_bh(&ssi->lock);
	if (ssi->main_state != HANDSHAKE) {
		dev_dbg(&cl->device, "wake lines test ignored M(%d)\n",
							ssi->main_state);
		spin_unlock_bh(&ssi->lock);
		return;
	}
	spin_unlock_bh(&ssi->lock);

	if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
		ssi_waketest(cl, 0); /* FIXME: To be removed */

	spin_lock_bh(&ssi->lock);
	ssi->main_state = ACTIVE;
	del_timer(&ssi->tx_wd); /* Stop boot handshake timer */
	spin_unlock_bh(&ssi->lock);

	dev_notice(&cl->device, "WAKELINES TEST %s\n",
				wkres &	SSIP_WAKETEST_FAILED ? "FAILED" : "OK");
	if (wkres & SSIP_WAKETEST_FAILED) {
		ssip_error(cl);
		return;
	}
	dev_dbg(&cl->device, "CMT is ONLINE\n");
	netif_wake_queue(ssi->netdev);
	netif_carrier_on(ssi->netdev);
}

/*
 * Handle READY from the modem: the peer is ready to receive, so start
 * transmitting queued data if we were waiting for it.
 */
static void ssip_rx_ready(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	spin_lock_bh(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n",
					ssi->send_state, ssi->main_state);
		spin_unlock_bh(&ssi->lock);
		return;
	}
	if (ssi->send_state != WAIT4READY) {
		dev_dbg(&cl->device, "Ignore spurious READY command\n");
		spin_unlock_bh(&ssi->lock);
		return;
	}
	ssip_set_txstate(ssi, SEND_READY);
	spin_unlock_bh(&ssi->lock);
	ssip_xmit(cl);
}

/*
 * Handle START_TRANS: allocate an skb of the announced PDU length
 * (in 32-bit frames), verify the message id sequence, and queue the
 * data read. Any failure is treated as a protocol error.
 */
static void ssip_rx_strans(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct sk_buff *skb;
	struct hsi_msg *msg;
	int len = SSIP_PDU_LENGTH(cmd);

	dev_dbg(&cl->device, "RX strans: %d frames\n", len);
	spin_lock_bh(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n",
					ssi->send_state, ssi->main_state);
		spin_unlock_bh(&ssi->lock);
		return;
	}
	ssip_set_rxstate(ssi, RECEIVING);
	if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) {
		dev_err(&cl->device, "START TRANS id %d expected %d\n",
					SSIP_MSG_ID(cmd), ssi->rxid);
		spin_unlock_bh(&ssi->lock);
		goto out1;
	}
	ssi->rxid++;
	spin_unlock_bh(&ssi->lock);
	skb = netdev_alloc_skb(ssi->netdev, len * 4);
	if (unlikely(!skb)) {
		dev_err(&cl->device, "No memory for rx skb\n");
		goto out1;
	}
	skb_put(skb, len * 4);
	msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
	if (unlikely(!msg)) {
		dev_err(&cl->device, "No memory for RX data msg\n");
		goto out2;
	}
	msg->complete = ssip_rx_data_complete;
	hsi_async_read(cl, msg);

	return;
out2:
	dev_kfree_skb(skb);
out1:
	ssip_error(cl);
}

/*
 * Command read completion: dispatch the received command word to the
 * proper handler and immediately re-arm the read with the same message.
 */
static void ssip_rxcmd_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	u32 cmd = ssip_get_cmd(msg);
	unsigned int cmdid = SSIP_COMMAND(cmd);

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "RX error detected\n");
		ssip_release_cmd(msg);
		ssip_error(cl);
		return;
	}
	hsi_async_read(cl, msg);
	dev_dbg(&cl->device, "RX cmd: 0x%08x\n", cmd);
	switch (cmdid) {
	case SSIP_SW_BREAK:
		/* Ignored */
		break;
	case SSIP_BOOTINFO_REQ:
		ssip_rx_bootinforeq(cl, cmd);
		break;
	case SSIP_BOOTINFO_RESP:
		ssip_rx_bootinforesp(cl, cmd);
		break;
	case SSIP_WAKETEST_RESULT:
		ssip_rx_waketest(cl, cmd);
		break;
	case SSIP_START_TRANS:
		ssip_rx_strans(cl, cmd);
		break;
	case SSIP_READY:
		ssip_rx_ready(cl);
		break;
	default:
		dev_warn(&cl->device, "command 0x%08x not supported\n", cmd);
		break;
	}
}

/*
 * SW break sent: either go idle (dropping the wake line if no slave
 * still needs it) or keep transmitting queued data, then wake the
 * Phonet queue.
 */
static void ssip_swbreak_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	ssip_release_cmd(msg);
	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		if (atomic_read(&ssi->tx_usecnt)) {
			ssip_set_txstate(ssi, SEND_READY);
		} else {
			ssip_set_txstate(ssi, SEND_IDLE);
			hsi_stop_tx(cl);
		}
		spin_unlock_bh(&ssi->lock);
	} else {
		spin_unlock_bh(&ssi->lock);
		ssip_xmit(cl);
	}
	netif_wake_queue(ssi->netdev);
}

/*
 * Data write completion: terminate the transfer with a SW break when the
 * queue is drained, otherwise send the next queued message.
 */
static void ssip_tx_data_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *cmsg;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "TX data error\n");
		ssip_error(cl);
		goto out;
	}
	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		ssip_set_txstate(ssi, SENDING_SWBREAK);
		spin_unlock_bh(&ssi->lock);
		cmsg = ssip_claim_cmd(ssi);
		ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD);
		cmsg->complete = ssip_swbreak_complete;
		dev_dbg(&cl->device, "Send SWBREAK\n");
		hsi_async_write(cl, cmsg);
	} else {
		spin_unlock_bh(&ssi->lock);
		ssip_xmit(cl);
	}
out:
	ssip_free_data(msg);
}

/* HSI port event callback: track the remote TX wake line transitions. */
static void ssip_port_event(struct hsi_client *cl, unsigned long event)
{
	switch (event) {
	case HSI_EVENT_START_RX:
		ssip_start_rx(cl);
		break;
	case HSI_EVENT_STOP_RX:
		ssip_stop_rx(cl);
		break;
	default:
		return;
	}
}

/*
 * Phonet netdev open: claim and configure the HSI port, start the wake
 * line test and issue the boot handshake.
 */
static int ssip_pn_open(struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	int err;

	err = hsi_claim_port(cl, 1);
	if (err < 0) {
		dev_err(&cl->device, "SSI port already claimed\n");
		return err;
	}
	err = hsi_register_port_event(cl, ssip_port_event);
	if (err < 0) {
		dev_err(&cl->device, "Register HSI port event failed (%d)\n",
			err);
		hsi_release_port(cl);
		return err;
	}
	dev_dbg(&cl->device, "Configuring SSI port\n");
	hsi_setup(cl);

	if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
		ssi_waketest(cl, 1); /* FIXME: To be removed */

	spin_lock_bh(&ssi->lock);
	ssi->main_state = HANDSHAKE;
	spin_unlock_bh(&ssi->lock);

	ssip_send_bootinfo_req_cmd(cl);

	return 0;
}

/* Phonet netdev stop: reset the protocol and release the HSI port. */
static int ssip_pn_stop(struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);

	ssip_reset(cl);
	hsi_unregister_port_event(cl);
	hsi_release_port(cl);

	return 0;
}

/* Deferred TX kick, scheduled from ssip_pn_xmit() in SEND_READY state. */
static void ssip_xmit_work(struct work_struct *work)
{
	struct ssi_protocol *ssi =
				container_of(work, struct ssi_protocol, work);
	struct hsi_client *cl = ssi->cl;

	ssip_xmit(cl);
}

/*
 * Phonet transmit path: pad the skb to a 32-bit boundary, fix the length
 * field endianness, wrap it in an HSI data message and queue it; raise
 * the TX wake line or kick the worker depending on the send state.
 */
static netdev_tx_t ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	if ((skb->protocol != htons(ETH_P_PHONET)) ||
					(skb->len < SSIP_MIN_PN_HDR))
		goto drop;
	/* Pad to 32-bits - FIXME: Revisit*/
	if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3)))
		goto inc_dropped;

	/*
	 * Modem sends Phonet messages over SSI with its own endianness.
	 * Assume that modem has the same endianness as we do.
	 */
	if (skb_cow_head(skb, 0))
		goto drop;

	/* length field is exchanged in network byte order */
	((u16 *)skb->data)[2] = htons(((u16 *)skb->data)[2]);

	msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
	if (!msg) {
		dev_dbg(&cl->device, "Dropping tx data: No memory\n");
		goto drop;
	}
	msg->complete = ssip_tx_data_complete;

	spin_lock_bh(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Dropping tx data: CMT is OFFLINE\n");
		goto drop2;
	}
	list_add_tail(&msg->link, &ssi->txqueue);
	ssi->txqueue_len++;
	if (dev->tx_queue_len < ssi->txqueue_len) {
		dev_info(&cl->device, "TX queue full %d\n", ssi->txqueue_len);
		netif_stop_queue(dev);
	}
	if (ssi->send_state == SEND_IDLE) {
		ssip_set_txstate(ssi, WAIT4READY);
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Start TX qlen %d\n", ssi->txqueue_len);
		hsi_start_tx(cl);
	} else if (ssi->send_state == SEND_READY) {
		/* Needed for cmt-speech workaround */
		dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n",
							ssi->txqueue_len);
		spin_unlock_bh(&ssi->lock);
		schedule_work(&ssi->work);
	} else {
		spin_unlock_bh(&ssi->lock);
	}
	/*
	 * NOTE(review): skb may already be owned by the async TX path at
	 * this point; reading skb->len here looks racy — confirm against
	 * the HSI core completion ordering.
	 */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;
drop2:
	hsi_free_msg(msg);
drop:
	dev_kfree_skb(skb);
inc_dropped:
	dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

/* CMT reset event handler */
void ssip_reset_event(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);
	dev_err(&ssi->cl->device, "CMT reset detected!\n");
	ssip_error(ssi->cl);
}
EXPORT_SYMBOL_GPL(ssip_reset_event);

static const struct net_device_ops ssip_pn_ops = {
	.ndo_open	= ssip_pn_open,
	.ndo_stop	= ssip_pn_stop,
	.ndo_start_xmit	= ssip_pn_xmit,
};

/* Initialize the Phonet net_device defaults for this protocol. */
static void ssip_pn_setup(struct net_device *dev)
{
	static const u8 addr = PN_MEDIA_SOS;

	dev->features		= 0;
	dev->netdev_ops		= &ssip_pn_ops;
	dev->type		= ARPHRD_PHONET;
	dev->flags		= IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu		= SSIP_DEFAULT_MTU;
	dev->hard_header_len	= 1;
	dev->addr_len		= 1;
	dev_addr_set(dev, &addr);
	dev->tx_queue_len	= SSIP_TXQUEUE_LEN;

	dev->needs_free_netdev	= true;
	dev->header_ops		= &phonet_header_ops;
}

/*
 * HSI client probe: allocate and initialize the protocol instance,
 * resolve the command/data channel ids, pre-allocate the command pool
 * and register the Phonet network device.
 */
static int ssi_protocol_probe(struct device *dev)
{
	static const char ifname[] = "phonet%d";
	struct hsi_client *cl = to_hsi_client(dev);
	struct ssi_protocol *ssi;
	int err;

	ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
	if (!ssi)
		return -ENOMEM;

	spin_lock_init(&ssi->lock);
	timer_setup(&ssi->rx_wd, ssip_rx_wd, TIMER_DEFERRABLE);
	timer_setup(&ssi->tx_wd, ssip_tx_wd, TIMER_DEFERRABLE);
	timer_setup(&ssi->keep_alive, ssip_keep_alive, 0);
	INIT_LIST_HEAD(&ssi->txqueue);
	INIT_LIST_HEAD(&ssi->cmdqueue);
	atomic_set(&ssi->tx_usecnt, 0);
	hsi_client_set_drvdata(cl, ssi);
	ssi->cl = cl;
	INIT_WORK(&ssi->work, ssip_xmit_work);

	ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control");
	if (ssi->channel_id_cmd < 0) {
		err = ssi->channel_id_cmd;
		dev_err(dev, "Could not get cmd channel (%d)\n", err);
		goto out;
	}

	ssi->channel_id_data = hsi_get_channel_id_by_name(cl, "mcsaab-data");
	if (ssi->channel_id_data < 0) {
		err = ssi->channel_id_data;
		dev_err(dev, "Could not get data channel (%d)\n", err);
		goto out;
	}

	err = ssip_alloc_cmds(ssi);
	if (err < 0) {
		dev_err(dev, "No memory for commands\n");
		goto out;
	}

	ssi->netdev = alloc_netdev(0, ifname, NET_NAME_UNKNOWN, ssip_pn_setup);
	if (!ssi->netdev) {
		dev_err(dev, "No memory for netdev\n");
		err = -ENOMEM;
		goto out1;
	}

	/* MTU range: 6 - 65535 */
	ssi->netdev->min_mtu = PHONET_MIN_MTU;
	ssi->netdev->max_mtu = SSIP_MAX_MTU;

	SET_NETDEV_DEV(ssi->netdev, dev);
	netif_carrier_off(ssi->netdev);
	err = register_netdev(ssi->netdev);
	if (err < 0) {
		dev_err(dev, "Register netdev failed (%d)\n", err);
		goto out2;
	}

	list_add(&ssi->link, &ssip_list);

	dev_dbg(dev, "channel configuration: cmd=%d, data=%d\n",
		ssi->channel_id_cmd, ssi->channel_id_data);

	return 0;
out2:
	free_netdev(ssi->netdev);
out1:
	ssip_free_cmds(ssi);
out:
	kfree(ssi);

	return err;
}

/* HSI client remove: undo everything done in ssi_protocol_probe(). */
static int ssi_protocol_remove(struct device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	list_del(&ssi->link);
	unregister_netdev(ssi->netdev);
	ssip_free_cmds(ssi);
	hsi_client_set_drvdata(cl, NULL);
	kfree(ssi);

	return 0;
}

static struct hsi_client_driver ssip_driver = {
	.driver = {
		.name	= "ssi-protocol",
		.owner	= THIS_MODULE,
		.probe	= ssi_protocol_probe,
		.remove	= ssi_protocol_remove,
	},
};

static int __init ssip_init(void)
{
	pr_info("SSI protocol aka McSAAB added\n");

	return hsi_register_client_driver(&ssip_driver);
}
module_init(ssip_init);

static void __exit ssip_exit(void)
{
	hsi_unregister_client_driver(&ssip_driver);
	pr_info("SSI protocol driver removed\n");
}
module_exit(ssip_exit);

MODULE_ALIAS("hsi:ssi-protocol");
MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
MODULE_AUTHOR("Remi Denis-Courmont <remi.denis-courmont@nokia.com>");
MODULE_DESCRIPTION("SSI protocol improved aka McSAAB");
MODULE_LICENSE("GPL");