// SPDX-License-Identifier: GPL-2.0-only
/*
 * FM Driver for Connectivity chip of Texas Instruments.
 *
 * This sub-module of FM driver is common for FM RX and TX
 * functionality. This module is responsible for:
 * 1) Forming group of Channel-8 commands to perform particular
 *    functionality (e.g., frequency set requires more than
 *    one Channel-8 command to be sent to the chip).
 * 2) Sending each Channel-8 command to the chip and reading
 *    response back over Shared Transport.
 * 3) Managing TX and RX queues and BH works.
 * 4) Handling FM Interrupt packet and taking appropriate action.
 * 5) Loading FM firmware to the chip (common, FM TX, and FM RX
 *    firmware files based on mode selection)
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Raja Mani <raja_mani@ti.com>
 * Author: Manjunatha Halli <manjunatha_halli@ti.com>
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/jiffies.h>

#include "fmdrv.h"
#include "fmdrv_v4l2.h"
#include "fmdrv_common.h"
#include <linux/ti_wilink_st.h>
#include "fmdrv_rx.h"
#include "fmdrv_tx.h"

/* Region info */
static struct region_info region_configs[] = {
	/* Europe/US */
	{
		.chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
		.bot_freq = 87500,	/* 87.5 MHz */
		.top_freq = 108000,	/* 108 MHz */
		.fm_band = 0,
	},
	/* Japan */
	{
		.chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
		.bot_freq = 76000,	/* 76 MHz */
		.top_freq = 90000,	/* 90 MHz */
		.fm_band = 1,
	},
};

/* Band selection */
static u8 default_radio_region;	/* Europe/US */
module_param(default_radio_region, byte, 0);
MODULE_PARM_DESC(default_radio_region, "Region: 0=Europe/US, 1=Japan");

/* RDS buffer blocks */
static u32 default_rds_buf = 300;
module_param(default_rds_buf, uint, 0444);
MODULE_PARM_DESC(default_rds_buf, "RDS buffer entries");

/* Radio Nr */
static u32 radio_nr = -1;
module_param(radio_nr, int, 0444);
MODULE_PARM_DESC(radio_nr, "Radio Nr");

/* FM irq handlers forward declaration */
static void fm_irq_send_flag_getcmd(struct fmdev *);
static void fm_irq_handle_flag_getcmd_resp(struct fmdev *);
static void fm_irq_handle_hw_malfunction(struct fmdev *);
static void fm_irq_handle_rds_start(struct fmdev *);
static void fm_irq_send_rdsdata_getcmd(struct fmdev *);
static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *);
static void fm_irq_handle_rds_finish(struct fmdev *);
static void fm_irq_handle_tune_op_ended(struct fmdev *);
static void fm_irq_handle_power_enb(struct fmdev *);
static void fm_irq_handle_low_rssi_start(struct fmdev *);
static void fm_irq_afjump_set_pi(struct fmdev *);
static void fm_irq_handle_set_pi_resp(struct fmdev *);
static void fm_irq_afjump_set_pimask(struct fmdev *);
static void fm_irq_handle_set_pimask_resp(struct fmdev *);
static void fm_irq_afjump_setfreq(struct fmdev *);
static void fm_irq_handle_setfreq_resp(struct fmdev *);
static void fm_irq_afjump_enableint(struct fmdev *);
static void fm_irq_afjump_enableint_resp(struct fmdev *);
static void fm_irq_start_afjump(struct fmdev *);
static void fm_irq_handle_start_afjump_resp(struct fmdev *);
static void fm_irq_afjump_rd_freq(struct fmdev *);
static void fm_irq_afjump_rd_freq_resp(struct fmdev *);
static void fm_irq_handle_low_rssi_finish(struct fmdev *);
static void fm_irq_send_intmsk_cmd(struct fmdev *);
static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *);

/*
 * When FM common module receives interrupt packet, following handlers
 * will be executed one after another to service the interrupt(s)
 */
enum fmc_irq_handler_index {
	FM_SEND_FLAG_GETCMD_IDX,
	FM_HANDLE_FLAG_GETCMD_RESP_IDX,

	/* HW malfunction irq handler */
	FM_HW_MAL_FUNC_IDX,

	/* RDS threshold reached irq handler */
	FM_RDS_START_IDX,
	FM_RDS_SEND_RDS_GETCMD_IDX,
	FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX,
	FM_RDS_FINISH_IDX,

	/* Tune operation ended irq handler */
	FM_HW_TUNE_OP_ENDED_IDX,

	/* TX power enable irq handler */
	FM_HW_POWER_ENB_IDX,

	/* Low RSSI irq handler */
	FM_LOW_RSSI_START_IDX,
	FM_AF_JUMP_SETPI_IDX,
	FM_AF_JUMP_HANDLE_SETPI_RESP_IDX,
	FM_AF_JUMP_SETPI_MASK_IDX,
	FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX,
	FM_AF_JUMP_SET_AF_FREQ_IDX,
	FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX,
	FM_AF_JUMP_ENABLE_INT_IDX,
	FM_AF_JUMP_ENABLE_INT_RESP_IDX,
	FM_AF_JUMP_START_AFJUMP_IDX,
	FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX,
	FM_AF_JUMP_RD_FREQ_IDX,
	FM_AF_JUMP_RD_FREQ_RESP_IDX,
	FM_LOW_RSSI_FINISH_IDX,

	/* Interrupt process post action */
	FM_SEND_INTMSK_CMD_IDX,
	FM_HANDLE_INTMSK_CMD_RESP_IDX,
};

/* FM interrupt handler table */
static int_handler_prototype int_handler_table[] = {
	fm_irq_send_flag_getcmd,
	fm_irq_handle_flag_getcmd_resp,
	fm_irq_handle_hw_malfunction,
	fm_irq_handle_rds_start,	/* RDS threshold reached irq handler */
	fm_irq_send_rdsdata_getcmd,
	fm_irq_handle_rdsdata_getcmd_resp,
	fm_irq_handle_rds_finish,
	fm_irq_handle_tune_op_ended,
	fm_irq_handle_power_enb,	/* TX power enable irq handler */
	fm_irq_handle_low_rssi_start,
	fm_irq_afjump_set_pi,
	fm_irq_handle_set_pi_resp,
	fm_irq_afjump_set_pimask,
	fm_irq_handle_set_pimask_resp,
	fm_irq_afjump_setfreq,
	fm_irq_handle_setfreq_resp,
	fm_irq_afjump_enableint,
	fm_irq_afjump_enableint_resp,
	fm_irq_start_afjump,
	fm_irq_handle_start_afjump_resp,
	fm_irq_afjump_rd_freq,
	fm_irq_afjump_rd_freq_resp,
	fm_irq_handle_low_rssi_finish,
	fm_irq_send_intmsk_cmd,		/* Interrupt process post action */
	fm_irq_handle_intmsk_cmd_resp
};

static long (*g_st_write) (struct sk_buff *skb);
static struct completion wait_for_fmdrv_reg_comp;

static inline void fm_irq_call(struct fmdev *fmdev)
{
	fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
}

/* Continue next function in interrupt handler table */
static inline void fm_irq_call_stage(struct fmdev *fmdev, u8 stage)
{
	fmdev->irq_info.stage = stage;
	fm_irq_call(fmdev);
}

static inline void fm_irq_timeout_stage(struct fmdev *fmdev, u8 stage)
{
	fmdev->irq_info.stage = stage;
	mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
}

#ifdef FM_DUMP_TXRX_PKT
/* To dump outgoing FM Channel-8 packets */
inline void dump_tx_skb_data(struct sk_buff *skb)
{
	int len, len_org;
	u8 index;
	struct fm_cmd_msg_hdr *cmd_hdr;

	cmd_hdr = (struct fm_cmd_msg_hdr *)skb->data;
	printk(KERN_INFO "<<%shdr:%02x len:%02x opcode:%02x type:%s dlen:%02x",
	       fm_cb(skb)->completion ? " " : "*", cmd_hdr->hdr,
	       cmd_hdr->len, cmd_hdr->op,
	       cmd_hdr->rd_wr ? "RD" : "WR", cmd_hdr->dlen);

	len_org = skb->len - FM_CMD_MSG_HDR_SIZE;
	if (len_org > 0) {
		printk(KERN_CONT "\n data(%d): ", cmd_hdr->dlen);
		len = min(len_org, 14);
		for (index = 0; index < len; index++)
			printk(KERN_CONT "%x ",
			       skb->data[FM_CMD_MSG_HDR_SIZE + index]);
		printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
	}
	printk(KERN_CONT "\n");
}

/* To dump incoming FM Channel-8 packets */
inline void dump_rx_skb_data(struct sk_buff *skb)
{
	int len, len_org;
	u8 index;
	struct fm_event_msg_hdr *evt_hdr;

	evt_hdr = (struct fm_event_msg_hdr *)skb->data;
	printk(KERN_INFO ">> hdr:%02x len:%02x sts:%02x numhci:%02x opcode:%02x type:%s dlen:%02x",
	       evt_hdr->hdr, evt_hdr->len,
	       evt_hdr->status, evt_hdr->num_fm_hci_cmds, evt_hdr->op,
	       (evt_hdr->rd_wr) ? "RD" : "WR", evt_hdr->dlen);

	len_org = skb->len - FM_EVT_MSG_HDR_SIZE;
	if (len_org > 0) {
		printk(KERN_CONT "\n data(%d): ", evt_hdr->dlen);
		len = min(len_org, 14);
		for (index = 0; index < len; index++)
			printk(KERN_CONT "%x ",
			       skb->data[FM_EVT_MSG_HDR_SIZE + index]);
		printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
	}
	printk(KERN_CONT "\n");
}
#endif

void fmc_update_region_info(struct fmdev *fmdev, u8 region_to_set)
{
	fmdev->rx.region = region_configs[region_to_set];
}

/*
 * FM common sub-module will queue this bh work whenever it receives
 * FM packet from ST driver.
 */
static void recv_bh_work(struct work_struct *t)
{
	struct fmdev *fmdev;
	struct fm_irq *irq_info;
	struct fm_event_msg_hdr *evt_hdr;
	struct sk_buff *skb;
	u8 num_fm_hci_cmds;
	unsigned long flags;

	fmdev = from_work(fmdev, t, rx_bh_work);
	irq_info = &fmdev->irq_info;
	/* Process all packets in the RX queue */
	while ((skb = skb_dequeue(&fmdev->rx_q))) {
		if (skb->len < sizeof(struct fm_event_msg_hdr)) {
			fmerr("skb(%p) has only %d bytes, need at least %zu bytes to decode\n",
			      skb, skb->len, sizeof(struct fm_event_msg_hdr));
			kfree_skb(skb);
			continue;
		}

		evt_hdr = (void *)skb->data;
		num_fm_hci_cmds = evt_hdr->num_fm_hci_cmds;

		/* FM interrupt packet? */
		if (evt_hdr->op == FM_INTERRUPT) {
			/* FM interrupt handler started already? */
			if (!test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
				set_bit(FM_INTTASK_RUNNING, &fmdev->flag);
				if (irq_info->stage != 0) {
					fmerr("Invalid stage, resetting to zero\n");
					irq_info->stage = 0;
				}

				/*
				 * Execute first function in interrupt handler
				 * table.
				 */
				irq_info->handlers[irq_info->stage](fmdev);
			} else {
				set_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag);
			}
			kfree_skb(skb);
		}
		/* Anyone waiting for this with completion handler? */
		else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp != NULL) {

			spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
			fmdev->resp_skb = skb;
			spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
			complete(fmdev->resp_comp);

			fmdev->resp_comp = NULL;
			atomic_set(&fmdev->tx_cnt, 1);
		}
		/* Is this for interrupt handler? */
		else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp == NULL) {
			if (fmdev->resp_skb != NULL)
				fmerr("Response SKB ptr not NULL\n");

			spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
			fmdev->resp_skb = skb;
			spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

			/* Execute interrupt handler where state index points */
			irq_info->handlers[irq_info->stage](fmdev);

			kfree_skb(skb);
			atomic_set(&fmdev->tx_cnt, 1);
		} else {
			fmerr("Nobody claimed SKB(%p), purging\n", skb);
			kfree_skb(skb);
		}

		/*
		 * Check flow control field. If Num_FM_HCI_Commands field is
		 * not zero, queue FM TX bh work.
		 */
		if (num_fm_hci_cmds && atomic_read(&fmdev->tx_cnt))
			if (!skb_queue_empty(&fmdev->tx_q))
				queue_work(system_bh_wq, &fmdev->tx_bh_work);
	}
}

/* FM send_bh_work: is scheduled when FM packet has to be sent to chip */
static void send_bh_work(struct work_struct *t)
{
	struct fmdev *fmdev;
	struct sk_buff *skb;
	int len;

	fmdev = from_work(fmdev, t, tx_bh_work);

	if (!atomic_read(&fmdev->tx_cnt))
		return;

	/* Check whether a timeout occurred on the last transmitted packet */
	if (time_is_before_jiffies(fmdev->last_tx_jiffies + FM_DRV_TX_TIMEOUT)) {
		fmerr("TX timeout occurred\n");
		atomic_set(&fmdev->tx_cnt, 1);
	}

	/* Send queued FM TX packets */
	skb = skb_dequeue(&fmdev->tx_q);
	if (!skb)
		return;

	atomic_dec(&fmdev->tx_cnt);
	fmdev->pre_op = fm_cb(skb)->fm_op;

	if (fmdev->resp_comp != NULL)
		fmerr("Response completion handler is not NULL\n");

	fmdev->resp_comp = fm_cb(skb)->completion;

	/* Write FM packet to ST driver */
	len = g_st_write(skb);
	if (len < 0) {
		kfree_skb(skb);
		fmdev->resp_comp = NULL;
		fmerr("TX bh work failed to send skb(%p)\n", skb);
		atomic_set(&fmdev->tx_cnt, 1);
	} else {
		fmdev->last_tx_jiffies = jiffies;
	}
}

/*
 * Queues FM Channel-8 packet to FM TX queue and schedules FM TX bh work for
 * transmission
 */
static int fm_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
		       int payload_len, struct completion *wait_completion)
{
	struct sk_buff *skb;
	struct fm_cmd_msg_hdr *hdr;
	int size;

	if (fm_op >= FM_INTERRUPT) {
		fmerr("Invalid fm opcode - %d\n", fm_op);
		return -EINVAL;
	}
	if (test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) && payload == NULL) {
		fmerr("Payload data is NULL during fw download\n");
		return -EINVAL;
	}
	if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag))
		size = FM_CMD_MSG_HDR_SIZE + ((payload == NULL) ? 0 : payload_len);
	else
		size = payload_len;

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb) {
		fmerr("No memory to create new SKB\n");
		return -ENOMEM;
	}
	/*
	 * Don't fill FM header info for the commands which come from
	 * FM firmware file.
	 */
	if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) ||
	    test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
		/* Fill command header info */
		hdr = skb_put(skb, FM_CMD_MSG_HDR_SIZE);
		hdr->hdr = FM_PKT_LOGICAL_CHAN_NUMBER;	/* 0x08 */

		/* 3 (fm_opcode, rd_wr, dlen) + payload len */
		hdr->len = ((payload == NULL) ? 0 : payload_len) + 3;

		/* FM opcode */
		hdr->op = fm_op;

		/* read/write type */
		hdr->rd_wr = type;
		hdr->dlen = payload_len;
		fm_cb(skb)->fm_op = fm_op;

		/*
		 * If firmware download has finished and the command is
		 * not a read command then payload is != NULL - a write
		 * command with u16 payload - convert to be16
		 */
		if (payload != NULL)
			*(__be16 *)payload = cpu_to_be16(*(u16 *)payload);

	} else if (payload != NULL) {
		fm_cb(skb)->fm_op = *((u8 *)payload + 2);
	}
	if (payload != NULL)
		skb_put_data(skb, payload, payload_len);

	fm_cb(skb)->completion = wait_completion;
	skb_queue_tail(&fmdev->tx_q, skb);
	queue_work(system_bh_wq, &fmdev->tx_bh_work);

	return 0;
}

/* Sends FM Channel-8 command to the chip and waits for the response */
int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
		 unsigned int payload_len, void *response, int *response_len)
{
	struct sk_buff *skb;
	struct fm_event_msg_hdr *evt_hdr;
	unsigned long flags;
	int ret;

	init_completion(&fmdev->maintask_comp);
	ret = fm_send_cmd(fmdev, fm_op, type, payload, payload_len,
			  &fmdev->maintask_comp);
	if (ret)
		return ret;

	if (!wait_for_completion_timeout(&fmdev->maintask_comp,
					 FM_DRV_TX_TIMEOUT)) {
		fmerr("Timeout(%d sec), didn't get completion signal from RX bh work\n",
		      jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
		return -ETIMEDOUT;
	}
	if (!fmdev->resp_skb) {
		fmerr("Response SKB is missing\n");
		return -EFAULT;
	}
	spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
	skb = fmdev->resp_skb;
	fmdev->resp_skb = NULL;
	spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

	evt_hdr = (void *)skb->data;
	if (evt_hdr->status != 0) {
		fmerr("Received event pkt status(%d) is not zero\n",
		      evt_hdr->status);
		kfree_skb(skb);
		return -EIO;
	}
	/* Send response data to caller */
	if (response != NULL && response_len != NULL && evt_hdr->dlen &&
	    evt_hdr->dlen <= payload_len) {
		/* Skip header info and copy only response data */
		skb_pull(skb, sizeof(struct fm_event_msg_hdr));
		memcpy(response, skb->data, evt_hdr->dlen);
		*response_len = evt_hdr->dlen;
	} else if (response_len != NULL && evt_hdr->dlen == 0) {
		*response_len = 0;
	}
	kfree_skb(skb);

	return 0;
}

/* --- Helper functions used in FM interrupt handlers --- */
static inline int check_cmdresp_status(struct fmdev *fmdev,
				       struct sk_buff **skb)
{
	struct fm_event_msg_hdr *fm_evt_hdr;
	unsigned long flags;

	del_timer(&fmdev->irq_info.timer);

	spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
	*skb = fmdev->resp_skb;
	fmdev->resp_skb = NULL;
	spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

	fm_evt_hdr = (void *)(*skb)->data;
	if (fm_evt_hdr->status != 0) {
		fmerr("irq: opcode %x response status is not zero, initiating irq recovery process\n",
		      fm_evt_hdr->op);

		mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
		return -1;
	}

	return 0;
}

static inline void fm_irq_common_cmd_resp_helper(struct fmdev *fmdev, u8 stage)
{
	struct sk_buff *skb;

	if (!check_cmdresp_status(fmdev, &skb))
		fm_irq_call_stage(fmdev, stage);
}

/*
 * Interrupt process timeout handler.
 * One of the irq handlers did not get a proper response from the chip. So take
 * recovery action here. FM interrupts are disabled in the beginning of
 * interrupt process. Therefore reset stage index to re-enable default
 * interrupts. So that next interrupt will be processed as usual.
 */
static void int_timeout_handler(struct timer_list *t)
{
	struct fmdev *fmdev;
	struct fm_irq *fmirq;

	fmdbg("irq: timeout, trying to re-enable fm interrupts\n");
	fmdev = from_timer(fmdev, t, irq_info.timer);
	fmirq = &fmdev->irq_info;
	fmirq->retry++;

	if (fmirq->retry > FM_IRQ_TIMEOUT_RETRY_MAX) {
		/* Stop recovery action (interrupt reenable process) and
		 * reset stage index & retry count values */
		fmirq->stage = 0;
		fmirq->retry = 0;
		fmerr("Recovery action failed during irq processing, max retry reached\n");
		return;
	}
	fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
}

/* --------- FM interrupt handlers ------------ */
static void fm_irq_send_flag_getcmd(struct fmdev *fmdev)
{
	u16 flag;

	/* Send FLAG_GET command, to know the source of interrupt */
	if (!fm_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, sizeof(flag), NULL))
		fm_irq_timeout_stage(fmdev, FM_HANDLE_FLAG_GETCMD_RESP_IDX);
}

static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	struct fm_event_msg_hdr *fm_evt_hdr;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	fm_evt_hdr = (void *)skb->data;
	if (fm_evt_hdr->dlen > sizeof(fmdev->irq_info.flag))
		return;

	/* Skip header info and copy only response data */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	memcpy(&fmdev->irq_info.flag, skb->data, fm_evt_hdr->dlen);

	fmdev->irq_info.flag = be16_to_cpu((__force __be16)fmdev->irq_info.flag);
	fmdbg("irq: flag register(0x%x)\n", fmdev->irq_info.flag);

	/* Continue next function in interrupt handler table */
	fm_irq_call_stage(fmdev, FM_HW_MAL_FUNC_IDX);
}

static void fm_irq_handle_hw_malfunction(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_MAL_EVENT & fmdev->irq_info.mask)
		fmerr("irq: HW MAL int received - do nothing\n");

	/* Continue next function in interrupt handler table */
	fm_irq_call_stage(fmdev, FM_RDS_START_IDX);
}

static void fm_irq_handle_rds_start(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_RDS_EVENT & fmdev->irq_info.mask) {
		fmdbg("irq: rds threshold reached\n");
		fmdev->irq_info.stage = FM_RDS_SEND_RDS_GETCMD_IDX;
	} else {
		/* Continue next function in interrupt handler table */
		fmdev->irq_info.stage = FM_HW_TUNE_OP_ENDED_IDX;
	}

	fm_irq_call(fmdev);
}

static void fm_irq_send_rdsdata_getcmd(struct fmdev *fmdev)
{
	/* Send the command to read RDS data from the chip */
	if (!fm_send_cmd(fmdev, RDS_DATA_GET, REG_RD, NULL,
			 (FM_RX_RDS_FIFO_THRESHOLD * 3), NULL))
		fm_irq_timeout_stage(fmdev, FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX);
}

/* Keeps track of current RX channel AF (Alternate Frequency) */
static void fm_rx_update_af_cache(struct fmdev *fmdev, u8 af)
{
	struct tuned_station_info *stat_info = &fmdev->rx.stat_info;
	u8 reg_idx = fmdev->rx.region.fm_band;
	u8 index;
	u32 freq;

	/* First AF indicates the number of AFs that follow. Reset the list */
	if ((af >= FM_RDS_1_AF_FOLLOWS) && (af <= FM_RDS_25_AF_FOLLOWS)) {
		fmdev->rx.stat_info.af_list_max = (af - FM_RDS_1_AF_FOLLOWS + 1);
		fmdev->rx.stat_info.afcache_size = 0;
		fmdbg("No of expected AF : %d\n", fmdev->rx.stat_info.af_list_max);
		return;
	}

	if (af < FM_RDS_MIN_AF)
		return;
	if (reg_idx == FM_BAND_EUROPE_US && af > FM_RDS_MAX_AF)
		return;
	if (reg_idx == FM_BAND_JAPAN && af > FM_RDS_MAX_AF_JAPAN)
		return;

	freq = fmdev->rx.region.bot_freq + (af * 100);
	if (freq == fmdev->rx.freq) {
		fmdbg("Current freq(%d) is matching with received AF(%d)\n",
		      fmdev->rx.freq, freq);
		return;
	}
	/* Do check in AF cache */
	for (index = 0; index < stat_info->afcache_size; index++) {
		if (stat_info->af_cache[index] == freq)
			break;
	}
	/* Reached the limit of the list - ignore the next AF */
	if (index == stat_info->af_list_max) {
		fmdbg("AF cache is full\n");
		return;
	}
	/*
	 * If we reached the end of the list then this AF is not
	 * in the list - add it.
	 */
	if (index == stat_info->afcache_size) {
		fmdbg("Storing AF %d to cache index %d\n", freq, index);
		stat_info->af_cache[index] = freq;
		stat_info->afcache_size++;
	}
}

/*
 * Converts RDS buffer data from big endian format
 * to little endian format.
 */
static void fm_rdsparse_swapbytes(struct fmdev *fmdev,
				  struct fm_rdsdata_format *rds_format)
{
	u8 index = 0;
	u8 *rds_buff;

	/*
	 * Since in Orca the 2 RDS Data bytes are in little endian and
	 * in Dolphin they are in big endian, the parsing of the RDS data
	 * is chip dependent
	 */
	if (fmdev->asci_id != 0x6350) {
		rds_buff = &rds_format->data.groupdatabuff.buff[0];
		while (index + 1 < FM_RX_RDS_INFO_FIELD_MAX) {
			swap(rds_buff[index], rds_buff[index + 1]);
			index += 2;
		}
	}
}

static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	struct fm_rdsdata_format rds_fmt;
	struct fm_rds *rds = &fmdev->rx.rds;
	unsigned long group_idx, flags;
	u8 *rds_data, meta_data, tmpbuf[FM_RDS_BLK_SIZE];
	u8 type, blk_idx, idx;
	u16 cur_picode;
	u32 rds_len;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	/* Skip header info */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	rds_data = skb->data;
	rds_len = skb->len;

	/* Parse the RDS data */
	while (rds_len >= FM_RDS_BLK_SIZE) {
		meta_data = rds_data[2];
		/* Get the type: 0=A, 1=B, 2=C, 3=C', 4=D, 5=E */
		type = (meta_data & 0x07);

		/* Transform the blk type into index sequence (0, 1, 2, 3, 4) */
		blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
		fmdbg("Block index:%d(%s)\n", blk_idx,
		      (meta_data & FM_RDS_STATUS_ERR_MASK) ? "Bad" : "Ok");

		if ((meta_data & FM_RDS_STATUS_ERR_MASK) != 0)
			break;

		if (blk_idx > FM_RDS_BLK_IDX_D) {
			fmdbg("Block sequence mismatch\n");
			rds->last_blk_idx = -1;
			break;
		}

		/* Skip checkword (control) byte and copy only data byte */
		idx = array_index_nospec(blk_idx * (FM_RDS_BLK_SIZE - 1),
					 FM_RX_RDS_INFO_FIELD_MAX - (FM_RDS_BLK_SIZE - 1));

		memcpy(&rds_fmt.data.groupdatabuff.buff[idx], rds_data,
		       FM_RDS_BLK_SIZE - 1);

		rds->last_blk_idx = blk_idx;

		/* If completed a whole group then handle it */
		if (blk_idx == FM_RDS_BLK_IDX_D) {
			fmdbg("Good block received\n");
			fm_rdsparse_swapbytes(fmdev, &rds_fmt);

			/*
			 * Extract PI code and store in local cache.
			 * We need this during AF switch processing.
			 */
			cur_picode = be16_to_cpu((__force __be16)rds_fmt.data.groupgeneral.pidata);
			if (fmdev->rx.stat_info.picode != cur_picode)
				fmdev->rx.stat_info.picode = cur_picode;

			fmdbg("picode:%d\n", cur_picode);

			group_idx = (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
			fmdbg("(fmdrv):Group:%ld%s\n", group_idx/2,
			      (group_idx % 2) ? "B" : "A");

			group_idx = 1 << (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
			if (group_idx == FM_RDS_GROUP_TYPE_MASK_0A) {
				fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[0]);
				fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[1]);
			}
		}
		rds_len -= FM_RDS_BLK_SIZE;
		rds_data += FM_RDS_BLK_SIZE;
	}

	/* Copy raw rds data to internal rds buffer */
	rds_data = skb->data;
	rds_len = skb->len;

	spin_lock_irqsave(&fmdev->rds_buff_lock, flags);
	while (rds_len > 0) {
		/*
		 * Fill RDS buffer as per V4L2 specification.
		 * Store control byte
		 */
		type = (rds_data[2] & 0x07);
		blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
		tmpbuf[2] = blk_idx;		/* Offset name */
		tmpbuf[2] |= blk_idx << 3;	/* Received offset */

		/* Store data byte */
		tmpbuf[0] = rds_data[0];
		tmpbuf[1] = rds_data[1];

		memcpy(&rds->buff[rds->wr_idx], &tmpbuf, FM_RDS_BLK_SIZE);
		rds->wr_idx = (rds->wr_idx + FM_RDS_BLK_SIZE) % rds->buf_size;

		/* Check for overflow & start over */
		if (rds->wr_idx == rds->rd_idx) {
			fmdbg("RDS buffer overflow\n");
			rds->wr_idx = 0;
			rds->rd_idx = 0;
			break;
		}
		rds_len -= FM_RDS_BLK_SIZE;
		rds_data += FM_RDS_BLK_SIZE;
	}
	spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);

	/* Wakeup read queue */
	if (rds->wr_idx != rds->rd_idx)
		wake_up_interruptible(&rds->read_queue);

	fm_irq_call_stage(fmdev, FM_RDS_FINISH_IDX);
}

static void fm_irq_handle_rds_finish(struct fmdev *fmdev)
{
	fm_irq_call_stage(fmdev, FM_HW_TUNE_OP_ENDED_IDX);
}

static void fm_irq_handle_tune_op_ended(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & (FM_FR_EVENT | FM_BL_EVENT) &
	    fmdev->irq_info.mask) {
		fmdbg("irq: tune ended/bandlimit reached\n");
		if (test_and_clear_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag)) {
			fmdev->irq_info.stage = FM_AF_JUMP_RD_FREQ_IDX;
		} else {
			complete(&fmdev->maintask_comp);
			fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;
		}
	} else
		fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;

	fm_irq_call(fmdev);
}

static void fm_irq_handle_power_enb(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_POW_ENB_EVENT) {
		fmdbg("irq: Power Enabled/Disabled\n");
		complete(&fmdev->maintask_comp);
	}

	fm_irq_call_stage(fmdev, FM_LOW_RSSI_START_IDX);
}

static void fm_irq_handle_low_rssi_start(struct fmdev *fmdev)
{
	if ((fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON) &&
	    (fmdev->irq_info.flag & FM_LEV_EVENT & fmdev->irq_info.mask) &&
	    (fmdev->rx.freq != FM_UNDEFINED_FREQ) &&
	    (fmdev->rx.stat_info.afcache_size != 0)) {
		fmdbg("irq: rssi level has fallen below threshold level\n");

		/* Disable further low RSSI interrupts */
		fmdev->irq_info.mask &= ~FM_LEV_EVENT;

		fmdev->rx.afjump_idx = 0;
		fmdev->rx.freq_before_jump = fmdev->rx.freq;
		fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
	} else {
		/* Continue next function in interrupt handler table */
		fmdev->irq_info.stage = FM_SEND_INTMSK_CMD_IDX;
	}

	fm_irq_call(fmdev);
}

static void fm_irq_afjump_set_pi(struct fmdev *fmdev)
{
	u16 payload;

	/* Set PI code - must be updated if the AF list is not empty */
	payload = fmdev->rx.stat_info.picode;
	if (!fm_send_cmd(fmdev, RDS_PI_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_RESP_IDX);
}

static void fm_irq_handle_set_pi_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SETPI_MASK_IDX);
}

/*
 * Set PI mask.
 * 0xFFFF = Enable PI code matching
 * 0x0000 = Disable PI code matching
 */
static void fm_irq_afjump_set_pimask(struct fmdev *fmdev)
{
	u16 payload;

	payload = 0x0000;
	if (!fm_send_cmd(fmdev, RDS_PI_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX);
}

static void fm_irq_handle_set_pimask_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SET_AF_FREQ_IDX);
}

static void fm_irq_afjump_setfreq(struct fmdev *fmdev)
{
	u16 frq_index;
	u16 payload;

	fmdbg("Switch to %d KHz\n", fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx]);
	frq_index = (fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx] -
		     fmdev->rx.region.bot_freq) / FM_FREQ_MUL;

	payload = frq_index;
	if (!fm_send_cmd(fmdev, AF_FREQ_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX);
}

static void fm_irq_handle_setfreq_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_ENABLE_INT_IDX);
}

static void fm_irq_afjump_enableint(struct fmdev *fmdev)
{
	u16 payload;

	/* Enable FR (tuning operation ended) interrupt */
	payload = FM_FR_EVENT;
	if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_ENABLE_INT_RESP_IDX);
}

static void fm_irq_afjump_enableint_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_START_AFJUMP_IDX);
}

static void fm_irq_start_afjump(struct fmdev *fmdev)
{
	u16 payload;

	payload = FM_TUNER_AF_JUMP_MODE;
	if (!fm_send_cmd(fmdev, TUNER_MODE_SET, REG_WR, &payload,
			 sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX);
}

static void fm_irq_handle_start_afjump_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;
	set_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag);
	clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
}

static void fm_irq_afjump_rd_freq(struct fmdev *fmdev)
{
	u16 payload;

	if (!fm_send_cmd(fmdev, FREQ_SET, REG_RD, NULL, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_RD_FREQ_RESP_IDX);
}

static void fm_irq_afjump_rd_freq_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	u16 read_freq;
	u32 curr_freq, jumped_freq;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	/* Skip header info and copy only response data */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	memcpy(&read_freq, skb->data, sizeof(read_freq));
	read_freq = be16_to_cpu((__force __be16)read_freq);
	curr_freq = fmdev->rx.region.bot_freq + ((u32)read_freq * FM_FREQ_MUL);

	jumped_freq = fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx];

	/* If the frequency was changed the jump succeeded */
	if ((curr_freq != fmdev->rx.freq_before_jump) && (curr_freq == jumped_freq)) {
		fmdbg("Successfully switched to alternate freq %d\n", curr_freq);
		fmdev->rx.freq = curr_freq;
		fm_rx_reset_rds_cache(fmdev);

		/* AF feature is on, enable low level RSSI interrupt */
		if (fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON)
			fmdev->irq_info.mask |= FM_LEV_EVENT;

		fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
	} else {	/* jump to the next freq in the AF list */
		fmdev->rx.afjump_idx++;

		/* If we reached the end of the list - stop searching */
		if (fmdev->rx.afjump_idx >= fmdev->rx.stat_info.afcache_size) {
			fmdbg("AF switch processing failed\n");
			fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
		} else {	/* AF List is not over - try next one */
			fmdbg("Trying next freq in AF cache\n");
			fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
		}
	}
	fm_irq_call(fmdev);
}

static void fm_irq_handle_low_rssi_finish(struct fmdev *fmdev)
{
	fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
}

static void fm_irq_send_intmsk_cmd(struct fmdev *fmdev)
{
	u16 payload;

	/* Re-enable FM interrupts */
	payload = fmdev->irq_info.mask;

	if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
			 sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_HANDLE_INTMSK_CMD_RESP_IDX);
}

static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;

	if (check_cmdresp_status(fmdev, &skb))
		return;
	/*
	 * This is last function in interrupt table to be executed.
	 * So, reset stage index to 0.
	 */
	fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;

	/* Start processing any pending interrupt */
	if (test_and_clear_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag))
		fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
	else
		clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
}

/* Returns availability of RDS data in internal buffer */
int fmc_is_rds_data_available(struct fmdev *fmdev, struct file *file,
			      struct poll_table_struct *pts)
{
	poll_wait(file, &fmdev->rx.rds.read_queue, pts);
	if (fmdev->rx.rds.rd_idx != fmdev->rx.rds.wr_idx)
		return 0;

	return -EAGAIN;
}

/* Copies RDS data from internal buffer to user buffer */
int fmc_transfer_rds_from_internal_buff(struct fmdev *fmdev, struct file *file,
					u8 __user *buf, size_t count)
{
	u32 block_count;
	u8 tmpbuf[FM_RDS_BLK_SIZE];
	unsigned long flags;
	int ret;

	if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx) {
		if (file->f_flags & O_NONBLOCK)
			return -EWOULDBLOCK;

		ret = wait_event_interruptible(fmdev->rx.rds.read_queue,
					       (fmdev->rx.rds.wr_idx != fmdev->rx.rds.rd_idx));
		if (ret)
			return -EINTR;
	}

	/* Calculate block count from byte count */
	count /= FM_RDS_BLK_SIZE;
	block_count = 0;
	ret = 0;

	while (block_count < count) {
		spin_lock_irqsave(&fmdev->rds_buff_lock, flags);

		if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx) {
			spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);
			break;
		}
		memcpy(tmpbuf, &fmdev->rx.rds.buff[fmdev->rx.rds.rd_idx],
		       FM_RDS_BLK_SIZE);
		fmdev->rx.rds.rd_idx += FM_RDS_BLK_SIZE;
		if (fmdev->rx.rds.rd_idx >= fmdev->rx.rds.buf_size)
			fmdev->rx.rds.rd_idx = 0;

		spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);

		if (copy_to_user(buf, tmpbuf, FM_RDS_BLK_SIZE))
			break;

		block_count++;
		buf += FM_RDS_BLK_SIZE;
		ret += FM_RDS_BLK_SIZE;
	}
	return ret;
}

int fmc_set_freq(struct fmdev *fmdev, u32 freq_to_set)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_freq(fmdev, freq_to_set);

	case FM_MODE_TX:
		return fm_tx_set_freq(fmdev, freq_to_set);

	default:
		return -EINVAL;
	}
}

int fmc_get_freq(struct fmdev *fmdev, u32 *cur_tuned_frq)
{
	if (fmdev->rx.freq == FM_UNDEFINED_FREQ) {
		fmerr("RX frequency is not set\n");
		return -EPERM;
	}
	if (cur_tuned_frq == NULL) {
		fmerr("Invalid memory\n");
		return -ENOMEM;
	}

	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		*cur_tuned_frq = fmdev->rx.freq;
		return 0;

	case FM_MODE_TX:
		*cur_tuned_frq = 0;	/* TODO : Change this later */
		return 0;

	default:
		return -EINVAL;
	}

}

int fmc_set_region(struct fmdev *fmdev, u8 region_to_set)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_region(fmdev, region_to_set);

	case FM_MODE_TX:
		return fm_tx_set_region(fmdev, region_to_set);

	default:
		return -EINVAL;
	}
}

int fmc_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_mute_mode(fmdev, mute_mode_toset);

	case FM_MODE_TX:
		return fm_tx_set_mute_mode(fmdev, mute_mode_toset);

	default:
		return -EINVAL;
	}
}

int fmc_set_stereo_mono(struct fmdev *fmdev, u16 mode)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_stereo_mono(fmdev, mode);

	case FM_MODE_TX:
		return fm_tx_set_stereo_mono(fmdev, mode);

	default:
		return -EINVAL;
	}
}

int fmc_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_rds_mode(fmdev, rds_en_dis);

	case FM_MODE_TX:
		return fm_tx_set_rds_mode(fmdev, rds_en_dis);

	default:
		return -EINVAL;
	}
}

/* Sends power off command to the chip */
static int fm_power_down(struct fmdev *fmdev)
{
	u16 payload;
	int ret;

	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmerr("FM core is not ready\n");
		return -EPERM;
	}
	if (fmdev->curr_fmmode == FM_MODE_OFF) {
		fmdbg("FM chip is already in OFF state\n");
		return 0;
	}

	payload = 0x0;
	ret = fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
			   sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	return fmc_release(fmdev);
}

/* Reads init command from FM firmware file and loads to the chip */
static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
{
	const struct firmware *fw_entry;
	struct bts_header *fw_header;
	struct bts_action *action;
	struct bts_action_delay *delay;
	u8 *fw_data;
	int ret, fw_len;

	set_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);

	ret = request_firmware(&fw_entry, fw_name,
			       &fmdev->radio_dev->dev);
	if (ret < 0) {
		fmerr("Unable to read firmware(%s) content\n", fw_name);
		return ret;
	}
	fmdbg("Firmware(%s) length : %zu bytes\n", fw_name, fw_entry->size);

	fw_data = (void *)fw_entry->data;
	fw_len = fw_entry->size;

	fw_header = (struct bts_header *)fw_data;
	if (fw_header->magic != FM_FW_FILE_HEADER_MAGIC) {
		fmerr("%s not a legal TI firmware file\n", fw_name);
		ret = -EINVAL;
		goto rel_fw;
	}
	fmdbg("FW(%s) magic number : 0x%x\n", fw_name, fw_header->magic);

	/* Skip file header info, we already verified it */
	fw_data += sizeof(struct bts_header);
	fw_len -= sizeof(struct bts_header);

	while (fw_data && fw_len > 0) {
		action = (struct bts_action *)fw_data;

		switch (action->type) {
		case ACTION_SEND_COMMAND:	/* Send */
			ret = fmc_send_cmd(fmdev, 0, 0, action->data,
					   action->size, NULL, NULL);
			if (ret)
				goto rel_fw;

			break;

		case ACTION_DELAY:	/* Delay */
			delay = (struct bts_action_delay *)action->data;
			mdelay(delay->msec);
			break;
		}

		fw_data += (sizeof(struct bts_action) + (action->size));
		fw_len -= (sizeof(struct bts_action) + (action->size));
	}
	fmdbg("Transferred %zu of %zu bytes of the firmware to chip\n",
	      fw_entry->size - fw_len, fw_entry->size);
rel_fw:
	release_firmware(fw_entry);
	clear_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);

	return ret;
}

/* Loads default RX configuration to the chip */
static int load_default_rx_configuration(struct fmdev *fmdev)
{
	int ret;

	ret = fm_rx_set_volume(fmdev, FM_DEFAULT_RX_VOLUME);
	if (ret < 0)
		return ret;

	return fm_rx_set_rssi_threshold(fmdev, FM_DEFAULT_RSSI_THRESHOLD);
}

/* Does FM power on sequence */
static int fm_power_up(struct fmdev *fmdev, u8 mode)
{
	u16 payload;
	__be16 asic_id = 0, asic_ver = 0;
	int resp_len, ret;
	u8 fw_name[50];

	if (mode >= FM_MODE_ENTRY_MAX) {
		fmerr("Invalid firmware download option\n");
		return -EINVAL;
	}

	/*
	 * Initialize FM common module. FM GPIO toggling is
	 * taken care in Shared Transport driver.
	 */
	ret = fmc_prepare(fmdev);
	if (ret < 0) {
		fmerr("Unable to prepare FM Common\n");
		return ret;
	}

	payload = FM_ENABLE;
	if (fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
			 sizeof(payload), NULL, NULL))
		goto rel;

	/* Allow the chip to settle down in Channel-8 mode */
	msleep(20);

	if (fmc_send_cmd(fmdev, ASIC_ID_GET, REG_RD, NULL,
			 sizeof(asic_id), &asic_id, &resp_len))
		goto rel;

	if (fmc_send_cmd(fmdev, ASIC_VER_GET, REG_RD, NULL,
			 sizeof(asic_ver), &asic_ver, &resp_len))
		goto rel;

	fmdbg("ASIC ID: 0x%x , ASIC Version: %d\n",
	      be16_to_cpu(asic_id), be16_to_cpu(asic_ver));

	sprintf(fw_name, "%s_%x.%d.bts", FM_FMC_FW_FILE_START,
		be16_to_cpu(asic_id), be16_to_cpu(asic_ver));

	ret = fm_download_firmware(fmdev, fw_name);
	if (ret < 0) {
		fmdbg("Failed to download firmware file %s\n", fw_name);
		goto rel;
	}
	sprintf(fw_name, "%s_%x.%d.bts", (mode == FM_MODE_RX) ?
			FM_RX_FW_FILE_START : FM_TX_FW_FILE_START,
			be16_to_cpu(asic_id), be16_to_cpu(asic_ver));

	ret = fm_download_firmware(fmdev, fw_name);
	if (ret < 0) {
		fmdbg("Failed to download firmware file %s\n", fw_name);
		goto rel;
	} else
		return ret;
rel:
	return fmc_release(fmdev);
}

/* Set FM Modes(TX, RX, OFF) */
int fmc_set_mode(struct fmdev *fmdev, u8 fm_mode)
{
	int ret = 0;

	if (fm_mode >= FM_MODE_ENTRY_MAX) {
		fmerr("Invalid FM mode\n");
		return -EINVAL;
	}
	if (fmdev->curr_fmmode == fm_mode) {
		fmdbg("FM is already in mode(%d)\n", fm_mode);
		return ret;
	}

	switch (fm_mode) {
	case FM_MODE_OFF:	/* OFF Mode */
		ret = fm_power_down(fmdev);
		if (ret < 0) {
			fmerr("Failed to set OFF mode\n");
			return ret;
		}
		break;

	case FM_MODE_TX:	/* TX Mode */
	case FM_MODE_RX:	/* RX Mode */
		/* Power down before switching to TX or RX mode */
		if (fmdev->curr_fmmode != FM_MODE_OFF) {
			ret = fm_power_down(fmdev);
			if (ret < 0) {
				fmerr("Failed to set OFF mode\n");
				return ret;
			}
			msleep(30);
		}
		ret = fm_power_up(fmdev, fm_mode);
		if (ret < 0) {
			fmerr("Failed to load firmware\n");
			return ret;
		}
	}
	fmdev->curr_fmmode = fm_mode;

	/* Set default configuration */
	if (fmdev->curr_fmmode == FM_MODE_RX) {
		fmdbg("Loading default rx configuration..\n");
		ret = load_default_rx_configuration(fmdev);
		if (ret < 0)
			fmerr("Failed to load default values\n");
	}

	return ret;
}

/* Returns current FM mode (TX, RX, OFF) */
int fmc_get_mode(struct fmdev *fmdev, u8 *fmmode)
{
	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmerr("FM core is not ready\n");
		return -EPERM;
	}
	if (fmmode == NULL) {
		fmerr("Invalid memory\n");
		return -ENOMEM;
	}

	*fmmode = fmdev->curr_fmmode;
	return 0;
}

/* Called by ST layer when FM packet is available */
static long fm_st_receive(void *arg, struct sk_buff *skb)
{
	struct fmdev *fmdev;

	fmdev = arg;

	if (skb == NULL) {
		fmerr("Invalid SKB received from ST\n");
		return -EFAULT;
	}

	if (skb->cb[0] != FM_PKT_LOGICAL_CHAN_NUMBER) {
		fmerr("Received SKB (%p) is not FM Channel 8 pkt\n", skb);
		return -EINVAL;
	}

	memcpy(skb_push(skb, 1), &skb->cb[0], 1);
	skb_queue_tail(&fmdev->rx_q, skb);
	queue_work(system_bh_wq, &fmdev->rx_bh_work);

	return 0;
}

/*
 * Called by ST layer to indicate protocol registration completion
 * status.
 */
static void fm_st_reg_comp_cb(void *arg, int data)
{
	struct fmdev *fmdev;

	fmdev = (struct fmdev *)arg;
	fmdev->streg_cbdata = data;
	complete(&wait_for_fmdrv_reg_comp);
}

/*
 * This function will be called from FM V4L2 open function.
 * Register with ST driver and initialize driver data.
 */
int fmc_prepare(struct fmdev *fmdev)
{
	static struct st_proto_s fm_st_proto;
	int ret;

	if (test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmdbg("FM Core is already up\n");
		return 0;
	}

	memset(&fm_st_proto, 0, sizeof(fm_st_proto));
	fm_st_proto.recv = fm_st_receive;
	fm_st_proto.match_packet = NULL;
	fm_st_proto.reg_complete_cb = fm_st_reg_comp_cb;
	fm_st_proto.write = NULL; /* TI ST driver will fill write pointer */
	fm_st_proto.priv_data = fmdev;
	fm_st_proto.chnl_id = 0x08;
	fm_st_proto.max_frame_size = 0xff;
	fm_st_proto.hdr_len = 1;
	fm_st_proto.offset_len_in_hdr = 0;
	fm_st_proto.len_size = 1;
	fm_st_proto.reserve = 1;

	ret = st_register(&fm_st_proto);
	if (ret == -EINPROGRESS) {
		init_completion(&wait_for_fmdrv_reg_comp);
		fmdev->streg_cbdata = -EINPROGRESS;
		fmdbg("%s waiting for ST reg completion signal\n", __func__);

		if (!wait_for_completion_timeout(&wait_for_fmdrv_reg_comp,
						 FM_ST_REG_TIMEOUT)) {
			fmerr("Timeout(%d sec), didn't get reg completion signal from ST\n",
			      jiffies_to_msecs(FM_ST_REG_TIMEOUT) / 1000);
			return -ETIMEDOUT;
		}
		if (fmdev->streg_cbdata != 0) {
			fmerr("ST reg comp CB called with error status %d\n",
			      fmdev->streg_cbdata);
			return -EAGAIN;
		}

		ret = 0;
	} else if (ret < 0) {
		fmerr("st_register failed %d\n", ret);
		return -EAGAIN;
	}

	if (fm_st_proto.write != NULL) {
		g_st_write = fm_st_proto.write;
	} else {
		fmerr("Failed to get ST write func pointer\n");
		ret = st_unregister(&fm_st_proto);
		if (ret < 0)
			fmerr("st_unregister failed %d\n", ret);
		return -EAGAIN;
	}

	spin_lock_init(&fmdev->rds_buff_lock);
	spin_lock_init(&fmdev->resp_skb_lock);

	/* Initialize TX queue and TX bh work */
	skb_queue_head_init(&fmdev->tx_q);
	INIT_WORK(&fmdev->tx_bh_work, send_bh_work);

	/* Initialize RX Queue and RX bh work */
	skb_queue_head_init(&fmdev->rx_q);
	INIT_WORK(&fmdev->rx_bh_work, recv_bh_work);

	fmdev->irq_info.stage = 0;
	atomic_set(&fmdev->tx_cnt, 1);
	fmdev->resp_comp = NULL;

	timer_setup(&fmdev->irq_info.timer, int_timeout_handler, 0);
	/* TODO: add FM_STIC_EVENT later */
	fmdev->irq_info.mask = FM_MAL_EVENT;

	/* Region info */
	fmdev->rx.region = region_configs[default_radio_region];

	fmdev->rx.mute_mode = FM_MUTE_OFF;
	fmdev->rx.rf_depend_mute = FM_RX_RF_DEPENDENT_MUTE_OFF;
	fmdev->rx.rds.flag = FM_RDS_DISABLE;
	fmdev->rx.freq = FM_UNDEFINED_FREQ;
	fmdev->rx.rds_mode = FM_RDS_SYSTEM_RDS;
	fmdev->rx.af_mode = FM_RX_RDS_AF_SWITCH_MODE_OFF;
	fmdev->irq_info.retry = 0;

	fm_rx_reset_rds_cache(fmdev);
	init_waitqueue_head(&fmdev->rx.rds.read_queue);

	fm_rx_reset_station_info(fmdev);
	set_bit(FM_CORE_READY, &fmdev->flag);

	return ret;
}

/*
 * This function will be called from FM V4L2 release function.
 * Unregister from ST driver.
 */
int fmc_release(struct fmdev *fmdev)
{
	static struct st_proto_s fm_st_proto;
	int ret;

	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmdbg("FM Core is already down\n");
		return 0;
	}
	/* Service pending read */
	wake_up_interruptible(&fmdev->rx.rds.read_queue);

	cancel_work_sync(&fmdev->tx_bh_work);
	cancel_work_sync(&fmdev->rx_bh_work);

	skb_queue_purge(&fmdev->tx_q);
	skb_queue_purge(&fmdev->rx_q);

	fmdev->resp_comp = NULL;
	fmdev->rx.freq = 0;

	memset(&fm_st_proto, 0, sizeof(fm_st_proto));
	fm_st_proto.chnl_id = 0x08;

	ret = st_unregister(&fm_st_proto);

	if (ret < 0)
		fmerr("Failed to de-register FM from ST %d\n", ret);
	else
		fmdbg("Successfully unregistered from ST\n");

	clear_bit(FM_CORE_READY, &fmdev->flag);
	return ret;
}

/*
 * Module init function. Ask FM V4L module to register video device.
 * Allocate memory for FM driver context and RX RDS buffer.
 */
static int __init fm_drv_init(void)
{
	struct fmdev *fmdev = NULL;
	int ret = -ENOMEM;

	fmdbg("FM driver version %s\n", FM_DRV_VERSION);

	fmdev = kzalloc(sizeof(struct fmdev), GFP_KERNEL);
	if (NULL == fmdev) {
		fmerr("Can't allocate operation structure memory\n");
		return ret;
	}
	fmdev->rx.rds.buf_size = default_rds_buf * FM_RDS_BLK_SIZE;
	fmdev->rx.rds.buff = kzalloc(fmdev->rx.rds.buf_size, GFP_KERNEL);
	if (NULL == fmdev->rx.rds.buff) {
		fmerr("Can't allocate rds ring buffer\n");
		goto rel_dev;
	}

	ret = fm_v4l2_init_video_device(fmdev, radio_nr);
	if (ret < 0)
		goto rel_rdsbuf;

	fmdev->irq_info.handlers = int_handler_table;
	fmdev->curr_fmmode = FM_MODE_OFF;
	fmdev->tx_data.pwr_lvl = FM_PWR_LVL_DEF;
	fmdev->tx_data.preemph = FM_TX_PREEMPH_50US;
	return ret;

rel_rdsbuf:
	kfree(fmdev->rx.rds.buff);
rel_dev:
	kfree(fmdev);

	return ret;
}

/* Module exit function. Ask FM V4L module to unregister video device */
static void __exit fm_drv_exit(void)
{
	struct fmdev *fmdev = NULL;

	fmdev = fm_v4l2_deinit_video_device();
	if (fmdev != NULL) {
		kfree(fmdev->rx.rds.buff);
		kfree(fmdev);
	}
}

module_init(fm_drv_init);
module_exit(fm_drv_exit);

/* ------------- Module Info ------------- */
MODULE_AUTHOR("Manjunatha Halli <manjunatha_halli@ti.com>");
MODULE_DESCRIPTION("FM Driver for TI's Connectivity chip. " FM_DRV_VERSION);
MODULE_VERSION(FM_DRV_VERSION);
MODULE_LICENSE("GPL");