// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for NXP MCR20A 802.15.4 Wireless-PAN Networking controller
 *
 * Copyright (C) 2018 Xue Liu <liuxuenetmail@gmail.com>
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/skbuff.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/ieee802154.h>
#include <linux/debugfs.h>

#include <net/mac802154.h>
#include <net/cfg802154.h>

#include <linux/device.h>

#include "mcr20a.h"

#define SPI_COMMAND_BUFFER		3

#define REGISTER_READ			BIT(7)
#define REGISTER_WRITE			(0 << 7)
#define REGISTER_ACCESS			(0 << 6)
#define PACKET_BUFF_BURST_ACCESS	BIT(6)
#define PACKET_BUFF_BYTE_ACCESS		BIT(5)

#define MCR20A_WRITE_REG(x)		(x)
#define MCR20A_READ_REG(x)		(REGISTER_READ | (x))
#define MCR20A_BURST_READ_PACKET_BUF	(0xC0)
#define MCR20A_BURST_WRITE_PACKET_BUF	(0x40)

#define MCR20A_CMD_REG		0x80
#define MCR20A_CMD_REG_MASK	0x3f
#define MCR20A_CMD_WRITE	0x40
#define MCR20A_CMD_FB		0x20

/* Number of Interrupt Request Status Registers */
#define MCR20A_IRQSTS_NUM	2 /* only IRQ_STS1 and IRQ_STS2 */

/* MCR20A CCA Type */
enum {
	MCR20A_CCA_ED,	  // energy detect - CCA bit not active,
			  // not to be used for T and CCCA sequences
	MCR20A_CCA_MODE1, // energy detect - CCA bit ACTIVE
	MCR20A_CCA_MODE2, // 802.15.4 compliant signal detect - CCA bit ACTIVE
	MCR20A_CCA_MODE3
};

enum {
	MCR20A_XCVSEQ_IDLE	= 0x00,
	MCR20A_XCVSEQ_RX	= 0x01,
	MCR20A_XCVSEQ_TX	= 0x02,
	MCR20A_XCVSEQ_CCA	= 0x03,
	MCR20A_XCVSEQ_TR	= 0x04,
	MCR20A_XCVSEQ_CCCA	= 0x05,
};

/* IEEE-802.15.4 defined constants (2.4 GHz logical channels) */
#define MCR20A_MIN_CHANNEL	(11)
#define MCR20A_MAX_CHANNEL	(26)
#define MCR20A_CHANNEL_SPACING	(5)

/* MCR20A CCA Threshold constants */
#define MCR20A_MIN_CCA_THRESHOLD (0x6EU)
#define MCR20A_MAX_CCA_THRESHOLD (0x00U)

/* version 0C */
#define MCR20A_OVERWRITE_VERSION (0x0C)

/* MCR20A PLL configurations */
static const u8 PLL_INT[16] = {
	/* 2405 */ 0x0B,	/* 2410 */ 0x0B,	/* 2415 */ 0x0B,
	/* 2420 */ 0x0B,	/* 2425 */ 0x0B,	/* 2430 */ 0x0B,
	/* 2435 */ 0x0C,	/* 2440 */ 0x0C,	/* 2445 */ 0x0C,
	/* 2450 */ 0x0C,	/* 2455 */ 0x0C,	/* 2460 */ 0x0C,
	/* 2465 */ 0x0D,	/* 2470 */ 0x0D,	/* 2475 */ 0x0D,
	/* 2480 */ 0x0D
};

static const u8 PLL_FRAC[16] = {
	/* 2405 */ 0x28,	/* 2410 */ 0x50,	/* 2415 */ 0x78,
	/* 2420 */ 0xA0,	/* 2425 */ 0xC8,	/* 2430 */ 0xF0,
	/* 2435 */ 0x18,	/* 2440 */ 0x40,	/* 2445 */ 0x68,
	/* 2450 */ 0x90,	/* 2455 */ 0xB8,	/* 2460 */ 0xE0,
	/* 2465 */ 0x08,	/* 2470 */ 0x30,	/* 2475 */ 0x58,
	/* 2480 */ 0x80
};
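
/*
 * Channel-to-PLL mapping illustration (values taken from the tables above
 * and the formula used in mcr20a_set_channel() below,
 * frequency = ((PLL_INT + 64) + PLL_FRAC / 65536) * 32 MHz):
 *
 *   channel 11: PLL_INT = 0x0B, PLL_FRAC = 0x2800
 *               -> ((11 + 64) + 10240 / 65536) * 32 MHz = 2405 MHz
 *   channel 26: PLL_INT = 0x0D, PLL_FRAC = 0x8000
 *               -> ((13 + 64) + 32768 / 65536) * 32 MHz = 2480 MHz
 *
 * PLL_FRAC[] holds only the MSB of the 16-bit fractional value; the LSB is
 * always written as 0x00.
 */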
static const struct reg_sequence mar20a_iar_overwrites[] = {
	{ IAR_MISC_PAD_CTRL,	0x02 },
	{ IAR_VCO_CTRL1,	0xB3 },
	{ IAR_VCO_CTRL2,	0x07 },
	{ IAR_PA_TUNING,	0x71 },
	{ IAR_CHF_IBUF,		0x2F },
	{ IAR_CHF_QBUF,		0x2F },
	{ IAR_CHF_IRIN,		0x24 },
	{ IAR_CHF_QRIN,		0x24 },
	{ IAR_CHF_IL,		0x24 },
	{ IAR_CHF_QL,		0x24 },
	{ IAR_CHF_CC1,		0x32 },
	{ IAR_CHF_CCL,		0x1D },
	{ IAR_CHF_CC2,		0x2D },
	{ IAR_CHF_IROUT,	0x24 },
	{ IAR_CHF_QROUT,	0x24 },
	{ IAR_PA_CAL,		0x28 },
	{ IAR_AGC_THR1,		0x55 },
	{ IAR_AGC_THR2,		0x2D },
	{ IAR_ATT_RSSI1,	0x5F },
	{ IAR_ATT_RSSI2,	0x8F },
	{ IAR_RSSI_OFFSET,	0x61 },
	{ IAR_CHF_PMA_GAIN,	0x03 },
	{ IAR_CCA1_THRESH,	0x50 },
	{ IAR_CORR_NVAL,	0x13 },
	{ IAR_ACKDELAY,		0x3D },
};

#define MCR20A_VALID_CHANNELS (0x07FFF800)
#define MCR20A_MAX_BUF		(127)

#define printdev(X) (&X->spi->dev)

/* regmap information for Direct Access Register (DAR) access */
#define MCR20A_DAR_WRITE	0x01
#define MCR20A_DAR_READ		0x00
#define MCR20A_DAR_NUMREGS	0x3F

/* regmap information for Indirect Access Register (IAR) access */
#define MCR20A_IAR_ACCESS	0x80
#define MCR20A_IAR_NUMREGS	0xBEFF

/* Read/Write SPI Commands for DAR and IAR registers. */
#define MCR20A_READSHORT(reg)	((reg) << 1)
#define MCR20A_WRITESHORT(reg)	((reg) << 1 | 1)
#define MCR20A_READLONG(reg)	(1 << 15 | (reg) << 5)
#define MCR20A_WRITELONG(reg)	(1 << 15 | (reg) << 5 | 1 << 4)

/* Type definitions for link configuration of instantiable layers */
#define MCR20A_PHY_INDIRECT_QUEUE_SIZE (12)

static bool
mcr20a_dar_writeable(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case DAR_IRQ_STS1:
	case DAR_IRQ_STS2:
	case DAR_IRQ_STS3:
	case DAR_PHY_CTRL1:
	case DAR_PHY_CTRL2:
	case DAR_PHY_CTRL3:
	case DAR_PHY_CTRL4:
	case DAR_SRC_CTRL:
	case DAR_SRC_ADDRS_SUM_LSB:
	case DAR_SRC_ADDRS_SUM_MSB:
	case DAR_T3CMP_LSB:
	case DAR_T3CMP_MSB:
	case DAR_T3CMP_USB:
	case DAR_T2PRIMECMP_LSB:
	case DAR_T2PRIMECMP_MSB:
	case DAR_T1CMP_LSB:
	case DAR_T1CMP_MSB:
	case DAR_T1CMP_USB:
	case DAR_T2CMP_LSB:
	case DAR_T2CMP_MSB:
	case DAR_T2CMP_USB:
	case DAR_T4CMP_LSB:
	case DAR_T4CMP_MSB:
	case DAR_T4CMP_USB:
	case DAR_PLL_INT0:
	case DAR_PLL_FRAC0_LSB:
	case DAR_PLL_FRAC0_MSB:
	case DAR_PA_PWR:
	/* no DAR_ACM */
	case DAR_OVERWRITE_VER:
	case DAR_CLK_OUT_CTRL:
	case DAR_PWR_MODES:
		return true;
	default:
		return false;
	}
}

static bool
mcr20a_dar_readable(struct device *dev, unsigned int reg)
{
	bool rc;

	/* all writeable registers are also readable */
	rc = mcr20a_dar_writeable(dev, reg);
	if (rc)
		return rc;

	/* read-only regs */
	switch (reg) {
	case DAR_RX_FRM_LEN:
	case DAR_CCA1_ED_FNL:
	case DAR_EVENT_TMR_LSB:
	case DAR_EVENT_TMR_MSB:
	case DAR_EVENT_TMR_USB:
	case DAR_TIMESTAMP_LSB:
	case DAR_TIMESTAMP_MSB:
	case DAR_TIMESTAMP_USB:
	case DAR_SEQ_STATE:
	case DAR_LQI_VALUE:
	case DAR_RSSI_CCA_CONT:
		return true;
	default:
		return false;
	}
}

static bool
mcr20a_dar_volatile(struct device *dev, unsigned int reg)
{
	/* can be changed during runtime */
	switch (reg) {
	case DAR_IRQ_STS1:
	case DAR_IRQ_STS2:
	case DAR_IRQ_STS3:
	/* accessed via both spi_async and regmap, so keep them volatile */
		return true;
	default:
		return false;
	}
}

static bool
mcr20a_dar_precious(struct device *dev, unsigned int reg)
{
	/* don't clear irq line on read */
	switch (reg) {
	case DAR_IRQ_STS1:
	case DAR_IRQ_STS2:
	case DAR_IRQ_STS3:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config mcr20a_dar_regmap = {
	.name			= "mcr20a_dar",
	.reg_bits		= 8,
	.val_bits		= 8,
	.write_flag_mask	= REGISTER_ACCESS | REGISTER_WRITE,
	.read_flag_mask		= REGISTER_ACCESS | REGISTER_READ,
	.cache_type		= REGCACHE_RBTREE,
	.writeable_reg		= mcr20a_dar_writeable,
	.readable_reg		= mcr20a_dar_readable,
	.volatile_reg		= mcr20a_dar_volatile,
	.precious_reg		= mcr20a_dar_precious,
	.fast_io		= true,
	.can_multi_write	= true,
};
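
/*
 * Illustration of the resulting DAR SPI header byte (derived from the flag
 * masks above, not from the datasheet): regmap ORs the flag mask into the
 * 8-bit register address, so e.g. a read of register 0x03 puts
 * 0x83 (REGISTER_READ | 0x03) on the bus, while a write puts plain 0x03.
 */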
static bool
mcr20a_iar_writeable(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case IAR_XTAL_TRIM:
	case IAR_PMC_LP_TRIM:
	case IAR_MACPANID0_LSB:
	case IAR_MACPANID0_MSB:
	case IAR_MACSHORTADDRS0_LSB:
	case IAR_MACSHORTADDRS0_MSB:
	case IAR_MACLONGADDRS0_0:
	case IAR_MACLONGADDRS0_8:
	case IAR_MACLONGADDRS0_16:
	case IAR_MACLONGADDRS0_24:
	case IAR_MACLONGADDRS0_32:
	case IAR_MACLONGADDRS0_40:
	case IAR_MACLONGADDRS0_48:
	case IAR_MACLONGADDRS0_56:
	case IAR_RX_FRAME_FILTER:
	case IAR_PLL_INT1:
	case IAR_PLL_FRAC1_LSB:
	case IAR_PLL_FRAC1_MSB:
	case IAR_MACPANID1_LSB:
	case IAR_MACPANID1_MSB:
	case IAR_MACSHORTADDRS1_LSB:
	case IAR_MACSHORTADDRS1_MSB:
	case IAR_MACLONGADDRS1_0:
	case IAR_MACLONGADDRS1_8:
	case IAR_MACLONGADDRS1_16:
	case IAR_MACLONGADDRS1_24:
	case IAR_MACLONGADDRS1_32:
	case IAR_MACLONGADDRS1_40:
	case IAR_MACLONGADDRS1_48:
	case IAR_MACLONGADDRS1_56:
	case IAR_DUAL_PAN_CTRL:
	case IAR_DUAL_PAN_DWELL:
	case IAR_CCA1_THRESH:
	case IAR_CCA1_ED_OFFSET_COMP:
	case IAR_LQI_OFFSET_COMP:
	case IAR_CCA_CTRL:
	case IAR_CCA2_CORR_PEAKS:
	case IAR_CCA2_CORR_THRESH:
	case IAR_TMR_PRESCALE:
	case IAR_ANT_PAD_CTRL:
	case IAR_MISC_PAD_CTRL:
	case IAR_BSM_CTRL:
	case IAR_RNG:
	case IAR_RX_WTR_MARK:
	case IAR_SOFT_RESET:
	case IAR_TXDELAY:
	case IAR_ACKDELAY:
	case IAR_CORR_NVAL:
	case IAR_ANT_AGC_CTRL:
	case IAR_AGC_THR1:
	case IAR_AGC_THR2:
	case IAR_PA_CAL:
	case IAR_ATT_RSSI1:
	case IAR_ATT_RSSI2:
	case IAR_RSSI_OFFSET:
	case IAR_XTAL_CTRL:
	case IAR_CHF_PMA_GAIN:
	case IAR_CHF_IBUF:
	case IAR_CHF_QBUF:
	case IAR_CHF_IRIN:
	case IAR_CHF_QRIN:
	case IAR_CHF_IL:
	case IAR_CHF_QL:
	case IAR_CHF_CC1:
	case IAR_CHF_CCL:
	case IAR_CHF_CC2:
	case IAR_CHF_IROUT:
	case IAR_CHF_QROUT:
	case IAR_PA_TUNING:
	case IAR_VCO_CTRL1:
	case IAR_VCO_CTRL2:
		return true;
	default:
		return false;
	}
}

static bool
mcr20a_iar_readable(struct device *dev, unsigned int reg)
{
	bool rc;

	/* all writeable registers are also readable */
	rc = mcr20a_iar_writeable(dev, reg);
	if (rc)
		return rc;

	/* read-only regs */
	switch (reg) {
	case IAR_PART_ID:
	case IAR_DUAL_PAN_STS:
	case IAR_RX_BYTE_COUNT:
	case IAR_FILTERFAIL_CODE1:
	case IAR_FILTERFAIL_CODE2:
	case IAR_RSSI:
		return true;
	default:
		return false;
	}
}

static bool
mcr20a_iar_volatile(struct device *dev, unsigned int reg)
{
	/* can be changed during runtime */
	switch (reg) {
	case IAR_DUAL_PAN_STS:
	case IAR_RX_BYTE_COUNT:
	case IAR_FILTERFAIL_CODE1:
	case IAR_FILTERFAIL_CODE2:
	case IAR_RSSI:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config mcr20a_iar_regmap = {
	.name			= "mcr20a_iar",
	.reg_bits		= 16,
	.val_bits		= 8,
	.write_flag_mask	= REGISTER_ACCESS | REGISTER_WRITE | IAR_INDEX,
	.read_flag_mask		= REGISTER_ACCESS | REGISTER_READ | IAR_INDEX,
	.cache_type		= REGCACHE_RBTREE,
	.writeable_reg		= mcr20a_iar_writeable,
	.readable_reg		= mcr20a_iar_readable,
	.volatile_reg		= mcr20a_iar_volatile,
	.fast_io		= true,
};
struct mcr20a_local {
	struct spi_device *spi;

	struct ieee802154_hw *hw;
	struct regmap *regmap_dar;
	struct regmap *regmap_iar;

	u8 *buf;

	bool is_tx;

	/* for writing tx buffer */
	struct spi_message tx_buf_msg;
	u8 tx_header[1];
	/* burst buffer write command */
	struct spi_transfer tx_xfer_header;
	u8 tx_len[1];
	/* len of tx packet */
	struct spi_transfer tx_xfer_len;
	/* data of tx packet */
	struct spi_transfer tx_xfer_buf;
	struct sk_buff *tx_skb;

	/* generic register access (e.g. reading the RX frame length) */
	struct spi_message reg_msg;
	u8 reg_cmd[1];
	u8 reg_data[MCR20A_IRQSTS_NUM];
	struct spi_transfer reg_xfer_cmd;
	struct spi_transfer reg_xfer_data;

	/* receive handling */
	struct spi_message rx_buf_msg;
	u8 rx_header[1];
	struct spi_transfer rx_xfer_header;
	u8 rx_lqi[1];
	struct spi_transfer rx_xfer_lqi;
	u8 rx_buf[MCR20A_MAX_BUF];
	struct spi_transfer rx_xfer_buf;

	/* isr handling for reading intstat */
	struct spi_message irq_msg;
	u8 irq_header[1];
	u8 irq_data[MCR20A_IRQSTS_NUM];
	struct spi_transfer irq_xfer_data;
	struct spi_transfer irq_xfer_header;
};

static void
mcr20a_write_tx_buf_complete(void *context)
{
	struct mcr20a_local *lp = context;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	lp->reg_msg.complete = NULL;
	lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_PHY_CTRL1);
	lp->reg_data[0] = MCR20A_XCVSEQ_TX;
	lp->reg_xfer_data.len = 1;

	ret = spi_async(lp->spi, &lp->reg_msg);
	if (ret)
		dev_err(printdev(lp), "failed to set SEQ TX\n");
}

static int
mcr20a_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	struct mcr20a_local *lp = hw->priv;

	dev_dbg(printdev(lp), "%s\n", __func__);

	lp->tx_skb = skb;

	print_hex_dump_debug("mcr20a tx: ", DUMP_PREFIX_OFFSET, 16, 1,
			     skb->data, skb->len, 0);

	lp->is_tx = 1;

	lp->reg_msg.complete = NULL;
	lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_PHY_CTRL1);
	lp->reg_data[0] = MCR20A_XCVSEQ_IDLE;
	lp->reg_xfer_data.len = 1;

	return spi_async(lp->spi, &lp->reg_msg);
}
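
/*
 * TX path overview (as implemented by the handlers above and below, no
 * additional hardware behaviour is assumed): mcr20a_xmit() stores the skb,
 * flags is_tx and asynchronously forces the transceiver sequence to IDLE.
 * The resulting SEQ interrupt is dispatched in mcr20a_irq_clean_complete(),
 * which calls mcr20a_handle_tx() to burst-write the frame into the packet
 * buffer; mcr20a_write_tx_buf_complete() then starts the TX sequence.
 * When the TX/SEQ interrupt arrives, mcr20a_handle_tx_complete() reports
 * the transmission to mac802154 and restarts the RX sequence.
 */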
static int
mcr20a_ed(struct ieee802154_hw *hw, u8 *level)
{
	/* energy detection is not implemented here; report a fixed dummy level */
	WARN_ON(!level);
	*level = 0xbe;
	return 0;
}

static int
mcr20a_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
	struct mcr20a_local *lp = hw->priv;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* frequency = ((PLL_INT+64) + (PLL_FRAC/65536)) * 32 MHz */
	ret = regmap_write(lp->regmap_dar, DAR_PLL_INT0, PLL_INT[channel - 11]);
	if (ret)
		return ret;
	ret = regmap_write(lp->regmap_dar, DAR_PLL_FRAC0_LSB, 0x00);
	if (ret)
		return ret;
	ret = regmap_write(lp->regmap_dar, DAR_PLL_FRAC0_MSB,
			   PLL_FRAC[channel - 11]);
	if (ret)
		return ret;

	return 0;
}

static int
mcr20a_start(struct ieee802154_hw *hw)
{
	struct mcr20a_local *lp = hw->priv;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* No slotted operation */
	dev_dbg(printdev(lp), "no slotted operation\n");
	ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
				 DAR_PHY_CTRL1_SLOTTED, 0x0);
	if (ret < 0)
		return ret;

	/* enable irq */
	enable_irq(lp->spi->irq);

	/* Unmask SEQ interrupt */
	ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2,
				 DAR_PHY_CTRL2_SEQMSK, 0x0);
	if (ret < 0)
		return ret;

	/* Start the RX sequence */
	dev_dbg(printdev(lp), "start the RX sequence\n");
	ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
				 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
	if (ret < 0)
		return ret;

	return 0;
}

static void
mcr20a_stop(struct ieee802154_hw *hw)
{
	struct mcr20a_local *lp = hw->priv;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* stop any running sequence */
	regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
			   DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_IDLE);

	/* disable irq */
	disable_irq(lp->spi->irq);
}

static int
mcr20a_set_hw_addr_filt(struct ieee802154_hw *hw,
			struct ieee802154_hw_addr_filt *filt,
			unsigned long changed)
{
	struct mcr20a_local *lp = hw->priv;

	dev_dbg(printdev(lp), "%s\n", __func__);

	if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
		u16 addr = le16_to_cpu(filt->short_addr);

		regmap_write(lp->regmap_iar, IAR_MACSHORTADDRS0_LSB, addr);
		regmap_write(lp->regmap_iar, IAR_MACSHORTADDRS0_MSB, addr >> 8);
	}

	if (changed & IEEE802154_AFILT_PANID_CHANGED) {
		u16 pan = le16_to_cpu(filt->pan_id);

		regmap_write(lp->regmap_iar, IAR_MACPANID0_LSB, pan);
		regmap_write(lp->regmap_iar, IAR_MACPANID0_MSB, pan >> 8);
	}

	if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
		u8 addr[8], i;

		memcpy(addr, &filt->ieee_addr, 8);
		for (i = 0; i < 8; i++)
			regmap_write(lp->regmap_iar,
				     IAR_MACLONGADDRS0_0 + i, addr[i]);
	}

	if (changed & IEEE802154_AFILT_PANC_CHANGED) {
		if (filt->pan_coord) {
			regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
					   DAR_PHY_CTRL4_PANCORDNTR0, 0x10);
		} else {
			regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
					   DAR_PHY_CTRL4_PANCORDNTR0, 0x00);
		}
	}

	return 0;
}

/* -30 dBm to 10 dBm */
#define MCR20A_MAX_TX_POWERS	0x14
static const s32 mcr20a_powers[MCR20A_MAX_TX_POWERS + 1] = {
	-3000, -2800, -2600, -2400, -2200, -2000, -1800, -1600, -1400,
	-1200, -1000, -800, -600, -400, -200, 0, 200, 400, 600, 800, 1000
};

static int
mcr20a_set_txpower(struct ieee802154_hw *hw, s32 mbm)
{
	struct mcr20a_local *lp = hw->priv;
	u32 i;

	dev_dbg(printdev(lp), "%s(%d)\n", __func__, mbm);

	for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) {
		if (lp->hw->phy->supported.tx_powers[i] == mbm)
			return regmap_write(lp->regmap_dar, DAR_PA_PWR,
					    ((i + 8) & 0x1F));
	}

	return -EINVAL;
}
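
/*
 * Power-table illustration (from mcr20a_powers[] and the register write
 * above, not from the datasheet): a request of 0 mbm (0 dBm) matches
 * index 15, so DAR_PA_PWR is written with ((15 + 8) & 0x1F) = 0x17;
 * the maximum entry, 1000 mbm (10 dBm) at index 20, yields 0x1C.
 */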
#define MCR20A_MAX_ED_LEVELS	MCR20A_MIN_CCA_THRESHOLD
static s32 mcr20a_ed_levels[MCR20A_MAX_ED_LEVELS + 1];

static int
mcr20a_set_cca_mode(struct ieee802154_hw *hw,
		    const struct wpan_phy_cca *cca)
{
	struct mcr20a_local *lp = hw->priv;
	unsigned int cca_mode = 0xff;
	bool cca_mode_and = false;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* mapping 802.15.4 to driver spec */
	switch (cca->mode) {
	case NL802154_CCA_ENERGY:
		cca_mode = MCR20A_CCA_MODE1;
		break;
	case NL802154_CCA_CARRIER:
		cca_mode = MCR20A_CCA_MODE2;
		break;
	case NL802154_CCA_ENERGY_CARRIER:
		switch (cca->opt) {
		case NL802154_CCA_OPT_ENERGY_CARRIER_AND:
			cca_mode = MCR20A_CCA_MODE3;
			cca_mode_and = true;
			break;
		case NL802154_CCA_OPT_ENERGY_CARRIER_OR:
			cca_mode = MCR20A_CCA_MODE3;
			cca_mode_and = false;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}
	ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
				 DAR_PHY_CTRL4_CCATYPE_MASK,
				 cca_mode << DAR_PHY_CTRL4_CCATYPE_SHIFT);
	if (ret < 0)
		return ret;

	if (cca_mode == MCR20A_CCA_MODE3) {
		if (cca_mode_and) {
			ret = regmap_update_bits(lp->regmap_iar, IAR_CCA_CTRL,
						 IAR_CCA_CTRL_CCA3_AND_NOT_OR,
						 0x08);
		} else {
			ret = regmap_update_bits(lp->regmap_iar, IAR_CCA_CTRL,
						 IAR_CCA_CTRL_CCA3_AND_NOT_OR,
						 0x00);
		}
		if (ret < 0)
			return ret;
	}

	return ret;
}

static int
mcr20a_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
{
	struct mcr20a_local *lp = hw->priv;
	u32 i;

	dev_dbg(printdev(lp), "%s\n", __func__);

	for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) {
		if (hw->phy->supported.cca_ed_levels[i] == mbm)
			return regmap_write(lp->regmap_iar, IAR_CCA1_THRESH, i);
	}

	return 0;
}
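
/*
 * ED-level table illustration (derived from mcr20a_hw_setup() and
 * mcr20a_phy_init() below): mcr20a_ed_levels[i] is set to -i * 100 mbm,
 * so the index written to IAR_CCA1_THRESH is simply the threshold
 * magnitude in dB.  The default written at init time, 0x4B (75), thus
 * corresponds to the -7500 mbm (-75 dBm) entry used for phy->cca_ed_level.
 */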
static int
mcr20a_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
{
	struct mcr20a_local *lp = hw->priv;
	int ret;
	u8 rx_frame_filter_reg = 0x0;

	dev_dbg(printdev(lp), "%s(%d)\n", __func__, on);

	if (on) {
		/* All frame types accepted */
		rx_frame_filter_reg &= ~(IAR_RX_FRAME_FLT_FRM_VER);
		rx_frame_filter_reg |= (IAR_RX_FRAME_FLT_ACK_FT |
					IAR_RX_FRAME_FLT_NS_FT);

		ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
					 DAR_PHY_CTRL4_PROMISCUOUS,
					 DAR_PHY_CTRL4_PROMISCUOUS);
		if (ret < 0)
			return ret;

		ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
				   rx_frame_filter_reg);
		if (ret < 0)
			return ret;
	} else {
		ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
					 DAR_PHY_CTRL4_PROMISCUOUS, 0x0);
		if (ret < 0)
			return ret;

		ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
				   IAR_RX_FRAME_FLT_FRM_VER |
				   IAR_RX_FRAME_FLT_BEACON_FT |
				   IAR_RX_FRAME_FLT_DATA_FT |
				   IAR_RX_FRAME_FLT_CMD_FT);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static const struct ieee802154_ops mcr20a_hw_ops = {
	.owner			= THIS_MODULE,
	.xmit_async		= mcr20a_xmit,
	.ed			= mcr20a_ed,
	.set_channel		= mcr20a_set_channel,
	.start			= mcr20a_start,
	.stop			= mcr20a_stop,
	.set_hw_addr_filt	= mcr20a_set_hw_addr_filt,
	.set_txpower		= mcr20a_set_txpower,
	.set_cca_mode		= mcr20a_set_cca_mode,
	.set_cca_ed_level	= mcr20a_set_cca_ed_level,
	.set_promiscuous_mode	= mcr20a_set_promiscuous_mode,
};

static int
mcr20a_request_rx(struct mcr20a_local *lp)
{
	dev_dbg(printdev(lp), "%s\n", __func__);

	/* Start the RX sequence */
	regmap_update_bits_async(lp->regmap_dar, DAR_PHY_CTRL1,
				 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);

	return 0;
}

static void
mcr20a_handle_rx_read_buf_complete(void *context)
{
	struct mcr20a_local *lp = context;
	u8 len = lp->reg_data[0] & DAR_RX_FRAME_LENGTH_MASK;
	struct sk_buff *skb;

	dev_dbg(printdev(lp), "%s\n", __func__);

	dev_dbg(printdev(lp), "RX is done\n");

	if (!ieee802154_is_valid_psdu_len(len)) {
		dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
		len = IEEE802154_MTU;
	}

	len = len - 2;  /* get rid of frame check field */

	skb = dev_alloc_skb(len);
	if (!skb)
		return;

	memcpy(skb_put(skb, len), lp->rx_buf, len);
	ieee802154_rx_irqsafe(lp->hw, skb, lp->rx_lqi[0]);

	print_hex_dump_debug("mcr20a rx: ", DUMP_PREFIX_OFFSET, 16, 1,
			     lp->rx_buf, len, 0);
	pr_debug("mcr20a rx: lqi: %02hhx\n", lp->rx_lqi[0]);

	/* start RX sequence */
	mcr20a_request_rx(lp);
}

static void
mcr20a_handle_rx_read_len_complete(void *context)
{
	struct mcr20a_local *lp = context;
	u8 len;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* get the length of received frame */
	len = lp->reg_data[0] & DAR_RX_FRAME_LENGTH_MASK;
	dev_dbg(printdev(lp), "frame len : %d\n", len);

	/* prepare to read the rx buf */
	lp->rx_buf_msg.complete = mcr20a_handle_rx_read_buf_complete;
	lp->rx_header[0] = MCR20A_BURST_READ_PACKET_BUF;
	lp->rx_xfer_buf.len = len;

	ret = spi_async(lp->spi, &lp->rx_buf_msg);
	if (ret)
		dev_err(printdev(lp), "failed to read rx buffer\n");
}

static int
mcr20a_handle_rx(struct mcr20a_local *lp)
{
	dev_dbg(printdev(lp), "%s\n", __func__);
	lp->reg_msg.complete = mcr20a_handle_rx_read_len_complete;
	lp->reg_cmd[0] = MCR20A_READ_REG(DAR_RX_FRM_LEN);
	lp->reg_xfer_data.len = 1;

	return spi_async(lp->spi, &lp->reg_msg);
}

static int
mcr20a_handle_tx_complete(struct mcr20a_local *lp)
{
	dev_dbg(printdev(lp), "%s\n", __func__);

	ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);

	return mcr20a_request_rx(lp);
}

static int
mcr20a_handle_tx(struct mcr20a_local *lp)
{
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* write tx buffer */
	lp->tx_header[0] = MCR20A_BURST_WRITE_PACKET_BUF;
	/* add 2 bytes of FCS */
	lp->tx_len[0] = lp->tx_skb->len + 2;
	lp->tx_xfer_buf.tx_buf = lp->tx_skb->data;
	/* add 1 byte psduLength */
	lp->tx_xfer_buf.len = lp->tx_skb->len + 1;

	ret = spi_async(lp->spi, &lp->tx_buf_msg);
	if (ret) {
		dev_err(printdev(lp), "SPI write failed for TX buf\n");
		return ret;
	}

	return 0;
}
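
/*
 * Interrupt handling overview (summarising the handlers below): the hard
 * IRQ handler disables the interrupt line and starts an asynchronous read
 * of the IRQ status registers.  Its completion, mcr20a_irq_status_complete(),
 * forces the sequencer back to IDLE and writes the latched status back to
 * clear it.  The final completion, mcr20a_irq_clean_complete(), re-enables
 * the interrupt line and dispatches on the status bits: SEQ+TX means a
 * finished transmission, SEQ+RX a received frame, and a bare SEQ interrupt
 * while is_tx is set means the abort-to-IDLE step of the TX path finished,
 * so the frame can now be written to the packet buffer.
 */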
static void
mcr20a_irq_clean_complete(void *context)
{
	struct mcr20a_local *lp = context;
	u8 seq_state = lp->irq_data[DAR_IRQ_STS1] & DAR_PHY_CTRL1_XCVSEQ_MASK;

	dev_dbg(printdev(lp), "%s\n", __func__);

	enable_irq(lp->spi->irq);

	dev_dbg(printdev(lp), "IRQ STA1 (%02x) STA2 (%02x)\n",
		lp->irq_data[DAR_IRQ_STS1], lp->irq_data[DAR_IRQ_STS2]);

	switch (seq_state) {
	/* TX IRQ, RX IRQ and SEQ IRQ */
	case (DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
		if (lp->is_tx) {
			lp->is_tx = 0;
			dev_dbg(printdev(lp), "TX is done. No ACK\n");
			mcr20a_handle_tx_complete(lp);
		}
		break;
	case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
		/* rx is starting */
		dev_dbg(printdev(lp), "RX is starting\n");
		mcr20a_handle_rx(lp);
		break;
	case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
		if (lp->is_tx) {
			/* tx is done */
			lp->is_tx = 0;
			dev_dbg(printdev(lp), "TX is done. Got ACK\n");
			mcr20a_handle_tx_complete(lp);
		} else {
			/* rx is starting */
			dev_dbg(printdev(lp), "RX is starting\n");
			mcr20a_handle_rx(lp);
		}
		break;
	case (DAR_IRQSTS1_SEQIRQ):
		if (lp->is_tx) {
			dev_dbg(printdev(lp), "TX is starting\n");
			mcr20a_handle_tx(lp);
		} else {
			dev_dbg(printdev(lp), "MCR20A is stopped\n");
		}
		break;
	}
}

static void mcr20a_irq_status_complete(void *context)
{
	int ret;
	struct mcr20a_local *lp = context;

	dev_dbg(printdev(lp), "%s\n", __func__);
	regmap_update_bits_async(lp->regmap_dar, DAR_PHY_CTRL1,
				 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_IDLE);

	lp->reg_msg.complete = mcr20a_irq_clean_complete;
	lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_IRQ_STS1);
	memcpy(lp->reg_data, lp->irq_data, MCR20A_IRQSTS_NUM);
	lp->reg_xfer_data.len = MCR20A_IRQSTS_NUM;

	ret = spi_async(lp->spi, &lp->reg_msg);
	if (ret)
		dev_err(printdev(lp), "failed to clean irq status\n");
}

static irqreturn_t mcr20a_irq_isr(int irq, void *data)
{
	struct mcr20a_local *lp = data;
	int ret;

	disable_irq_nosync(irq);

	lp->irq_header[0] = MCR20A_READ_REG(DAR_IRQ_STS1);
	/* read IRQSTSx */
	ret = spi_async(lp->spi, &lp->irq_msg);
	if (ret) {
		enable_irq(irq);
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

static void mcr20a_hw_setup(struct mcr20a_local *lp)
{
	u8 i;
	struct ieee802154_hw *hw = lp->hw;
	struct wpan_phy *phy = lp->hw->phy;

	dev_dbg(printdev(lp), "%s\n", __func__);

	phy->symbol_duration = 16;
	phy->lifs_period = 40;
	phy->sifs_period = 12;

	hw->flags = IEEE802154_HW_TX_OMIT_CKSUM |
		    IEEE802154_HW_AFILT |
		    IEEE802154_HW_PROMISCUOUS;

	phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
		     WPAN_PHY_FLAG_CCA_MODE;

	phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
		BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER);
	phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
		BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);

	/* initialize cca_ed_levels */
	for (i = MCR20A_MAX_CCA_THRESHOLD; i < MCR20A_MIN_CCA_THRESHOLD + 1;
	     ++i) {
		mcr20a_ed_levels[i] = -i * 100;
	}

	phy->supported.cca_ed_levels = mcr20a_ed_levels;
	phy->supported.cca_ed_levels_size = ARRAY_SIZE(mcr20a_ed_levels);

	phy->cca.mode = NL802154_CCA_ENERGY;

	phy->supported.channels[0] = MCR20A_VALID_CHANNELS;
	phy->current_page = 0;
	/* MCR20A default reset value */
	phy->current_channel = 20;
	phy->symbol_duration = 16;
	phy->supported.tx_powers = mcr20a_powers;
	phy->supported.tx_powers_size = ARRAY_SIZE(mcr20a_powers);
	phy->cca_ed_level = phy->supported.cca_ed_levels[75];
	phy->transmit_power = phy->supported.tx_powers[0x0F];
}

static void
mcr20a_setup_tx_spi_messages(struct mcr20a_local *lp)
{
	spi_message_init(&lp->tx_buf_msg);
	lp->tx_buf_msg.context = lp;
	lp->tx_buf_msg.complete = mcr20a_write_tx_buf_complete;

	lp->tx_xfer_header.len = 1;
	lp->tx_xfer_header.tx_buf = lp->tx_header;

	lp->tx_xfer_len.len = 1;
	lp->tx_xfer_len.tx_buf = lp->tx_len;

	spi_message_add_tail(&lp->tx_xfer_header, &lp->tx_buf_msg);
	spi_message_add_tail(&lp->tx_xfer_len, &lp->tx_buf_msg);
	spi_message_add_tail(&lp->tx_xfer_buf, &lp->tx_buf_msg);
}
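
/*
 * The TX message built above mirrors the packet-buffer burst write used in
 * mcr20a_handle_tx(): one byte carrying the burst-write opcode, one byte
 * with psduLength (payload length plus the 2-byte FCS appended by the
 * hardware), then the payload taken directly from the skb.
 */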
static void
mcr20a_setup_rx_spi_messages(struct mcr20a_local *lp)
{
	spi_message_init(&lp->reg_msg);
	lp->reg_msg.context = lp;

	lp->reg_xfer_cmd.len = 1;
	lp->reg_xfer_cmd.tx_buf = lp->reg_cmd;
	lp->reg_xfer_cmd.rx_buf = lp->reg_cmd;

	lp->reg_xfer_data.rx_buf = lp->reg_data;
	lp->reg_xfer_data.tx_buf = lp->reg_data;

	spi_message_add_tail(&lp->reg_xfer_cmd, &lp->reg_msg);
	spi_message_add_tail(&lp->reg_xfer_data, &lp->reg_msg);

	spi_message_init(&lp->rx_buf_msg);
	lp->rx_buf_msg.context = lp;
	lp->rx_buf_msg.complete = mcr20a_handle_rx_read_buf_complete;
	lp->rx_xfer_header.len = 1;
	lp->rx_xfer_header.tx_buf = lp->rx_header;
	lp->rx_xfer_header.rx_buf = lp->rx_header;

	lp->rx_xfer_buf.rx_buf = lp->rx_buf;

	lp->rx_xfer_lqi.len = 1;
	lp->rx_xfer_lqi.rx_buf = lp->rx_lqi;

	spi_message_add_tail(&lp->rx_xfer_header, &lp->rx_buf_msg);
	spi_message_add_tail(&lp->rx_xfer_buf, &lp->rx_buf_msg);
	spi_message_add_tail(&lp->rx_xfer_lqi, &lp->rx_buf_msg);
}

static void
mcr20a_setup_irq_spi_messages(struct mcr20a_local *lp)
{
	spi_message_init(&lp->irq_msg);
	lp->irq_msg.context = lp;
	lp->irq_msg.complete = mcr20a_irq_status_complete;
	lp->irq_xfer_header.len = 1;
	lp->irq_xfer_header.tx_buf = lp->irq_header;
	lp->irq_xfer_header.rx_buf = lp->irq_header;

	lp->irq_xfer_data.len = MCR20A_IRQSTS_NUM;
	lp->irq_xfer_data.rx_buf = lp->irq_data;

	spi_message_add_tail(&lp->irq_xfer_header, &lp->irq_msg);
	spi_message_add_tail(&lp->irq_xfer_data, &lp->irq_msg);
}
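
/*
 * The spi_message structures above are initialised once at probe time and
 * then reused from interrupt and SPI-completion context: the handlers only
 * patch the command byte, buffer pointers and lengths before handing the
 * message back to spi_async().
 */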
static int
mcr20a_phy_init(struct mcr20a_local *lp)
{
	u8 index;
	unsigned int phy_reg = 0;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* Disable Tristate on COCO MISO for SPI reads */
	ret = regmap_write(lp->regmap_iar, IAR_MISC_PAD_CTRL, 0x02);
	if (ret)
		goto err_ret;

	/* Clear all PP IRQ bits in IRQSTS1 to avoid unexpected interrupts
	 * immediately after init
	 */
	ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS1, 0xEF);
	if (ret)
		goto err_ret;

	/* Clear all PP IRQ bits in IRQSTS2 */
	ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS2,
			   DAR_IRQSTS2_ASM_IRQ | DAR_IRQSTS2_PB_ERR_IRQ |
			   DAR_IRQSTS2_WAKE_IRQ);
	if (ret)
		goto err_ret;

	/* Disable all timer interrupts */
	ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS3, 0xFF);
	if (ret)
		goto err_ret;

	/* PHY_CTRL1 : default HW settings + AUTOACK enabled */
	ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
				 DAR_PHY_CTRL1_AUTOACK, DAR_PHY_CTRL1_AUTOACK);
	if (ret)
		goto err_ret;

	/* PHY_CTRL2 : disable all interrupts */
	ret = regmap_write(lp->regmap_dar, DAR_PHY_CTRL2, 0xFF);
	if (ret)
		goto err_ret;

	/* PHY_CTRL3 : disable all timers and remaining interrupts */
	ret = regmap_write(lp->regmap_dar, DAR_PHY_CTRL3,
			   DAR_PHY_CTRL3_ASM_MSK | DAR_PHY_CTRL3_PB_ERR_MSK |
			   DAR_PHY_CTRL3_WAKE_MSK);
	if (ret)
		goto err_ret;

	/* SRC_CTRL : enable Acknowledge Frame Pending and
	 * Source Address Matching Enable
	 */
	ret = regmap_write(lp->regmap_dar, DAR_SRC_CTRL,
			   DAR_SRC_CTRL_ACK_FRM_PND |
			   (DAR_SRC_CTRL_INDEX << DAR_SRC_CTRL_INDEX_SHIFT));
	if (ret)
		goto err_ret;

	/* RX_FRAME_FILTER */
	/* FRM_VER[1:0] = b11. Accept FrameVersion 0 and 1 packets */
	ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
			   IAR_RX_FRAME_FLT_FRM_VER |
			   IAR_RX_FRAME_FLT_BEACON_FT |
			   IAR_RX_FRAME_FLT_DATA_FT |
			   IAR_RX_FRAME_FLT_CMD_FT);
	if (ret)
		goto err_ret;

	dev_info(printdev(lp), "MCR20A DAR overwrites version: 0x%02x\n",
		 MCR20A_OVERWRITE_VERSION);

	/* Overwrite direct registers */
	ret = regmap_write(lp->regmap_dar, DAR_OVERWRITE_VER,
			   MCR20A_OVERWRITE_VERSION);
	if (ret)
		goto err_ret;

	/* Overwrite indirect registers */
	ret = regmap_multi_reg_write(lp->regmap_iar, mar20a_iar_overwrites,
				     ARRAY_SIZE(mar20a_iar_overwrites));
	if (ret)
		goto err_ret;

	/* Clear HW indirect queue */
	dev_dbg(printdev(lp), "clear HW indirect queue\n");
	for (index = 0; index < MCR20A_PHY_INDIRECT_QUEUE_SIZE; index++) {
		phy_reg = (u8)(((index & DAR_SRC_CTRL_INDEX) <<
				DAR_SRC_CTRL_INDEX_SHIFT)
			       | (DAR_SRC_CTRL_SRCADDR_EN)
			       | (DAR_SRC_CTRL_INDEX_DISABLE));
		ret = regmap_write(lp->regmap_dar, DAR_SRC_CTRL, phy_reg);
		if (ret)
			goto err_ret;
		phy_reg = 0;
	}

	/* Assign HW Indirect hash table to PAN0 */
	ret = regmap_read(lp->regmap_iar, IAR_DUAL_PAN_CTRL, &phy_reg);
	if (ret)
		goto err_ret;

	/* Clear current lvl */
	phy_reg &= ~IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_MSK;

	/* Set new lvl */
	phy_reg |= MCR20A_PHY_INDIRECT_QUEUE_SIZE <<
		IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_SHIFT;
	ret = regmap_write(lp->regmap_iar, IAR_DUAL_PAN_CTRL, phy_reg);
	if (ret)
		goto err_ret;

	/* Set CCA threshold to -75 dBm */
	ret = regmap_write(lp->regmap_iar, IAR_CCA1_THRESH, 0x4B);
	if (ret)
		goto err_ret;

	/* Set prescaler to obtain 1 symbol (16us) timebase */
	ret = regmap_write(lp->regmap_iar, IAR_TMR_PRESCALE, 0x05);
	if (ret)
		goto err_ret;

	/* Enable autodoze mode. */
	ret = regmap_update_bits(lp->regmap_dar, DAR_PWR_MODES,
				 DAR_PWR_MODES_AUTODOZE,
				 DAR_PWR_MODES_AUTODOZE);
	if (ret)
		goto err_ret;

	/* Disable clk_out */
	ret = regmap_update_bits(lp->regmap_dar, DAR_CLK_OUT_CTRL,
				 DAR_CLK_OUT_CTRL_EN, 0x0);
	if (ret)
		goto err_ret;

	return 0;

err_ret:
	return ret;
}

static int
mcr20a_probe(struct spi_device *spi)
{
	struct ieee802154_hw *hw;
	struct mcr20a_local *lp;
	struct gpio_desc *rst_b;
	int irq_type;
	int ret = -ENOMEM;

	dev_dbg(&spi->dev, "%s\n", __func__);

	if (!spi->irq) {
		dev_err(&spi->dev, "no IRQ specified\n");
		return -EINVAL;
	}

	rst_b = devm_gpiod_get(&spi->dev, "rst_b", GPIOD_OUT_HIGH);
	if (IS_ERR(rst_b)) {
		ret = PTR_ERR(rst_b);
		if (ret != -EPROBE_DEFER)
			dev_err(&spi->dev, "Failed to get 'rst_b' gpio: %d", ret);
		return ret;
	}

	/* reset mcr20a */
	usleep_range(10, 20);
	gpiod_set_value_cansleep(rst_b, 1);
	usleep_range(10, 20);
	gpiod_set_value_cansleep(rst_b, 0);
	usleep_range(120, 240);

	/* allocate ieee802154_hw and private data */
	hw = ieee802154_alloc_hw(sizeof(*lp), &mcr20a_hw_ops);
	if (!hw) {
		dev_crit(&spi->dev, "ieee802154_alloc_hw failed\n");
		return ret;
	}

	/* init mcr20a local data */
	lp = hw->priv;
	lp->hw = hw;
	lp->spi = spi;

	/* init ieee802154_hw */
	hw->parent = &spi->dev;
	ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);

	/* init buf */
	lp->buf = devm_kzalloc(&spi->dev, SPI_COMMAND_BUFFER, GFP_KERNEL);

	if (!lp->buf) {
		ret = -ENOMEM;
		goto free_dev;
	}

	mcr20a_setup_tx_spi_messages(lp);
	mcr20a_setup_rx_spi_messages(lp);
	mcr20a_setup_irq_spi_messages(lp);

	/* setup regmap */
	lp->regmap_dar = devm_regmap_init_spi(spi, &mcr20a_dar_regmap);
	if (IS_ERR(lp->regmap_dar)) {
		ret = PTR_ERR(lp->regmap_dar);
		dev_err(&spi->dev, "Failed to allocate dar map: %d\n", ret);
		goto free_dev;
	}

	lp->regmap_iar = devm_regmap_init_spi(spi, &mcr20a_iar_regmap);
	if (IS_ERR(lp->regmap_iar)) {
		ret = PTR_ERR(lp->regmap_iar);
		dev_err(&spi->dev, "Failed to allocate iar map: %d\n", ret);
		goto free_dev;
	}

	mcr20a_hw_setup(lp);

	spi_set_drvdata(spi, lp);

	ret = mcr20a_phy_init(lp);
	if (ret < 0) {
		dev_crit(&spi->dev, "mcr20a_phy_init failed\n");
		goto free_dev;
	}

	irq_type = irq_get_trigger_type(spi->irq);
	if (!irq_type)
		irq_type = IRQF_TRIGGER_FALLING;

	ret = devm_request_irq(&spi->dev, spi->irq, mcr20a_irq_isr,
			       irq_type, dev_name(&spi->dev), lp);
	if (ret) {
		dev_err(&spi->dev, "could not request_irq for mcr20a\n");
		ret = -ENODEV;
		goto free_dev;
	}

	/* keep the IRQ disabled until mcr20a_start() brings the hardware up */
	disable_irq(spi->irq);

	ret = ieee802154_register_hw(hw);
	if (ret) {
		dev_crit(&spi->dev, "ieee802154_register_hw failed\n");
		goto free_dev;
	}

	return ret;

free_dev:
	ieee802154_free_hw(lp->hw);

	return ret;
}
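
/*
 * Note on cleanup: the reset GPIO, command buffer, regmaps and IRQ above
 * are all devm-managed, so both the probe error path and mcr20a_remove()
 * below only have to release the ieee802154 device itself.
 */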
static int mcr20a_remove(struct spi_device *spi)
{
	struct mcr20a_local *lp = spi_get_drvdata(spi);

	dev_dbg(&spi->dev, "%s\n", __func__);

	ieee802154_unregister_hw(lp->hw);
	ieee802154_free_hw(lp->hw);

	return 0;
}

static const struct of_device_id mcr20a_of_match[] = {
	{ .compatible = "nxp,mcr20a", },
	{ },
};
MODULE_DEVICE_TABLE(of, mcr20a_of_match);

static const struct spi_device_id mcr20a_device_id[] = {
	{ .name = "mcr20a", },
	{ },
};
MODULE_DEVICE_TABLE(spi, mcr20a_device_id);

static struct spi_driver mcr20a_driver = {
	.id_table = mcr20a_device_id,
	.driver = {
		.of_match_table	= of_match_ptr(mcr20a_of_match),
		.name		= "mcr20a",
	},
	.probe      = mcr20a_probe,
	.remove     = mcr20a_remove,
};

module_spi_driver(mcr20a_driver);

MODULE_DESCRIPTION("MCR20A Transceiver Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Xue Liu <liuxuenetmail@gmail.com>");