// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for NXP MCR20A 802.15.4 Wireless-PAN Networking controller
 *
 * Copyright (C) 2018 Xue Liu <liuxuenetmail@gmail.com>
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/skbuff.h>
#include <linux/regmap.h>
#include <linux/ieee802154.h>
#include <linux/debugfs.h>

#include <net/mac802154.h>
#include <net/cfg802154.h>

#include <linux/device.h>

#include "mcr20a.h"

#define SPI_COMMAND_BUFFER 3

#define REGISTER_READ BIT(7)
#define REGISTER_WRITE (0 << 7)
#define REGISTER_ACCESS (0 << 6)
#define PACKET_BUFF_BURST_ACCESS BIT(6)
#define PACKET_BUFF_BYTE_ACCESS BIT(5)

#define MCR20A_WRITE_REG(x) (x)
#define MCR20A_READ_REG(x) (REGISTER_READ | (x))
#define MCR20A_BURST_READ_PACKET_BUF (0xC0)
#define MCR20A_BURST_WRITE_PACKET_BUF (0x40)
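/*
 * Layout of the SPI access header byte as encoded by the macros above:
 * bit 7 selects read (1) or write (0), bit 6 selects burst packet-buffer
 * access, bit 5 selects single-byte packet-buffer access, and for plain
 * register access the low six bits carry the DAR register address.
 */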

#define MCR20A_CMD_REG 0x80
#define MCR20A_CMD_REG_MASK 0x3f
#define MCR20A_CMD_WRITE 0x40
#define MCR20A_CMD_FB 0x20

/* Number of Interrupt Request Status Registers */
#define MCR20A_IRQSTS_NUM 2 /* only IRQ_STS1 and IRQ_STS2 */

/* MCR20A CCA Type */
enum {
	MCR20A_CCA_ED,	  // energy detect - CCA bit not active,
			  // not to be used for T and CCCA sequences
	MCR20A_CCA_MODE1, // energy detect - CCA bit ACTIVE
	MCR20A_CCA_MODE2, // 802.15.4 compliant signal detect - CCA bit ACTIVE
	MCR20A_CCA_MODE3
};

enum {
	MCR20A_XCVSEQ_IDLE = 0x00,
	MCR20A_XCVSEQ_RX = 0x01,
	MCR20A_XCVSEQ_TX = 0x02,
	MCR20A_XCVSEQ_CCA = 0x03,
	MCR20A_XCVSEQ_TR = 0x04,
	MCR20A_XCVSEQ_CCCA = 0x05,
};

/* IEEE-802.15.4 defined constants (2.4 GHz logical channels) */
#define MCR20A_MIN_CHANNEL (11)
#define MCR20A_MAX_CHANNEL (26)
#define MCR20A_CHANNEL_SPACING (5)

/* MCR20A CCA Threshold constants */
#define MCR20A_MIN_CCA_THRESHOLD (0x6EU)
#define MCR20A_MAX_CCA_THRESHOLD (0x00U)

/* version 0C */
#define MCR20A_OVERWRITE_VERSION (0x0C)

/* MCR20A PLL configurations */
static const u8 PLL_INT[16] = {
	/* 2405 */ 0x0B, /* 2410 */ 0x0B, /* 2415 */ 0x0B,
	/* 2420 */ 0x0B, /* 2425 */ 0x0B, /* 2430 */ 0x0B,
	/* 2435 */ 0x0C, /* 2440 */ 0x0C, /* 2445 */ 0x0C,
	/* 2450 */ 0x0C, /* 2455 */ 0x0C, /* 2460 */ 0x0C,
	/* 2465 */ 0x0D, /* 2470 */ 0x0D, /* 2475 */ 0x0D,
	/* 2480 */ 0x0D
};

static const u8 PLL_FRAC[16] = {
	/* 2405 */ 0x28, /* 2410 */ 0x50, /* 2415 */ 0x78,
	/* 2420 */ 0xA0, /* 2425 */ 0xC8, /* 2430 */ 0xF0,
	/* 2435 */ 0x18, /* 2440 */ 0x40, /* 2445 */ 0x68,
	/* 2450 */ 0x90, /* 2455 */ 0xB8, /* 2460 */ 0xE0,
	/* 2465 */ 0x08, /* 2470 */ 0x30, /* 2475 */ 0x58,
	/* 2480 */ 0x80
};
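/*
 * Both tables above are indexed by (channel - 11) for the 2.4 GHz channels
 * 11..26; mcr20a_set_channel() programs them into DAR_PLL_INT0/_FRAC0 to
 * tune frequency = ((PLL_INT + 64) + (PLL_FRAC / 65536)) * 32 MHz.
 */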

static const struct reg_sequence mar20a_iar_overwrites[] = {
	{ IAR_MISC_PAD_CTRL, 0x02 },
	{ IAR_VCO_CTRL1, 0xB3 },
	{ IAR_VCO_CTRL2, 0x07 },
	{ IAR_PA_TUNING, 0x71 },
	{ IAR_CHF_IBUF, 0x2F },
	{ IAR_CHF_QBUF, 0x2F },
	{ IAR_CHF_IRIN, 0x24 },
	{ IAR_CHF_QRIN, 0x24 },
	{ IAR_CHF_IL, 0x24 },
	{ IAR_CHF_QL, 0x24 },
	{ IAR_CHF_CC1, 0x32 },
	{ IAR_CHF_CCL, 0x1D },
	{ IAR_CHF_CC2, 0x2D },
	{ IAR_CHF_IROUT, 0x24 },
	{ IAR_CHF_QROUT, 0x24 },
	{ IAR_PA_CAL, 0x28 },
	{ IAR_AGC_THR1, 0x55 },
	{ IAR_AGC_THR2, 0x2D },
	{ IAR_ATT_RSSI1, 0x5F },
	{ IAR_ATT_RSSI2, 0x8F },
	{ IAR_RSSI_OFFSET, 0x61 },
	{ IAR_CHF_PMA_GAIN, 0x03 },
	{ IAR_CCA1_THRESH, 0x50 },
	{ IAR_CORR_NVAL, 0x13 },
	{ IAR_ACKDELAY, 0x3D },
};

#define MCR20A_VALID_CHANNELS (0x07FFF800)
#define MCR20A_MAX_BUF (127)

#define printdev(X) (&X->spi->dev)

/* regmap information for Direct Access Register (DAR) access */
#define MCR20A_DAR_WRITE 0x01
#define MCR20A_DAR_READ 0x00
#define MCR20A_DAR_NUMREGS 0x3F

/* regmap information for Indirect Access Register (IAR) access */
#define MCR20A_IAR_ACCESS 0x80
#define MCR20A_IAR_NUMREGS 0xBEFF

/* Read/Write SPI Commands for DAR and IAR registers. */
#define MCR20A_READSHORT(reg) ((reg) << 1)
#define MCR20A_WRITESHORT(reg) ((reg) << 1 | 1)
#define MCR20A_READLONG(reg) (1 << 15 | (reg) << 5)
#define MCR20A_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4)

/* Size of the hardware indirect (source address matching) queue */
#define MCR20A_PHY_INDIRECT_QUEUE_SIZE (12)

static bool
mcr20a_dar_writeable(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case DAR_IRQ_STS1:
	case DAR_IRQ_STS2:
	case DAR_IRQ_STS3:
	case DAR_PHY_CTRL1:
	case DAR_PHY_CTRL2:
	case DAR_PHY_CTRL3:
	case DAR_PHY_CTRL4:
	case DAR_SRC_CTRL:
	case DAR_SRC_ADDRS_SUM_LSB:
	case DAR_SRC_ADDRS_SUM_MSB:
	case DAR_T3CMP_LSB:
	case DAR_T3CMP_MSB:
	case DAR_T3CMP_USB:
	case DAR_T2PRIMECMP_LSB:
	case DAR_T2PRIMECMP_MSB:
	case DAR_T1CMP_LSB:
	case DAR_T1CMP_MSB:
	case DAR_T1CMP_USB:
	case DAR_T2CMP_LSB:
	case DAR_T2CMP_MSB:
	case DAR_T2CMP_USB:
	case DAR_T4CMP_LSB:
	case DAR_T4CMP_MSB:
	case DAR_T4CMP_USB:
	case DAR_PLL_INT0:
	case DAR_PLL_FRAC0_LSB:
	case DAR_PLL_FRAC0_MSB:
	case DAR_PA_PWR:
	/* no DAR_ACM */
	case DAR_OVERWRITE_VER:
	case DAR_CLK_OUT_CTRL:
	case DAR_PWR_MODES:
		return true;
	default:
		return false;
	}
}

static bool
mcr20a_dar_readable(struct device *dev, unsigned int reg)
{
	bool rc;

	/* all writeable registers are also readable */
	rc = mcr20a_dar_writeable(dev, reg);
	if (rc)
		return rc;

	/* readonly regs */
	switch (reg) {
	case DAR_RX_FRM_LEN:
	case DAR_CCA1_ED_FNL:
	case DAR_EVENT_TMR_LSB:
	case DAR_EVENT_TMR_MSB:
	case DAR_EVENT_TMR_USB:
	case DAR_TIMESTAMP_LSB:
	case DAR_TIMESTAMP_MSB:
	case DAR_TIMESTAMP_USB:
	case DAR_SEQ_STATE:
	case DAR_LQI_VALUE:
	case DAR_RSSI_CCA_CONT:
		return true;
	default:
		return false;
	}
}

static bool
mcr20a_dar_volatile(struct device *dev, unsigned int reg)
{
	/* can be changed during runtime */
	switch (reg) {
	case DAR_IRQ_STS1:
	case DAR_IRQ_STS2:
	case DAR_IRQ_STS3:
	/* these are also accessed via raw spi_async transfers, so
	 * they must not be cached by regmap
	 */
		return true;
	default:
		return false;
	}
}

static bool
mcr20a_dar_precious(struct device *dev, unsigned int reg)
{
	/* don't clear irq line on read */
	switch (reg) {
	case DAR_IRQ_STS1:
	case DAR_IRQ_STS2:
	case DAR_IRQ_STS3:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config mcr20a_dar_regmap = {
	.name = "mcr20a_dar",
	.reg_bits = 8,
	.val_bits = 8,
	.write_flag_mask = REGISTER_ACCESS | REGISTER_WRITE,
	.read_flag_mask = REGISTER_ACCESS | REGISTER_READ,
	.cache_type = REGCACHE_MAPLE,
	.writeable_reg = mcr20a_dar_writeable,
	.readable_reg = mcr20a_dar_readable,
	.volatile_reg = mcr20a_dar_volatile,
	.precious_reg = mcr20a_dar_precious,
	.fast_io = true,
	.can_multi_write = true,
};

static bool
mcr20a_iar_writeable(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case IAR_XTAL_TRIM:
	case IAR_PMC_LP_TRIM:
	case IAR_MACPANID0_LSB:
	case IAR_MACPANID0_MSB:
	case IAR_MACSHORTADDRS0_LSB:
	case IAR_MACSHORTADDRS0_MSB:
	case IAR_MACLONGADDRS0_0:
	case IAR_MACLONGADDRS0_8:
	case IAR_MACLONGADDRS0_16:
	case IAR_MACLONGADDRS0_24:
	case IAR_MACLONGADDRS0_32:
	case IAR_MACLONGADDRS0_40:
	case IAR_MACLONGADDRS0_48:
	case IAR_MACLONGADDRS0_56:
	case IAR_RX_FRAME_FILTER:
	case IAR_PLL_INT1:
	case IAR_PLL_FRAC1_LSB:
	case IAR_PLL_FRAC1_MSB:
	case IAR_MACPANID1_LSB:
	case IAR_MACPANID1_MSB:
	case IAR_MACSHORTADDRS1_LSB:
	case IAR_MACSHORTADDRS1_MSB:
	case IAR_MACLONGADDRS1_0:
	case IAR_MACLONGADDRS1_8:
	case IAR_MACLONGADDRS1_16:
	case IAR_MACLONGADDRS1_24:
	case IAR_MACLONGADDRS1_32:
	case IAR_MACLONGADDRS1_40:
	case IAR_MACLONGADDRS1_48:
	case IAR_MACLONGADDRS1_56:
	case IAR_DUAL_PAN_CTRL:
	case IAR_DUAL_PAN_DWELL:
	case IAR_CCA1_THRESH:
	case IAR_CCA1_ED_OFFSET_COMP:
	case IAR_LQI_OFFSET_COMP:
	case IAR_CCA_CTRL:
	case IAR_CCA2_CORR_PEAKS:
	case IAR_CCA2_CORR_THRESH:
	case IAR_TMR_PRESCALE:
	case IAR_ANT_PAD_CTRL:
	case IAR_MISC_PAD_CTRL:
	case IAR_BSM_CTRL:
	case IAR_RNG:
	case IAR_RX_WTR_MARK:
	case IAR_SOFT_RESET:
	case IAR_TXDELAY:
	case IAR_ACKDELAY:
	case IAR_CORR_NVAL:
	case IAR_ANT_AGC_CTRL:
	case IAR_AGC_THR1:
	case IAR_AGC_THR2:
	case IAR_PA_CAL:
	case IAR_ATT_RSSI1:
	case IAR_ATT_RSSI2:
	case IAR_RSSI_OFFSET:
	case IAR_XTAL_CTRL:
	case IAR_CHF_PMA_GAIN:
	case IAR_CHF_IBUF:
	case IAR_CHF_QBUF:
	case IAR_CHF_IRIN:
	case IAR_CHF_QRIN:
	case IAR_CHF_IL:
	case IAR_CHF_QL:
	case IAR_CHF_CC1:
	case IAR_CHF_CCL:
	case IAR_CHF_CC2:
	case IAR_CHF_IROUT:
	case IAR_CHF_QROUT:
	case IAR_PA_TUNING:
	case IAR_VCO_CTRL1:
	case IAR_VCO_CTRL2:
		return true;
	default:
		return false;
	}
}

static bool
mcr20a_iar_readable(struct device *dev, unsigned int reg)
{
	bool rc;

	/* all writeable registers are also readable */
	rc = mcr20a_iar_writeable(dev, reg);
	if (rc)
		return rc;

	/* readonly regs */
	switch (reg) {
	case IAR_PART_ID:
	case IAR_DUAL_PAN_STS:
	case IAR_RX_BYTE_COUNT:
	case IAR_FILTERFAIL_CODE1:
	case IAR_FILTERFAIL_CODE2:
	case IAR_RSSI:
		return true;
	default:
		return false;
	}
}

static bool
mcr20a_iar_volatile(struct device *dev, unsigned int reg)
{
	/* can be changed during runtime */
	switch (reg) {
	case IAR_DUAL_PAN_STS:
	case IAR_RX_BYTE_COUNT:
	case IAR_FILTERFAIL_CODE1:
	case IAR_FILTERFAIL_CODE2:
	case IAR_RSSI:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config mcr20a_iar_regmap = {
	.name = "mcr20a_iar",
	.reg_bits = 16,
	.val_bits = 8,
	.write_flag_mask = REGISTER_ACCESS | REGISTER_WRITE | IAR_INDEX,
	.read_flag_mask = REGISTER_ACCESS | REGISTER_READ | IAR_INDEX,
	.cache_type = REGCACHE_MAPLE,
	.writeable_reg = mcr20a_iar_writeable,
	.readable_reg = mcr20a_iar_readable,
	.volatile_reg = mcr20a_iar_volatile,
	.fast_io = true,
};
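/*
 * Two regmaps share the SPI bus: the DAR map is addressed with a single
 * command byte whose flag bits select read or write, while the IAR map
 * uses a 16-bit command that carries the IAR_INDEX flag for indirect
 * access. Non-volatile registers are cached (REGCACHE_MAPLE), so repeated
 * configuration writes avoid extra SPI traffic.
 */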

struct mcr20a_local {
	struct spi_device *spi;

	struct ieee802154_hw *hw;
	struct regmap *regmap_dar;
	struct regmap *regmap_iar;

	u8 *buf;

	bool is_tx;

	/* for writing tx buffer */
	struct spi_message tx_buf_msg;
	u8 tx_header[1];
	/* burst buffer write command */
	struct spi_transfer tx_xfer_header;
	u8 tx_len[1];
	/* length of tx packet */
	struct spi_transfer tx_xfer_len;
	/* data of tx packet */
	struct spi_transfer tx_xfer_buf;
	struct sk_buff *tx_skb;

	/* generic register access; also used to read the RX FIFO frame length */
	struct spi_message reg_msg;
	u8 reg_cmd[1];
	u8 reg_data[MCR20A_IRQSTS_NUM];
	struct spi_transfer reg_xfer_cmd;
	struct spi_transfer reg_xfer_data;

	/* receive handling */
	struct spi_message rx_buf_msg;
	u8 rx_header[1];
	struct spi_transfer rx_xfer_header;
	u8 rx_lqi[1];
	struct spi_transfer rx_xfer_lqi;
	u8 rx_buf[MCR20A_MAX_BUF];
	struct spi_transfer rx_xfer_buf;

	/* isr handling for reading intstat */
	struct spi_message irq_msg;
	u8 irq_header[1];
	u8 irq_data[MCR20A_IRQSTS_NUM];
	struct spi_transfer irq_xfer_data;
	struct spi_transfer irq_xfer_header;
};

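/*
 * Transmit path: mcr20a_xmit() forces the sequencer to XCVSEQ_IDLE, which
 * aborts the running RX sequence; the resulting sequence-complete
 * interrupt (with is_tx set) makes mcr20a_handle_tx() burst-write the
 * frame into the packet buffer, the completion below then starts the TX
 * sequence, and the following TXIRQ ends in mcr20a_handle_tx_complete().
 * Every step reuses the spi_message structures pre-built at probe time and
 * runs from spi_async() completions rather than blocking calls.
 */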
static void
mcr20a_write_tx_buf_complete(void *context)
{
	struct mcr20a_local *lp = context;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	lp->reg_msg.complete = NULL;
	lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_PHY_CTRL1);
	lp->reg_data[0] = MCR20A_XCVSEQ_TX;
	lp->reg_xfer_data.len = 1;

	ret = spi_async(lp->spi, &lp->reg_msg);
	if (ret)
		dev_err(printdev(lp), "failed to set SEQ TX\n");
}

static int
mcr20a_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	struct mcr20a_local *lp = hw->priv;

	dev_dbg(printdev(lp), "%s\n", __func__);

	lp->tx_skb = skb;

	print_hex_dump_debug("mcr20a tx: ", DUMP_PREFIX_OFFSET, 16, 1,
			     skb->data, skb->len, 0);

	lp->is_tx = 1;

	lp->reg_msg.complete = NULL;
	lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_PHY_CTRL1);
	lp->reg_data[0] = MCR20A_XCVSEQ_IDLE;
	lp->reg_xfer_data.len = 1;

	return spi_async(lp->spi, &lp->reg_msg);
}

static int
mcr20a_ed(struct ieee802154_hw *hw, u8 *level)
{
	WARN_ON(!level);
	*level = 0xbe;
	return 0;
}

static int
mcr20a_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
	struct mcr20a_local *lp = hw->priv;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* frequency = ((PLL_INT+64) + (PLL_FRAC/65536)) * 32 MHz */
	ret = regmap_write(lp->regmap_dar, DAR_PLL_INT0, PLL_INT[channel - 11]);
	if (ret)
		return ret;
	ret = regmap_write(lp->regmap_dar, DAR_PLL_FRAC0_LSB, 0x00);
	if (ret)
		return ret;
	ret = regmap_write(lp->regmap_dar, DAR_PLL_FRAC0_MSB,
			   PLL_FRAC[channel - 11]);
	if (ret)
		return ret;

	return 0;
}

static int
mcr20a_start(struct ieee802154_hw *hw)
{
	struct mcr20a_local *lp = hw->priv;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* No slotted operation */
	dev_dbg(printdev(lp), "no slotted operation\n");
	ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
				 DAR_PHY_CTRL1_SLOTTED, 0x0);
	if (ret < 0)
		return ret;

	/* enable irq */
	enable_irq(lp->spi->irq);

	/* Unmask SEQ interrupt */
	ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2,
				 DAR_PHY_CTRL2_SEQMSK, 0x0);
	if (ret < 0)
		return ret;

	/* Start the RX sequence */
	dev_dbg(printdev(lp), "start the RX sequence\n");
	ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
				 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
	if (ret < 0)
		return ret;

	return 0;
}

static void
mcr20a_stop(struct ieee802154_hw *hw)
{
	struct mcr20a_local *lp = hw->priv;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* stop any running sequence */
	regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
			   DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_IDLE);

	/* disable irq */
	disable_irq(lp->spi->irq);
}

static int
mcr20a_set_hw_addr_filt(struct ieee802154_hw *hw,
			struct ieee802154_hw_addr_filt *filt,
			unsigned long changed)
{
	struct mcr20a_local *lp = hw->priv;

	dev_dbg(printdev(lp), "%s\n", __func__);

	if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
		u16 addr = le16_to_cpu(filt->short_addr);

		regmap_write(lp->regmap_iar, IAR_MACSHORTADDRS0_LSB, addr);
		regmap_write(lp->regmap_iar, IAR_MACSHORTADDRS0_MSB, addr >> 8);
	}

	if (changed & IEEE802154_AFILT_PANID_CHANGED) {
		u16 pan = le16_to_cpu(filt->pan_id);

		regmap_write(lp->regmap_iar, IAR_MACPANID0_LSB, pan);
		regmap_write(lp->regmap_iar, IAR_MACPANID0_MSB, pan >> 8);
	}

	if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
		u8 addr[8], i;

		memcpy(addr, &filt->ieee_addr, 8);
		for (i = 0; i < 8; i++)
			regmap_write(lp->regmap_iar,
				     IAR_MACLONGADDRS0_0 + i, addr[i]);
	}

	if (changed & IEEE802154_AFILT_PANC_CHANGED) {
		if (filt->pan_coord) {
			regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
					   DAR_PHY_CTRL4_PANCORDNTR0, 0x10);
		} else {
			regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
					   DAR_PHY_CTRL4_PANCORDNTR0, 0x00);
		}
	}

	return 0;
}

/* -30 dBm to 10 dBm */
#define MCR20A_MAX_TX_POWERS 0x14
static const s32 mcr20a_powers[MCR20A_MAX_TX_POWERS + 1] = {
	-3000, -2800, -2600, -2400, -2200, -2000, -1800, -1600, -1400,
	-1200, -1000, -800, -600, -400, -200, 0, 200, 400, 600, 800, 1000
};
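/* Indices into this table map to DAR_PA_PWR codes 8..28 via the
 * ((i + 8) & 0x1F) translation in mcr20a_set_txpower() below.
 */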

static int
mcr20a_set_txpower(struct ieee802154_hw *hw, s32 mbm)
{
	struct mcr20a_local *lp = hw->priv;
	u32 i;

	dev_dbg(printdev(lp), "%s(%d)\n", __func__, mbm);

	for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) {
		if (lp->hw->phy->supported.tx_powers[i] == mbm)
			return regmap_write(lp->regmap_dar, DAR_PA_PWR,
					    ((i + 8) & 0x1F));
	}

	return -EINVAL;
}

#define MCR20A_MAX_ED_LEVELS MCR20A_MIN_CCA_THRESHOLD
static s32 mcr20a_ed_levels[MCR20A_MAX_ED_LEVELS + 1];
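/*
 * Filled in by mcr20a_hw_setup(): entry i holds -i * 100 mbm, i.e. 0 dBm
 * down to -110 dBm, so the table index doubles as the IAR_CCA1_THRESH
 * register value written by mcr20a_set_cca_ed_level().
 */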

static int
mcr20a_set_cca_mode(struct ieee802154_hw *hw,
		    const struct wpan_phy_cca *cca)
{
	struct mcr20a_local *lp = hw->priv;
	unsigned int cca_mode = 0xff;
	bool cca_mode_and = false;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* mapping 802.15.4 to driver spec */
	switch (cca->mode) {
	case NL802154_CCA_ENERGY:
		cca_mode = MCR20A_CCA_MODE1;
		break;
	case NL802154_CCA_CARRIER:
		cca_mode = MCR20A_CCA_MODE2;
		break;
	case NL802154_CCA_ENERGY_CARRIER:
		switch (cca->opt) {
		case NL802154_CCA_OPT_ENERGY_CARRIER_AND:
			cca_mode = MCR20A_CCA_MODE3;
			cca_mode_and = true;
			break;
		case NL802154_CCA_OPT_ENERGY_CARRIER_OR:
			cca_mode = MCR20A_CCA_MODE3;
			cca_mode_and = false;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}
	ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
				 DAR_PHY_CTRL4_CCATYPE_MASK,
				 cca_mode << DAR_PHY_CTRL4_CCATYPE_SHIFT);
	if (ret < 0)
		return ret;

	if (cca_mode == MCR20A_CCA_MODE3) {
		if (cca_mode_and) {
			ret = regmap_update_bits(lp->regmap_iar, IAR_CCA_CTRL,
						 IAR_CCA_CTRL_CCA3_AND_NOT_OR,
						 0x08);
		} else {
			ret = regmap_update_bits(lp->regmap_iar,
						 IAR_CCA_CTRL,
						 IAR_CCA_CTRL_CCA3_AND_NOT_OR,
						 0x00);
		}
		if (ret < 0)
			return ret;
	}

	return ret;
}

static int
mcr20a_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
{
	struct mcr20a_local *lp = hw->priv;
	u32 i;

	dev_dbg(printdev(lp), "%s\n", __func__);

	for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) {
		if (hw->phy->supported.cca_ed_levels[i] == mbm)
			return regmap_write(lp->regmap_iar, IAR_CCA1_THRESH, i);
	}

	return 0;
}

static int
mcr20a_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
{
	struct mcr20a_local *lp = hw->priv;
	int ret;
	u8 rx_frame_filter_reg = 0x0;

	dev_dbg(printdev(lp), "%s(%d)\n", __func__, on);

	if (on) {
		/* All frame types accepted */
		rx_frame_filter_reg &= ~(IAR_RX_FRAME_FLT_FRM_VER);
		rx_frame_filter_reg |= (IAR_RX_FRAME_FLT_ACK_FT |
					IAR_RX_FRAME_FLT_NS_FT);

		ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
					 DAR_PHY_CTRL4_PROMISCUOUS,
					 DAR_PHY_CTRL4_PROMISCUOUS);
		if (ret < 0)
			return ret;

		ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
				   rx_frame_filter_reg);
		if (ret < 0)
			return ret;
	} else {
		ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
					 DAR_PHY_CTRL4_PROMISCUOUS, 0x0);
		if (ret < 0)
			return ret;

		ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
				   IAR_RX_FRAME_FLT_FRM_VER |
				   IAR_RX_FRAME_FLT_BEACON_FT |
				   IAR_RX_FRAME_FLT_DATA_FT |
				   IAR_RX_FRAME_FLT_CMD_FT);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static const struct ieee802154_ops mcr20a_hw_ops = {
	.owner = THIS_MODULE,
	.xmit_async = mcr20a_xmit,
	.ed = mcr20a_ed,
	.set_channel = mcr20a_set_channel,
	.start = mcr20a_start,
	.stop = mcr20a_stop,
	.set_hw_addr_filt = mcr20a_set_hw_addr_filt,
	.set_txpower = mcr20a_set_txpower,
	.set_cca_mode = mcr20a_set_cca_mode,
	.set_cca_ed_level = mcr20a_set_cca_ed_level,
	.set_promiscuous_mode = mcr20a_set_promiscuous_mode,
};

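/*
 * Receive path: an RXIRQ dispatches mcr20a_handle_rx(), which reads the
 * frame length from DAR_RX_FRM_LEN, then burst-reads the packet buffer
 * plus the LQI byte, passes the frame up via ieee802154_rx_irqsafe() and
 * finally restarts the RX sequence, again as chained spi_async()
 * completions.
 */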
static int
mcr20a_request_rx(struct mcr20a_local *lp)
{
	dev_dbg(printdev(lp), "%s\n", __func__);

	/* Start the RX sequence */
	regmap_update_bits_async(lp->regmap_dar, DAR_PHY_CTRL1,
				 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);

	return 0;
}

static void
mcr20a_handle_rx_read_buf_complete(void *context)
{
	struct mcr20a_local *lp = context;
	u8 len = lp->reg_data[0] & DAR_RX_FRAME_LENGTH_MASK;
	struct sk_buff *skb;

	dev_dbg(printdev(lp), "%s\n", __func__);

	dev_dbg(printdev(lp), "RX is done\n");

	if (!ieee802154_is_valid_psdu_len(len)) {
		dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
		len = IEEE802154_MTU;
	}

	len = len - 2; /* get rid of frame check field */

	skb = dev_alloc_skb(len);
	if (!skb)
		return;

	__skb_put_data(skb, lp->rx_buf, len);
	ieee802154_rx_irqsafe(lp->hw, skb, lp->rx_lqi[0]);

	print_hex_dump_debug("mcr20a rx: ", DUMP_PREFIX_OFFSET, 16, 1,
			     lp->rx_buf, len, 0);
	pr_debug("mcr20a rx: lqi: %02hhx\n", lp->rx_lqi[0]);

	/* start RX sequence */
	mcr20a_request_rx(lp);
}

static void
mcr20a_handle_rx_read_len_complete(void *context)
{
	struct mcr20a_local *lp = context;
	u8 len;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* get the length of received frame */
	len = lp->reg_data[0] & DAR_RX_FRAME_LENGTH_MASK;
	dev_dbg(printdev(lp), "frame len : %d\n", len);

	/* prepare to read the rx buf */
	lp->rx_buf_msg.complete = mcr20a_handle_rx_read_buf_complete;
	lp->rx_header[0] = MCR20A_BURST_READ_PACKET_BUF;
	lp->rx_xfer_buf.len = len;

	ret = spi_async(lp->spi, &lp->rx_buf_msg);
	if (ret)
		dev_err(printdev(lp), "failed to read rx buffer\n");
}

static int
mcr20a_handle_rx(struct mcr20a_local *lp)
{
	dev_dbg(printdev(lp), "%s\n", __func__);
	lp->reg_msg.complete = mcr20a_handle_rx_read_len_complete;
	lp->reg_cmd[0] = MCR20A_READ_REG(DAR_RX_FRM_LEN);
	lp->reg_xfer_data.len = 1;

	return spi_async(lp->spi, &lp->reg_msg);
}

static int
mcr20a_handle_tx_complete(struct mcr20a_local *lp)
{
	dev_dbg(printdev(lp), "%s\n", __func__);

	ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);

	return mcr20a_request_rx(lp);
}

static int
mcr20a_handle_tx(struct mcr20a_local *lp)
{
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* write tx buffer */
	lp->tx_header[0] = MCR20A_BURST_WRITE_PACKET_BUF;
	/* add 2 bytes of FCS */
	lp->tx_len[0] = lp->tx_skb->len + 2;
	lp->tx_xfer_buf.tx_buf = lp->tx_skb->data;
	/* add 1 byte psduLength */
	lp->tx_xfer_buf.len = lp->tx_skb->len + 1;

	ret = spi_async(lp->spi, &lp->tx_buf_msg);
	if (ret) {
		dev_err(printdev(lp), "SPI write failed for TX buf\n");
		return ret;
	}

	return 0;
}

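/*
 * Interrupt handling is a chain of spi_async() completions, listed here in
 * reverse order of execution: mcr20a_irq_isr() masks the IRQ and reads the
 * status registers, mcr20a_irq_status_complete() forces the sequencer to
 * idle and writes the status back to clear it, and this handler re-enables
 * the IRQ and dispatches the TX/RX work based on the latched bits.
 */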
static void
mcr20a_irq_clean_complete(void *context)
{
	struct mcr20a_local *lp = context;
	u8 seq_state = lp->irq_data[DAR_IRQ_STS1] & DAR_PHY_CTRL1_XCVSEQ_MASK;

	dev_dbg(printdev(lp), "%s\n", __func__);

	enable_irq(lp->spi->irq);

	dev_dbg(printdev(lp), "IRQ STA1 (%02x) STA2 (%02x)\n",
		lp->irq_data[DAR_IRQ_STS1], lp->irq_data[DAR_IRQ_STS2]);

	switch (seq_state) {
	/* TX IRQ, RX IRQ and SEQ IRQ */
	case (DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
		if (lp->is_tx) {
			lp->is_tx = 0;
			dev_dbg(printdev(lp), "TX is done. No ACK\n");
			mcr20a_handle_tx_complete(lp);
		}
		break;
	case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
		/* rx is starting */
		dev_dbg(printdev(lp), "RX is starting\n");
		mcr20a_handle_rx(lp);
		break;
	case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
		if (lp->is_tx) {
			/* tx is done */
			lp->is_tx = 0;
			dev_dbg(printdev(lp), "TX is done. Got ACK\n");
			mcr20a_handle_tx_complete(lp);
		} else {
			/* rx is starting */
			dev_dbg(printdev(lp), "RX is starting\n");
			mcr20a_handle_rx(lp);
		}
		break;
	case (DAR_IRQSTS1_SEQIRQ):
		if (lp->is_tx) {
			dev_dbg(printdev(lp), "TX is starting\n");
			mcr20a_handle_tx(lp);
		} else {
			dev_dbg(printdev(lp), "MCR20A is stopped\n");
		}
		break;
	}
}

static void mcr20a_irq_status_complete(void *context)
{
	int ret;
	struct mcr20a_local *lp = context;

	dev_dbg(printdev(lp), "%s\n", __func__);
	regmap_update_bits_async(lp->regmap_dar, DAR_PHY_CTRL1,
				 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_IDLE);

	lp->reg_msg.complete = mcr20a_irq_clean_complete;
	lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_IRQ_STS1);
	memcpy(lp->reg_data, lp->irq_data, MCR20A_IRQSTS_NUM);
	lp->reg_xfer_data.len = MCR20A_IRQSTS_NUM;

	ret = spi_async(lp->spi, &lp->reg_msg);

	if (ret)
		dev_err(printdev(lp), "failed to clean irq status\n");
}

static irqreturn_t mcr20a_irq_isr(int irq, void *data)
{
	struct mcr20a_local *lp = data;
	int ret;

	disable_irq_nosync(irq);

	lp->irq_header[0] = MCR20A_READ_REG(DAR_IRQ_STS1);
	/* read IRQSTSx */
	ret = spi_async(lp->spi, &lp->irq_msg);
	if (ret) {
		enable_irq(irq);
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

static void mcr20a_hw_setup(struct mcr20a_local *lp)
{
	u8 i;
	struct ieee802154_hw *hw = lp->hw;
	struct wpan_phy *phy = lp->hw->phy;

	dev_dbg(printdev(lp), "%s\n", __func__);

	hw->flags = IEEE802154_HW_TX_OMIT_CKSUM |
		    IEEE802154_HW_AFILT |
		    IEEE802154_HW_PROMISCUOUS;

	phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
		     WPAN_PHY_FLAG_CCA_MODE;

	phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
		BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER);
	phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
		BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);

	/* initialize cca_ed_levels */
	for (i = MCR20A_MAX_CCA_THRESHOLD; i < MCR20A_MIN_CCA_THRESHOLD + 1;
	     ++i) {
		mcr20a_ed_levels[i] = -i * 100;
	}

	phy->supported.cca_ed_levels = mcr20a_ed_levels;
	phy->supported.cca_ed_levels_size = ARRAY_SIZE(mcr20a_ed_levels);

	phy->cca.mode = NL802154_CCA_ENERGY;

	phy->supported.channels[0] = MCR20A_VALID_CHANNELS;
	phy->current_page = 0;
	/* MCR20A default reset value */
	phy->current_channel = 20;
	phy->supported.tx_powers = mcr20a_powers;
	phy->supported.tx_powers_size = ARRAY_SIZE(mcr20a_powers);
	phy->cca_ed_level = phy->supported.cca_ed_levels[75];
	phy->transmit_power = phy->supported.tx_powers[0x0F];
}

static void
mcr20a_setup_tx_spi_messages(struct mcr20a_local *lp)
{
	spi_message_init(&lp->tx_buf_msg);
	lp->tx_buf_msg.context = lp;
	lp->tx_buf_msg.complete = mcr20a_write_tx_buf_complete;

	lp->tx_xfer_header.len = 1;
	lp->tx_xfer_header.tx_buf = lp->tx_header;

	lp->tx_xfer_len.len = 1;
	lp->tx_xfer_len.tx_buf = lp->tx_len;

	spi_message_add_tail(&lp->tx_xfer_header, &lp->tx_buf_msg);
	spi_message_add_tail(&lp->tx_xfer_len, &lp->tx_buf_msg);
	spi_message_add_tail(&lp->tx_xfer_buf, &lp->tx_buf_msg);
}

static void
mcr20a_setup_rx_spi_messages(struct mcr20a_local *lp)
{
	spi_message_init(&lp->reg_msg);
	lp->reg_msg.context = lp;

	lp->reg_xfer_cmd.len = 1;
	lp->reg_xfer_cmd.tx_buf = lp->reg_cmd;
	lp->reg_xfer_cmd.rx_buf = lp->reg_cmd;

	lp->reg_xfer_data.rx_buf = lp->reg_data;
	lp->reg_xfer_data.tx_buf = lp->reg_data;

	spi_message_add_tail(&lp->reg_xfer_cmd, &lp->reg_msg);
	spi_message_add_tail(&lp->reg_xfer_data, &lp->reg_msg);

	spi_message_init(&lp->rx_buf_msg);
	lp->rx_buf_msg.context = lp;
	lp->rx_buf_msg.complete = mcr20a_handle_rx_read_buf_complete;
	lp->rx_xfer_header.len = 1;
	lp->rx_xfer_header.tx_buf = lp->rx_header;
	lp->rx_xfer_header.rx_buf = lp->rx_header;

	lp->rx_xfer_buf.rx_buf = lp->rx_buf;

	lp->rx_xfer_lqi.len = 1;
	lp->rx_xfer_lqi.rx_buf = lp->rx_lqi;

	spi_message_add_tail(&lp->rx_xfer_header, &lp->rx_buf_msg);
	spi_message_add_tail(&lp->rx_xfer_buf, &lp->rx_buf_msg);
	spi_message_add_tail(&lp->rx_xfer_lqi, &lp->rx_buf_msg);
}

static void
mcr20a_setup_irq_spi_messages(struct mcr20a_local *lp)
{
	spi_message_init(&lp->irq_msg);
	lp->irq_msg.context = lp;
	lp->irq_msg.complete = mcr20a_irq_status_complete;
	lp->irq_xfer_header.len = 1;
	lp->irq_xfer_header.tx_buf = lp->irq_header;
	lp->irq_xfer_header.rx_buf = lp->irq_header;

	lp->irq_xfer_data.len = MCR20A_IRQSTS_NUM;
	lp->irq_xfer_data.rx_buf = lp->irq_data;

	spi_message_add_tail(&lp->irq_xfer_header, &lp->irq_msg);
	spi_message_add_tail(&lp->irq_xfer_data, &lp->irq_msg);
}

static int
mcr20a_phy_init(struct mcr20a_local *lp)
{
	u8 index;
	unsigned int phy_reg = 0;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* Disable Tristate on COCO MISO for SPI reads */
	ret = regmap_write(lp->regmap_iar, IAR_MISC_PAD_CTRL, 0x02);
	if (ret)
		goto err_ret;

	/* Clear all PP IRQ bits in IRQSTS1 to avoid unexpected interrupts
	 * immediately after init
	 */
	ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS1, 0xEF);
	if (ret)
		goto err_ret;

	/* Clear all PP IRQ bits in IRQSTS2 */
	ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS2,
			   DAR_IRQSTS2_ASM_IRQ | DAR_IRQSTS2_PB_ERR_IRQ |
			   DAR_IRQSTS2_WAKE_IRQ);
	if (ret)
		goto err_ret;

	/* Disable all timer interrupts */
	ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS3, 0xFF);
	if (ret)
		goto err_ret;

	/* PHY_CTRL1 : default HW settings + AUTOACK enabled */
	ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
				 DAR_PHY_CTRL1_AUTOACK, DAR_PHY_CTRL1_AUTOACK);
	if (ret)
		goto err_ret;

	/* PHY_CTRL2 : disable all interrupts */
	ret = regmap_write(lp->regmap_dar, DAR_PHY_CTRL2, 0xFF);
	if (ret)
		goto err_ret;

	/* PHY_CTRL3 : disable all timers and remaining interrupts */
	ret = regmap_write(lp->regmap_dar, DAR_PHY_CTRL3,
			   DAR_PHY_CTRL3_ASM_MSK | DAR_PHY_CTRL3_PB_ERR_MSK |
			   DAR_PHY_CTRL3_WAKE_MSK);
	if (ret)
		goto err_ret;

	/* SRC_CTRL : enable Acknowledge Frame Pending and
	 * Source Address Matching Enable
	 */
	ret = regmap_write(lp->regmap_dar, DAR_SRC_CTRL,
			   DAR_SRC_CTRL_ACK_FRM_PND |
			   (DAR_SRC_CTRL_INDEX << DAR_SRC_CTRL_INDEX_SHIFT));
	if (ret)
		goto err_ret;

	/* RX_FRAME_FILTER */
	/* FRM_VER[1:0] = b11. Accept FrameVersion 0 and 1 packets */
	ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
			   IAR_RX_FRAME_FLT_FRM_VER |
			   IAR_RX_FRAME_FLT_BEACON_FT |
			   IAR_RX_FRAME_FLT_DATA_FT |
			   IAR_RX_FRAME_FLT_CMD_FT);
	if (ret)
		goto err_ret;

	dev_info(printdev(lp), "MCR20A DAR overwrites version: 0x%02x\n",
		 MCR20A_OVERWRITE_VERSION);

	/* Overwrites direct registers */
	ret = regmap_write(lp->regmap_dar, DAR_OVERWRITE_VER,
			   MCR20A_OVERWRITE_VERSION);
	if (ret)
		goto err_ret;

	/* Overwrites indirect registers */
	ret = regmap_multi_reg_write(lp->regmap_iar, mar20a_iar_overwrites,
				     ARRAY_SIZE(mar20a_iar_overwrites));
	if (ret)
		goto err_ret;

	/* Clear HW indirect queue */
	dev_dbg(printdev(lp), "clear HW indirect queue\n");
	for (index = 0; index < MCR20A_PHY_INDIRECT_QUEUE_SIZE; index++) {
		phy_reg = (u8)(((index & DAR_SRC_CTRL_INDEX) <<
				DAR_SRC_CTRL_INDEX_SHIFT)
			       | (DAR_SRC_CTRL_SRCADDR_EN)
			       | (DAR_SRC_CTRL_INDEX_DISABLE));
		ret = regmap_write(lp->regmap_dar, DAR_SRC_CTRL, phy_reg);
		if (ret)
			goto err_ret;
		phy_reg = 0;
	}

	/* Assign HW Indirect hash table to PAN0 */
	ret = regmap_read(lp->regmap_iar, IAR_DUAL_PAN_CTRL, &phy_reg);
	if (ret)
		goto err_ret;

	/* Clear current lvl */
	phy_reg &= ~IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_MSK;

	/* Set new lvl */
	phy_reg |= MCR20A_PHY_INDIRECT_QUEUE_SIZE <<
		   IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_SHIFT;
	ret = regmap_write(lp->regmap_iar, IAR_DUAL_PAN_CTRL, phy_reg);
	if (ret)
		goto err_ret;

	/* Set CCA threshold to -75 dBm */
	ret = regmap_write(lp->regmap_iar, IAR_CCA1_THRESH, 0x4B);
	if (ret)
		goto err_ret;

	/* Set prescaler to obtain 1 symbol (16us) timebase */
	ret = regmap_write(lp->regmap_iar, IAR_TMR_PRESCALE, 0x05);
	if (ret)
		goto err_ret;

	/* Enable autodoze mode. */
	ret = regmap_update_bits(lp->regmap_dar, DAR_PWR_MODES,
				 DAR_PWR_MODES_AUTODOZE,
				 DAR_PWR_MODES_AUTODOZE);
	if (ret)
		goto err_ret;

	/* Disable clk_out */
	ret = regmap_update_bits(lp->regmap_dar, DAR_CLK_OUT_CTRL,
				 DAR_CLK_OUT_CTRL_EN, 0x0);
	if (ret)
		goto err_ret;

	return 0;

err_ret:
	return ret;
}

static int
mcr20a_probe(struct spi_device *spi)
{
	struct ieee802154_hw *hw;
	struct mcr20a_local *lp;
	struct gpio_desc *rst_b;
	int irq_type;
	int ret = -ENOMEM;

	dev_dbg(&spi->dev, "%s\n", __func__);

	if (!spi->irq) {
		dev_err(&spi->dev, "no IRQ specified\n");
		return -EINVAL;
	}

	rst_b = devm_gpiod_get(&spi->dev, "rst_b", GPIOD_OUT_HIGH);
	if (IS_ERR(rst_b))
		return dev_err_probe(&spi->dev, PTR_ERR(rst_b),
				     "Failed to get 'rst_b' gpio");

	/* reset mcr20a */
	usleep_range(10, 20);
	gpiod_set_value_cansleep(rst_b, 1);
	usleep_range(10, 20);
	gpiod_set_value_cansleep(rst_b, 0);
	usleep_range(120, 240);

	/* allocate ieee802154_hw and private data */
	hw = ieee802154_alloc_hw(sizeof(*lp), &mcr20a_hw_ops);
	if (!hw) {
		dev_crit(&spi->dev, "ieee802154_alloc_hw failed\n");
		return ret;
	}

	/* init mcr20a local data */
	lp = hw->priv;
	lp->hw = hw;
	lp->spi = spi;

	/* init ieee802154_hw */
	hw->parent = &spi->dev;
	ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);

	/* init buf */
	lp->buf = devm_kzalloc(&spi->dev, SPI_COMMAND_BUFFER, GFP_KERNEL);

	if (!lp->buf) {
		ret = -ENOMEM;
		goto free_dev;
	}

	mcr20a_setup_tx_spi_messages(lp);
	mcr20a_setup_rx_spi_messages(lp);
	mcr20a_setup_irq_spi_messages(lp);

	/* setup regmap */
	lp->regmap_dar = devm_regmap_init_spi(spi, &mcr20a_dar_regmap);
	if (IS_ERR(lp->regmap_dar)) {
		ret = PTR_ERR(lp->regmap_dar);
		dev_err(&spi->dev, "Failed to allocate dar map: %d\n",
			ret);
		goto free_dev;
	}

	lp->regmap_iar = devm_regmap_init_spi(spi, &mcr20a_iar_regmap);
	if (IS_ERR(lp->regmap_iar)) {
		ret = PTR_ERR(lp->regmap_iar);
		dev_err(&spi->dev, "Failed to allocate iar map: %d\n", ret);
		goto free_dev;
	}

	mcr20a_hw_setup(lp);

	spi_set_drvdata(spi, lp);

	ret = mcr20a_phy_init(lp);
	if (ret < 0) {
		dev_crit(&spi->dev, "mcr20a_phy_init failed\n");
		goto free_dev;
	}

	irq_type = irq_get_trigger_type(spi->irq);
	if (!irq_type)
		irq_type = IRQF_TRIGGER_FALLING;

	ret = devm_request_irq(&spi->dev, spi->irq, mcr20a_irq_isr,
			       irq_type | IRQF_NO_AUTOEN, dev_name(&spi->dev), lp);
	if (ret) {
		dev_err(&spi->dev, "could not request_irq for mcr20a\n");
		ret = -ENODEV;
		goto free_dev;
	}

	ret = ieee802154_register_hw(hw);
	if (ret) {
		dev_crit(&spi->dev, "ieee802154_register_hw failed\n");
		goto free_dev;
	}

	return ret;

free_dev:
	ieee802154_free_hw(lp->hw);

	return ret;
}

static void mcr20a_remove(struct spi_device *spi)
{
	struct mcr20a_local *lp = spi_get_drvdata(spi);

	dev_dbg(&spi->dev, "%s\n", __func__);

	ieee802154_unregister_hw(lp->hw);
	ieee802154_free_hw(lp->hw);
}

static const struct of_device_id mcr20a_of_match[] = {
	{ .compatible = "nxp,mcr20a", },
	{ },
};
MODULE_DEVICE_TABLE(of, mcr20a_of_match);
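/*
 * Illustrative device tree fragment: the compatible string and the "rst_b"
 * GPIO consumer id come from the driver above, everything else (numbers,
 * controller phandles, IRQ/GPIO polarity) is board-specific and shown only
 * as an example; see the mcr20a binding document for the authoritative
 * description.
 *
 *	ieee802154@0 {
 *		compatible = "nxp,mcr20a";
 *		reg = <0>;
 *		spi-max-frequency = <9000000>;
 *		interrupt-parent = <&gpio>;
 *		interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
 *		rst_b-gpios = <&gpio 27 GPIO_ACTIVE_HIGH>;
 *	};
 */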

static const struct spi_device_id mcr20a_device_id[] = {
	{ .name = "mcr20a", },
	{ },
};
MODULE_DEVICE_TABLE(spi, mcr20a_device_id);

static struct spi_driver mcr20a_driver = {
	.id_table = mcr20a_device_id,
	.driver = {
		.of_match_table = mcr20a_of_match,
		.name = "mcr20a",
	},
	.probe = mcr20a_probe,
	.remove = mcr20a_remove,
};

module_spi_driver(mcr20a_driver);

MODULE_DESCRIPTION("MCR20A Transceiver Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Xue Liu <liuxuenetmail@gmail.com>");