/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/delay.h>

#include "mt76.h"
#include "mt76x02_mcu.h"
#include "mt76x02_dma.h"

/* Copy a raw MCU command payload into a freshly allocated skb. */
struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return NULL;
	memcpy(skb_put(skb, len), data, len);

	return skb;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_alloc);

/* Wait until @expires for a response frame on the MCU response queue. */
static struct sk_buff *
mt76x02_mcu_get_response(struct mt76_dev *dev, unsigned long expires)
{
	unsigned long timeout;

	if (!time_is_after_jiffies(expires))
		return NULL;

	timeout = expires - jiffies;
	wait_event_timeout(dev->mmio.mcu.wait,
			   !skb_queue_empty(&dev->mmio.mcu.res_q),
			   timeout);
	return skb_dequeue(&dev->mmio.mcu.res_q);
}

/* DMA-map an MCU message and push it onto the given TX queue. */
static int
mt76x02_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid,
		     struct sk_buff *skb, int cmd, int seq)
{
	struct mt76_queue *q = &dev->q_tx[qid];
	struct mt76_queue_buf buf;
	dma_addr_t addr;
	u32 tx_info;

	tx_info = MT_MCU_MSG_TYPE_CMD |
		  FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
		  FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
		  FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
		  FIELD_PREP(MT_MCU_MSG_LEN, skb->len);

	addr = dma_map_single(dev->dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, addr))
		return -ENOMEM;

	buf.addr = addr;
	buf.len = skb->len;
	spin_lock_bh(&q->lock);
	dev->queue_ops->add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	dev->queue_ops->kick(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;
}

int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
			 int cmd, bool wait_resp)
{
	unsigned long expires = jiffies + HZ;
	int ret;
	u8 seq;

	if (!skb)
		return -EINVAL;

	mutex_lock(&dev->mmio.mcu.mutex);

	/* Sequence numbers are 4 bits wide; zero is skipped so every
	 * message carries a non-zero sequence number.
	 */
	seq = ++dev->mmio.mcu.msg_seq & 0xf;
	if (!seq)
		seq = ++dev->mmio.mcu.msg_seq & 0xf;

	ret = mt76x02_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
	if (ret)
		goto out;

	while (wait_resp) {
		u32 *rxfce;
		bool check_seq = false;

		skb = mt76x02_mcu_get_response(dev, expires);
		if (!skb) {
			dev_err(dev->dev,
				"MCU message %d (seq %d) timed out\n", cmd,
				seq);
			ret = -ETIMEDOUT;
			break;
		}

		rxfce = (u32 *) skb->cb;

		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
			check_seq = true;

		/* Discard stale responses until the sequence number matches */
		dev_kfree_skb(skb);
		if (check_seq)
			break;
	}

out:
	mutex_unlock(&dev->mmio.mcu.mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send);
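
/*
 * Illustrative sketch (not part of the driver): a caller issuing a raw
 * command through the helpers above builds a little-endian payload,
 * wraps it in an skb and hands it off. This is essentially what
 * mt76x02_mcu_function_select() below boils down to; the Q_SELECT
 * value of 1 is only an example.
 *
 *	__le32 data[2] = { cpu_to_le32(Q_SELECT), cpu_to_le32(1) };
 *	struct sk_buff *skb = dev->mcu_ops->mcu_msg_alloc(data, sizeof(data));
 *	int err = dev->mcu_ops->mcu_send_msg(dev, skb, CMD_FUN_SET_OP, true);
 */
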
int mt76x02_mcu_function_select(struct mt76_dev *dev,
				enum mcu_function func,
				u32 val, bool wait_resp)
{
	struct sk_buff *skb;
	struct {
		__le32 id;
		__le32 value;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(func),
		.value = cpu_to_le32(val),
	};

	skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg));
	return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_FUN_SET_OP,
					  wait_resp);
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select);

/* Turn the radio on or off through a CMD_POWER_SAVING_OP request. */
int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on,
				bool wait_resp)
{
	struct sk_buff *skb;
	struct {
		__le32 mode;
		__le32 level;
	} __packed __aligned(4) msg = {
		.mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
		.level = cpu_to_le32(0),
	};

	skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg));
	return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP,
					  wait_resp);
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state);

int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type,
			  u32 param, bool wait)
{
	struct sk_buff *skb;
	struct {
		__le32 id;
		__le32 value;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(type),
		.value = cpu_to_le32(param),
	};
	int ret;

	/* The firmware flags completion by setting bit 31 of
	 * MT_MCU_COM_REG0, so clear it before sending the request.
	 */
	if (wait)
		dev->bus->rmw(dev, MT_MCU_COM_REG0, BIT(31), 0);

	skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg));
	ret = dev->mcu_ops->mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
	if (ret)
		return ret;

	/* Give the firmware up to 100ms to signal completion */
	if (wait &&
	    WARN_ON(!__mt76_poll_msec(dev, MT_MCU_COM_REG0,
				      BIT(31), BIT(31), 100)))
		return -ETIMEDOUT;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_calibrate);

int mt76x02_mcu_cleanup(struct mt76_dev *dev)
{
	struct sk_buff *skb;

	dev->bus->wr(dev, MT_MCU_INT_LEVEL, 1);
	usleep_range(20000, 30000);

	/* Flush any responses that are still queued */
	while ((skb = skb_dequeue(&dev->mmio.mcu.res_q)) != NULL)
		dev_kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_cleanup);

/* Expose the firmware version (major.minor.patch-build) through ethtool */
void mt76x02_set_ethtool_fwver(struct mt76_dev *dev,
			       const struct mt76x02_fw_header *h)
{
	u16 bld = le16_to_cpu(h->build_ver);
	u16 ver = le16_to_cpu(h->fw_ver);

	snprintf(dev->hw->wiphy->fw_version,
		 sizeof(dev->hw->wiphy->fw_version),
		 "%d.%d.%02d-b%x",
		 (ver >> 12) & 0xf, (ver >> 8) & 0xf, ver & 0xf, bld);
}
EXPORT_SYMBOL_GPL(mt76x02_set_ethtool_fwver);
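
/*
 * Illustrative sketch (an assumption, not code from this file): chip-
 * specific bring-up paths combine the helpers above roughly as below.
 * The exact ordering and calibration types are chip-dependent;
 * MCU_CAL_R is used here only as an example calibration type.
 *
 *	mt76x02_mcu_function_select(dev, Q_SELECT, 1, true);
 *	mt76x02_mcu_set_radio_state(dev, true, true);
 *	mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, true);
 */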