/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dmapool.h>
#include <linux/platform_device.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"

#define CMD_POLL_TOKEN		0xffff
#define CMD_MAX_NUM		32
#define STATUS_MASK		0xff
#define CMD_TOKEN_MASK		0x1f
#define GO_BIT_TIMEOUT_MSECS	10000

enum {
	HCR_TOKEN_OFFSET	= 0x14,
	HCR_STATUS_OFFSET	= 0x18,
	HCR_GO_BIT		= 15,
};

static int cmd_pending(struct hns_roce_dev *hr_dev)
{
	u32 status = readl(hr_dev->cmd.hcr + HCR_TOKEN_OFFSET);

	return (!!(status & (1 << HCR_GO_BIT)));
}

/* this function should be serialized with "hcr_mutex" */
static int __hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev,
				       u64 in_param, u64 out_param,
				       u32 in_modifier, u8 op_modifier, u16 op,
				       u16 token, int event)
{
	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
	struct device *dev = &hr_dev->pdev->dev;
	u32 __iomem *hcr = (u32 __iomem *)cmd->hcr;
	int ret = -EAGAIN;
	unsigned long end;
	u32 val = 0;

	end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
	while (cmd_pending(hr_dev)) {
		if (time_after(jiffies, end)) {
			dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
				(int)end);
			goto out;
		}
		cond_resched();
	}

	roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
		       op);
	roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
		       ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
	roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
	roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
	roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
		       ROCEE_MB6_ROCEE_MB_TOKEN_S, token);

	__raw_writeq(cpu_to_le64(in_param), hcr + 0);
	__raw_writeq(cpu_to_le64(out_param), hcr + 2);
	__raw_writel(cpu_to_le32(in_modifier), hcr + 4);
	/* Memory barrier */
	wmb();

	__raw_writel(cpu_to_le32(val), hcr + 5);

	mmiowb();
	ret = 0;

out:
	return ret;
}
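
/*
 * Post a mailbox command with "hcr_mutex" held, so that writes to the HCR
 * registers from concurrent callers are serialized.
 */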
static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
				     u64 out_param, u32 in_modifier,
				     u8 op_modifier, u16 op, u16 token,
				     int event)
{
	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
	int ret = -EAGAIN;

	mutex_lock(&cmd->hcr_mutex);
	ret = __hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
					  in_modifier, op_modifier, op, token,
					  event);
	mutex_unlock(&cmd->hcr_mutex);

	return ret;
}

/* this should be called with "poll_sem" held */
static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
				    u64 out_param, unsigned long in_modifier,
				    u8 op_modifier, u16 op,
				    unsigned long timeout)
{
	struct device *dev = &hr_dev->pdev->dev;
	u8 __iomem *hcr = hr_dev->cmd.hcr;
	unsigned long end = 0;
	u32 status = 0;
	int ret;

	ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
					in_modifier, op_modifier, op,
					CMD_POLL_TOKEN, 0);
	if (ret) {
		dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n");
		goto out;
	}

	end = msecs_to_jiffies(timeout) + jiffies;
	while (cmd_pending(hr_dev) && time_before(jiffies, end))
		cond_resched();

	if (cmd_pending(hr_dev)) {
		dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	status = le32_to_cpu((__force __le32)
			      __raw_readl(hcr + HCR_STATUS_OFFSET));
	if ((status & STATUS_MASK) != 0x1) {
		dev_err(dev, "mailbox status 0x%x!\n", status);
		ret = -EBUSY;
		goto out;
	}

out:
	return ret;
}

static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
				  u64 out_param, unsigned long in_modifier,
				  u8 op_modifier, u16 op, unsigned long timeout)
{
	int ret;

	down(&hr_dev->cmd.poll_sem);
	ret = __hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, in_modifier,
				       op_modifier, op, timeout);
	up(&hr_dev->cmd.poll_sem);

	return ret;
}

void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
			u64 out_param)
{
	struct hns_roce_cmd_context
		*context = &hr_dev->cmd.context[token & hr_dev->cmd.token_mask];

	if (token != context->token)
		return;

	context->result = (status == HNS_ROCE_CMD_SUCCESS) ? 0 : (-EIO);
	context->out_param = out_param;
	complete(&context->done);
}
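
/*
 * Event-mode path: a command context is taken from the free list and its
 * token is advanced by token_mask + 1 so that stale completions are
 * ignored; the caller then sleeps until hns_roce_cmd_event() completes
 * the context or the wait times out.
 */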
/* this should be called with "event_sem" held, i.e. in event (use_events) mode */
static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
				    u64 out_param, unsigned long in_modifier,
				    u8 op_modifier, u16 op,
				    unsigned long timeout)
{
	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_cmd_context *context;
	int ret = 0;

	spin_lock(&cmd->context_lock);
	WARN_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	init_completion(&context->done);

	ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
					in_modifier, op_modifier, op,
					context->token, 1);
	if (ret)
		goto out;

	/*
	 * wait_for_completion_timeout() returns 0 on timeout and the
	 * remaining time (in jiffies) otherwise, so a zero return means
	 * the command event never arrived within "timeout" msecs.
	 */
	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		dev_err(dev, "[cmd]wait_for_completion_timeout timeout\n");
		ret = -EBUSY;
		goto out;
	}

	ret = context->result;
	if (ret) {
		dev_err(dev, "[cmd]event mod cmd process error!err=%d\n", ret);
		goto out;
	}

out:
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	return ret;
}

static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
				  u64 out_param, unsigned long in_modifier,
				  u8 op_modifier, u16 op, unsigned long timeout)
{
	int ret = 0;

	down(&hr_dev->cmd.event_sem);
	ret = __hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
				       in_modifier, op_modifier, op, timeout);
	up(&hr_dev->cmd.event_sem);

	return ret;
}

int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
		      unsigned long in_modifier, u8 op_modifier, u16 op,
		      unsigned long timeout)
{
	if (hr_dev->cmd.use_events)
		return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
					      in_modifier, op_modifier, op,
					      timeout);
	else
		return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
					      in_modifier, op_modifier, op,
					      timeout);
}

int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;

	mutex_init(&hr_dev->cmd.hcr_mutex);
	sema_init(&hr_dev->cmd.poll_sem, 1);
	hr_dev->cmd.use_events = 0;
	hr_dev->cmd.toggle = 1;
	hr_dev->cmd.max_cmds = CMD_MAX_NUM;
	hr_dev->cmd.hcr = hr_dev->reg_base + ROCEE_MB1_REG;
	hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
					   HNS_ROCE_MAILBOX_SIZE,
					   HNS_ROCE_MAILBOX_SIZE, 0);
	if (!hr_dev->cmd.pool)
		return -ENOMEM;

	return 0;
}

void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev)
{
	dma_pool_destroy(hr_dev->cmd.pool);
}

int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
	int i;

	hr_cmd->context = kmalloc(hr_cmd->max_cmds *
				  sizeof(struct hns_roce_cmd_context),
				  GFP_KERNEL);
	if (!hr_cmd->context)
		return -ENOMEM;

	for (i = 0; i < hr_cmd->max_cmds; ++i) {
		hr_cmd->context[i].token = i;
		hr_cmd->context[i].next = i + 1;
	}

	hr_cmd->context[hr_cmd->max_cmds - 1].next = -1;
	hr_cmd->free_head = 0;

	sema_init(&hr_cmd->event_sem, hr_cmd->max_cmds);
	spin_lock_init(&hr_cmd->context_lock);

	hr_cmd->token_mask = CMD_TOKEN_MASK;
	hr_cmd->use_events = 1;

	down(&hr_cmd->poll_sem);

	return 0;
}
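
/*
 * Switch back to polling mode: taking every event_sem slot waits for all
 * outstanding event-mode commands to finish before the context array is
 * freed and the polling semaphore is released again.
 */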
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
	int i;

	hr_cmd->use_events = 0;

	for (i = 0; i < hr_cmd->max_cmds; ++i)
		down(&hr_cmd->event_sem);

	kfree(hr_cmd->context);
	up(&hr_cmd->poll_sem);
}

struct hns_roce_cmd_mailbox
	*hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = dma_pool_alloc(hr_dev->cmd.pool, GFP_KERNEL,
				      &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	return mailbox;
}

void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cmd_mailbox *mailbox)
{
	if (!mailbox)
		return;

	dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
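
/*
 * Usage sketch (illustrative only, not a call site in this file): callers
 * pair a mailbox buffer with hns_roce_cmd_mbox(). "obj", "EXAMPLE_CMD_OP"
 * and the 10000 msec timeout below are placeholders, not values defined
 * here.
 *
 *	struct hns_roce_cmd_mailbox *mailbox;
 *	int ret;
 *
 *	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
 *	if (IS_ERR(mailbox))
 *		return PTR_ERR(mailbox);
 *
 *	... fill mailbox->buf with the command payload ...
 *
 *	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, obj, 0,
 *				EXAMPLE_CMD_OP, 10000);
 *
 *	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
 *	return ret;
 */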