Lines Matching +full:mailbox +full:-

14 *     - Redistributions of source code must retain the above
18 *     - Redistributions in binary form must reproduce the above
46 ret = hr_dev->hw->post_mbox(hr_dev, mbox_msg); in hns_roce_cmd_mbox_post_hw()
50 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MBX_POSTED_CNT]); in hns_roce_cmd_mbox_post_hw()
63 dev_err_ratelimited(hr_dev->dev, in __hns_roce_cmd_mbox_poll()
64 "failed to post mailbox 0x%x in poll mode, ret = %d.\n", in __hns_roce_cmd_mbox_poll()
65 mbox_msg->cmd, ret); in __hns_roce_cmd_mbox_poll()
69 ret = hr_dev->hw->poll_mbox_done(hr_dev); in __hns_roce_cmd_mbox_poll()
73 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MBX_POLLED_CNT]); in __hns_roce_cmd_mbox_poll()
83 down(&hr_dev->cmd.poll_sem); in hns_roce_cmd_mbox_poll()
85 up(&hr_dev->cmd.poll_sem); in hns_roce_cmd_mbox_poll()
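
The two poll_sem fragments above come from a small wrapper that serializes poll-mode commands: take the semaphore, post and poll the mailbox, then release it. A minimal sketch of that wrapper, reconstructed from the fragments (the exact body in the driver may differ):

static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev,
				  struct hns_roce_mbox_msg *mbox_msg)
{
	int ret;

	/* poll_sem is initialized to 1 in hns_roce_cmd_init(), so only one
	 * poll-mode mailbox command can be in flight at a time. */
	down(&hr_dev->cmd.poll_sem);
	ret = __hns_roce_cmd_mbox_poll(hr_dev, mbox_msg);
	up(&hr_dev->cmd.poll_sem);

	return ret;
}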
94 &hr_dev->cmd.context[token % hr_dev->cmd.max_cmds]; in hns_roce_cmd_event()
96 if (unlikely(token != context->token)) { in hns_roce_cmd_event()
97 dev_err_ratelimited(hr_dev->dev, in hns_roce_cmd_event()
99 token, context->token); in hns_roce_cmd_event()
103 context->result = (status == HNS_ROCE_CMD_SUCCESS) ? 0 : (-EIO); in hns_roce_cmd_event()
104 context->out_param = out_param; in hns_roce_cmd_event()
105 complete(&context->done); in hns_roce_cmd_event()
106 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MBX_EVENT_CNT]); in hns_roce_cmd_event()
112 struct hns_roce_cmdq *cmd = &hr_dev->cmd; in __hns_roce_cmd_mbox_wait()
114 struct device *dev = hr_dev->dev; in __hns_roce_cmd_mbox_wait()
117 spin_lock(&cmd->context_lock); in __hns_roce_cmd_mbox_wait()
120 context = &cmd->context[cmd->free_head]; in __hns_roce_cmd_mbox_wait()
121 cmd->free_head = context->next; in __hns_roce_cmd_mbox_wait()
122 } while (context->busy); in __hns_roce_cmd_mbox_wait()
124 context->busy = 1; in __hns_roce_cmd_mbox_wait()
125 context->token += cmd->max_cmds; in __hns_roce_cmd_mbox_wait()
127 spin_unlock(&cmd->context_lock); in __hns_roce_cmd_mbox_wait()
129 reinit_completion(&context->done); in __hns_roce_cmd_mbox_wait()
131 mbox_msg->token = context->token; in __hns_roce_cmd_mbox_wait()
135 "failed to post mailbox 0x%x in event mode, ret = %d.\n", in __hns_roce_cmd_mbox_wait()
136 mbox_msg->cmd, ret); in __hns_roce_cmd_mbox_wait()
140 if (!wait_for_completion_timeout(&context->done, in __hns_roce_cmd_mbox_wait()
142 dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x timeout.\n", in __hns_roce_cmd_mbox_wait()
143 context->token, mbox_msg->cmd); in __hns_roce_cmd_mbox_wait()
144 ret = -EBUSY; in __hns_roce_cmd_mbox_wait()
148 ret = context->result; in __hns_roce_cmd_mbox_wait()
150 dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x error %d.\n", in __hns_roce_cmd_mbox_wait()
151 context->token, mbox_msg->cmd, ret); in __hns_roce_cmd_mbox_wait()
154 context->busy = 0; in __hns_roce_cmd_mbox_wait()
163 down(&hr_dev->cmd.event_sem); in hns_roce_cmd_mbox_wait()
165 up(&hr_dev->cmd.event_sem); in hns_roce_cmd_mbox_wait()
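
The event_sem fragments above follow the same wrapper shape as the poll path, but around the event-mode helper. A hedged sketch based on those fragments; event_sem is a counting semaphore sized to max_cmds (see the sema_init() fragment further down), so up to max_cmds event-mode commands can be outstanding at once, one per context slot:

static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev,
				  struct hns_roce_mbox_msg *mbox_msg)
{
	int ret;

	/* Counting semaphore: each in-flight event-mode command holds one
	 * count, matching one entry of the context array. */
	down(&hr_dev->cmd.event_sem);
	ret = __hns_roce_cmd_mbox_wait(hr_dev, mbox_msg);
	up(&hr_dev->cmd.event_sem);

	return ret;
}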
176 if (hr_dev->hw->chk_mbox_avail) in hns_roce_cmd_mbox()
177 if (!hr_dev->hw->chk_mbox_avail(hr_dev, &is_busy)) in hns_roce_cmd_mbox()
178 return is_busy ? -EBUSY : 0; in hns_roce_cmd_mbox()
185 if (hr_dev->cmd.use_events) { in hns_roce_cmd_mbox()
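
The hns_roce_cmd_mbox() fragments show the entry point: an optional hardware availability check, then dispatch to either the event path or the poll path. A hedged sketch of how that dispatch plausibly fits together, built only from the fragments above; the hns_roce_mbox_msg field names other than cmd and token (in_param, out_param, tag) are assumptions:

int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
		      u8 cmd, unsigned long tag)
{
	struct hns_roce_mbox_msg mbox_msg = {};
	bool is_busy;

	/* Some hardware revisions can report the mailbox as unusable
	 * (e.g. during reset): busy maps to -EBUSY, otherwise the command
	 * is skipped and 0 is returned. */
	if (hr_dev->hw->chk_mbox_avail)
		if (!hr_dev->hw->chk_mbox_avail(hr_dev, &is_busy))
			return is_busy ? -EBUSY : 0;

	mbox_msg.in_param = in_param;	/* assumed field names */
	mbox_msg.out_param = out_param;
	mbox_msg.cmd = cmd;
	mbox_msg.tag = tag;

	/* Event mode sleeps on a completion signalled from the async event
	 * handler; otherwise the driver busy-polls under poll_sem. */
	if (hr_dev->cmd.use_events)
		return hns_roce_cmd_mbox_wait(hr_dev, &mbox_msg);

	return hns_roce_cmd_mbox_poll(hr_dev, &mbox_msg);
}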
199 sema_init(&hr_dev->cmd.poll_sem, 1); in hns_roce_cmd_init()
200 hr_dev->cmd.use_events = 0; in hns_roce_cmd_init()
201 hr_dev->cmd.max_cmds = CMD_MAX_NUM; in hns_roce_cmd_init()
202 hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", hr_dev->dev, in hns_roce_cmd_init()
205 if (!hr_dev->cmd.pool) in hns_roce_cmd_init()
206 return -ENOMEM; in hns_roce_cmd_init()
213 dma_pool_destroy(hr_dev->cmd.pool); in hns_roce_cmd_cleanup()
218 struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd; in hns_roce_cmd_use_events()
221 hr_cmd->context = in hns_roce_cmd_use_events()
222 kcalloc(hr_cmd->max_cmds, sizeof(*hr_cmd->context), GFP_KERNEL); in hns_roce_cmd_use_events()
223 if (!hr_cmd->context) { in hns_roce_cmd_use_events()
224 hr_dev->cmd_mod = 0; in hns_roce_cmd_use_events()
225 return -ENOMEM; in hns_roce_cmd_use_events()
228 for (i = 0; i < hr_cmd->max_cmds; ++i) { in hns_roce_cmd_use_events()
229 hr_cmd->context[i].token = i; in hns_roce_cmd_use_events()
230 hr_cmd->context[i].next = i + 1; in hns_roce_cmd_use_events()
231 init_completion(&hr_cmd->context[i].done); in hns_roce_cmd_use_events()
233 hr_cmd->context[hr_cmd->max_cmds - 1].next = 0; in hns_roce_cmd_use_events()
234 hr_cmd->free_head = 0; in hns_roce_cmd_use_events()
236 sema_init(&hr_cmd->event_sem, hr_cmd->max_cmds); in hns_roce_cmd_use_events()
237 spin_lock_init(&hr_cmd->context_lock); in hns_roce_cmd_use_events()
239 hr_cmd->use_events = 1; in hns_roce_cmd_use_events()
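
The context-pool fragments above set each slot's token equal to its index at init, link the slots into a free list (next = i + 1, with the last slot wrapping to 0), and bump a slot's token by max_cmds every time it is reused. As a result, token % max_cmds in hns_roce_cmd_event() still selects the right slot, while a stale event carrying an old token value fails the token != context->token check. A small standalone demonstration of that arithmetic; the MAX_CMDS value here is illustrative, the driver uses CMD_MAX_NUM:

#include <stdio.h>

#define MAX_CMDS 16	/* illustrative; the driver sets max_cmds = CMD_MAX_NUM */

int main(void)
{
	unsigned int token = 3;	/* slot 3 starts with token == index */
	int use;

	/* Each reuse adds MAX_CMDS (context->token += cmd->max_cmds), so the
	 * slot index token % MAX_CMDS stays stable while the token value
	 * itself distinguishes the current command from stale completions. */
	for (use = 0; use < 4; use++) {
		printf("use %d: token=0x%x -> slot %u\n",
		       use, token, token % MAX_CMDS);
		token += MAX_CMDS;
	}

	return 0;
}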
246 struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd; in hns_roce_cmd_use_polling()
248 kfree(hr_cmd->context); in hns_roce_cmd_use_polling()
249 hr_cmd->use_events = 0; in hns_roce_cmd_use_polling()
255 struct hns_roce_cmd_mailbox *mailbox; in hns_roce_alloc_cmd_mailbox() local
257 mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL); in hns_roce_alloc_cmd_mailbox()
258 if (!mailbox) in hns_roce_alloc_cmd_mailbox()
259 return ERR_PTR(-ENOMEM); in hns_roce_alloc_cmd_mailbox()
261 mailbox->buf = in hns_roce_alloc_cmd_mailbox()
262 dma_pool_alloc(hr_dev->cmd.pool, GFP_KERNEL, &mailbox->dma); in hns_roce_alloc_cmd_mailbox()
263 if (!mailbox->buf) { in hns_roce_alloc_cmd_mailbox()
264 kfree(mailbox); in hns_roce_alloc_cmd_mailbox()
265 return ERR_PTR(-ENOMEM); in hns_roce_alloc_cmd_mailbox()
268 return mailbox; in hns_roce_alloc_cmd_mailbox()
272 struct hns_roce_cmd_mailbox *mailbox) in hns_roce_free_cmd_mailbox() argument
274 if (!mailbox) in hns_roce_free_cmd_mailbox()
277 dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma); in hns_roce_free_cmd_mailbox()
278 kfree(mailbox); in hns_roce_free_cmd_mailbox()
282 struct hns_roce_cmd_mailbox *mailbox, in hns_roce_create_hw_ctx() argument
285 return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cmd, idx); in hns_roce_create_hw_ctx()
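
hns_roce_create_hw_ctx() shows the common caller pattern implied by the mailbox fragments: allocate a mailbox from the DMA pool, hand its bus address to the firmware as the command's input parameter, then free it. A hedged sketch of a hypothetical caller; example_post_ctx is not a driver function, only an illustration of the pattern:

static int example_post_ctx(struct hns_roce_dev *hr_dev, const void *ctx,
			    size_t len, u8 cmd, unsigned long idx)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* Fill the DMA-coherent buffer with the context to hand to firmware,
	 * then post it; mailbox->dma becomes the command's in_param. */
	memcpy(mailbox->buf, ctx, len);
	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, cmd, idx);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}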