xref: /linux/drivers/infiniband/hw/hns/hns_roce_cmd.c (revision 06d07429858317ded2db7986113a9e0129cd599b)
1 /*
2  * Copyright (c) 2016 Hisilicon Limited.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/dmapool.h>
34 #include "hns_roce_common.h"
35 #include "hns_roce_device.h"
36 #include "hns_roce_cmd.h"
37 
38 #define CMD_POLL_TOKEN 0xffff
39 #define CMD_MAX_NUM 32
40 
hns_roce_cmd_mbox_post_hw(struct hns_roce_dev * hr_dev,struct hns_roce_mbox_msg * mbox_msg)41 static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev,
42 				     struct hns_roce_mbox_msg *mbox_msg)
43 {
44 	int ret;
45 
46 	ret = hr_dev->hw->post_mbox(hr_dev, mbox_msg);
47 	if (ret)
48 		return ret;
49 
50 	atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MBX_POSTED_CNT]);
51 
52 	return 0;
53 }
54 
55 /* this should be called with "poll_sem" */
__hns_roce_cmd_mbox_poll(struct hns_roce_dev * hr_dev,struct hns_roce_mbox_msg * mbox_msg)56 static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev,
57 				    struct hns_roce_mbox_msg *mbox_msg)
58 {
59 	int ret;
60 
61 	ret = hns_roce_cmd_mbox_post_hw(hr_dev, mbox_msg);
62 	if (ret) {
63 		dev_err_ratelimited(hr_dev->dev,
64 				    "failed to post mailbox 0x%x in poll mode, ret = %d.\n",
65 				    mbox_msg->cmd, ret);
66 		return ret;
67 	}
68 
69 	ret = hr_dev->hw->poll_mbox_done(hr_dev);
70 	if (ret)
71 		return ret;
72 
73 	atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MBX_POLLED_CNT]);
74 
75 	return 0;
76 }
77 
hns_roce_cmd_mbox_poll(struct hns_roce_dev * hr_dev,struct hns_roce_mbox_msg * mbox_msg)78 static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev,
79 				  struct hns_roce_mbox_msg *mbox_msg)
80 {
81 	int ret;
82 
83 	down(&hr_dev->cmd.poll_sem);
84 	ret = __hns_roce_cmd_mbox_poll(hr_dev, mbox_msg);
85 	up(&hr_dev->cmd.poll_sem);
86 
87 	return ret;
88 }
89 
/*
 * Completion handler for event-mode mailboxes: called from the async event
 * path when HW reports that the mailbox identified by @token has finished.
 * Publishes the result into the matching context and wakes the waiter in
 * __hns_roce_cmd_mbox_wait().
 */
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
			u64 out_param)
{
	/* token % max_cmds recovers the context slot the command was issued from. */
	struct hns_roce_cmd_context *context =
		&hr_dev->cmd.context[token % hr_dev->cmd.max_cmds];

	/* Reject stale events so they cannot complete an unrelated command. */
	if (unlikely(token != context->token)) {
		dev_err_ratelimited(hr_dev->dev,
				    "[cmd] invalid ae token 0x%x, context token is 0x%x.\n",
				    token, context->token);
		return;
	}

	/* Store result and out_param before complete() wakes the waiter. */
	context->result = (status == HNS_ROCE_CMD_SUCCESS) ? 0 : (-EIO);
	context->out_param = out_param;
	complete(&context->done);
	atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MBX_EVENT_CNT]);
}
108 
/*
 * Issue a mailbox in event mode and sleep until hns_roce_cmd_event()
 * completes it, or HNS_ROCE_CMD_TIMEOUT_MSECS elapses (-EBUSY).
 * Caller must hold a slot of cmd->event_sem (see hns_roce_cmd_mbox_wait()),
 * which guarantees a free context exists.
 */
static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev,
				    struct hns_roce_mbox_msg *mbox_msg)
{
	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
	struct hns_roce_cmd_context *context;
	struct device *dev = hr_dev->dev;
	int ret;

	spin_lock(&cmd->context_lock);

	/* Walk the circular free list until a non-busy slot turns up. */
	do {
		context = &cmd->context[cmd->free_head];
		cmd->free_head = context->next;
	} while (context->busy);

	context->busy = 1;
	/*
	 * Advance the token by max_cmds: it stays congruent to the slot index
	 * (hns_roce_cmd_event() indexes with token % max_cmds) while making a
	 * stale event from a previous use of this slot detectable.
	 */
	context->token += cmd->max_cmds;

	spin_unlock(&cmd->context_lock);

	reinit_completion(&context->done);

	mbox_msg->token = context->token;
	ret = hns_roce_cmd_mbox_post_hw(hr_dev, mbox_msg);
	if (ret) {
		dev_err_ratelimited(dev,
				    "failed to post mailbox 0x%x in event mode, ret = %d.\n",
				    mbox_msg->cmd, ret);
		goto out;
	}

	if (!wait_for_completion_timeout(&context->done,
				msecs_to_jiffies(HNS_ROCE_CMD_TIMEOUT_MSECS))) {
		dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x timeout.\n",
				    context->token, mbox_msg->cmd);
		ret = -EBUSY;
		goto out;
	}

	/* result/out_param were published by hns_roce_cmd_event(). */
	ret = context->result;
	if (ret)
		dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x error %d.\n",
				    context->token, mbox_msg->cmd, ret);

out:
	/* Release the slot; the free-list walk above skips busy slots. */
	context->busy = 0;
	return ret;
}
157 
hns_roce_cmd_mbox_wait(struct hns_roce_dev * hr_dev,struct hns_roce_mbox_msg * mbox_msg)158 static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev,
159 				  struct hns_roce_mbox_msg *mbox_msg)
160 {
161 	int ret;
162 
163 	down(&hr_dev->cmd.event_sem);
164 	ret = __hns_roce_cmd_mbox_wait(hr_dev, mbox_msg);
165 	up(&hr_dev->cmd.event_sem);
166 
167 	return ret;
168 }
169 
/*
 * Execute a mailbox command, dispatching to event (interrupt) or poll mode
 * according to cmd.use_events. If the HW layer can report mailbox
 * availability, a busy mailbox short-circuits to -EBUSY.
 */
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
		      u8 cmd, unsigned long tag)
{
	struct hns_roce_mbox_msg mbox_msg = {};
	bool is_busy;

	if (hr_dev->hw->chk_mbox_avail)
		if (!hr_dev->hw->chk_mbox_avail(hr_dev, &is_busy))
			return is_busy ? -EBUSY : 0;

	mbox_msg.in_param = in_param;
	mbox_msg.out_param = out_param;
	mbox_msg.cmd = cmd;
	mbox_msg.tag = tag;

	if (!hr_dev->cmd.use_events) {
		/* Poll mode carries a fixed token; no event will match it. */
		mbox_msg.event_en = 0;
		mbox_msg.token = CMD_POLL_TOKEN;

		return hns_roce_cmd_mbox_poll(hr_dev, &mbox_msg);
	}

	/* Event mode: the token is assigned per-context in the wait path. */
	mbox_msg.event_en = 1;

	return hns_roce_cmd_mbox_wait(hr_dev, &mbox_msg);
}
196 
hns_roce_cmd_init(struct hns_roce_dev * hr_dev)197 int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
198 {
199 	sema_init(&hr_dev->cmd.poll_sem, 1);
200 	hr_dev->cmd.use_events = 0;
201 	hr_dev->cmd.max_cmds = CMD_MAX_NUM;
202 	hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", hr_dev->dev,
203 					   HNS_ROCE_MAILBOX_SIZE,
204 					   HNS_ROCE_MAILBOX_SIZE, 0);
205 	if (!hr_dev->cmd.pool)
206 		return -ENOMEM;
207 
208 	return 0;
209 }
210 
hns_roce_cmd_cleanup(struct hns_roce_dev * hr_dev)211 void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev)
212 {
213 	dma_pool_destroy(hr_dev->cmd.pool);
214 }
215 
hns_roce_cmd_use_events(struct hns_roce_dev * hr_dev)216 int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
217 {
218 	struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
219 	int i;
220 
221 	hr_cmd->context =
222 		kcalloc(hr_cmd->max_cmds, sizeof(*hr_cmd->context), GFP_KERNEL);
223 	if (!hr_cmd->context) {
224 		hr_dev->cmd_mod = 0;
225 		return -ENOMEM;
226 	}
227 
228 	for (i = 0; i < hr_cmd->max_cmds; ++i) {
229 		hr_cmd->context[i].token = i;
230 		hr_cmd->context[i].next = i + 1;
231 		init_completion(&hr_cmd->context[i].done);
232 	}
233 	hr_cmd->context[hr_cmd->max_cmds - 1].next = 0;
234 	hr_cmd->free_head = 0;
235 
236 	sema_init(&hr_cmd->event_sem, hr_cmd->max_cmds);
237 	spin_lock_init(&hr_cmd->context_lock);
238 
239 	hr_cmd->use_events = 1;
240 
241 	return 0;
242 }
243 
hns_roce_cmd_use_polling(struct hns_roce_dev * hr_dev)244 void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
245 {
246 	struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
247 
248 	kfree(hr_cmd->context);
249 	hr_cmd->use_events = 0;
250 }
251 
252 struct hns_roce_cmd_mailbox *
hns_roce_alloc_cmd_mailbox(struct hns_roce_dev * hr_dev)253 hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev)
254 {
255 	struct hns_roce_cmd_mailbox *mailbox;
256 
257 	mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
258 	if (!mailbox)
259 		return ERR_PTR(-ENOMEM);
260 
261 	mailbox->buf =
262 		dma_pool_alloc(hr_dev->cmd.pool, GFP_KERNEL, &mailbox->dma);
263 	if (!mailbox->buf) {
264 		kfree(mailbox);
265 		return ERR_PTR(-ENOMEM);
266 	}
267 
268 	return mailbox;
269 }
270 
hns_roce_free_cmd_mailbox(struct hns_roce_dev * hr_dev,struct hns_roce_cmd_mailbox * mailbox)271 void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
272 			       struct hns_roce_cmd_mailbox *mailbox)
273 {
274 	if (!mailbox)
275 		return;
276 
277 	dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma);
278 	kfree(mailbox);
279 }
280 
/*
 * Create a HW context for object @idx; the mailbox DMA address is passed
 * as the command's in_param so HW can read the context payload.
 */
int hns_roce_create_hw_ctx(struct hns_roce_dev *dev,
			   struct hns_roce_cmd_mailbox *mailbox,
			   u8 cmd, unsigned long idx)
{
	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cmd, idx);
}
287 
/* Destroy the HW context for object @idx; no mailbox payload is needed. */
int hns_roce_destroy_hw_ctx(struct hns_roce_dev *dev, u8 cmd, unsigned long idx)
{
	return hns_roce_cmd_mbox(dev, 0, 0, cmd, idx);
}
292