// SPDX-License-Identifier: GPL-2.0
/*
 * MMC software queue support based on command queue interfaces
 *
 * Copyright (C) 2019 Linaro, Inc.
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/module.h>

#include "mmc_hsq.h"

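/*
 * Work handler used to re-issue the current request from non-atomic context
 * via ->request(), scheduled when ->request_atomic() returned -EBUSY.
 */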
static void mmc_hsq_retry_handler(struct work_struct *work)
{
	struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
	struct mmc_host *mmc = hsq->mmc;

	mmc->ops->request(mmc, hsq->mrq);
}

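/*
 * Tune the queue depth for the current workload: start from HSQ_NORMAL_DEPTH
 * and switch to HSQ_PERFORMANCE_DEPTH when at least two of the queued
 * requests are 4KB writes.
 */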
static void mmc_hsq_modify_threshold(struct mmc_hsq *hsq)
{
	struct mmc_host *mmc = hsq->mmc;
	struct mmc_request *mrq;
	unsigned int tag, need_change = 0;

	mmc->hsq_depth = HSQ_NORMAL_DEPTH;
	for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
		mrq = hsq->slot[tag].mrq;
		if (mrq && mrq->data &&
		   (mrq->data->blksz * mrq->data->blocks == 4096) &&
		   (mrq->data->flags & MMC_DATA_WRITE) &&
		   (++need_change == 2)) {
			mmc->hsq_depth = HSQ_PERFORMANCE_DEPTH;
			break;
		}
	}
}

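/*
 * Dispatch the next queued request to the host controller, unless a request
 * is already in flight, the queue is empty or disabled, or recovery is in
 * progress.
 */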
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
	struct mmc_host *mmc = hsq->mmc;
	struct hsq_slot *slot;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hsq->lock, flags);

	/* Make sure we are not already running a request. */
	if (hsq->mrq || hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	/* Make sure there are remaining requests that need to be pumped. */
	if (!hsq->qcnt || !hsq->enabled) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	mmc_hsq_modify_threshold(hsq);

	slot = &hsq->slot[hsq->next_tag];
	hsq->mrq = slot->mrq;
	hsq->qcnt--;

	spin_unlock_irqrestore(&hsq->lock, flags);

	if (mmc->ops->request_atomic)
		ret = mmc->ops->request_atomic(mmc, hsq->mrq);
	else
		mmc->ops->request(mmc, hsq->mrq);

	/*
	 * If request_atomic() returns -EBUSY, the card may be busy now, so
	 * retry in non-atomic context for this unusual case, to avoid
	 * time-consuming operations in the atomic context.
	 *
	 * Note: we just give a warning for other error cases, since the host
	 * driver will handle them.
	 */
	if (ret == -EBUSY)
		schedule_work(&hsq->retry_work);
	else
		WARN_ON_ONCE(ret);
}

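/*
 * Advance hsq->next_tag by following the tag_slot[] chain; if nothing remains
 * in the software queue, mark both next_tag and tail_tag invalid.
 */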
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
	int tag;

	/*
	 * If there are no remaining requests in the software queue, then set
	 * an invalid tag.
	 */
	if (!remains) {
		hsq->next_tag = HSQ_INVALID_TAG;
		hsq->tail_tag = HSQ_INVALID_TAG;
		return;
	}

	tag = hsq->tag_slot[hsq->next_tag];
	hsq->tag_slot[hsq->next_tag] = HSQ_INVALID_TAG;
	hsq->next_tag = tag;
}

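/*
 * Clean up after a completed request: clear the in-flight request, pick the
 * next tag to dispatch, wake up anyone waiting for the queue to go idle, and
 * pump the next request unless recovery is in progress.
 */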
static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
	unsigned long flags;
	int remains;

	spin_lock_irqsave(&hsq->lock, flags);

	remains = hsq->qcnt;
	hsq->mrq = NULL;

	/* Update the next available tag to be queued. */
	mmc_hsq_update_next_tag(hsq, remains);

	if (hsq->waiting_for_idle && !remains) {
		hsq->waiting_for_idle = false;
		wake_up(&hsq->wait_queue);
	}

	/* Do not pump new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&hsq->lock, flags);

	/*
	 * Try to pump a new request to the host controller as fast as
	 * possible after completing the previous request.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

/**
 * mmc_hsq_finalize_request - finalize one request if the request is done
 * @mmc: the host controller
 * @mrq: the request that needs to be finalized
 *
 * Return true if we finalized the corresponding request in the software
 * queue, otherwise return false.
 */
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return false;
	}

	/*
	 * Clear the completed slot request to make room for a new request.
	 */
	hsq->slot[hsq->next_tag].mrq = NULL;

	spin_unlock_irqrestore(&hsq->lock, flags);

	mmc_cqe_request_done(mmc, hsq->mrq);

	mmc_hsq_post_request(hsq);

	return true;
}
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);

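/*
 * .cqe_recovery_start callback: halt request pumping while the core performs
 * CQE recovery.
 */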
static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	hsq->recovery_halt = true;

	spin_unlock_irqrestore(&hsq->lock, flags);
}

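/*
 * .cqe_recovery_finish callback: leave recovery mode and restart pumping if
 * requests are still queued.
 */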
static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int remains;

	spin_lock_irq(&hsq->lock);

	hsq->recovery_halt = false;
	remains = hsq->qcnt;

	spin_unlock_irq(&hsq->lock);

	/*
	 * Try to pump new requests if there are requests pending in the
	 * software queue after finishing recovery.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

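/*
 * .cqe_request callback: store the request in the slot indexed by its tag,
 * append the tag to the dispatch chain, and try to pump it immediately.
 */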
static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int tag = mrq->tag;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -ESHUTDOWN;
	}

	/* Do not queue any new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->slot[tag].mrq = mrq;

	/*
	 * Set the next tag to the current request tag if there is no
	 * available next tag.
	 */
	if (hsq->next_tag == HSQ_INVALID_TAG) {
		hsq->next_tag = tag;
		hsq->tail_tag = tag;
		hsq->tag_slot[hsq->tail_tag] = HSQ_INVALID_TAG;
	} else {
		hsq->tag_slot[hsq->tail_tag] = tag;
		hsq->tail_tag = tag;
	}

	hsq->qcnt++;

	spin_unlock_irq(&hsq->lock);

	mmc_hsq_pump_requests(hsq);

	return 0;
}

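/*
 * .cqe_post_req callback: forward post-processing of a completed request to
 * the host driver, if it implements ->post_req().
 */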
static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	if (mmc->ops->post_req)
		mmc->ops->post_req(mmc, mrq, 0);
}

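/*
 * Report whether the software queue is idle (no in-flight request and nothing
 * queued) or halted for recovery, set *ret accordingly, and record whether a
 * caller is still waiting for the queue to become idle.
 */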
static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
	bool is_idle;

	spin_lock_irq(&hsq->lock);

	is_idle = (!hsq->mrq && !hsq->qcnt) ||
		hsq->recovery_halt;

	*ret = hsq->recovery_halt ? -EBUSY : 0;
	hsq->waiting_for_idle = !is_idle;

	spin_unlock_irq(&hsq->lock);

	return is_idle;
}

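/* .cqe_wait_for_idle callback: block until the software queue has drained. */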
static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int ret;

	wait_event(hsq->wait_queue,
		   mmc_hsq_queue_is_idle(hsq, &ret));

	return ret;
}

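/*
 * .cqe_disable callback: wait up to 500ms for the software queue to drain
 * before marking it disabled.
 */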
static void mmc_hsq_disable(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	u32 timeout = 500;
	int ret;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return;
	}

	spin_unlock_irq(&hsq->lock);

	ret = wait_event_timeout(hsq->wait_queue,
				 mmc_hsq_queue_is_idle(hsq, &ret),
				 msecs_to_jiffies(timeout));
	if (ret == 0) {
		pr_warn("could not stop mmc software queue\n");
		return;
	}

	spin_lock_irq(&hsq->lock);

	hsq->enabled = false;

	spin_unlock_irq(&hsq->lock);
}

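/*
 * .cqe_enable callback: mark the software queue enabled; return -EBUSY if it
 * is already enabled.
 */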
static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct mmc_hsq *hsq = mmc->cqe_private;

	spin_lock_irq(&hsq->lock);

	if (hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->enabled = true;

	spin_unlock_irq(&hsq->lock);

	return 0;
}

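/* Plug the software queue into the generic CQE callback interface. */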
static const struct mmc_cqe_ops mmc_hsq_ops = {
	.cqe_enable = mmc_hsq_enable,
	.cqe_disable = mmc_hsq_disable,
	.cqe_request = mmc_hsq_request,
	.cqe_post_req = mmc_hsq_post_req,
	.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
	.cqe_recovery_start = mmc_hsq_recovery_start,
	.cqe_recovery_finish = mmc_hsq_recovery_finish,
};

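/*
 * Initialize the software queue: allocate the request slots, reset the tag
 * chain, set up the retry work, lock and wait queue, and register the hsq
 * CQE ops with the host.
 */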
int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
	int i;

	hsq->num_slots = HSQ_NUM_SLOTS;
	hsq->next_tag = HSQ_INVALID_TAG;
	hsq->tail_tag = HSQ_INVALID_TAG;

	hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
				 sizeof(struct hsq_slot), GFP_KERNEL);
	if (!hsq->slot)
		return -ENOMEM;

	hsq->mmc = mmc;
	hsq->mmc->cqe_private = hsq;
	mmc->cqe_ops = &mmc_hsq_ops;
	mmc->hsq_depth = HSQ_NORMAL_DEPTH;

	for (i = 0; i < HSQ_NUM_SLOTS; i++)
		hsq->tag_slot[i] = HSQ_INVALID_TAG;

	INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
	spin_lock_init(&hsq->lock);
	init_waitqueue_head(&hsq->wait_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);

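/*
 * Illustrative usage sketch (the function name my_hsq_probe is hypothetical,
 * not taken from a specific driver): a host driver allocates a struct
 * mmc_hsq, calls mmc_hsq_init() at probe time, reports completed requests
 * through mmc_hsq_finalize_request(), and pairs mmc_hsq_suspend() with
 * mmc_hsq_resume() in its PM callbacks, roughly as follows:
 *
 *	static int my_hsq_probe(struct mmc_host *mmc)
 *	{
 *		struct mmc_hsq *hsq;
 *
 *		hsq = devm_kzalloc(mmc_dev(mmc), sizeof(*hsq), GFP_KERNEL);
 *		if (!hsq)
 *			return -ENOMEM;
 *
 *		return mmc_hsq_init(hsq, mmc);
 *	}
 */
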
void mmc_hsq_suspend(struct mmc_host *mmc)
{
	mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);

int mmc_hsq_resume(struct mmc_host *mmc)
{
	return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);

MODULE_DESCRIPTION("MMC Host Software Queue support");
MODULE_LICENSE("GPL v2");