// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include "rxe.h"

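/* single driver-wide workqueue on which all rxe tasks are run; it is
 * allocated WQ_UNBOUND so work items are not pinned to the submitting cpu
 */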
static struct workqueue_struct *rxe_wq;

int rxe_alloc_wq(void)
{
	rxe_wq = alloc_workqueue("rxe_wq", WQ_UNBOUND, WQ_MAX_ACTIVE);
	if (!rxe_wq)
		return -ENOMEM;

	return 0;
}

void rxe_destroy_wq(void)
{
	destroy_workqueue(rxe_wq);
}

/* Check if the task is idle, i.e. not running, not scheduled in the
 * work queue and not draining. If so, reserve a slot in do_task() by
 * moving the state to busy and taking a qp reference to cover the gap
 * from now until the task finishes. The state leaves busy when the
 * work function returns a non-zero value in do_task(). If the state
 * is already busy it is raised to armed to indicate to do_task() that
 * an additional pass should be made over the task.
 * Context: caller should hold task->lock.
 * Returns: true if the state transitioned from idle to busy, else false.
 */
static bool __reserve_if_idle(struct rxe_task *task)
{
	WARN_ON(rxe_read(task->qp) <= 0);

	if (task->state == TASK_STATE_IDLE) {
		rxe_get(task->qp);
		task->state = TASK_STATE_BUSY;
		task->num_sched++;
		return true;
	}

	if (task->state == TASK_STATE_BUSY)
		task->state = TASK_STATE_ARMED;

	return false;
}

/* Check if the task is idle or drained and not currently
 * scheduled in the work queue. This routine is called by
 * rxe_cleanup_task() or rxe_disable_task() to see if the
 * queue has emptied.
 * Context: caller should hold task->lock.
 * Returns: true if done, else false.
 */
static bool __is_done(struct rxe_task *task)
{
	if (work_pending(&task->work))
		return false;

	if (task->state == TASK_STATE_IDLE ||
	    task->state == TASK_STATE_DRAINED) {
		return true;
	}

	return false;
}

/* a locked version of __is_done */
static bool is_done(struct rxe_task *task)
{
	unsigned long flags;
	int done;

	spin_lock_irqsave(&task->lock, flags);
	done = __is_done(task);
	spin_unlock_irqrestore(&task->lock, flags);

	return done;
}

/* do_task is a wrapper for the three tasks (requester,
 * completer, responder) and calls the task's work function
 * in a loop until it returns a non-zero value. It is called
 * indirectly when rxe_sched_task() schedules the task;
 * rxe_sched_task() must first call __reserve_if_idle() to
 * move the task to busy. The task can also be moved to
 * drained or invalid by calls to rxe_cleanup_task() or
 * rxe_disable_task(); in that case tasks which get here are
 * not executed but just flushed. The work functions are
 * designed to check whether there is work to do, do part of
 * it and return zero until all the work has been consumed,
 * at which point they return a non-zero value.
 * The number of consecutive iterations is limited by
 * RXE_MAX_ITERATIONS so one task cannot hold the cpu forever.
 * If the limit is hit and work remains the task is rescheduled.
 */
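/* Summary of the state transitions implemented below:
 *
 *   IDLE -> BUSY         __reserve_if_idle() when the task is scheduled
 *   BUSY -> ARMED        rxe_sched_task() called while the task is running
 *   ARMED -> BUSY        do_task() makes an additional pass
 *   BUSY -> IDLE         do_task() when all work has been consumed
 *   * -> DRAINING        rxe_cleanup_task()/rxe_disable_task() while busy
 *   DRAINING -> DRAINED  do_task() once the remaining work is consumed
 *   * -> INVALID         rxe_cleanup_task() once the task is drained
 */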
static void do_task(struct rxe_task *task)
{
	unsigned int iterations;
	unsigned long flags;
	int resched = 0;
	int cont;
	int ret;

	WARN_ON(rxe_read(task->qp) <= 0);

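	/* if the task has already been drained or invalidated just flush
	 * this scheduled instance: drop the qp reference taken in
	 * __reserve_if_idle() and account it as done
	 */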
	spin_lock_irqsave(&task->lock, flags);
	if (task->state >= TASK_STATE_DRAINED) {
		rxe_put(task->qp);
		task->num_done++;
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&task->lock, flags);

	do {
		iterations = RXE_MAX_ITERATIONS;
		cont = 0;

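		/* run the work function until it reports that no work is
		 * left (non-zero return) or the iteration budget is used up
		 */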
		do {
			ret = task->func(task->qp);
		} while (ret == 0 && iterations-- > 0);

		spin_lock_irqsave(&task->lock, flags);
		/* the work function still has work to do but we ran out
		 * of iterations. Unless we are draining, go idle, yield
		 * the cpu and reschedule the task; while draining keep
		 * going until the remaining work is consumed.
		 */
		if (!ret) {
			if (task->state != TASK_STATE_DRAINING) {
				task->state = TASK_STATE_IDLE;
				resched = 1;
			} else {
				cont = 1;
			}
			goto exit;
		}

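		/* the work function returned non-zero: all outstanding work
		 * has been consumed, resolve the final state under the lock
		 */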
		switch (task->state) {
		case TASK_STATE_BUSY:
			task->state = TASK_STATE_IDLE;
			break;

		/* someone tried to schedule the task while we
		 * were running, keep going
		 */
		case TASK_STATE_ARMED:
			task->state = TASK_STATE_BUSY;
			cont = 1;
			break;

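		/* a cleanup or disable request is waiting for the queue to
		 * drain; mark the task drained so the waiter can continue
		 */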
		case TASK_STATE_DRAINING:
			task->state = TASK_STATE_DRAINED;
			break;

		default:
			WARN_ON(1);
			rxe_dbg_qp(task->qp, "unexpected task state = %d\n",
				   task->state);
			task->state = TASK_STATE_IDLE;
		}

exit:
		if (!cont) {
			task->num_done++;
			if (WARN_ON(task->num_done != task->num_sched))
				rxe_dbg_qp(
					task->qp,
					"%ld tasks scheduled, %ld tasks done\n",
					task->num_sched, task->num_done);
		}
		spin_unlock_irqrestore(&task->lock, flags);
	} while (cont);

	task->ret = ret;

	if (resched)
		rxe_sched_task(task);

	rxe_put(task->qp);
}

/* wrapper around do_task to fix argument for work queue */
static void do_work(struct work_struct *work)
{
	do_task(container_of(work, struct rxe_task, work));
}

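/* Illustrative usage, as seen elsewhere in the driver (e.g. rxe_qp.c):
 * each qp owns one task per work function and schedules it whenever
 * new work may be available, for example
 *
 *	rxe_init_task(&qp->req.task, qp, rxe_requester);
 *	...
 *	rxe_sched_task(&qp->req.task);
 */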
int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
		  int (*func)(struct rxe_qp *))
{
	WARN_ON(rxe_read(qp) <= 0);

	task->qp = qp;
	task->func = func;
	task->state = TASK_STATE_IDLE;
	spin_lock_init(&task->lock);
	INIT_WORK(&task->work, do_work);

	return 0;
}

/* rxe_cleanup_task is only called from rxe_do_qp_cleanup in
 * process context. The qp is already completed with no
 * remaining references. Once the queue is drained the
 * task is moved to invalid and returns. The qp cleanup
 * code then calls the task functions directly without
 * using the task struct to drain any late arriving packets
 * or work requests.
 */
void rxe_cleanup_task(struct rxe_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&task->lock, flags);
	if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
		task->state = TASK_STATE_DRAINING;
	} else {
		task->state = TASK_STATE_INVALID;
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&task->lock, flags);

	/* now the task cannot be scheduled or run; just wait
	 * for the previously scheduled instances to finish
	 */
	while (!is_done(task))
		cond_resched();

	spin_lock_irqsave(&task->lock, flags);
	task->state = TASK_STATE_INVALID;
	spin_unlock_irqrestore(&task->lock, flags);
}

/* schedule the task to run later as a work queue entry.
 * queue_work() does not sleep so it is safe to call it
 * while holding task->lock.
 */
void rxe_sched_task(struct rxe_task *task)
{
	unsigned long flags;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	if (__reserve_if_idle(task))
		queue_work(rxe_wq, &task->work);
	spin_unlock_irqrestore(&task->lock, flags);
}

/* rxe_disable/enable_task are only called from
 * rxe_modify_qp in process context. The task is moved
 * to the drained state by do_task().
 */
void rxe_disable_task(struct rxe_task *task)
{
	unsigned long flags;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
		task->state = TASK_STATE_DRAINING;
	} else {
		task->state = TASK_STATE_DRAINED;
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&task->lock, flags);

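	/* wait for any already scheduled instances of the task to finish */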
	while (!is_done(task))
		cond_resched();

	spin_lock_irqsave(&task->lock, flags);
	task->state = TASK_STATE_DRAINED;
	spin_unlock_irqrestore(&task->lock, flags);
}

void rxe_enable_task(struct rxe_task *task)
{
	unsigned long flags;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	if (task->state == TASK_STATE_INVALID) {
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}

	task->state = TASK_STATE_IDLE;
	spin_unlock_irqrestore(&task->lock, flags);
}
300