// SPDX-License-Identifier: GPL-2.0+
/*
 * SSH request transport layer.
 *
 * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
 */

#include <linux/unaligned.h>
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/error-injection.h>
#include <linux/ktime.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <linux/surface_aggregator/serial_hub.h>
#include <linux/surface_aggregator/controller.h>

#include "ssh_packet_layer.h"
#include "ssh_request_layer.h"

#include "trace.h"

/*
 * SSH_RTL_REQUEST_TIMEOUT - Request timeout.
 *
 * Timeout as ktime_t delta for request responses. If we have not received a
 * response in this time-frame after finishing the underlying packet
 * transmission, the request will be completed with %-ETIMEDOUT as status
 * code.
 */
#define SSH_RTL_REQUEST_TIMEOUT			ms_to_ktime(3000)

/*
 * SSH_RTL_REQUEST_TIMEOUT_RESOLUTION - Request timeout granularity.
 *
 * Time-resolution for timeouts. Should be larger than one jiffy to avoid
 * direct re-scheduling of reaper work_struct.
 */
#define SSH_RTL_REQUEST_TIMEOUT_RESOLUTION	ms_to_ktime(max(2000 / HZ, 50))

/*
 * SSH_RTL_MAX_PENDING - Maximum number of pending requests.
 *
 * Maximum number of requests concurrently waiting to be completed (i.e.
 * waiting for the corresponding packet transmission to finish if they don't
 * have a response or waiting for a response if they have one).
 */
#define SSH_RTL_MAX_PENDING		3

/*
 * SSH_RTL_TX_BATCH - Maximum number of requests processed per work execution.
 * Used to prevent livelocking of the workqueue. Value chosen via educated
 * guess, may be adjusted.
 */
#define SSH_RTL_TX_BATCH		10

#ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION

/**
 * ssh_rtl_should_drop_response() - Error injection hook to drop request
 * responses.
 *
 * Useful to cause request transmission timeouts in the driver by dropping the
 * response to a request.
 */
static noinline bool ssh_rtl_should_drop_response(void)
{
	return false;
}
ALLOW_ERROR_INJECTION(ssh_rtl_should_drop_response, TRUE);

#else

static inline bool ssh_rtl_should_drop_response(void)
{
	return false;
}

#endif

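/*
 * Read the request ID (RQID) from the command payload of the request's
 * underlying packet. Only valid once the message buffer has been set.
 */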
static u16 ssh_request_get_rqid(struct ssh_request *rqst)
{
	return get_unaligned_le16(rqst->packet.data.ptr
				  + SSH_MSGOFFSET_COMMAND(rqid));
}

static u32 ssh_request_get_rqid_safe(struct ssh_request *rqst)
{
	if (!rqst->packet.data.ptr)
		return U32_MAX;

	return ssh_request_get_rqid(rqst);
}

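/*
 * Remove the request from the submission queue, if it is still queued, and
 * drop the reference held by the queue.
 */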
static void ssh_rtl_queue_remove(struct ssh_request *rqst)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);

	spin_lock(&rtl->queue.lock);

	if (!test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state)) {
		spin_unlock(&rtl->queue.lock);
		return;
	}

	list_del(&rqst->node);

	spin_unlock(&rtl->queue.lock);
	ssh_request_put(rqst);
}

static bool ssh_rtl_queue_empty(struct ssh_rtl *rtl)
{
	bool empty;

	spin_lock(&rtl->queue.lock);
	empty = list_empty(&rtl->queue.head);
	spin_unlock(&rtl->queue.lock);

	return empty;
}

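/*
 * Remove the request from the pending set, if it is still pending, decrement
 * the pending counter, and drop the reference held by the pending set.
 */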
static void ssh_rtl_pending_remove(struct ssh_request *rqst)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);

	spin_lock(&rtl->pending.lock);

	if (!test_and_clear_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
		spin_unlock(&rtl->pending.lock);
		return;
	}

	atomic_dec(&rtl->pending.count);
	list_del(&rqst->node);

	spin_unlock(&rtl->pending.lock);

	ssh_request_put(rqst);
}

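/*
 * Mark the request as pending and add it to the pending set, taking a new
 * reference for the set. Fails with -EINVAL if the request has been locked
 * (canceled) and with -EALREADY if it is already pending.
 */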
static int ssh_rtl_tx_pending_push(struct ssh_request *rqst)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);

	spin_lock(&rtl->pending.lock);

	if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
		spin_unlock(&rtl->pending.lock);
		return -EINVAL;
	}

	if (test_and_set_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
		spin_unlock(&rtl->pending.lock);
		return -EALREADY;
	}

	atomic_inc(&rtl->pending.count);
	list_add_tail(&ssh_request_get(rqst)->node, &rtl->pending.head);

	spin_unlock(&rtl->pending.lock);
	return 0;
}

static void ssh_rtl_complete_with_status(struct ssh_request *rqst, int status)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);

	trace_ssam_request_complete(rqst, status);

	/* rtl/ptl may not be set if we're canceling before submitting. */
	rtl_dbg_cond(rtl, "rtl: completing request (rqid: %#06x, status: %d)\n",
		     ssh_request_get_rqid_safe(rqst), status);

	rqst->ops->complete(rqst, NULL, NULL, status);
}

static void ssh_rtl_complete_with_rsp(struct ssh_request *rqst,
				      const struct ssh_command *cmd,
				      const struct ssam_span *data)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);

	trace_ssam_request_complete(rqst, 0);

	rtl_dbg(rtl, "rtl: completing request with response (rqid: %#06x)\n",
		ssh_request_get_rqid(rqst));

	rqst->ops->complete(rqst, cmd, data, 0);
}

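/*
 * Check whether the given request may be transmitted now: flush requests
 * require an empty pending set, all other requests are limited by
 * SSH_RTL_MAX_PENDING.
 */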
static bool ssh_rtl_tx_can_process(struct ssh_request *rqst)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);

	if (test_bit(SSH_REQUEST_TY_FLUSH_BIT, &rqst->state))
		return !atomic_read(&rtl->pending.count);

	return atomic_read(&rtl->pending.count) < SSH_RTL_MAX_PENDING;
}

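/*
 * Fetch the next transmittable request from the queue, mark it as
 * transmitting, and remove it from the queue. Returns an ERR_PTR-encoded
 * -ENOENT if the queue holds no viable request and -EBUSY if the pending
 * limit currently blocks processing.
 */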
static struct ssh_request *ssh_rtl_tx_next(struct ssh_rtl *rtl)
{
	struct ssh_request *rqst = ERR_PTR(-ENOENT);
	struct ssh_request *p, *n;

	spin_lock(&rtl->queue.lock);

	/* Find first non-locked request and remove it. */
	list_for_each_entry_safe(p, n, &rtl->queue.head, node) {
		if (unlikely(test_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state)))
			continue;

		if (!ssh_rtl_tx_can_process(p)) {
			rqst = ERR_PTR(-EBUSY);
			break;
		}

		/* Remove from queue and mark as transmitting. */
		set_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &p->state);
		/* Ensure state never gets zero. */
		smp_mb__before_atomic();
		clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &p->state);

		list_del(&p->node);

		rqst = p;
		break;
	}

	spin_unlock(&rtl->queue.lock);
	return rqst;
}

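/*
 * Take one request from the queue, add it to the pending set, and submit its
 * packet to the packet transport layer. Returns -ENOENT/-EBUSY if nothing
 * can be processed, -EAGAIN if this request was skipped, -ESHUTDOWN if the
 * packet layer has been shut down, and zero on success.
 */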
static int ssh_rtl_tx_try_process_one(struct ssh_rtl *rtl)
{
	struct ssh_request *rqst;
	int status;

	/* Get and prepare next request for transmit. */
	rqst = ssh_rtl_tx_next(rtl);
	if (IS_ERR(rqst))
		return PTR_ERR(rqst);

	/* Add it to/mark it as pending. */
	status = ssh_rtl_tx_pending_push(rqst);
	if (status) {
		ssh_request_put(rqst);
		return -EAGAIN;
	}

	/* Submit packet. */
	status = ssh_ptl_submit(&rtl->ptl, &rqst->packet);
	if (status == -ESHUTDOWN) {
		/*
		 * Packet has been refused due to the packet layer shutting
		 * down. Complete it here.
		 */
		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state);
		/*
		 * Note: A barrier is not required here, as there are only two
		 * references in the system at this point: The one that we have,
		 * and the other one that belongs to the pending set. Due to the
		 * request being marked as "transmitting", our process is the
		 * only one allowed to remove the pending node and change the
		 * state. Normally, the task would fall to the packet callback,
		 * but as this is a path where submission failed, this callback
		 * will never be executed.
		 */

		ssh_rtl_pending_remove(rqst);
		ssh_rtl_complete_with_status(rqst, -ESHUTDOWN);

		ssh_request_put(rqst);
		return -ESHUTDOWN;

	} else if (status) {
		/*
		 * If submitting the packet failed and the packet layer isn't
		 * shutting down, the packet has either been submitted/queued
		 * before (-EALREADY, which cannot happen as we have
		 * guaranteed that requests cannot be re-submitted), or the
		 * packet was marked as locked (-EINVAL). To mark the packet
		 * locked at this stage, the request, and thus the packet
		 * itself, had to have been canceled. Simply drop the
		 * reference. Cancellation itself will remove it from the set
		 * of pending requests.
		 */

		WARN_ON(status != -EINVAL);

		ssh_request_put(rqst);
		return -EAGAIN;
	}

	ssh_request_put(rqst);
	return 0;
}

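/*
 * Schedule the transmission work item, but only if there is something to
 * transmit and the pending limit has not been reached yet.
 */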
static bool ssh_rtl_tx_schedule(struct ssh_rtl *rtl)
{
	if (atomic_read(&rtl->pending.count) >= SSH_RTL_MAX_PENDING)
		return false;

	if (ssh_rtl_queue_empty(rtl))
		return false;

	return schedule_work(&rtl->tx.work);
}

static void ssh_rtl_tx_work_fn(struct work_struct *work)
{
	struct ssh_rtl *rtl = to_ssh_rtl(work, tx.work);
	unsigned int iterations = SSH_RTL_TX_BATCH;
	int status;

	/*
	 * Try to be nice and not block/live-lock the workqueue: Run a maximum
	 * of 10 tries, then re-submit if necessary. This should not be
	 * necessary for normal execution, but guarantee it anyway.
	 */
	do {
		status = ssh_rtl_tx_try_process_one(rtl);
		if (status == -ENOENT || status == -EBUSY)
			return;		/* No more requests to process. */

		if (status == -ESHUTDOWN) {
			/*
			 * Packet system shutting down. No new packets can be
			 * transmitted. Return silently, the party initiating
			 * the shutdown should handle the rest.
			 */
			return;
		}

		WARN_ON(status != 0 && status != -EAGAIN);
	} while (--iterations);

	/* Out of tries, reschedule. */
	ssh_rtl_tx_schedule(rtl);
}

/**
 * ssh_rtl_submit() - Submit a request to the transport layer.
 * @rtl:  The request transport layer.
 * @rqst: The request to submit.
 *
 * Submits a request to the transport layer. A single request may not be
 * submitted multiple times without reinitializing it.
 *
 * Return: Returns zero on success, %-EINVAL if the request type is invalid or
 * the request has been canceled prior to submission, %-EALREADY if the
 * request has already been submitted, or %-ESHUTDOWN in case the request
 * transport layer has been shut down.
 */
int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst)
{
	trace_ssam_request_submit(rqst);

	/*
	 * Ensure that requests expecting a response are sequenced. If this
	 * invariant ever changes, see the comment in ssh_rtl_complete() on what
	 * is required to be changed in the code.
	 */
	if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &rqst->state))
		if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &rqst->packet.state))
			return -EINVAL;

	spin_lock(&rtl->queue.lock);

	/*
	 * Try to set ptl and check if this request has already been submitted.
	 *
	 * Must be inside lock as we might run into a lost update problem
	 * otherwise: If this were outside of the lock, cancellation in
	 * ssh_rtl_cancel_nonpending() may run after we've set the ptl
	 * reference but before we enter the lock. In that case, we'd detect
	 * that the request is being added to the queue and would try to remove
	 * it from that, but removal might fail because it hasn't actually been
	 * added yet. By putting this cmpxchg in the critical section, we
	 * ensure that the queuing detection only triggers when we are already
	 * in the critical section and the remove process will wait until the
	 * push operation has been completed (via lock) due to that. Only then,
	 * we can safely try to remove it.
	 */
	if (cmpxchg(&rqst->packet.ptl, NULL, &rtl->ptl)) {
		spin_unlock(&rtl->queue.lock);
		return -EALREADY;
	}

	/*
	 * Ensure that we set ptl reference before we continue modifying state.
	 * This is required for non-pending cancellation. This barrier is paired
	 * with the one in ssh_rtl_cancel_nonpending().
	 *
	 * By setting the ptl reference before we test for "locked", we can
	 * check if the "locked" test may have already run. See comments in
	 * ssh_rtl_cancel_nonpending() for more detail.
	 */
	smp_mb__after_atomic();

	if (test_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state)) {
		spin_unlock(&rtl->queue.lock);
		return -ESHUTDOWN;
	}

	if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
		spin_unlock(&rtl->queue.lock);
		return -EINVAL;
	}

	set_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state);
	list_add_tail(&ssh_request_get(rqst)->node, &rtl->queue.head);

	spin_unlock(&rtl->queue.lock);

	ssh_rtl_tx_schedule(rtl);
	return 0;
}

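/*
 * Reschedule the timeout reaper to run at the given expiration time, but
 * only if that is more than one resolution step earlier than the currently
 * scheduled run.
 */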
static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now,
				       ktime_t expires)
{
	unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
	ktime_t aexp = ktime_add(expires, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION);

	spin_lock(&rtl->rtx_timeout.lock);

	/* Re-adjust / schedule reaper only if it is above resolution delta. */
	if (ktime_before(aexp, rtl->rtx_timeout.expires)) {
		rtl->rtx_timeout.expires = expires;
		mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta);
	}

	spin_unlock(&rtl->rtx_timeout.lock);
}

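/*
 * Record the transmission timestamp of the request and arm the timeout
 * reaper accordingly. Skipped for locked (canceled) requests.
 */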
static void ssh_rtl_timeout_start(struct ssh_request *rqst)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
	ktime_t timestamp = ktime_get_coarse_boottime();
	ktime_t timeout = rtl->rtx_timeout.timeout;

	if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state))
		return;

	/*
	 * Note: The timestamp gets set only once. This happens on the packet
	 * callback. All other access to it is read-only.
	 */
	WRITE_ONCE(rqst->timestamp, timestamp);
	/*
	 * Ensure timestamp is set before starting the reaper. Paired with
	 * implicit barrier following check on ssh_request_get_expiration() in
	 * ssh_rtl_timeout_reap().
	 */
	smp_mb__after_atomic();

	ssh_rtl_timeout_reaper_mod(rtl, timestamp, timestamp + timeout);
}

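/*
 * Handle a received command response: look up the matching request in the
 * pending set by its RQID, remove it, and complete it with the response
 * payload.
 */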
static void ssh_rtl_complete(struct ssh_rtl *rtl,
			     const struct ssh_command *command,
			     const struct ssam_span *command_data)
{
	struct ssh_request *r = NULL;
	struct ssh_request *p, *n;
	u16 rqid = get_unaligned_le16(&command->rqid);

	trace_ssam_rx_response_received(command, command_data->len);

	/*
	 * Get request from pending based on request ID and mark it as response
	 * received and locked.
	 */
	spin_lock(&rtl->pending.lock);
	list_for_each_entry_safe(p, n, &rtl->pending.head, node) {
		/* We generally expect requests to be processed in order. */
		if (unlikely(ssh_request_get_rqid(p) != rqid))
			continue;

		/* Simulate response timeout. */
		if (ssh_rtl_should_drop_response()) {
			spin_unlock(&rtl->pending.lock);

			trace_ssam_ei_rx_drop_response(p);
			rtl_info(rtl, "request error injection: dropping response for request %p\n",
				 &p->packet);
			return;
		}

		/*
		 * Mark as "response received" and "locked" as we're going to
		 * complete it.
		 */
		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state);
		set_bit(SSH_REQUEST_SF_RSPRCVD_BIT, &p->state);
		/* Ensure state never gets zero. */
		smp_mb__before_atomic();
		clear_bit(SSH_REQUEST_SF_PENDING_BIT, &p->state);

		atomic_dec(&rtl->pending.count);
		list_del(&p->node);

		r = p;
		break;
	}
	spin_unlock(&rtl->pending.lock);

	if (!r) {
		rtl_warn(rtl, "rtl: dropping unexpected command message (rqid = %#06x)\n",
			 rqid);
		return;
	}

	/* If the request hasn't been completed yet, we will do this now. */
	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) {
		ssh_request_put(r);
		ssh_rtl_tx_schedule(rtl);
		return;
	}

	/*
	 * Make sure the request has been transmitted. In case of a sequenced
	 * request, we are guaranteed that the completion callback will run on
	 * the receiver thread directly when the ACK for the packet has been
	 * received. Similarly, this function is guaranteed to run on the
	 * receiver thread. Thus we are guaranteed that if the packet has been
	 * successfully transmitted and received an ACK, the transmitted flag
	 * has been set and is visible here.
	 *
	 * We are currently not handling unsequenced packets here, as those
	 * should never expect a response as ensured in ssh_rtl_submit(). If
	 * this ever changes, one would have to test for
	 *
	 *	(r->state & (transmitting | transmitted))
	 *
	 * on unsequenced packets to determine if they could have been
	 * transmitted. There are no synchronization guarantees as in the
	 * sequenced case, since, in this case, the callback function will not
	 * run on the same thread. Thus an exact determination is impossible.
	 */
	if (!test_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state)) {
		rtl_err(rtl, "rtl: received response before ACK for request (rqid = %#06x)\n",
			rqid);

		/*
		 * NB: Timeout has already been canceled, the request has
		 * already been removed from pending and marked as locked and
		 * completed. As we receive a "false" response, the packet
		 * might still be queued though.
		 */
		ssh_rtl_queue_remove(r);

		ssh_rtl_complete_with_status(r, -EREMOTEIO);
		ssh_request_put(r);

		ssh_rtl_tx_schedule(rtl);
		return;
	}

	/*
	 * NB: Timeout has already been canceled, the request has already been
	 * removed from pending and marked as locked and completed. The request
	 * can also not be queued any more, as it has been marked as
	 * transmitting and later transmitted. Thus no need to remove it from
	 * anywhere.
	 */

	ssh_rtl_complete_with_rsp(r, command, command_data);
	ssh_request_put(r);

	ssh_rtl_tx_schedule(rtl);
}

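/*
 * Cancel a request that is not in the pending set, i.e. one that is either
 * not submitted yet or still sitting on the submission queue. Returns %true
 * if the request has been canceled or was already completed, %false if it
 * was no longer on the queue and thus has not been canceled by this call.
 */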
static bool ssh_rtl_cancel_nonpending(struct ssh_request *r)
{
	struct ssh_rtl *rtl;
	unsigned long flags, fixed;
	bool remove;

	/*
	 * Handle unsubmitted request: Try to mark the packet as locked,
	 * expecting the state to be zero (i.e. unsubmitted). Note that, if
	 * setting the state worked, we might still be adding the packet to the
	 * queue in a currently executing submit call. In that case, however,
	 * ptl reference must have been set previously, as locked is checked
	 * after setting ptl. Furthermore, when the ptl reference is set, the
	 * submission process is guaranteed to have entered the critical
	 * section. Thus only if we successfully locked this request and ptl is
	 * NULL, we have successfully removed the request, i.e. we are
	 * guaranteed that, due to the "locked" check in ssh_rtl_submit(), the
	 * packet will never be added. Otherwise, we need to try and grab it
	 * from the queue, where we are now guaranteed that the packet is, or
	 * has been, queued due to the critical section.
	 *
	 * Note that if the cmpxchg() fails, we are guaranteed that ptl has
	 * been set and is non-NULL, as states can only be nonzero after this
	 * has been set. Also note that we need to fetch the static (type)
	 * flags to ensure that they don't cause the cmpxchg() to fail.
	 */
	fixed = READ_ONCE(r->state) & SSH_REQUEST_FLAGS_TY_MASK;
	flags = cmpxchg(&r->state, fixed, SSH_REQUEST_SF_LOCKED_BIT);

	/*
	 * Force correct ordering with regards to state and ptl reference access
	 * to safeguard cancellation against a lost-update problem with
	 * concurrent submission. First try to exchange state, then also check
	 * ptl if that worked. This barrier is paired with the
	 * one in ssh_rtl_submit().
	 */
	smp_mb__after_atomic();

	if (flags == fixed && !READ_ONCE(r->packet.ptl)) {
		if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
			return true;

		ssh_rtl_complete_with_status(r, -ECANCELED);
		return true;
	}

	rtl = ssh_request_rtl(r);
	spin_lock(&rtl->queue.lock);

	/*
	 * Note: 1) Requests cannot be re-submitted. 2) If a request is
	 * queued, it cannot be "transmitting"/"pending" yet. Thus, if we
	 * successfully remove the request here, we have removed all its
	 * occurrences in the system.
	 */

	remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
	if (!remove) {
		spin_unlock(&rtl->queue.lock);
		return false;
	}

	set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
	list_del(&r->node);

	spin_unlock(&rtl->queue.lock);

	ssh_request_put(r);	/* Drop reference obtained from queue. */

	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
		return true;

	ssh_rtl_complete_with_status(r, -ECANCELED);
	return true;
}

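/*
 * Cancel a request regardless of its current state. If necessary, the
 * underlying packet is canceled as well; the request is then completed
 * either here or through the packet completion callback.
 */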
static bool ssh_rtl_cancel_pending(struct ssh_request *r)
{
	/* If the packet is already locked, it's going to be removed shortly. */
	if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
		return true;

	/*
	 * Now that we have locked the packet, we have guaranteed that it can't
	 * be added to the system any more. If ptl is NULL, the locked
	 * check in ssh_rtl_submit() has not been run and any submission,
	 * currently in progress or called later, won't add the packet. Thus we
	 * can directly complete it.
	 *
	 * The implicit memory barrier of test_and_set_bit() should be enough
	 * to ensure that the correct order (first lock, then check ptl) is
	 * ensured. This is paired with the barrier in ssh_rtl_submit().
	 */
	if (!READ_ONCE(r->packet.ptl)) {
		if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
			return true;

		ssh_rtl_complete_with_status(r, -ECANCELED);
		return true;
	}

	/*
	 * Try to cancel the packet. If the packet has not been completed yet,
	 * this will subsequently (and synchronously) call the completion
	 * callback of the packet, which will complete the request.
	 */
	ssh_ptl_cancel(&r->packet);

	/*
	 * If the packet has been completed with success, i.e. has not been
	 * canceled by the above call, the request may not have been completed
	 * yet (may be waiting for a response). Check if we need to do this
	 * here.
	 */
	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
		return true;

	ssh_rtl_queue_remove(r);
	ssh_rtl_pending_remove(r);
	ssh_rtl_complete_with_status(r, -ECANCELED);

	return true;
}

/**
 * ssh_rtl_cancel() - Cancel request.
 * @rqst:    The request to cancel.
 * @pending: Whether to also cancel pending requests.
 *
 * Cancels the given request. If @pending is %false, this will not cancel
 * pending requests, i.e. requests that have already been submitted to the
 * packet layer but not been completed yet. If @pending is %true, this will
 * cancel the given request regardless of the state it is in.
 *
 * If the request has been canceled by calling this function, both completion
 * and release callbacks of the request will be executed in a reasonable
 * time-frame. This may happen during execution of this function, however,
 * there is no guarantee for this. For example, a request currently
 * transmitting will be canceled/completed only after transmission has
 * completed, and the respective callbacks will be executed on the transmitter
 * thread, which may happen during, but also some time after execution of the
 * cancel function.
 *
 * Return: Returns %true if the given request has been canceled or completed,
 * either by this function or prior to calling this function, %false
 * otherwise. If @pending is %true, this function will always return %true.
 */
bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending)
{
	struct ssh_rtl *rtl;
	bool canceled;

	if (test_and_set_bit(SSH_REQUEST_SF_CANCELED_BIT, &rqst->state))
		return true;

	trace_ssam_request_cancel(rqst);

	if (pending)
		canceled = ssh_rtl_cancel_pending(rqst);
	else
		canceled = ssh_rtl_cancel_nonpending(rqst);

	/* Note: rtl may be NULL if request has not been submitted yet. */
	rtl = ssh_request_rtl(rqst);
	if (canceled && rtl)
		ssh_rtl_tx_schedule(rtl);

	return canceled;
}

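/*
 * Packet completion callback: on error, lock and complete the request with
 * the packet status; on success, mark it as transmitted and either start the
 * response timeout or, for requests without a response, complete it directly.
 */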
static void ssh_rtl_packet_callback(struct ssh_packet *p, int status)
{
	struct ssh_request *r = to_ssh_request(p);

	if (unlikely(status)) {
		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);

		if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
			return;

		/*
		 * The packet may get canceled even though it has not been
		 * submitted yet. The request may still be queued. Check the
		 * queue and remove it if necessary. As the timeout would have
		 * been started in this function on success, there's no need
		 * to cancel it here.
		 */
		ssh_rtl_queue_remove(r);
		ssh_rtl_pending_remove(r);
		ssh_rtl_complete_with_status(r, status);

		ssh_rtl_tx_schedule(ssh_request_rtl(r));
		return;
	}

	/* Update state: Mark as transmitted and clear transmitting. */
	set_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state);
	/* Ensure state never gets zero. */
	smp_mb__before_atomic();
	clear_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &r->state);

	/* If we expect a response, we just need to start the timeout. */
	if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &r->state)) {
		/*
		 * Note: This is the only place where the timestamp gets set,
		 * all other access to it is read-only.
		 */
		ssh_rtl_timeout_start(r);
		return;
	}

	/*
	 * If we don't expect a response, lock, remove, and complete the
	 * request. Note that, at this point, the request is guaranteed to have
	 * left the queue and no timeout has been started. Thus we only need to
	 * remove it from pending. If the request has already been completed (it
	 * may have been canceled) return.
	 */

	set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
		return;

	ssh_rtl_pending_remove(r);
	ssh_rtl_complete_with_status(r, 0);

	ssh_rtl_tx_schedule(ssh_request_rtl(r));
}

static ktime_t ssh_request_get_expiration(struct ssh_request *r, ktime_t timeout)
{
	ktime_t timestamp = READ_ONCE(r->timestamp);

	if (timestamp != KTIME_MAX)
		return ktime_add(timestamp, timeout);
	else
		return KTIME_MAX;
}

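/*
 * Timeout reaper work function: claim all pending requests whose response
 * timeout has expired, complete them with -ETIMEDOUT, and re-arm the reaper
 * for the next pending expiration.
 */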
static void ssh_rtl_timeout_reap(struct work_struct *work)
{
	struct ssh_rtl *rtl = to_ssh_rtl(work, rtx_timeout.reaper.work);
	struct ssh_request *r, *n;
	LIST_HEAD(claimed);
	ktime_t now = ktime_get_coarse_boottime();
	ktime_t timeout = rtl->rtx_timeout.timeout;
	ktime_t next = KTIME_MAX;

	trace_ssam_rtl_timeout_reap(atomic_read(&rtl->pending.count));

	/*
	 * Mark reaper as "not pending". This is done before checking any
	 * requests to avoid lost-update type problems.
	 */
	spin_lock(&rtl->rtx_timeout.lock);
	rtl->rtx_timeout.expires = KTIME_MAX;
	spin_unlock(&rtl->rtx_timeout.lock);

	spin_lock(&rtl->pending.lock);
	list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
		ktime_t expires = ssh_request_get_expiration(r, timeout);

		/*
		 * Check if the timeout hasn't expired yet. Find out next
		 * expiration date to be handled after this run.
		 */
		if (ktime_after(expires, now)) {
			next = ktime_before(expires, next) ? expires : next;
			continue;
		}

		/* Avoid further transitions if locked. */
		if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
			continue;

		/*
		 * We have now marked the packet as locked. Thus it cannot be
		 * added to the pending or queued lists again after we've
		 * removed it here. We can therefore re-use the node of this
		 * packet temporarily.
		 */

		clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);

		atomic_dec(&rtl->pending.count);
		list_move_tail(&r->node, &claimed);
	}
	spin_unlock(&rtl->pending.lock);

	/* Cancel and complete the request. */
	list_for_each_entry_safe(r, n, &claimed, node) {
		trace_ssam_request_timeout(r);

		/*
		 * At this point we've removed the packet from pending. This
		 * means that we've obtained the last (only) reference of the
		 * system to it. Thus we can just complete it.
		 */
		if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
			ssh_rtl_complete_with_status(r, -ETIMEDOUT);

		/*
		 * Drop the reference we've obtained by removing it from the
		 * pending set.
		 */
		list_del(&r->node);
		ssh_request_put(r);
	}

	/* Ensure that the reaper doesn't run again immediately. */
	next = max(next, ktime_add(now, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION));
	if (next != KTIME_MAX)
		ssh_rtl_timeout_reaper_mod(rtl, now, next);

	ssh_rtl_tx_schedule(rtl);
}

static void ssh_rtl_rx_event(struct ssh_rtl *rtl, const struct ssh_command *cmd,
			     const struct ssam_span *data)
{
	trace_ssam_rx_event_received(cmd, data->len);

	rtl_dbg(rtl, "rtl: handling event (rqid: %#06x)\n",
		get_unaligned_le16(&cmd->rqid));

	rtl->ops.handle_event(rtl, cmd, data);
}

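/*
 * Parse a received command message and dispatch it: messages carrying an
 * event RQID are forwarded to the event handler, everything else is treated
 * as a response to a pending request.
 */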
static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data)
{
	struct ssh_rtl *rtl = to_ssh_rtl(p, ptl);
	struct device *dev = &p->serdev->dev;
	struct ssh_command *command;
	struct ssam_span command_data;

	if (sshp_parse_command(dev, data, &command, &command_data))
		return;

	/*
	 * Check if the message was intended for us. If not, drop it.
	 *
	 * Note: We will need to change this to handle debug messages. On newer
	 * generation devices, these seem to be sent to SSAM_SSH_TID_DEBUG. We
	 * as host can still receive them as they can be forwarded via an
	 * override option on SAM, but doing so does not change the target ID
	 * to SSAM_SSH_TID_HOST.
	 */
	if (command->tid != SSAM_SSH_TID_HOST) {
		rtl_warn(rtl, "rtl: dropping message not intended for us (tid = %#04x)\n",
			 command->tid);
		return;
	}

	if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid)))
		ssh_rtl_rx_event(rtl, command, &command_data);
	else
		ssh_rtl_complete(rtl, command, &command_data);
}

static void ssh_rtl_rx_data(struct ssh_ptl *p, const struct ssam_span *data)
{
	if (!data->len) {
		ptl_err(p, "rtl: rx: no data frame payload\n");
		return;
	}

	switch (data->ptr[0]) {
	case SSH_PLD_TYPE_CMD:
		ssh_rtl_rx_command(p, data);
		break;

	default:
		ptl_err(p, "rtl: rx: unknown frame payload type (type: %#04x)\n",
			data->ptr[0]);
		break;
	}
}

static void ssh_rtl_packet_release(struct ssh_packet *p)
{
	struct ssh_request *rqst;

	rqst = to_ssh_request(p);
	rqst->ops->release(rqst);
}

static const struct ssh_packet_ops ssh_rtl_packet_ops = {
	.complete = ssh_rtl_packet_callback,
	.release = ssh_rtl_packet_release,
};

/**
 * ssh_request_init() - Initialize SSH request.
 * @rqst:  The request to initialize.
 * @flags: Request flags, determining the type of the request.
 * @ops:   Request operations.
 *
 * Initializes the given SSH request and underlying packet. Sets the message
 * buffer pointer to %NULL and the message buffer length to zero. This buffer
 * has to be set separately via ssh_request_set_data() before submission and
 * must contain a valid SSH request message.
 *
 * Return: Returns zero on success or %-EINVAL if the given flags are invalid.
 */
int ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags,
		     const struct ssh_request_ops *ops)
{
	unsigned long type = BIT(SSH_PACKET_TY_BLOCKING_BIT);

	/* Unsequenced requests cannot have a response. */
	if (flags & SSAM_REQUEST_UNSEQUENCED && flags & SSAM_REQUEST_HAS_RESPONSE)
		return -EINVAL;

	if (!(flags & SSAM_REQUEST_UNSEQUENCED))
		type |= BIT(SSH_PACKET_TY_SEQUENCED_BIT);

	ssh_packet_init(&rqst->packet, type, SSH_PACKET_PRIORITY(DATA, 0),
			&ssh_rtl_packet_ops);

	INIT_LIST_HEAD(&rqst->node);

	rqst->state = 0;
	if (flags & SSAM_REQUEST_HAS_RESPONSE)
		rqst->state |= BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT);

	rqst->timestamp = KTIME_MAX;
	rqst->ops = ops;

	return 0;
}
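
/*
 * Illustrative usage sketch (assumption, not part of this file): a client of
 * this layer would typically initialize a request, attach its message buffer
 * via ssh_request_set_data(), and then submit it, e.g.
 *
 *	ssh_request_init(&rqst, SSAM_REQUEST_HAS_RESPONSE, &my_ops);
 *	ssh_request_set_data(&rqst, msg_buf, msg_len);
 *	status = ssh_rtl_submit(rtl, &rqst);
 *
 * where my_ops, msg_buf, and msg_len are hypothetical caller-provided values
 * and the exact ssh_request_set_data() signature is assumed here.
 */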

/**
 * ssh_rtl_init() - Initialize request transport layer.
 * @rtl:    The request transport layer to initialize.
 * @serdev: The underlying serial device, i.e. the lower-level transport.
 * @ops:    Request transport layer operations.
 *
 * Initializes the given request transport layer and associated packet
 * transport layer. Transmitter and receiver threads must be started
 * separately via ssh_rtl_start(), after the request-layer has been
 * initialized and the lower-level serial device layer has been set up.
 *
 * Return: Returns zero on success and a nonzero error code on failure.
 */
int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
		 const struct ssh_rtl_ops *ops)
{
	struct ssh_ptl_ops ptl_ops;
	int status;

	ptl_ops.data_received = ssh_rtl_rx_data;

	status = ssh_ptl_init(&rtl->ptl, serdev, &ptl_ops);
	if (status)
		return status;

	spin_lock_init(&rtl->queue.lock);
	INIT_LIST_HEAD(&rtl->queue.head);

	spin_lock_init(&rtl->pending.lock);
	INIT_LIST_HEAD(&rtl->pending.head);
	atomic_set_release(&rtl->pending.count, 0);

	INIT_WORK(&rtl->tx.work, ssh_rtl_tx_work_fn);

	spin_lock_init(&rtl->rtx_timeout.lock);
	rtl->rtx_timeout.timeout = SSH_RTL_REQUEST_TIMEOUT;
	rtl->rtx_timeout.expires = KTIME_MAX;
	INIT_DELAYED_WORK(&rtl->rtx_timeout.reaper, ssh_rtl_timeout_reap);

	rtl->ops = *ops;

	return 0;
}

/**
 * ssh_rtl_destroy() - Deinitialize request transport layer.
 * @rtl: The request transport layer to deinitialize.
 *
 * Deinitializes the given request transport layer and frees resources
 * associated with it. If receiver and/or transmitter threads have been
 * started, the layer must first be shut down via ssh_rtl_shutdown() before
 * this function can be called.
 */
void ssh_rtl_destroy(struct ssh_rtl *rtl)
{
	ssh_ptl_destroy(&rtl->ptl);
}

/**
 * ssh_rtl_start() - Start request transmitter and receiver.
 * @rtl: The request transport layer.
 *
 * Return: Returns zero on success, a negative error code on failure.
 */
int ssh_rtl_start(struct ssh_rtl *rtl)
{
	int status;

	status = ssh_ptl_tx_start(&rtl->ptl);
	if (status)
		return status;

	ssh_rtl_tx_schedule(rtl);

	status = ssh_ptl_rx_start(&rtl->ptl);
	if (status) {
		ssh_rtl_flush(rtl, msecs_to_jiffies(5000));
		ssh_ptl_tx_stop(&rtl->ptl);
		return status;
	}

	return 0;
}

struct ssh_flush_request {
	struct ssh_request base;
	struct completion completion;
	int status;
};

static void ssh_rtl_flush_request_complete(struct ssh_request *r,
					   const struct ssh_command *cmd,
					   const struct ssam_span *data,
					   int status)
{
	struct ssh_flush_request *rqst;

	rqst = container_of(r, struct ssh_flush_request, base);
	rqst->status = status;
}

static void ssh_rtl_flush_request_release(struct ssh_request *r)
{
	struct ssh_flush_request *rqst;

	rqst = container_of(r, struct ssh_flush_request, base);
	complete_all(&rqst->completion);
}

static const struct ssh_request_ops ssh_rtl_flush_request_ops = {
	.complete = ssh_rtl_flush_request_complete,
	.release = ssh_rtl_flush_request_release,
};

/**
 * ssh_rtl_flush() - Flush the request transport layer.
 * @rtl:     request transport layer
 * @timeout: timeout for the flush operation in jiffies
 *
 * Queue a special flush request and wait for its completion. This request
 * will be completed after all other currently queued and pending requests
 * have been completed. Instead of a normal data packet, this request submits
 * a special flush packet, meaning that upon completion, also the underlying
 * packet transport layer has been flushed.
 *
 * Flushing the request layer guarantees that all previously submitted
 * requests have been fully completed before this call returns. Additionally,
 * flushing blocks execution of all later submitted requests until the flush
 * has been completed.
 *
 * If the caller ensures that no new requests are submitted after a call to
 * this function, the request transport layer is guaranteed to have no
 * remaining requests when this call returns. The same guarantee does not hold
 * for the packet layer, on which control packets may still be queued after
 * this call.
 *
 * Return: Returns zero on success, %-ETIMEDOUT if the flush timed out and has
 * been canceled as a result of the timeout, or %-ESHUTDOWN if the packet
 * and/or request transport layer has been shut down before this call. May
 * also return %-EINTR if the underlying packet transmission has been
 * interrupted.
 */
int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout)
{
	const unsigned int init_flags = SSAM_REQUEST_UNSEQUENCED;
	struct ssh_flush_request rqst;
	int status;

	ssh_request_init(&rqst.base, init_flags, &ssh_rtl_flush_request_ops);
	rqst.base.packet.state |= BIT(SSH_PACKET_TY_FLUSH_BIT);
	rqst.base.packet.priority = SSH_PACKET_PRIORITY(FLUSH, 0);
	rqst.base.state |= BIT(SSH_REQUEST_TY_FLUSH_BIT);

	init_completion(&rqst.completion);

	status = ssh_rtl_submit(rtl, &rqst.base);
	if (status)
		return status;

	ssh_request_put(&rqst.base);

	if (!wait_for_completion_timeout(&rqst.completion, timeout)) {
		ssh_rtl_cancel(&rqst.base, true);
		wait_for_completion(&rqst.completion);
	}

	WARN_ON(rqst.status != 0 && rqst.status != -ECANCELED &&
		rqst.status != -ESHUTDOWN && rqst.status != -EINTR);

	return rqst.status == -ECANCELED ? -ETIMEDOUT : rqst.status;
}

/**
 * ssh_rtl_shutdown() - Shut down request transport layer.
 * @rtl: The request transport layer.
 *
 * Shuts down the request transport layer, removing and canceling all queued
 * and pending requests. Requests canceled by this operation will be completed
 * with %-ESHUTDOWN as status. Receiver and transmitter threads will be
 * stopped, and the lower-level packet layer will be shut down.
 *
 * As a result of this function, the transport layer will be marked as shut
 * down. Submission of requests after the transport layer has been shut down
 * will fail with %-ESHUTDOWN.
 */
void ssh_rtl_shutdown(struct ssh_rtl *rtl)
{
	struct ssh_request *r, *n;
	LIST_HEAD(claimed);
	int pending;

	set_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state);
	/*
	 * Ensure that the layer gets marked as shut-down before actually
	 * stopping it. In combination with the check in ssh_rtl_submit(),
	 * this guarantees that no new requests can be added and all already
	 * queued requests are properly canceled.
	 */
	smp_mb__after_atomic();

	/* Remove requests from queue. */
	spin_lock(&rtl->queue.lock);
	list_for_each_entry_safe(r, n, &rtl->queue.head, node) {
		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
		/* Ensure state never gets zero. */
		smp_mb__before_atomic();
		clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);

		list_move_tail(&r->node, &claimed);
	}
	spin_unlock(&rtl->queue.lock);

	/*
	 * We have now guaranteed that the queue is empty and no more new
	 * requests can be submitted (i.e. it will stay empty). This means that
	 * calling ssh_rtl_tx_schedule() will not schedule tx.work any more. So
	 * we can simply call cancel_work_sync() on tx.work here and when that
	 * returns, we've locked it down. This also means that after this call,
	 * we don't submit any more packets to the underlying packet layer, so
	 * we can also shut that down.
	 */

	cancel_work_sync(&rtl->tx.work);
	ssh_ptl_shutdown(&rtl->ptl);
	cancel_delayed_work_sync(&rtl->rtx_timeout.reaper);

	/*
	 * Shutting down the packet layer should also have canceled all
	 * requests. Thus the pending set should be empty. Attempt to handle
	 * this gracefully anyways, even though this should be dead code.
	 */

	pending = atomic_read(&rtl->pending.count);
	if (WARN_ON(pending)) {
		spin_lock(&rtl->pending.lock);
		list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
			set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
			/* Ensure state never gets zero. */
			smp_mb__before_atomic();
			clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);

			list_move_tail(&r->node, &claimed);
		}
		spin_unlock(&rtl->pending.lock);
	}

	/* Finally, cancel and complete the requests we claimed before. */
	list_for_each_entry_safe(r, n, &claimed, node) {
		/*
		 * We need test_and_set() because we still might compete with
		 * cancellation.
		 */
		if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
			ssh_rtl_complete_with_status(r, -ESHUTDOWN);

		/*
		 * Drop the reference we've obtained by removing it from the
		 * lists.
		 */
		list_del(&r->node);
		ssh_request_put(r);
	}
}