// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus operations
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

static struct kmem_cache *gb_operation_cache;
static struct kmem_cache *gb_message_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_completion_wq;

/* Wait queue for synchronous cancellations. */
static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);

/*
 * Protects updates to operation->errno.
 */
static DEFINE_SPINLOCK(gb_operations_lock);

static int gb_operation_response_send(struct gb_operation *operation,
				      int errno);

/*
 * Increment operation active count and add to connection list unless the
 * connection is going away.
 *
 * Caller holds operation reference.
 */
static int gb_operation_get_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);
	switch (connection->state) {
	case GB_CONNECTION_STATE_ENABLED:
		break;
	case GB_CONNECTION_STATE_ENABLED_TX:
		if (gb_operation_is_incoming(operation))
			goto err_unlock;
		break;
	case GB_CONNECTION_STATE_DISCONNECTING:
		if (!gb_operation_is_core(operation))
			goto err_unlock;
		break;
	default:
		goto err_unlock;
	}

	if (operation->active++ == 0)
		list_add_tail(&operation->links, &connection->operations);

	trace_gb_operation_get_active(operation);

	spin_unlock_irqrestore(&connection->lock, flags);

	return 0;

err_unlock:
	spin_unlock_irqrestore(&connection->lock, flags);

	return -ENOTCONN;
}

/* Caller holds operation reference. */
static void gb_operation_put_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);

	trace_gb_operation_put_active(operation);

	if (--operation->active == 0) {
		list_del(&operation->links);
		if (atomic_read(&operation->waiters))
			wake_up(&gb_operation_cancellation_queue);
	}
	spin_unlock_irqrestore(&connection->lock, flags);
}

static bool gb_operation_is_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&connection->lock, flags);
	ret = operation->active;
	spin_unlock_irqrestore(&connection->lock, flags);

	return ret;
}

/*
 * Set an operation's result.
 *
 * Initially an outgoing operation's errno value is -EBADR.
 * If no error occurs before sending the request message the only
 * valid value operation->errno can be set to is -EINPROGRESS,
 * indicating the request has been (or rather is about to be) sent.
 * At that point nobody should be looking at the result until the
 * response arrives.
 *
 * The first time the result gets set after the request has been
 * sent, that result "sticks."  That is, if two concurrent threads
 * race to set the result, the first one wins.  The return value
 * tells the caller whether its result was recorded; if not the
 * caller has nothing more to do.
 *
 * The result value -EILSEQ is reserved to signal an implementation
 * error; if it's ever observed, the code performing the request has
 * done something fundamentally wrong.  It is an error to try to set
 * the result to -EBADR, and attempts to do so result in a warning,
 * and -EILSEQ is used instead.  Similarly, the only valid result
 * value to set for an operation in initial state is -EINPROGRESS.
 * Attempts to do otherwise will also record a (successful) -EILSEQ
 * operation result.
 */
static bool gb_operation_result_set(struct gb_operation *operation, int result)
{
	unsigned long flags;
	int prev;

	if (result == -EINPROGRESS) {
		/*
		 * -EINPROGRESS is used to indicate the request is
		 * in flight.  It should be the first result value
		 * set after the initial -EBADR.  Issue a warning
		 * and record an implementation error if it's
		 * set at any other time.
		 */
		spin_lock_irqsave(&gb_operations_lock, flags);
		prev = operation->errno;
		if (prev == -EBADR)
			operation->errno = result;
		else
			operation->errno = -EILSEQ;
		spin_unlock_irqrestore(&gb_operations_lock, flags);
		WARN_ON(prev != -EBADR);

		return true;
	}

	/*
	 * The first result value set after a request has been sent
	 * will be the final result of the operation.  Subsequent
	 * attempts to set the result are ignored.
	 *
	 * Note that -EBADR is a reserved "initial state" result
	 * value.  Attempts to set this value result in a warning,
	 * and the result code is set to -EILSEQ instead.
	 */
	if (WARN_ON(result == -EBADR))
		result = -EILSEQ; /* Nobody should be setting -EBADR */

	spin_lock_irqsave(&gb_operations_lock, flags);
	prev = operation->errno;
	if (prev == -EINPROGRESS)
		operation->errno = result;	/* First and final result */
	spin_unlock_irqrestore(&gb_operations_lock, flags);

	return prev == -EINPROGRESS;
}

int gb_operation_result(struct gb_operation *operation)
{
	int result = operation->errno;

	WARN_ON(result == -EBADR);
	WARN_ON(result == -EINPROGRESS);

	return result;
}
EXPORT_SYMBOL_GPL(gb_operation_result);
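
/*
 * Illustrative sketch (added commentary, not part of the upstream
 * driver): the errno lifecycle of a typical outgoing operation is
 *
 *	-EBADR (just created)
 *	  -> -EINPROGRESS (set while queueing the request for send)
 *	    -> first final result (response status, send error, timeout
 *	       or cancellation), which "sticks"
 *
 * For example, if a response carrying GB_OP_SUCCESS races with the
 * operation timeout, whichever path calls gb_operation_result_set()
 * first records its value (0 or -ETIMEDOUT); the loser gets false
 * back and must do nothing further.
 */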

/*
 * Looks up an outgoing operation on a connection and returns a refcounted
 * pointer if found, or NULL otherwise.
 */
static struct gb_operation *
gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
{
	struct gb_operation *operation;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&connection->lock, flags);
	list_for_each_entry(operation, &connection->operations, links)
		if (operation->id == operation_id &&
		    !gb_operation_is_incoming(operation)) {
			gb_operation_get(operation);
			found = true;
			break;
		}
	spin_unlock_irqrestore(&connection->lock, flags);

	return found ? operation : NULL;
}

static int gb_message_send(struct gb_message *message, gfp_t gfp)
{
	struct gb_connection *connection = message->operation->connection;

	trace_gb_message_send(message);
	return connection->hd->driver->message_send(connection->hd,
					connection->hd_cport_id,
					message,
					gfp);
}

/*
 * Cancel a message we have passed to the host device layer to be sent.
 */
static void gb_message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;

	hd->driver->message_cancel(message);
}

static void gb_operation_request_handle(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	int status;
	int ret;

	if (connection->handler) {
		status = connection->handler(operation);
	} else {
		dev_err(&connection->hd->dev,
			"%s: unexpected incoming request of type 0x%02x\n",
			connection->name, operation->type);

		status = -EPROTONOSUPPORT;
	}

	ret = gb_operation_response_send(operation, status);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send response %d for type 0x%02x: %d\n",
			connection->name, status, operation->type, ret);
		return;
	}
}

/*
 * Process operation work.
 *
 * For incoming requests, call the protocol request handler. The operation
 * result should be -EINPROGRESS at this point.
 *
 * For outgoing requests, the operation result value should have
 * been set before queueing this.  The operation callback function
 * allows the original requester to know the request has completed
 * and its result is available.
 */
static void gb_operation_work(struct work_struct *work)
{
	struct gb_operation *operation;
	int ret;

	operation = container_of(work, struct gb_operation, work);

	if (gb_operation_is_incoming(operation)) {
		gb_operation_request_handle(operation);
	} else {
		ret = timer_delete_sync(&operation->timer);
		if (!ret) {
			/* Cancel request message if scheduled by timeout. */
			if (gb_operation_result(operation) == -ETIMEDOUT)
				gb_message_cancel(operation->request);
		}

		operation->callback(operation);
	}

	gb_operation_put_active(operation);
	gb_operation_put(operation);
}

static void gb_operation_timeout(struct timer_list *t)
{
	struct gb_operation *operation = timer_container_of(operation, t,
							    timer);

	if (gb_operation_result_set(operation, -ETIMEDOUT)) {
		/*
		 * A stuck request message will be cancelled from the
		 * workqueue.
		 */
		queue_work(gb_operation_completion_wq, &operation->work);
	}
}
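
/*
 * Added commentary: in gb_operation_work() above, timer_delete_sync()
 * returning zero only means the timer was not pending (it may have
 * fired, or may never have been armed when no timeout was requested).
 * The -ETIMEDOUT check is what identifies completions queued by
 * gb_operation_timeout(), which must cancel the still-stuck request
 * message from sleepable workqueue context rather than from the timer
 * callback itself.
 */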

static void gb_operation_message_init(struct gb_host_device *hd,
				      struct gb_message *message,
				      u16 operation_id,
				      size_t payload_size, u8 type)
{
	struct gb_operation_msg_hdr *header;

	header = message->buffer;

	message->header = header;
	message->payload = payload_size ? header + 1 : NULL;
	message->payload_size = payload_size;

	/*
	 * The type supplied for incoming message buffers will be
	 * GB_REQUEST_TYPE_INVALID. Such buffers will be overwritten by
	 * arriving data so there's no need to initialize the message header.
	 */
	if (type != GB_REQUEST_TYPE_INVALID) {
		u16 message_size = (u16)(sizeof(*header) + payload_size);

		/*
		 * For a request, the operation id gets filled in
		 * when the message is sent.  For a response, it
		 * will be copied from the request by the caller.
		 *
		 * The result field in a request message must be
		 * zero.  It will be set just prior to sending for
		 * a response.
		 */
		header->size = cpu_to_le16(message_size);
		header->operation_id = 0;
		header->type = type;
		header->result = 0;
	}
}

/*
 * Allocate a message to be used for an operation request or response.
 * Both types of message contain a common header.  The request message
 * for an outgoing operation is outbound, as is the response message
 * for an incoming operation.  The message header for an outbound
 * message is partially initialized here.
 *
 * The headers for inbound messages don't need to be initialized;
 * they'll be filled in by arriving data.
 *
 * Our message buffers have the following layout:
 *	message header  \_ these combined are
 *	message payload /  the message size
 */
static struct gb_message *
gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
			   size_t payload_size, gfp_t gfp_flags)
{
	struct gb_message *message;
	struct gb_operation_msg_hdr *header;
	size_t message_size = payload_size + sizeof(*header);

	if (message_size > hd->buffer_size_max) {
		dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
			 message_size, hd->buffer_size_max);
		return NULL;
	}

	/* Allocate the message structure and buffer. */
	message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
	if (!message)
		return NULL;

	message->buffer = kzalloc(message_size, gfp_flags);
	if (!message->buffer)
		goto err_free_message;

	/* Initialize the message.  Operation id is filled in later. */
	gb_operation_message_init(hd, message, 0, payload_size, type);

	return message;

err_free_message:
	kmem_cache_free(gb_message_cache, message);

	return NULL;
}

static void gb_operation_message_free(struct gb_message *message)
{
	kfree(message->buffer);
	kmem_cache_free(gb_message_cache, message);
}
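
/*
 * Illustrative sketch (added commentary): assuming the wire header
 * layout from greybus_protocols.h (two __le16 fields, two __u8 fields
 * and two bytes of padding, 8 bytes total), a call such as
 *
 *	message = gb_operation_message_alloc(hd, type, 4, GFP_KERNEL);
 *
 * yields a 12-byte buffer:
 *
 *	offset 0:  struct gb_operation_msg_hdr (8 bytes)
 *	offset 8:  4 bytes of payload
 *
 * with message->header pointing at offset 0, message->payload at
 * offset 8, and header->size holding cpu_to_le16(12).
 */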

/*
 * Map an enum gb_operation_status value (which is represented in a
 * message as a single byte) to an appropriate Linux negative errno.
 */
static int gb_operation_status_map(u8 status)
{
	switch (status) {
	case GB_OP_SUCCESS:
		return 0;
	case GB_OP_INTERRUPTED:
		return -EINTR;
	case GB_OP_TIMEOUT:
		return -ETIMEDOUT;
	case GB_OP_NO_MEMORY:
		return -ENOMEM;
	case GB_OP_PROTOCOL_BAD:
		return -EPROTONOSUPPORT;
	case GB_OP_OVERFLOW:
		return -EMSGSIZE;
	case GB_OP_INVALID:
		return -EINVAL;
	case GB_OP_RETRY:
		return -EAGAIN;
	case GB_OP_NONEXISTENT:
		return -ENODEV;
	case GB_OP_MALFUNCTION:
		return -EILSEQ;
	case GB_OP_UNKNOWN_ERROR:
	default:
		return -EIO;
	}
}

/*
 * Map a Linux errno value (from operation->errno) into the value
 * that should represent it in a response message status sent
 * over the wire.  Returns an enum gb_operation_status value (which
 * is represented in a message as a single byte).
 */
static u8 gb_operation_errno_map(int errno)
{
	switch (errno) {
	case 0:
		return GB_OP_SUCCESS;
	case -EINTR:
		return GB_OP_INTERRUPTED;
	case -ETIMEDOUT:
		return GB_OP_TIMEOUT;
	case -ENOMEM:
		return GB_OP_NO_MEMORY;
	case -EPROTONOSUPPORT:
		return GB_OP_PROTOCOL_BAD;
	case -EMSGSIZE:
		return GB_OP_OVERFLOW;	/* Could be underflow too */
	case -EINVAL:
		return GB_OP_INVALID;
	case -EAGAIN:
		return GB_OP_RETRY;
	case -EILSEQ:
		return GB_OP_MALFUNCTION;
	case -ENODEV:
		return GB_OP_NONEXISTENT;
	case -EIO:
	default:
		return GB_OP_UNKNOWN_ERROR;
	}
}

bool gb_operation_response_alloc(struct gb_operation *operation,
				 size_t response_size, gfp_t gfp)
{
	struct gb_host_device *hd = operation->connection->hd;
	struct gb_operation_msg_hdr *request_header;
	struct gb_message *response;
	u8 type;

	type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
	response = gb_operation_message_alloc(hd, type, response_size, gfp);
	if (!response)
		return false;
	response->operation = operation;

	/*
	 * Size and type get initialized when the message is
	 * allocated.  The errno will be set before sending.  All
	 * that's left is the operation id, which we copy from the
	 * request message header (as-is, in little-endian order).
	 */
	request_header = operation->request->header;
	response->header->operation_id = request_header->operation_id;
	operation->response = response;

	return true;
}
EXPORT_SYMBOL_GPL(gb_operation_response_alloc);
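
/*
 * Hedged usage sketch (added commentary; the handler and payload
 * struct below are hypothetical, only the gb_* calls are real): a
 * protocol request handler that returns payload might look like
 *
 *	static int example_request_handler(struct gb_operation *op)
 *	{
 *		struct example_response *resp;
 *
 *		if (!gb_operation_response_alloc(op, sizeof(*resp),
 *						 GFP_KERNEL))
 *			return -ENOMEM;
 *
 *		resp = op->response->payload;
 *		resp->value = cpu_to_le32(42);
 *
 *		return 0;
 *	}
 *
 * The handler's return value becomes the status that
 * gb_operation_request_handle() passes to
 * gb_operation_response_send(), which sends the response on the
 * handler's behalf.
 */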

/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.
 *
 * For outgoing requests, the request message's header will be
 * initialized with the type of the request and the message size.
 * Outgoing operations must also specify the response buffer size,
 * which must be sufficient to hold all expected response data.  The
 * response message header will eventually be overwritten, so there's
 * no need to initialize it here.
 *
 * Request messages for incoming operations can arrive in interrupt
 * context, so they must be allocated with GFP_ATOMIC.  In this case
 * the request buffer will be immediately overwritten, so there is
 * no need to initialize the message header.  Responsibility for
 * allocating a response buffer lies with the incoming request
 * handler for a protocol.  So we don't allocate that here.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
static struct gb_operation *
gb_operation_create_common(struct gb_connection *connection, u8 type,
			   size_t request_size, size_t response_size,
			   unsigned long op_flags, gfp_t gfp_flags)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_operation *operation;

	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;

	operation->request = gb_operation_message_alloc(hd, type, request_size,
							gfp_flags);
	if (!operation->request)
		goto err_cache;
	operation->request->operation = operation;

	/* Allocate the response buffer for outgoing operations */
	if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
		if (!gb_operation_response_alloc(operation, response_size,
						 gfp_flags)) {
			goto err_request;
		}

		timer_setup(&operation->timer, gb_operation_timeout, 0);
	}

	operation->flags = op_flags;
	operation->type = type;
	operation->errno = -EBADR;  /* Initial value--means "never set" */

	INIT_WORK(&operation->work, gb_operation_work);
	init_completion(&operation->completion);
	kref_init(&operation->kref);
	atomic_set(&operation->waiters, 0);

	return operation;

err_request:
	gb_operation_message_free(operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}

/*
 * Create a new operation associated with the given connection.  The
 * request and response sizes provided are the number of bytes
 * required to hold the request/response payload only.  Both of
 * these are allowed to be 0.  Note that 0x00 is reserved as an
 * invalid operation type for all protocols, and this is enforced
 * here.
 */
struct gb_operation *
gb_operation_create_flags(struct gb_connection *connection,
			  u8 type, size_t request_size,
			  size_t response_size, unsigned long flags,
			  gfp_t gfp)
{
	struct gb_operation *operation;

	if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
		return NULL;
	if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
		type &= ~GB_MESSAGE_TYPE_RESPONSE;

	if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
		flags &= GB_OPERATION_FLAG_USER_MASK;

	operation = gb_operation_create_common(connection, type,
					       request_size, response_size,
					       flags, gfp);
	if (operation)
		trace_gb_operation_create(operation);

	return operation;
}
EXPORT_SYMBOL_GPL(gb_operation_create_flags);
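
/*
 * Added note: gb_operation_create() in the Greybus operation header is
 * a thin inline wrapper around gb_operation_create_flags() that passes
 * flags of zero; most protocol drivers use that wrapper directly.
 */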

struct gb_operation *
gb_operation_create_core(struct gb_connection *connection,
			 u8 type, size_t request_size,
			 size_t response_size, unsigned long flags,
			 gfp_t gfp)
{
	struct gb_operation *operation;

	flags |= GB_OPERATION_FLAG_CORE;

	operation = gb_operation_create_common(connection, type,
					       request_size, response_size,
					       flags, gfp);
	if (operation)
		trace_gb_operation_create_core(operation);

	return operation;
}
/* Do not export gb_operation_create_core(); it is reserved for core use. */

size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;

	return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
}
EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);

static struct gb_operation *
gb_operation_create_incoming(struct gb_connection *connection, u16 id,
			     u8 type, void *data, size_t size)
{
	struct gb_operation *operation;
	size_t request_size;
	unsigned long flags = GB_OPERATION_FLAG_INCOMING;

	/* Caller has made sure we at least have a message header. */
	request_size = size - sizeof(struct gb_operation_msg_hdr);

	if (!id)
		flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;

	/*
	 * Note: GB_REQUEST_TYPE_INVALID (0x00) is passed here as the
	 * response size; the response buffer for an incoming operation
	 * is allocated by the request handler, not here.
	 */
	operation = gb_operation_create_common(connection, type,
					       request_size,
					       GB_REQUEST_TYPE_INVALID,
					       flags, GFP_ATOMIC);
	if (!operation)
		return NULL;

	operation->id = id;
	memcpy(operation->request->header, data, size);
	trace_gb_operation_create_incoming(operation);

	return operation;
}

/*
 * Get an additional reference on an operation.
 */
void gb_operation_get(struct gb_operation *operation)
{
	kref_get(&operation->kref);
}
EXPORT_SYMBOL_GPL(gb_operation_get);

/*
 * Destroy a previously created operation.
 */
static void _gb_operation_destroy(struct kref *kref)
{
	struct gb_operation *operation;

	operation = container_of(kref, struct gb_operation, kref);

	trace_gb_operation_destroy(operation);

	if (operation->response)
		gb_operation_message_free(operation->response);
	gb_operation_message_free(operation->request);

	kmem_cache_free(gb_operation_cache, operation);
}

/*
 * Drop a reference on an operation, and destroy it when the last
 * one is gone.
 */
void gb_operation_put(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	kref_put(&operation->kref, _gb_operation_destroy);
}
EXPORT_SYMBOL_GPL(gb_operation_put);

/* Tell the requester we're done */
static void gb_operation_sync_callback(struct gb_operation *operation)
{
	complete(&operation->completion);
}

/**
 * gb_operation_request_send() - send an operation request message
 * @operation:	the operation to initiate
 * @callback:	the operation completion callback
 * @timeout:	operation timeout in milliseconds, or zero for no timeout
 * @gfp:	the memory flags to use for any allocations
 *
 * The caller has filled in any payload so the request message is ready to go.
 * The callback function supplied will be called when the response message has
 * arrived, a unidirectional request has been sent, or the operation is
 * cancelled, indicating that the operation is complete. The callback function
 * can fetch the result of the operation using gb_operation_result() if
 * desired.
 *
 * Return: 0 if the request was successfully queued in the host-driver queues,
 * or a negative errno.
 */
int gb_operation_request_send(struct gb_operation *operation,
			      gb_operation_callback callback,
			      unsigned int timeout,
			      gfp_t gfp)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	unsigned int cycle;
	int ret;

	if (gb_connection_is_offloaded(connection))
		return -EBUSY;

	if (!callback)
		return -EINVAL;

	/*
	 * Record the callback function, which is executed in
	 * non-atomic (workqueue) context when the final result
	 * of an operation has been set.
	 */
	operation->callback = callback;

	/*
	 * Assign the operation's id, and store it in the request header.
	 * Zero is a reserved operation id for unidirectional operations.
	 */
	if (gb_operation_is_unidirectional(operation)) {
		operation->id = 0;
	} else {
		cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
		operation->id = (u16)(cycle % U16_MAX + 1);
	}

	header = operation->request->header;
	header->operation_id = cpu_to_le16(operation->id);

	gb_operation_result_set(operation, -EINPROGRESS);

	/*
	 * Get an extra reference on the operation. It'll be dropped when the
	 * operation completes.
	 */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	ret = gb_message_send(operation->request, gfp);
	if (ret)
		goto err_put_active;

	if (timeout) {
		operation->timer.expires = jiffies + msecs_to_jiffies(timeout);
		add_timer(&operation->timer);
	}

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_request_send);
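
/*
 * Hedged usage sketch (added commentary; everything except the gb_*
 * calls is hypothetical).  The callback runs in workqueue context once
 * the final result has been set, and is a natural place to drop the
 * reference taken at creation time:
 *
 *	static void example_callback(struct gb_operation *op)
 *	{
 *		if (!gb_operation_result(op))
 *			example_consume(op->response->payload);
 *		gb_operation_put(op);
 *	}
 *
 *	op = gb_operation_create(connection, EXAMPLE_TYPE, req_size,
 *				 resp_size, GFP_KERNEL);
 *	memcpy(op->request->payload, req, req_size);
 *	if (gb_operation_request_send(op, example_callback, 0, GFP_KERNEL))
 *		gb_operation_put(op);
 *
 * On send failure only the extra reference taken internally is
 * dropped, so the creator must still put its own reference, as above.
 */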

/*
 * Send a synchronous operation.  This function is expected to
 * block, returning only when the response has arrived or an
 * error is detected.  The return value is the result of the
 * operation.
 */
int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
					   unsigned int timeout)
{
	int ret;

	ret = gb_operation_request_send(operation, gb_operation_sync_callback,
					timeout, GFP_KERNEL);
	if (ret)
		return ret;

	ret = wait_for_completion_interruptible(&operation->completion);
	if (ret < 0) {
		/* Cancel the operation if interrupted */
		gb_operation_cancel(operation, -ECANCELED);
	}

	return gb_operation_result(operation);
}
EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);

/*
 * Send a response for an incoming operation request.  A non-zero
 * errno indicates a failed operation.
 *
 * If there is any response payload, the incoming request handler is
 * responsible for allocating the response message.  Otherwise it
 * can simply supply the result errno; this function will allocate
 * the response message if necessary.
 */
static int gb_operation_response_send(struct gb_operation *operation,
				      int errno)
{
	struct gb_connection *connection = operation->connection;
	int ret;

	if (!operation->response &&
	    !gb_operation_is_unidirectional(operation)) {
		if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
			return -ENOMEM;
	}

	/* Record the result */
	if (!gb_operation_result_set(operation, errno)) {
		dev_err(&connection->hd->dev, "request result already set\n");
		return -EIO;	/* Shouldn't happen */
	}

	/* Sender of request does not care about response. */
	if (gb_operation_is_unidirectional(operation))
		return 0;

	/* Reference will be dropped when message has been sent. */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	/* Fill in the response header and send it */
	operation->response->header->result = gb_operation_errno_map(errno);

	ret = gb_message_send(operation->response, GFP_KERNEL);
	if (ret)
		goto err_put_active;

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}

/*
 * This function is called when a message send request has completed.
 */
void greybus_message_sent(struct gb_host_device *hd,
			  struct gb_message *message, int status)
{
	struct gb_operation *operation = message->operation;
	struct gb_connection *connection = operation->connection;

	/*
	 * If the message was a response, we just need to drop our
	 * reference to the operation.  If an error occurred, report
	 * it.
	 *
	 * For requests, if there's no error and the operation is not
	 * unidirectional, there's nothing more to do until the response
	 * arrives.  If an error occurred attempting to send it, or if the
	 * operation is unidirectional, record the result of the operation
	 * and schedule its completion.
	 */
	if (message == operation->response) {
		if (status) {
			dev_err(&connection->hd->dev,
				"%s: error sending response 0x%02x: %d\n",
				connection->name, operation->type, status);
		}

		gb_operation_put_active(operation);
		gb_operation_put(operation);
	} else if (status || gb_operation_is_unidirectional(operation)) {
		if (gb_operation_result_set(operation, status)) {
			queue_work(gb_operation_completion_wq,
				   &operation->work);
		}
	}
}
EXPORT_SYMBOL_GPL(greybus_message_sent);

/*
 * We've received data on a connection, and it doesn't look like a
 * response, so we assume it's a request.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the request buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_request(struct gb_connection *connection,
				const struct gb_operation_msg_hdr *header,
				void *data, size_t size)
{
	struct gb_operation *operation;
	u16 operation_id;
	u8 type;
	int ret;

	operation_id = le16_to_cpu(header->operation_id);
	type = header->type;

	operation = gb_operation_create_incoming(connection, operation_id,
						 type, data, size);
	if (!operation) {
		dev_err(&connection->hd->dev,
			"%s: can't create incoming operation\n",
			connection->name);
		return;
	}

	ret = gb_operation_get_active(operation);
	if (ret) {
		gb_operation_put(operation);
		return;
	}
	trace_gb_message_recv_request(operation->request);

	/*
	 * The initial reference to the operation will be dropped when the
	 * request handler returns.
	 */
	if (gb_operation_result_set(operation, -EINPROGRESS))
		queue_work(connection->wq, &operation->work);
}

/*
 * We've received data that appears to be an operation response
 * message.  Look up the operation, and record that we've received
 * its response.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the response buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_response(struct gb_connection *connection,
				const struct gb_operation_msg_hdr *header,
				void *data, size_t size)
{
	struct gb_operation *operation;
	struct gb_message *message;
	size_t message_size;
	u16 operation_id;
	int errno;

	operation_id = le16_to_cpu(header->operation_id);

	if (!operation_id) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: invalid response id 0 received\n",
				    connection->name);
		return;
	}

	operation = gb_operation_find_outgoing(connection, operation_id);
	if (!operation) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: unexpected response id 0x%04x received\n",
				    connection->name, operation_id);
		return;
	}

	errno = gb_operation_status_map(header->result);
	message = operation->response;
	message_size = sizeof(*header) + message->payload_size;
	if (!errno && size > message_size) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: malformed response 0x%02x received (%zu > %zu)\n",
				    connection->name, header->type,
				    size, message_size);
		errno = -EMSGSIZE;
	} else if (!errno && size < message_size) {
		if (gb_operation_short_response_allowed(operation)) {
			message->payload_size = size - sizeof(*header);
		} else {
			dev_err_ratelimited(&connection->hd->dev,
					    "%s: short response 0x%02x received (%zu < %zu)\n",
					    connection->name, header->type,
					    size, message_size);
			errno = -EMSGSIZE;
		}
	}

	/* We must ignore the payload if a bad status is returned */
	if (errno)
		size = sizeof(*header);

	/* The rest will be handled in work queue context */
	if (gb_operation_result_set(operation, errno)) {
		memcpy(message->buffer, data, size);

		trace_gb_message_recv_response(message);

		queue_work(gb_operation_completion_wq, &operation->work);
	}

	gb_operation_put(operation);
}

/*
 * Handle data arriving on a connection.  As soon as we return, the
 * supplied data buffer will be reused (so unless we do something
 * with it, it's effectively dropped).
 */
void gb_connection_recv(struct gb_connection *connection,
			void *data, size_t size)
{
	struct gb_operation_msg_hdr header;
	struct device *dev = &connection->hd->dev;
	size_t msg_size;

	if (connection->state == GB_CONNECTION_STATE_DISABLED ||
	    gb_connection_is_offloaded(connection)) {
		dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
				     connection->name, size);
		return;
	}

	if (size < sizeof(header)) {
		dev_err_ratelimited(dev, "%s: short message received\n",
				    connection->name);
		return;
	}

	/* Use memcpy as data may be unaligned */
	memcpy(&header, data, sizeof(header));
	msg_size = le16_to_cpu(header.size);
	if (size < msg_size) {
		dev_err_ratelimited(dev,
				    "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
				    connection->name,
				    le16_to_cpu(header.operation_id),
				    header.type, size, msg_size);
		return;		/* XXX Should still complete operation */
	}

	if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
		gb_connection_recv_response(connection, &header, data,
					    msg_size);
	} else {
		gb_connection_recv_request(connection, &header, data,
					   msg_size);
	}
}

/*
 * Cancel an outgoing operation synchronously, and record the given error to
 * indicate why.
 */
void gb_operation_cancel(struct gb_operation *operation, int errno)
{
	if (WARN_ON(gb_operation_is_incoming(operation)))
		return;

	if (gb_operation_result_set(operation, errno)) {
		gb_message_cancel(operation->request);
		queue_work(gb_operation_completion_wq, &operation->work);
	}
	trace_gb_message_cancel_outgoing(operation->request);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
		   !gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}
EXPORT_SYMBOL_GPL(gb_operation_cancel);

/*
 * Cancel an incoming operation synchronously. Called during connection tear
 * down.
 */
void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
{
	if (WARN_ON(!gb_operation_is_incoming(operation)))
		return;

	if (!gb_operation_is_unidirectional(operation)) {
		/*
		 * Make sure the request handler has submitted the response
		 * before cancelling it.
		 */
		flush_work(&operation->work);
		if (!gb_operation_result_set(operation, errno))
			gb_message_cancel(operation->response);
	}
	trace_gb_message_cancel_incoming(operation->response);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
		   !gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}

/**
 * gb_operation_sync_timeout() - implement a "simple" synchronous operation
 * @connection: the Greybus connection to send this to
 * @type: the type of operation to send
 * @request: pointer to a memory buffer to copy the request from
 * @request_size: size of @request
 * @response: pointer to a memory buffer to copy the response to
 * @response_size: the size of @response
 * @timeout: operation timeout in milliseconds
 *
 * This function implements a simple synchronous Greybus operation.  It sends
 * the provided operation request and waits (sleeps) until the corresponding
 * operation response message has been successfully received, or an error
 * occurs.  @request and @response are buffers to hold the request and response
 * data respectively, and if they are not NULL, their size must be specified in
 * @request_size and @response_size.
 *
 * If a response payload is to come back, and @response is not NULL,
 * @response_size number of bytes will be copied into @response if the operation
 * is successful.
 *
 * If there is an error, the response buffer is left alone.
 */
int gb_operation_sync_timeout(struct gb_connection *connection, int type,
			      void *request, int request_size,
			      void *response, int response_size,
			      unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if ((response_size && !response) ||
	    (request_size && !request))
		return -EINVAL;

	operation = gb_operation_create(connection, type,
					request_size, response_size,
					GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
			connection->name, operation->id, type, ret);
	} else {
		if (response_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		}
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
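
/*
 * Hedged usage sketch (added commentary; the request/response structs
 * and type constant are hypothetical):
 *
 *	struct example_req req = { .id = cpu_to_le16(1) };
 *	struct example_resp resp;
 *	int ret;
 *
 *	ret = gb_operation_sync_timeout(connection, EXAMPLE_TYPE_GET,
 *					&req, sizeof(req),
 *					&resp, sizeof(resp),
 *					GB_OPERATION_TIMEOUT_DEFAULT);
 *	if (ret)
 *		return ret;
 *
 * On error the response buffer is left untouched; on success exactly
 * sizeof(resp) bytes have been copied into it.  gb_operation_sync()
 * in the Greybus operation header is a wrapper around this function
 * that supplies the default timeout.
 */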

/**
 * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
 * @connection:		connection to use
 * @type:		type of operation to send
 * @request:		memory buffer to copy the request from
 * @request_size:	size of @request
 * @timeout:		send timeout in milliseconds
 *
 * Initiate a unidirectional operation by sending a request message and
 * waiting for it to be acknowledged as sent by the host device.
 *
 * Note that successful send of a unidirectional operation does not imply that
 * the request has actually reached the remote end of the connection.
 */
int gb_operation_unidirectional_timeout(struct gb_connection *connection,
					int type, void *request,
					int request_size,
					unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if (request_size && !request)
		return -EINVAL;

	operation = gb_operation_create_flags(connection, type,
					      request_size, 0,
					      GB_OPERATION_FLAG_UNIDIRECTIONAL,
					      GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: unidirectional operation of type 0x%02x failed: %d\n",
			connection->name, type, ret);
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);

int __init gb_operation_init(void)
{
	gb_message_cache = kmem_cache_create("gb_message_cache",
					     sizeof(struct gb_message), 0, 0,
					     NULL);
	if (!gb_message_cache)
		return -ENOMEM;

	gb_operation_cache = kmem_cache_create("gb_operation_cache",
					       sizeof(struct gb_operation), 0,
					       0, NULL);
	if (!gb_operation_cache)
		goto err_destroy_message_cache;

	gb_operation_completion_wq = alloc_workqueue("greybus_completion",
						     0, 0);
	if (!gb_operation_completion_wq)
		goto err_destroy_operation_cache;

	return 0;

err_destroy_operation_cache:
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
err_destroy_message_cache:
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;

	return -ENOMEM;
}

void gb_operation_exit(void)
{
	destroy_workqueue(gb_operation_completion_wq);
	gb_operation_completion_wq = NULL;
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;
}
1266