1c3c4e307SRoman Kiryanov // SPDX-License-Identifier: GPL-2.0
2c89f2750SDavid 'Digit' Turner /*
3c89f2750SDavid 'Digit' Turner * Copyright (C) 2012 Intel, Inc.
4c89f2750SDavid 'Digit' Turner * Copyright (C) 2013 Intel, Inc.
52f3be882SChristoffer Dall * Copyright (C) 2014 Linaro Limited
6726ea1a8SJin Qian * Copyright (C) 2011-2016 Google, Inc.
7c89f2750SDavid 'Digit' Turner *
8c89f2750SDavid 'Digit' Turner * This software is licensed under the terms of the GNU General Public
9c89f2750SDavid 'Digit' Turner * License version 2, as published by the Free Software Foundation, and
10c89f2750SDavid 'Digit' Turner * may be copied, distributed, and modified under those terms.
11c89f2750SDavid 'Digit' Turner *
12c89f2750SDavid 'Digit' Turner * This program is distributed in the hope that it will be useful,
13c89f2750SDavid 'Digit' Turner * but WITHOUT ANY WARRANTY; without even the implied warranty of
14c89f2750SDavid 'Digit' Turner * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15c89f2750SDavid 'Digit' Turner * GNU General Public License for more details.
16c89f2750SDavid 'Digit' Turner *
17c89f2750SDavid 'Digit' Turner */
18c89f2750SDavid 'Digit' Turner
19c89f2750SDavid 'Digit' Turner /* This source file contains the implementation of a special device driver
20c89f2750SDavid 'Digit' Turner * that intends to provide a *very* fast communication channel between the
21c89f2750SDavid 'Digit' Turner * guest system and the QEMU emulator.
22c89f2750SDavid 'Digit' Turner *
23c89f2750SDavid 'Digit' Turner * Usage from the guest is simply the following (error handling simplified):
24c89f2750SDavid 'Digit' Turner *
25c89f2750SDavid 'Digit' Turner * int fd = open("/dev/qemu_pipe",O_RDWR);
26c89f2750SDavid 'Digit' Turner * .... write() or read() through the pipe.
27c89f2750SDavid 'Digit' Turner *
28c89f2750SDavid 'Digit' Turner * This driver doesn't deal with the exact protocol used during the session.
29c89f2750SDavid 'Digit' Turner * It is intended to be as simple as something like:
30c89f2750SDavid 'Digit' Turner *
31c89f2750SDavid 'Digit' Turner * // do this _just_ after opening the fd to connect to a specific
32c89f2750SDavid 'Digit' Turner * // emulator service.
33c89f2750SDavid 'Digit' Turner * const char* msg = "<pipename>";
34c89f2750SDavid 'Digit' Turner * if (write(fd, msg, strlen(msg)+1) < 0) {
35c89f2750SDavid 'Digit' Turner * ... could not connect to <pipename> service
36c89f2750SDavid 'Digit' Turner * close(fd);
37c89f2750SDavid 'Digit' Turner * }
38c89f2750SDavid 'Digit' Turner *
39c89f2750SDavid 'Digit' Turner * // after this, simply read() and write() to communicate with the
40c89f2750SDavid 'Digit' Turner * // service. Exact protocol details left as an exercise to the reader.
41c89f2750SDavid 'Digit' Turner *
42c89f2750SDavid 'Digit' Turner * This driver is very fast because it doesn't copy any data through
43c89f2750SDavid 'Digit' Turner * intermediate buffers, since the emulator is capable of translating
44c89f2750SDavid 'Digit' Turner * guest user addresses into host ones.
45c89f2750SDavid 'Digit' Turner *
46c89f2750SDavid 'Digit' Turner * Note that we must however ensure that each user page involved in the
47c89f2750SDavid 'Digit' Turner * exchange is properly mapped during a transfer.
48c89f2750SDavid 'Digit' Turner */
49c89f2750SDavid 'Digit' Turner
50c89f2750SDavid 'Digit' Turner #include <linux/module.h>
51ac316725SRandy Dunlap #include <linux/mod_devicetable.h>
52c89f2750SDavid 'Digit' Turner #include <linux/interrupt.h>
53c89f2750SDavid 'Digit' Turner #include <linux/kernel.h>
54c89f2750SDavid 'Digit' Turner #include <linux/spinlock.h>
55c89f2750SDavid 'Digit' Turner #include <linux/miscdevice.h>
56c89f2750SDavid 'Digit' Turner #include <linux/platform_device.h>
57c89f2750SDavid 'Digit' Turner #include <linux/poll.h>
58c89f2750SDavid 'Digit' Turner #include <linux/sched.h>
59c89f2750SDavid 'Digit' Turner #include <linux/bitops.h>
60c89f2750SDavid 'Digit' Turner #include <linux/slab.h>
61c89f2750SDavid 'Digit' Turner #include <linux/io.h>
621d427da1SShraddha Barke #include <linux/dma-mapping.h>
632f3be882SChristoffer Dall #include <linux/mm.h>
64d62f324bSJason Hu #include <linux/acpi.h>
65d23069a5SRoman Kiryanov #include <linux/bug.h>
6695577010SRoman Kiryanov #include "goldfish_pipe_qemu.h"
67c89f2750SDavid 'Digit' Turner
68c89f2750SDavid 'Digit' Turner /*
69726ea1a8SJin Qian * Update this when something changes in the driver's behavior so the host
70726ea1a8SJin Qian * can benefit from knowing it
71726ea1a8SJin Qian */
enum {
	/* Version of this guest driver, reported to the host */
	PIPE_DRIVER_VERSION = 2,
	/* NOTE(review): presumably the host device version this driver
	 * targets — confirm against the probe code (not in view)
	 */
	PIPE_CURRENT_DEVICE_VERSION = 2
};

enum {
	/* Max scatter-gather entries in one PIPE_CMD_{READ,WRITE};
	 * sizes the ptrs[]/sizes[] arrays in goldfish_pipe_command
	 */
	MAX_BUFFERS_PER_COMMAND = 336,
	/* Capacity of the per-device signalled_pipe_buffers array */
	MAX_SIGNALLED_PIPES = 64,
	/* Presumably the initial size of goldfish_pipe_dev::pipes —
	 * allocation happens outside this view
	 */
	INITIAL_PIPES_CAPACITY = 64
};
82726ea1a8SJin Qian
struct goldfish_pipe_dev;

/* A per-pipe command structure, shared with the host.
 * NOTE: the layout is an ABI with the emulator — do not reorder
 * or resize fields.
 */
struct goldfish_pipe_command {
	s32 cmd;	/* PipeCmdCode, guest -> host */
	s32 id;		/* pipe id, guest -> host */
	s32 status;	/* command execution status, host -> guest */
	s32 reserved;	/* to pad to 64-bit boundary */
	union {
		/* Parameters for PIPE_CMD_{READ,WRITE} */
		struct {
			/* number of buffers, guest -> host */
			u32 buffers_count;
			/* number of consumed bytes, host -> guest */
			s32 consumed_size;
			/* physical buffer pointers, guest -> host */
			u64 ptrs[MAX_BUFFERS_PER_COMMAND];
			/* buffer sizes, guest -> host */
			u32 sizes[MAX_BUFFERS_PER_COMMAND];
		} rw_params;
	};
};
105726ea1a8SJin Qian
106726ea1a8SJin Qian /* A single signalled pipe information */
/* A single signalled pipe information, written by the host */
struct signalled_pipe_buffer {
	u32 id;		/* pipe id - index into goldfish_pipe_dev::pipes */
	u32 flags;	/* PIPE_WAKE_* wake bits for that pipe */
};

/* Parameters for the PIPE_CMD_OPEN command */
struct open_command_param {
	/* Physical address of the pipe's command buffer, guest -> host */
	u64 command_buffer_ptr;
	/* Maximum rw_params buffer count the guest supports */
	u32 rw_params_max_count;
};

/* Device-level set of buffers shared with the host */
struct goldfish_pipe_dev_buffers {
	struct open_command_param open_command_params;
	struct signalled_pipe_buffer
		signalled_pipe_buffers[MAX_SIGNALLED_PIPES];
};
124726ea1a8SJin Qian
125726ea1a8SJin Qian /* This data type models a given pipe instance */
/* This data type models a given pipe instance */
struct goldfish_pipe {
	/* pipe ID - index into goldfish_pipe_dev::pipes array */
	u32 id;

	/* The wake flags pipe is waiting for
	 * Note: not protected with any lock, uses atomic operations
	 * and barriers to make it thread-safe.
	 */
	unsigned long flags;

	/* wake flags host have signalled,
	 * - protected by goldfish_pipe_dev::lock
	 */
	unsigned long signalled_flags;

	/* A pointer to command buffer shared with the host */
	struct goldfish_pipe_command *command_buffer;

	/* doubly linked list of signalled pipes, protected by
	 * goldfish_pipe_dev::lock
	 */
	struct goldfish_pipe *prev_signalled;
	struct goldfish_pipe *next_signalled;

	/*
	 * A pipe's own lock. Protects the following:
	 * - *command_buffer - makes sure a command can safely write its
	 * parameters to the host and read the results back.
	 */
	struct mutex lock;

	/* A wake queue for sleeping until host signals an event */
	wait_queue_head_t wake_queue;

	/* Pointer to the parent goldfish_pipe_dev instance */
	struct goldfish_pipe_dev *dev;

	/* A buffer of pinned user pages, too large to fit into a
	 * stack frame; protected by the pipe's lock
	 */
	struct page *pages[MAX_BUFFERS_PER_COMMAND];
};
166726ea1a8SJin Qian
167c89f2750SDavid 'Digit' Turner /* The global driver data. Holds a reference to the i/o page used to
168c89f2750SDavid 'Digit' Turner * communicate with the emulator, and a wake queue for blocked tasks
169c89f2750SDavid 'Digit' Turner * waiting to be awoken.
170c89f2750SDavid 'Digit' Turner */
/* The global driver data. Holds a reference to the i/o page used to
 * communicate with the emulator, and a wake queue for blocked tasks
 * waiting to be awoken.
 */
struct goldfish_pipe_dev {
	/* A magic number to check if this is an instance of this struct */
	void *magic;

	/*
	 * Global device spinlock. Protects the following members:
	 * - pipes, pipes_capacity
	 * - [*pipes, *pipes + pipes_capacity) - array data
	 * - first_signalled_pipe,
	 * goldfish_pipe::prev_signalled,
	 * goldfish_pipe::next_signalled,
	 * goldfish_pipe::signalled_flags - all signalled-related fields,
	 * in all allocated pipes
	 * - open_command_params - PIPE_CMD_OPEN-related buffers
	 *
	 * It looks like a lot of different fields, but the trick is that
	 * the only operation that happens often is the signalled pipes array
	 * manipulation. That's why it's OK for now to keep the rest of the
	 * fields under the same lock. If we notice too much contention because
	 * of PIPE_CMD_OPEN, then we should add a separate lock there.
	 */
	spinlock_t lock;

	/*
	 * Array of the pipes of |pipes_capacity| elements,
	 * indexed by goldfish_pipe::id
	 */
	struct goldfish_pipe **pipes;
	u32 pipes_capacity;

	/* Pointers to the buffers host uses for interaction with this driver */
	struct goldfish_pipe_dev_buffers *buffers;

	/* Head of a doubly linked list of signalled pipes */
	struct goldfish_pipe *first_signalled_pipe;

	/* ptr to platform device's device struct */
	struct device *pdev_dev;

	/* Some device-specific data */
	int irq;	/* IRQ line number */
	int version;	/* device version read from the hardware */
	unsigned char __iomem *base;	/* mapped PIPE_REG_* register window */

	struct miscdevice miscdev;	/* the /dev node exposed to userspace */
};
217c89f2750SDavid 'Digit' Turner
goldfish_pipe_cmd_locked(struct goldfish_pipe * pipe,enum PipeCmdCode cmd)21892c320b9SRoman Kiryanov static int goldfish_pipe_cmd_locked(struct goldfish_pipe *pipe,
21992c320b9SRoman Kiryanov enum PipeCmdCode cmd)
220c89f2750SDavid 'Digit' Turner {
221726ea1a8SJin Qian pipe->command_buffer->cmd = cmd;
222726ea1a8SJin Qian /* failure by default */
223726ea1a8SJin Qian pipe->command_buffer->status = PIPE_ERROR_INVAL;
224726ea1a8SJin Qian writel(pipe->id, pipe->dev->base + PIPE_REG_CMD);
225726ea1a8SJin Qian return pipe->command_buffer->status;
226726ea1a8SJin Qian }
227c89f2750SDavid 'Digit' Turner
goldfish_pipe_cmd(struct goldfish_pipe * pipe,enum PipeCmdCode cmd)22892c320b9SRoman Kiryanov static int goldfish_pipe_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
229726ea1a8SJin Qian {
230726ea1a8SJin Qian int status;
231726ea1a8SJin Qian
232726ea1a8SJin Qian if (mutex_lock_interruptible(&pipe->lock))
233726ea1a8SJin Qian return PIPE_ERROR_IO;
23492c320b9SRoman Kiryanov status = goldfish_pipe_cmd_locked(pipe, cmd);
235726ea1a8SJin Qian mutex_unlock(&pipe->lock);
236c89f2750SDavid 'Digit' Turner return status;
237c89f2750SDavid 'Digit' Turner }
238c89f2750SDavid 'Digit' Turner
239726ea1a8SJin Qian /*
240726ea1a8SJin Qian * This function converts an error code returned by the emulator through
241c89f2750SDavid 'Digit' Turner * the PIPE_REG_STATUS i/o register into a valid negative errno value.
242c89f2750SDavid 'Digit' Turner */
goldfish_pipe_error_convert(int status)243c89f2750SDavid 'Digit' Turner static int goldfish_pipe_error_convert(int status)
244c89f2750SDavid 'Digit' Turner {
245c89f2750SDavid 'Digit' Turner switch (status) {
246c89f2750SDavid 'Digit' Turner case PIPE_ERROR_AGAIN:
247c89f2750SDavid 'Digit' Turner return -EAGAIN;
248c89f2750SDavid 'Digit' Turner case PIPE_ERROR_NOMEM:
249c89f2750SDavid 'Digit' Turner return -ENOMEM;
250c89f2750SDavid 'Digit' Turner case PIPE_ERROR_IO:
251c89f2750SDavid 'Digit' Turner return -EIO;
252c89f2750SDavid 'Digit' Turner default:
253c89f2750SDavid 'Digit' Turner return -EINVAL;
254c89f2750SDavid 'Digit' Turner }
255c89f2750SDavid 'Digit' Turner }
256c89f2750SDavid 'Digit' Turner
goldfish_pin_pages(unsigned long first_page,unsigned long last_page,unsigned int last_page_size,int is_write,struct page * pages[MAX_BUFFERS_PER_COMMAND],unsigned int * iter_last_page_size)2571023369cSJohn Hubbard static int goldfish_pin_pages(unsigned long first_page,
25852bcc7d9SRoman Kiryanov unsigned long last_page,
25952bcc7d9SRoman Kiryanov unsigned int last_page_size,
26052bcc7d9SRoman Kiryanov int is_write,
261726ea1a8SJin Qian struct page *pages[MAX_BUFFERS_PER_COMMAND],
262726ea1a8SJin Qian unsigned int *iter_last_page_size)
263c89f2750SDavid 'Digit' Turner {
264726ea1a8SJin Qian int ret;
265726ea1a8SJin Qian int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;
266c89f2750SDavid 'Digit' Turner
267726ea1a8SJin Qian if (requested_pages > MAX_BUFFERS_PER_COMMAND) {
268726ea1a8SJin Qian requested_pages = MAX_BUFFERS_PER_COMMAND;
269726ea1a8SJin Qian *iter_last_page_size = PAGE_SIZE;
270726ea1a8SJin Qian } else {
271726ea1a8SJin Qian *iter_last_page_size = last_page_size;
272c89f2750SDavid 'Digit' Turner }
273c89f2750SDavid 'Digit' Turner
27457459435SJohn Hubbard ret = pin_user_pages_fast(first_page, requested_pages,
27573b0140bSIra Weiny !is_write ? FOLL_WRITE : 0,
27652bcc7d9SRoman Kiryanov pages);
277726ea1a8SJin Qian if (ret <= 0)
278c89f2750SDavid 'Digit' Turner return -EFAULT;
279726ea1a8SJin Qian if (ret < requested_pages)
280726ea1a8SJin Qian *iter_last_page_size = PAGE_SIZE;
281c89f2750SDavid 'Digit' Turner
2821d1021a0SRoman Kiryanov return ret;
283726ea1a8SJin Qian }
284726ea1a8SJin Qian
285726ea1a8SJin Qian /* Populate the call parameters, merging adjacent pages together */
populate_rw_params(struct page ** pages,int pages_count,unsigned long address,unsigned long address_end,unsigned long first_page,unsigned long last_page,unsigned int iter_last_page_size,int is_write,struct goldfish_pipe_command * command)28652bcc7d9SRoman Kiryanov static void populate_rw_params(struct page **pages,
28752bcc7d9SRoman Kiryanov int pages_count,
28852bcc7d9SRoman Kiryanov unsigned long address,
28952bcc7d9SRoman Kiryanov unsigned long address_end,
29052bcc7d9SRoman Kiryanov unsigned long first_page,
29152bcc7d9SRoman Kiryanov unsigned long last_page,
29252bcc7d9SRoman Kiryanov unsigned int iter_last_page_size,
29352bcc7d9SRoman Kiryanov int is_write,
294726ea1a8SJin Qian struct goldfish_pipe_command *command)
295726ea1a8SJin Qian {
296726ea1a8SJin Qian /*
297726ea1a8SJin Qian * Process the first page separately - it's the only page that
298726ea1a8SJin Qian * needs special handling for its start address.
299726ea1a8SJin Qian */
300726ea1a8SJin Qian unsigned long xaddr = page_to_phys(pages[0]);
301726ea1a8SJin Qian unsigned long xaddr_prev = xaddr;
302726ea1a8SJin Qian int buffer_idx = 0;
303726ea1a8SJin Qian int i = 1;
304726ea1a8SJin Qian int size_on_page = first_page == last_page
305726ea1a8SJin Qian ? (int)(address_end - address)
306726ea1a8SJin Qian : (PAGE_SIZE - (address & ~PAGE_MASK));
307726ea1a8SJin Qian command->rw_params.ptrs[0] = (u64)(xaddr | (address & ~PAGE_MASK));
308726ea1a8SJin Qian command->rw_params.sizes[0] = size_on_page;
309726ea1a8SJin Qian for (; i < pages_count; ++i) {
310726ea1a8SJin Qian xaddr = page_to_phys(pages[i]);
311726ea1a8SJin Qian size_on_page = (i == pages_count - 1) ?
312726ea1a8SJin Qian iter_last_page_size : PAGE_SIZE;
313726ea1a8SJin Qian if (xaddr == xaddr_prev + PAGE_SIZE) {
314726ea1a8SJin Qian command->rw_params.sizes[buffer_idx] += size_on_page;
315726ea1a8SJin Qian } else {
316726ea1a8SJin Qian ++buffer_idx;
317726ea1a8SJin Qian command->rw_params.ptrs[buffer_idx] = (u64)xaddr;
318726ea1a8SJin Qian command->rw_params.sizes[buffer_idx] = size_on_page;
319726ea1a8SJin Qian }
320726ea1a8SJin Qian xaddr_prev = xaddr;
321726ea1a8SJin Qian }
322726ea1a8SJin Qian command->rw_params.buffers_count = buffer_idx + 1;
323726ea1a8SJin Qian }
324726ea1a8SJin Qian
/* Perform one PIPE_CMD_READ/WRITE transaction covering as much of
 * [address, address_end) as fits into a single command.
 * On success returns 0 and reports the host status through *status and
 * the number of transferred bytes through *consumed_size.
 */
static int transfer_max_buffers(struct goldfish_pipe *pipe,
				unsigned long address,
				unsigned long address_end,
				int is_write,
				unsigned long last_page,
				unsigned int last_page_size,
				s32 *consumed_size,
				int *status)
{
	unsigned long first_page = address & PAGE_MASK;
	unsigned int iter_last_page_size;
	int pages_count;
	int ret = 0;

	/* Serialize access to the pipe command buffers */
	if (mutex_lock_interruptible(&pipe->lock))
		return -ERESTARTSYS;

	pages_count = goldfish_pin_pages(first_page, last_page,
					 last_page_size, is_write,
					 pipe->pages, &iter_last_page_size);
	if (pages_count < 0) {
		ret = pages_count;
		goto out_unlock;
	}

	populate_rw_params(pipe->pages, pages_count, address, address_end,
			   first_page, last_page, iter_last_page_size,
			   is_write, pipe->command_buffer);

	/* Transfer the data */
	*status = goldfish_pipe_cmd_locked(pipe,
			is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);

	*consumed_size = pipe->command_buffer->rw_params.consumed_size;

	/* Pages a read consumed into are dirty and must be marked so */
	unpin_user_pages_dirty_lock(pipe->pages, pages_count,
				    !is_write && *consumed_size > 0);

out_unlock:
	mutex_unlock(&pipe->lock);
	return ret;
}
366c89f2750SDavid 'Digit' Turner
wait_for_host_signal(struct goldfish_pipe * pipe,int is_write)367726ea1a8SJin Qian static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
368726ea1a8SJin Qian {
36961b38f02SRoman Kiryanov u32 wake_bit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
370c89f2750SDavid 'Digit' Turner
37161b38f02SRoman Kiryanov set_bit(wake_bit, &pipe->flags);
372c89f2750SDavid 'Digit' Turner
373c89f2750SDavid 'Digit' Turner /* Tell the emulator we're going to wait for a wake event */
37492c320b9SRoman Kiryanov goldfish_pipe_cmd(pipe,
375726ea1a8SJin Qian is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);
376c89f2750SDavid 'Digit' Turner
37761b38f02SRoman Kiryanov while (test_bit(wake_bit, &pipe->flags)) {
378562a74deSRoman Kiryanov if (wait_event_interruptible(pipe->wake_queue,
37961b38f02SRoman Kiryanov !test_bit(wake_bit, &pipe->flags)))
380c89f2750SDavid 'Digit' Turner return -ERESTARTSYS;
381c89f2750SDavid 'Digit' Turner
382c89f2750SDavid 'Digit' Turner if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
383c89f2750SDavid 'Digit' Turner return -EIO;
384c89f2750SDavid 'Digit' Turner }
385c89f2750SDavid 'Digit' Turner
386726ea1a8SJin Qian return 0;
387c89f2750SDavid 'Digit' Turner }
3882f3be882SChristoffer Dall
/* Common implementation of read(2)/write(2) on a pipe.
 * Splits the user buffer into command-sized chunks, transferring each one
 * via transfer_max_buffers(), and blocks (unless O_NONBLOCK) when the host
 * reports PIPE_ERROR_AGAIN. Returns the number of bytes transferred, 0 on
 * EOF, or a negative errno.
 */
static ssize_t goldfish_pipe_read_write(struct file *filp,
					char __user *buffer,
					size_t bufflen,
					int is_write)
{
	struct goldfish_pipe *pipe = filp->private_data;
	int count = 0, ret = -EINVAL;
	unsigned long address, address_end, last_page;
	unsigned int last_page_size;

	/* If the emulator already closed the pipe, no need to go further */
	if (unlikely(test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)))
		return -EIO;
	/* Null reads or writes succeed */
	if (unlikely(bufflen == 0))
		return 0;
	/* Check the buffer range for access */
	if (unlikely(!access_ok(buffer, bufflen)))
		return -EFAULT;

	address = (unsigned long)buffer;
	address_end = address + bufflen;
	/* Page-aligned address of, and bytes used on, the final page */
	last_page = (address_end - 1) & PAGE_MASK;
	last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1;

	while (address < address_end) {
		s32 consumed_size;
		int status;

		ret = transfer_max_buffers(pipe, address, address_end, is_write,
					   last_page, last_page_size,
					   &consumed_size, &status);
		if (ret < 0)
			break;

		if (consumed_size > 0) {
			/* No matter what's the status, we've transferred
			 * something.
			 */
			count += consumed_size;
			address += consumed_size;
		}
		if (status > 0)
			continue;
		if (status == 0) {
			/* EOF */
			ret = 0;
			break;
		}
		if (count > 0) {
			/*
			 * An error occurred, but we already transferred
			 * something on one of the previous iterations.
			 * Just return what we already copied and log this
			 * err.
			 */
			if (status != PIPE_ERROR_AGAIN)
				dev_err_ratelimited(pipe->dev->pdev_dev,
					"backend error %d on %s\n",
					status, is_write ? "write" : "read");
			break;
		}

		/*
		 * If the error is not PIPE_ERROR_AGAIN, or if we are in
		 * non-blocking mode, just return the error code.
		 */
		if (status != PIPE_ERROR_AGAIN ||
			(filp->f_flags & O_NONBLOCK) != 0) {
			ret = goldfish_pipe_error_convert(status);
			break;
		}

		/* PIPE_ERROR_AGAIN in blocking mode: sleep and retry */
		status = wait_for_host_signal(pipe, is_write);
		if (status < 0)
			return status;
	}

	if (count > 0)
		return count;
	return ret;
}
471c89f2750SDavid 'Digit' Turner
goldfish_pipe_read(struct file * filp,char __user * buffer,size_t bufflen,loff_t * ppos)472c89f2750SDavid 'Digit' Turner static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
473c89f2750SDavid 'Digit' Turner size_t bufflen, loff_t *ppos)
474c89f2750SDavid 'Digit' Turner {
475726ea1a8SJin Qian return goldfish_pipe_read_write(filp, buffer, bufflen,
476726ea1a8SJin Qian /* is_write */ 0);
477c89f2750SDavid 'Digit' Turner }
478c89f2750SDavid 'Digit' Turner
goldfish_pipe_write(struct file * filp,const char __user * buffer,size_t bufflen,loff_t * ppos)479c89f2750SDavid 'Digit' Turner static ssize_t goldfish_pipe_write(struct file *filp,
480c89f2750SDavid 'Digit' Turner const char __user *buffer, size_t bufflen,
481c89f2750SDavid 'Digit' Turner loff_t *ppos)
482c89f2750SDavid 'Digit' Turner {
48352bcc7d9SRoman Kiryanov /* cast away the const */
48452bcc7d9SRoman Kiryanov char __user *no_const_buffer = (char __user *)buffer;
48552bcc7d9SRoman Kiryanov
48652bcc7d9SRoman Kiryanov return goldfish_pipe_read_write(filp, no_const_buffer, bufflen,
487726ea1a8SJin Qian /* is_write */ 1);
488c89f2750SDavid 'Digit' Turner }
489c89f2750SDavid 'Digit' Turner
goldfish_pipe_poll(struct file * filp,poll_table * wait)490afc9a42bSAl Viro static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
491c89f2750SDavid 'Digit' Turner {
492c89f2750SDavid 'Digit' Turner struct goldfish_pipe *pipe = filp->private_data;
493afc9a42bSAl Viro __poll_t mask = 0;
494c89f2750SDavid 'Digit' Turner int status;
495c89f2750SDavid 'Digit' Turner
496c89f2750SDavid 'Digit' Turner poll_wait(filp, &pipe->wake_queue, wait);
497c89f2750SDavid 'Digit' Turner
49892c320b9SRoman Kiryanov status = goldfish_pipe_cmd(pipe, PIPE_CMD_POLL);
499726ea1a8SJin Qian if (status < 0)
500726ea1a8SJin Qian return -ERESTARTSYS;
501c89f2750SDavid 'Digit' Turner
502c89f2750SDavid 'Digit' Turner if (status & PIPE_POLL_IN)
503a9a08845SLinus Torvalds mask |= EPOLLIN | EPOLLRDNORM;
504c89f2750SDavid 'Digit' Turner if (status & PIPE_POLL_OUT)
505a9a08845SLinus Torvalds mask |= EPOLLOUT | EPOLLWRNORM;
506c89f2750SDavid 'Digit' Turner if (status & PIPE_POLL_HUP)
507a9a08845SLinus Torvalds mask |= EPOLLHUP;
508c89f2750SDavid 'Digit' Turner if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
509a9a08845SLinus Torvalds mask |= EPOLLERR;
510c89f2750SDavid 'Digit' Turner
511c89f2750SDavid 'Digit' Turner return mask;
512c89f2750SDavid 'Digit' Turner }
513c89f2750SDavid 'Digit' Turner
/* Record host-signalled wake |flags| for pipe |id| and push the pipe onto
 * the device's signalled list if it isn't there yet.
 * Caller must hold dev->lock.
 */
static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
				       u32 id, u32 flags)
{
	struct goldfish_pipe *pipe;
	struct goldfish_pipe *head;

	if (WARN_ON(id >= dev->pipes_capacity))
		return;

	pipe = dev->pipes[id];
	if (!pipe)
		return;

	pipe->signalled_flags |= flags;

	head = dev->first_signalled_pipe;
	if (pipe->prev_signalled || pipe->next_signalled || head == pipe)
		return;	/* already linked into the list */

	/* Link in at the head of the list */
	pipe->next_signalled = head;
	if (head)
		head->prev_signalled = pipe;
	dev->first_signalled_pipe = pipe;
}
535726ea1a8SJin Qian
signalled_pipes_remove_locked(struct goldfish_pipe_dev * dev,struct goldfish_pipe * pipe)536726ea1a8SJin Qian static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
53753bdf668SRoman Kiryanov struct goldfish_pipe *pipe)
53853bdf668SRoman Kiryanov {
539726ea1a8SJin Qian if (pipe->prev_signalled)
540726ea1a8SJin Qian pipe->prev_signalled->next_signalled = pipe->next_signalled;
541726ea1a8SJin Qian if (pipe->next_signalled)
542726ea1a8SJin Qian pipe->next_signalled->prev_signalled = pipe->prev_signalled;
543726ea1a8SJin Qian if (pipe == dev->first_signalled_pipe)
544726ea1a8SJin Qian dev->first_signalled_pipe = pipe->next_signalled;
545726ea1a8SJin Qian pipe->prev_signalled = NULL;
546726ea1a8SJin Qian pipe->next_signalled = NULL;
547726ea1a8SJin Qian }
548726ea1a8SJin Qian
signalled_pipes_pop_front(struct goldfish_pipe_dev * dev,int * wakes)549726ea1a8SJin Qian static struct goldfish_pipe *signalled_pipes_pop_front(
550726ea1a8SJin Qian struct goldfish_pipe_dev *dev, int *wakes)
551726ea1a8SJin Qian {
552726ea1a8SJin Qian struct goldfish_pipe *pipe;
553726ea1a8SJin Qian unsigned long flags;
554726ea1a8SJin Qian
555726ea1a8SJin Qian spin_lock_irqsave(&dev->lock, flags);
556726ea1a8SJin Qian
557726ea1a8SJin Qian pipe = dev->first_signalled_pipe;
558726ea1a8SJin Qian if (pipe) {
559726ea1a8SJin Qian *wakes = pipe->signalled_flags;
560726ea1a8SJin Qian pipe->signalled_flags = 0;
561726ea1a8SJin Qian /*
562726ea1a8SJin Qian * This is an optimized version of
563726ea1a8SJin Qian * signalled_pipes_remove_locked()
564726ea1a8SJin Qian * - We want to make it as fast as possible to
565726ea1a8SJin Qian * wake the sleeping pipe operations faster.
566726ea1a8SJin Qian */
567726ea1a8SJin Qian dev->first_signalled_pipe = pipe->next_signalled;
568726ea1a8SJin Qian if (dev->first_signalled_pipe)
569726ea1a8SJin Qian dev->first_signalled_pipe->prev_signalled = NULL;
570726ea1a8SJin Qian pipe->next_signalled = NULL;
571726ea1a8SJin Qian }
572726ea1a8SJin Qian
573726ea1a8SJin Qian spin_unlock_irqrestore(&dev->lock, flags);
574726ea1a8SJin Qian return pipe;
575726ea1a8SJin Qian }
576726ea1a8SJin Qian
goldfish_interrupt_task(int irq,void * dev_addr)5772c104a46SDavidlohr Bueso static irqreturn_t goldfish_interrupt_task(int irq, void *dev_addr)
578726ea1a8SJin Qian {
579726ea1a8SJin Qian /* Iterate over the signalled pipes and wake them one by one */
5802c104a46SDavidlohr Bueso struct goldfish_pipe_dev *dev = dev_addr;
581726ea1a8SJin Qian struct goldfish_pipe *pipe;
582726ea1a8SJin Qian int wakes;
583726ea1a8SJin Qian
584c394cc3bSRoman Kiryanov while ((pipe = signalled_pipes_pop_front(dev, &wakes)) != NULL) {
585726ea1a8SJin Qian if (wakes & PIPE_WAKE_CLOSED) {
586726ea1a8SJin Qian pipe->flags = 1 << BIT_CLOSED_ON_HOST;
587726ea1a8SJin Qian } else {
588c89f2750SDavid 'Digit' Turner if (wakes & PIPE_WAKE_READ)
589c89f2750SDavid 'Digit' Turner clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
590c89f2750SDavid 'Digit' Turner if (wakes & PIPE_WAKE_WRITE)
591c89f2750SDavid 'Digit' Turner clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
592c89f2750SDavid 'Digit' Turner }
593726ea1a8SJin Qian /*
594726ea1a8SJin Qian * wake_up_interruptible() implies a write barrier, so don't
595726ea1a8SJin Qian * explicitly add another one here.
596726ea1a8SJin Qian */
597726ea1a8SJin Qian wake_up_interruptible(&pipe->wake_queue);
598726ea1a8SJin Qian }
5992c104a46SDavidlohr Bueso return IRQ_HANDLED;
600726ea1a8SJin Qian }
601c89f2750SDavid 'Digit' Turner
60208360e26SRoman Kiryanov static void goldfish_pipe_device_deinit(struct platform_device *pdev,
60308360e26SRoman Kiryanov struct goldfish_pipe_dev *dev);
60408360e26SRoman Kiryanov
605726ea1a8SJin Qian /*
6062c104a46SDavidlohr Bueso * The general idea of the (threaded) interrupt handling:
607726ea1a8SJin Qian *
608726ea1a8SJin Qian * 1. device raises an interrupt if there's at least one signalled pipe
609726ea1a8SJin Qian * 2. IRQ handler reads the signalled pipes and their count from the device
610726ea1a8SJin Qian * 3. device writes them into a shared buffer and returns the count
611726ea1a8SJin Qian * it only resets the IRQ if it has returned all signalled pipes,
612726ea1a8SJin Qian * otherwise it leaves it raised, so IRQ handler will be called
613726ea1a8SJin Qian * again for the next chunk
614726ea1a8SJin Qian * 4. IRQ handler adds all returned pipes to the device's signalled pipes list
6152c104a46SDavidlohr Bueso * 5. IRQ handler defers processing the signalled pipes from the list in a
6162c104a46SDavidlohr Bueso * separate context
617726ea1a8SJin Qian */
/*
 * Hard-IRQ half: harvest the signalled pipes the device wrote into the
 * shared buffer, queue them on the device's list, and defer the actual
 * wake-ups to goldfish_interrupt_task() (the threaded half).
 */
static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
	u32 count;
	u32 i;
	unsigned long flags;
	struct goldfish_pipe_dev *dev = dev_id;

	/* The IRQ line is shared: verify this dev_id really belongs to us */
	if (dev->magic != &goldfish_pipe_device_deinit)
		return IRQ_NONE;

	/* Request the signalled pipes from the device */
	spin_lock_irqsave(&dev->lock, flags);

	/*
	 * Reading this register makes the device fill
	 * dev->buffers->signalled_pipe_buffers and return how many entries
	 * it wrote; zero means the interrupt was not for us.
	 */
	count = readl(dev->base + PIPE_REG_GET_SIGNALLED);
	if (count == 0) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return IRQ_NONE;
	}
	/* The shared buffer only holds MAX_SIGNALLED_PIPES entries */
	if (count > MAX_SIGNALLED_PIPES)
		count = MAX_SIGNALLED_PIPES;

	for (i = 0; i < count; ++i)
		signalled_pipes_add_locked(dev,
			dev->buffers->signalled_pipe_buffers[i].id,
			dev->buffers->signalled_pipe_buffers[i].flags);

	spin_unlock_irqrestore(&dev->lock, flags);

	/* Processing continues in goldfish_interrupt_task() */
	return IRQ_WAKE_THREAD;
}
648726ea1a8SJin Qian
get_free_pipe_id_locked(struct goldfish_pipe_dev * dev)649726ea1a8SJin Qian static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
650726ea1a8SJin Qian {
651726ea1a8SJin Qian int id;
652726ea1a8SJin Qian
653726ea1a8SJin Qian for (id = 0; id < dev->pipes_capacity; ++id)
654726ea1a8SJin Qian if (!dev->pipes[id])
655726ea1a8SJin Qian return id;
656726ea1a8SJin Qian
657726ea1a8SJin Qian {
65884ae527aSRoman Kiryanov /* Reallocate the array.
65984ae527aSRoman Kiryanov * Since get_free_pipe_id_locked runs with interrupts disabled,
66084ae527aSRoman Kiryanov * we don't want to make calls that could lead to sleep.
66184ae527aSRoman Kiryanov */
662726ea1a8SJin Qian u32 new_capacity = 2 * dev->pipes_capacity;
663726ea1a8SJin Qian struct goldfish_pipe **pipes =
6643eff8ecdSWei Yongjun kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
665726ea1a8SJin Qian if (!pipes)
666726ea1a8SJin Qian return -ENOMEM;
667726ea1a8SJin Qian memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
668726ea1a8SJin Qian kfree(dev->pipes);
669726ea1a8SJin Qian dev->pipes = pipes;
670726ea1a8SJin Qian id = dev->pipes_capacity;
671726ea1a8SJin Qian dev->pipes_capacity = new_capacity;
672726ea1a8SJin Qian }
673726ea1a8SJin Qian return id;
674c89f2750SDavid 'Digit' Turner }
675c89f2750SDavid 'Digit' Turner
67608360e26SRoman Kiryanov /* A helper function to get the instance of goldfish_pipe_dev from file */
to_goldfish_pipe_dev(struct file * file)67708360e26SRoman Kiryanov static struct goldfish_pipe_dev *to_goldfish_pipe_dev(struct file *file)
67808360e26SRoman Kiryanov {
67908360e26SRoman Kiryanov struct miscdevice *miscdev = file->private_data;
68008360e26SRoman Kiryanov
68108360e26SRoman Kiryanov return container_of(miscdev, struct goldfish_pipe_dev, miscdev);
68208360e26SRoman Kiryanov }
68308360e26SRoman Kiryanov
/**
 * goldfish_pipe_open - open a channel to the AVD
 * @inode: inode of device
 * @file: file struct of opener
 *
 * Create a new pipe link between the emulator and the use application.
 * Each new request produces a new pipe.
 *
 * Note: we use the pipe ID as a mux. All goldfish emulations are 32bit
 * right now so this is fine. A move to 64bit will need this addressing
 *
 * Return: 0 on success, or a negative errno (-ENOMEM on allocation
 * failure, or the error returned by the host for PIPE_CMD_OPEN).
 */
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
	struct goldfish_pipe_dev *dev = to_goldfish_pipe_dev(file);
	unsigned long flags;
	int id;
	int status;

	/* Allocate new pipe kernel object */
	struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);

	if (!pipe)
		return -ENOMEM;

	pipe->dev = dev;
	mutex_init(&pipe->lock);
	init_waitqueue_head(&pipe->wake_queue);

	/*
	 * Command buffer needs to be allocated on its own page to make sure
	 * it is physically contiguous in host's address space.
	 */
	BUILD_BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
	pipe->command_buffer =
		(struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
	if (!pipe->command_buffer) {
		status = -ENOMEM;
		goto err_pipe;
	}

	spin_lock_irqsave(&dev->lock, flags);

	/* May grow dev->pipes; runs atomically under dev->lock */
	id = get_free_pipe_id_locked(dev);
	if (id < 0) {
		status = id;
		goto err_id_locked;
	}

	dev->pipes[id] = pipe;
	pipe->id = id;
	pipe->command_buffer->id = id;

	/* Now tell the emulator we're opening a new pipe. */
	dev->buffers->open_command_params.rw_params_max_count =
		MAX_BUFFERS_PER_COMMAND;
	dev->buffers->open_command_params.command_buffer_ptr =
		(u64)(unsigned long)__pa(pipe->command_buffer);
	status = goldfish_pipe_cmd_locked(pipe, PIPE_CMD_OPEN);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (status < 0)
		goto err_cmd;
	/* All is done, save the pipe into the file's private data field */
	file->private_data = pipe;
	return 0;

err_cmd:
	/* The host rejected the pipe: unpublish it before freeing */
	spin_lock_irqsave(&dev->lock, flags);
	dev->pipes[id] = NULL;
err_id_locked:
	spin_unlock_irqrestore(&dev->lock, flags);
	free_page((unsigned long)pipe->command_buffer);
err_pipe:
	kfree(pipe);
	return status;
}
759c89f2750SDavid 'Digit' Turner
goldfish_pipe_release(struct inode * inode,struct file * filp)760c89f2750SDavid 'Digit' Turner static int goldfish_pipe_release(struct inode *inode, struct file *filp)
761c89f2750SDavid 'Digit' Turner {
762726ea1a8SJin Qian unsigned long flags;
763c89f2750SDavid 'Digit' Turner struct goldfish_pipe *pipe = filp->private_data;
764726ea1a8SJin Qian struct goldfish_pipe_dev *dev = pipe->dev;
765c89f2750SDavid 'Digit' Turner
766c89f2750SDavid 'Digit' Turner /* The guest is closing the channel, so tell the emulator right now */
76792c320b9SRoman Kiryanov goldfish_pipe_cmd(pipe, PIPE_CMD_CLOSE);
768726ea1a8SJin Qian
769726ea1a8SJin Qian spin_lock_irqsave(&dev->lock, flags);
770726ea1a8SJin Qian dev->pipes[pipe->id] = NULL;
771726ea1a8SJin Qian signalled_pipes_remove_locked(dev, pipe);
772726ea1a8SJin Qian spin_unlock_irqrestore(&dev->lock, flags);
773726ea1a8SJin Qian
774c89f2750SDavid 'Digit' Turner filp->private_data = NULL;
775726ea1a8SJin Qian free_page((unsigned long)pipe->command_buffer);
776726ea1a8SJin Qian kfree(pipe);
777c89f2750SDavid 'Digit' Turner return 0;
778c89f2750SDavid 'Digit' Turner }
779c89f2750SDavid 'Digit' Turner
/* File operations exposed through the goldfish_pipe misc device node */
static const struct file_operations goldfish_pipe_fops = {
	.owner = THIS_MODULE,
	.read = goldfish_pipe_read,
	.write = goldfish_pipe_write,
	.poll = goldfish_pipe_poll,
	.open = goldfish_pipe_open,
	.release = goldfish_pipe_release,
};
788c89f2750SDavid 'Digit' Turner
init_miscdevice(struct miscdevice * miscdev)78943c2cc28SRoman Kiryanov static void init_miscdevice(struct miscdevice *miscdev)
79043c2cc28SRoman Kiryanov {
79143c2cc28SRoman Kiryanov memset(miscdev, 0, sizeof(*miscdev));
79243c2cc28SRoman Kiryanov
79343c2cc28SRoman Kiryanov miscdev->minor = MISC_DYNAMIC_MINOR;
79443c2cc28SRoman Kiryanov miscdev->name = "goldfish_pipe";
79543c2cc28SRoman Kiryanov miscdev->fops = &goldfish_pipe_fops;
79643c2cc28SRoman Kiryanov }
797c89f2750SDavid 'Digit' Turner
/*
 * Write the physical address of @addr into a pair of 32-bit device
 * registers, high half first.
 */
static void write_pa_addr(void *addr, void __iomem *portl, void __iomem *porth)
{
	unsigned long phys = __pa(addr);

	writel(upper_32_bits(phys), porth);
	writel(lower_32_bits(phys), portl);
}
805610a72b7SRoman Kiryanov
goldfish_pipe_device_init(struct platform_device * pdev,struct goldfish_pipe_dev * dev)80608360e26SRoman Kiryanov static int goldfish_pipe_device_init(struct platform_device *pdev,
80708360e26SRoman Kiryanov struct goldfish_pipe_dev *dev)
808726ea1a8SJin Qian {
809c394cc3bSRoman Kiryanov int err;
810c394cc3bSRoman Kiryanov
8112c104a46SDavidlohr Bueso err = devm_request_threaded_irq(&pdev->dev, dev->irq,
812726ea1a8SJin Qian goldfish_pipe_interrupt,
8132c104a46SDavidlohr Bueso goldfish_interrupt_task,
814726ea1a8SJin Qian IRQF_SHARED, "goldfish_pipe", dev);
815726ea1a8SJin Qian if (err) {
816726ea1a8SJin Qian dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
817726ea1a8SJin Qian return err;
818726ea1a8SJin Qian }
819726ea1a8SJin Qian
82043c2cc28SRoman Kiryanov init_miscdevice(&dev->miscdev);
82143c2cc28SRoman Kiryanov err = misc_register(&dev->miscdev);
822726ea1a8SJin Qian if (err) {
823726ea1a8SJin Qian dev_err(&pdev->dev, "unable to register v2 device\n");
824726ea1a8SJin Qian return err;
825726ea1a8SJin Qian }
826726ea1a8SJin Qian
82725b97d57SRoman Kiryanov dev->pdev_dev = &pdev->dev;
828726ea1a8SJin Qian dev->first_signalled_pipe = NULL;
829726ea1a8SJin Qian dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
830726ea1a8SJin Qian dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
831726ea1a8SJin Qian GFP_KERNEL);
83260a6e523SRoman Kiryanov if (!dev->pipes) {
83360a6e523SRoman Kiryanov misc_deregister(&dev->miscdev);
834726ea1a8SJin Qian return -ENOMEM;
83560a6e523SRoman Kiryanov }
836726ea1a8SJin Qian
837726ea1a8SJin Qian /*
838726ea1a8SJin Qian * We're going to pass two buffers, open_command_params and
839726ea1a8SJin Qian * signalled_pipe_buffers, to the host. This means each of those buffers
840726ea1a8SJin Qian * needs to be contained in a single physical page. The easiest choice
841726ea1a8SJin Qian * is to just allocate a page and place the buffers in it.
842726ea1a8SJin Qian */
843d23069a5SRoman Kiryanov BUILD_BUG_ON(sizeof(struct goldfish_pipe_dev_buffers) > PAGE_SIZE);
8442ed43e53SRoman Kiryanov dev->buffers = (struct goldfish_pipe_dev_buffers *)
8452ed43e53SRoman Kiryanov __get_free_page(GFP_KERNEL);
8462ed43e53SRoman Kiryanov if (!dev->buffers) {
847726ea1a8SJin Qian kfree(dev->pipes);
84860a6e523SRoman Kiryanov misc_deregister(&dev->miscdev);
849726ea1a8SJin Qian return -ENOMEM;
850726ea1a8SJin Qian }
851726ea1a8SJin Qian
852726ea1a8SJin Qian /* Send the buffer addresses to the host */
853610a72b7SRoman Kiryanov write_pa_addr(&dev->buffers->signalled_pipe_buffers,
854610a72b7SRoman Kiryanov dev->base + PIPE_REG_SIGNAL_BUFFER,
855726ea1a8SJin Qian dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
856610a72b7SRoman Kiryanov
857468e62f9SRoman Kiryanov writel(MAX_SIGNALLED_PIPES,
858726ea1a8SJin Qian dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);
859726ea1a8SJin Qian
860610a72b7SRoman Kiryanov write_pa_addr(&dev->buffers->open_command_params,
861610a72b7SRoman Kiryanov dev->base + PIPE_REG_OPEN_BUFFER,
862726ea1a8SJin Qian dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
863610a72b7SRoman Kiryanov
86408360e26SRoman Kiryanov platform_set_drvdata(pdev, dev);
865726ea1a8SJin Qian return 0;
866726ea1a8SJin Qian }
867726ea1a8SJin Qian
/*
 * Undo goldfish_pipe_device_init(): unregister the misc device first so
 * no new opens arrive, then free the pipe array and the shared buffer
 * page.  The IRQ and the dev structure itself are devm-managed (see
 * devm_request_threaded_irq()/devm_kzalloc()) and released automatically.
 */
static void goldfish_pipe_device_deinit(struct platform_device *pdev,
					struct goldfish_pipe_dev *dev)
{
	misc_deregister(&dev->miscdev);
	kfree(dev->pipes);
	free_page((unsigned long)dev->buffers);
}
875726ea1a8SJin Qian
/*
 * Probe: map the device's MMIO page, obtain its IRQ, perform the
 * driver/device version handshake and finish setup in
 * goldfish_pipe_device_init().
 */
static int goldfish_pipe_probe(struct platform_device *pdev)
{
	struct resource *r;
	struct goldfish_pipe_dev *dev;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	/* The magic marks this dev as ours in the shared IRQ handler */
	dev->magic = &goldfish_pipe_device_deinit;
	spin_lock_init(&dev->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r || resource_size(r) < PAGE_SIZE) {
		dev_err(&pdev->dev, "can't allocate i/o page\n");
		return -EINVAL;
	}
	dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
	if (!dev->base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -EINVAL;
	}

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0)
		return dev->irq;

	/*
	 * Exchange the versions with the host device
	 *
	 * Note: v1 driver used to not report its version, so we write it before
	 * reading device version back: this allows the host implementation to
	 * detect the old driver (if there was no version write before read).
	 */
	writel(PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
	dev->version = readl(dev->base + PIPE_REG_VERSION);
	if (WARN_ON(dev->version < PIPE_CURRENT_DEVICE_VERSION))
		return -EINVAL;

	return goldfish_pipe_device_init(pdev, dev);
}
917c89f2750SDavid 'Digit' Turner
/* Platform-driver remove: tear down everything probe set up. */
static void goldfish_pipe_remove(struct platform_device *pdev)
{
	goldfish_pipe_device_deinit(pdev, platform_get_drvdata(pdev));
}
924c89f2750SDavid 'Digit' Turner
/* ACPI ID this driver binds to (emulator-provided pipe device) */
static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
	{ "GFSH0003", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);
930d62f324bSJason Hu
/* Device-tree compatible string this driver binds to */
static const struct of_device_id goldfish_pipe_of_match[] = {
	{ .compatible = "google,android-pipe", },
	{},
};
MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);
93691a18a41SGreg Hackmann
/* Platform-driver glue: matched via the OF and ACPI tables above */
static struct platform_driver goldfish_pipe_driver = {
	.probe = goldfish_pipe_probe,
	.remove_new = goldfish_pipe_remove,
	.driver = {
		.name = "goldfish_pipe",
		.of_match_table = goldfish_pipe_of_match,
		.acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
	}
};
946c89f2750SDavid 'Digit' Turner
/* Module registration and metadata */
module_platform_driver(goldfish_pipe_driver);
MODULE_AUTHOR("David Turner <digit@google.com>");
MODULE_DESCRIPTION("Goldfish virtual device for QEMU pipes");
MODULE_LICENSE("GPL v2");
951